xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 4508233595eb5226a7f2fd0e94c4d5b4ef37fa9b)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #define SizeQuant   8		/* Buffer allocation size quantum:
44 				   all buffers allocated are a
45 				   multiple of this size.  This
46 				   MUST be a power of two. */
47 
48 #ifdef BGET_ENABLE_OPTION
49 #define BufDump     1		/* Define this symbol to enable the
50 				   bpoold() function which dumps the
51 				   buffers in a buffer pool. */
52 
53 #define BufValid    1		/* Define this symbol to enable the
54 				   bpoolv() function for validating
55 				   a buffer pool. */
56 
57 #define DumpData    1		/* Define this symbol to enable the
58 				   bufdump() function which allows
59 				   dumping the contents of an allocated
60 				   or free buffer. */
61 
62 #define BufStats    1		/* Define this symbol to enable the
63 				   bstats() function which calculates
64 				   the total free space in the buffer
65 				   pool, the largest available
66 				   buffer, and the total space
67 				   currently allocated. */
68 
69 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
70 				   pattern of garbage to trip up
71 				   miscreants who attempt to use
72 				   pointers into released buffers. */
73 
74 #define BestFit     1		/* Use a best fit algorithm when
75 				   searching for space for an
76 				   allocation request.  This uses
77 				   memory more efficiently, but
78 				   allocation will be much slower. */
79 
80 #define BECtl       1		/* Define this symbol to enable the
81 				   bectl() function for automatic
82 				   pool space control.  */
83 #endif
84 
85 #ifdef MEM_DEBUG
86 #undef NDEBUG
87 #define DumpData    1
88 #define BufValid    1
89 #define FreeWipe    1
90 #endif
91 
92 #if defined(CFG_TEE_CORE_DEBUG) && CFG_TEE_CORE_DEBUG != 0
93 #define BufStats    1
94 #endif
95 
96 #include <stdlib.h>
97 #include <stdint.h>
98 #include <stdbool.h>
99 #include <malloc.h>
100 #include "bget.c"		/* this is ugly, but this is bget */
101 #include <util.h>
102 
#ifdef __KERNEL__
/* Compiling for TEE Core */
#include <kernel/mutex.h>

/* Single global lock serializing every heap operation in the TEE core */
static struct mutex malloc_mu = MUTEX_INITIALIZER;

static void malloc_lock(void)
{
	mutex_lock(&malloc_mu);
}

static void malloc_unlock(void)
{
	mutex_unlock(&malloc_mu);
}

#else /*__KERNEL__*/
/* Compiling for TA */
/*
 * Locking is a no-op here.
 * NOTE(review): assumes a TA never calls the allocator from more than
 * one thread at a time — confirm against the TA threading model.
 */
static void malloc_lock(void)
{
}

static void malloc_unlock(void)
{
}
#endif /*__KERNEL__*/
129 
/* Descriptor for one contiguous memory region handed to bget via bpool() */
struct malloc_pool {
	void *buf;	/* start of the pool (SizeQuant aligned) */
	size_t len;	/* size of the pool in bytes */
};

/* Dynamic array of all registered pools, grown by malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;	/* number of entries in malloc_pool */
137 
#ifdef BufStats
/* High watermark of totalloc (bytes currently allocated, from bget.c) */
static size_t max_alloc_heap;

/* Updates the high watermark; called with the malloc lock held */
static void raw_malloc_save_max_alloced_size(void)
{
	if (totalloc > max_alloc_heap)
		max_alloc_heap = totalloc;
}

/* Resets the allocation high watermark to zero */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}

/* Returns the largest number of bytes seen allocated at once */
size_t malloc_get_max_allocated(void)
{
	size_t r;

	malloc_lock();
	r = max_alloc_heap;
	malloc_unlock();
	return r;
}

/* Returns the number of bytes currently allocated from the heap */
size_t malloc_get_allocated(void)
{
	size_t r;

	malloc_lock();
	r = totalloc;
	malloc_unlock();
	return r;
}

#else /* BufStats */

/* Statistics disabled: stubs that keep the API available */
static void raw_malloc_save_max_alloced_size(void)
{
}

void malloc_reset_max_allocated(void)
{
}

size_t malloc_get_max_allocated(void)
{
	return 0;
}

size_t malloc_get_allocated(void)
{
	return 0;
}
#endif /* BufStats */
194 
195 size_t malloc_get_heap_size(void)
196 {
197 	size_t n;
198 	size_t s = 0;
199 
200 	malloc_lock();
201 
202 	for (n = 0; n < malloc_pool_len; n++)
203 		s += malloc_pool[n].len;
204 
205 	malloc_unlock();
206 
207 	return s;
208 }
209 
#ifdef BufValid
/* Runs bget's consistency check (bpoolv()) on every registered pool */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Pool validation disabled: no-op */
static void raw_malloc_validate_pools(void)
{
}
#endif
223 
/*
 * Iteration state for walking every buffer in every pool:
 * next_buf is the header of the next buffer to visit in pool pool_idx.
 */
struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};
228 
229 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
230 {
231 	iterator->pool_idx = 0;
232 	iterator->next_buf = BFH(malloc_pool[0].buf);
233 }
234 
/*
 * Steps the iterator over one buffer of the current pool. Outputs the
 * buffer's payload pointer, payload length and whether it is free.
 * Returns false when the pool's end sentinel (ESent) is reached.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* bget marks the end of a pool with the ESent size value */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: bget stores its size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* The payload starts right after the plain buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	/* The next buffer header lies bs bytes further on */
	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
264 
265 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
266 {
267 	while (true) {
268 		size_t len;
269 		bool isfree;
270 
271 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
272 			if (isfree)
273 				continue;
274 			return true;
275 		}
276 
277 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
278 			return false;
279 
280 		iterator->pool_idx++;
281 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
282 	}
283 }
284 
/*
 * Convenience macro for looping over all allocated (non-free) buffers;
 * (bp) receives each payload pointer in turn.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
289 
290 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
291 {
292 	void *ptr;
293 	size_t s = hdr_size + ftr_size + pl_size;
294 
295 	malloc_lock();
296 
297 	/*
298 	 * Make sure that malloc has correct alignment of returned buffers.
299 	 * The assumption is that uintptr_t will be as wide as the largest
300 	 * required alignment of any type.
301 	 */
302 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
303 
304 	raw_malloc_validate_pools();
305 
306 	/* Check wrapping */
307 	if (s < pl_size)
308 		return NULL;
309 
310 	/* BGET doesn't like 0 sized allocations */
311 	if (!s)
312 		s++;
313 
314 	ptr = bget(s);
315 	raw_malloc_save_max_alloced_size();
316 
317 	malloc_unlock();
318 	return ptr;
319 }
320 
/*
 * Releases a buffer obtained from one of the raw_*() allocators.
 * Freeing NULL is a no-op.
 */
static void raw_free(void *ptr)
{
	malloc_lock();

	raw_malloc_validate_pools();

	/* Only hand real allocations to bget's brel() */
	if (ptr != NULL)
		brel(ptr);

	malloc_unlock();
}
332 
/*
 * Allocates and zero-fills hdr_size + ftr_size + pl_nmemb * pl_size
 * bytes. Returns NULL on failure or arithmetic overflow.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl = 0;
	size_t s = 0;
	void *ptr = NULL;

	/*
	 * Check all arithmetic before taking the lock: the previous code
	 * returned from here with the lock still held. Its test
	 * (s < pl_nmemb || s < pl_size) also missed most overflowing
	 * products, so check the multiplication explicitly.
	 */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		return NULL;
	pl = pl_nmemb * pl_size;
	s = hdr_size + ftr_size + pl;
	if (s < pl || hdr_size + ftr_size < hdr_size)
		return NULL;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	malloc_lock();

	raw_malloc_validate_pools();

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	malloc_unlock();

	return ptr;
}
358 
/*
 * Resizes an allocation to hold hdr_size + ftr_size + pl_size bytes.
 * Returns NULL on failure or if the total size wraps around.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	void *new_ptr = NULL;
	size_t total = hdr_size + ftr_size + pl_size;

	/* Reject a total size that wrapped around */
	if (total < pl_size)
		return NULL;

	/* BGET doesn't like 0 sized allocations */
	if (total == 0)
		total = 1;

	malloc_lock();

	raw_malloc_validate_pools();

	new_ptr = bgetr(ptr, total);
	raw_malloc_save_max_alloced_size();

	malloc_unlock();

	return new_ptr;
}
384 
/*
 * Turns the memory at bf (size bytes, ending exactly at the allocated
 * block bn) into a bget free block: writes the free-buffer header,
 * records the free space in bn->prevfree and links bf last on the
 * free list.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
407 
/*
 * Releases the front part [orig_buf, new_buf) of the allocated buffer
 * that starts at orig_buf, making new_buf the start of a smaller
 * allocated buffer. The freed part is merged into a preceding free
 * block if one exists, otherwise it becomes a new free block.
 * Used by raw_memalign() to discard alignment padding.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	/* The released front part no longer counts as allocated */
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
457 
/*
 * Shrinks the allocated buffer starting at buf so that it holds only
 * "size" payload bytes (rounded up the same way bget() rounds), and
 * releases the surplus at the end. The surplus is merged with a
 * following free block if possible, otherwise it becomes a new free
 * block. Used by raw_memalign() to trim the over-allocation.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	/* The released tail no longer counts as allocated */
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
530 
531 static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
532 		size_t size)
533 {
534 	size_t s;
535 	uintptr_t b;
536 
537 	malloc_lock();
538 
539 	raw_malloc_validate_pools();
540 
541 	if (!IS_POWER_OF_TWO(alignment))
542 		return NULL;
543 
544 	/*
545 	 * Normal malloc with headers always returns something SizeQuant
546 	 * aligned.
547 	 */
548 	if (alignment <= SizeQuant)
549 		return raw_malloc(hdr_size, ftr_size, size);
550 
551 	s = hdr_size + ftr_size + alignment + size +
552 	    SizeQ + sizeof(struct bhead);
553 
554 	/* Check wapping */
555 	if (s < alignment || s < size)
556 		return NULL;
557 
558 	b = (uintptr_t)bget(s);
559 	if (!b)
560 		return NULL;
561 
562 	if ((b + hdr_size) & (alignment - 1)) {
563 		/*
564 		 * Returned buffer is not aligned as requested if the
565 		 * hdr_size is added. Find an offset into the buffer
566 		 * that is far enough in to the buffer to be able to free
567 		 * what's in front.
568 		 */
569 		uintptr_t p;
570 
571 		/*
572 		 * Find the point where the buffer including supplied
573 		 * header size should start.
574 		 */
575 		p = b + hdr_size + alignment;
576 		p &= ~(alignment - 1);
577 		p -= hdr_size;
578 		if ((p - b) < (SizeQ + sizeof(struct bhead)))
579 			p += alignment;
580 		assert((p + hdr_size + ftr_size + size) <= (b + s));
581 
582 		/* Free the front part of the buffer */
583 		brel_before((void *)b, (void *)p);
584 
585 		/* Set the new start of the buffer */
586 		b = p;
587 	}
588 
589 	/*
590 	 * Since b is now aligned, release what we don't need at the end of
591 	 * the buffer.
592 	 */
593 	brel_after((void *)b, hdr_size + ftr_size + size);
594 
595 	raw_malloc_save_max_alloced_size();
596 
597 	malloc_unlock();
598 
599 	return (void *)b;
600 }
601 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Returns the usable payload size in bytes of an allocated bget buffer
 * (excluding the buffer header).
 */
static bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;	/* allocated sizes are stored negated */
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
623 
624 #ifdef ENABLE_MDBG
625 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG is
 * set. Records the allocation site so mdbg_check() can report buffers
 * that are still allocated.
 */
struct mdbg_hdr {
	const char *fname;	/* allocating source file, or NULL */
	uint16_t line;		/* allocating source line */
	bool ignore;		/* true: don't report as orphaned */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
};

/* Magic values guarding each allocation; cleared on free */
#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

/* TODO make this a per thread variable */
static enum mdbg_mode mdbg_mode = MDBG_MODE_DYNAMIC;
639 
/*
 * Size of the footer stored after a pl_size byte payload: padding up
 * to the next uint32_t boundary plus one uint32_t for the magic word.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t padded = ROUNDUP(pl_size, sizeof(uint32_t));

	return padded - pl_size + sizeof(uint32_t);
}
646 
647 
648 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
649 {
650 	uint32_t *footer;
651 
652 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
653 			      mdbg_get_ftr_size(hdr->pl_size));
654 	footer--;
655 	return footer;
656 }
657 
658 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
659 		int lineno, size_t pl_size)
660 {
661 	uint32_t *footer;
662 
663 	hdr->fname = fname;
664 	hdr->line = lineno;
665 	hdr->pl_size = pl_size;
666 	hdr->magic = MDBG_HEADER_MAGIC;
667 	hdr->ignore = mdbg_mode == MDBG_MODE_STATIC;
668 
669 	footer = mdbg_get_footer(hdr);
670 	*footer = MDBG_FOOTER_MAGIC;
671 }
672 
673 void *mdbg_malloc(const char *fname, int lineno, size_t size)
674 {
675 	struct mdbg_hdr *hdr;
676 
677 	COMPILE_TIME_ASSERT(sizeof(struct mdbg_hdr) == sizeof(uint32_t) * 4);
678 
679 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
680 			  mdbg_get_ftr_size(size), size);
681 	if (hdr) {
682 		mdbg_update_hdr(hdr, fname, lineno, size);
683 		hdr++;
684 	}
685 	return hdr;
686 }
687 
/*
 * Panics (in debug builds) if hdr's header or footer magic has been
 * corrupted. Both checks live inside assert() so they compile away
 * entirely under NDEBUG.
 */
static void assert_header(struct mdbg_hdr *hdr)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
693 
694 void mdbg_free(void *ptr)
695 {
696 	struct mdbg_hdr *hdr = ptr;
697 
698 	if (hdr) {
699 		hdr--;
700 		assert_header(hdr);
701 		hdr->magic = 0;
702 		*mdbg_get_footer(hdr) = 0;
703 		raw_free(hdr);
704 	}
705 }
706 
707 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
708 {
709 	struct mdbg_hdr *hdr;
710 
711 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
712 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
713 	if (hdr) {
714 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
715 		hdr++;
716 	}
717 	return hdr;
718 }
719 
720 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
721 {
722 	struct mdbg_hdr *hdr = ptr;
723 
724 	if (hdr) {
725 		hdr--;
726 		assert_header(hdr);
727 	}
728 	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
729 			   mdbg_get_ftr_size(size), size);
730 	if (hdr) {
731 		mdbg_update_hdr(hdr, fname, lineno, size);
732 		hdr++;
733 	}
734 	return hdr;
735 }
736 
737 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
738 		size_t size)
739 {
740 	struct mdbg_hdr *hdr;
741 
742 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
743 			   alignment, size);
744 	if (hdr) {
745 		mdbg_update_hdr(hdr, fname, lineno, size);
746 		hdr++;
747 	}
748 	return hdr;
749 }
750 
751 
752 static void *get_payload_start_size(void *raw_buf, size_t *size)
753 {
754 	struct mdbg_hdr *hdr = raw_buf;
755 
756 	assert(bget_buf_size(hdr) >= hdr->pl_size);
757 	*size = hdr->pl_size;
758 	return hdr + 1;
759 }
760 
761 void mdbg_check(int bufdump)
762 {
763 	struct bpool_iterator itr;
764 	void *b;
765 
766 	raw_malloc_validate_pools();
767 
768 	BPOOL_FOREACH(&itr, &b) {
769 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
770 
771 		assert_header(hdr);
772 
773 		if (bufdump > 0 || !hdr->ignore) {
774 			const char *fname = hdr->fname;
775 
776 			if (!fname)
777 				fname = "unknown";
778 
779 			DMSG("%s buffer: %d bytes %s:%d\n",
780 				hdr->ignore ? "Ignore" : "Orphaned",
781 				hdr->pl_size, fname, hdr->line);
782 		}
783 	}
784 
785 }
786 
787 enum mdbg_mode mdbg_set_mode(enum mdbg_mode mode)
788 {
789 	enum mdbg_mode old_mode = mdbg_mode;
790 
791 	mdbg_mode = mode;
792 	return old_mode;
793 }
794 
795 #else
796 
/*
 * Standard allocator entry points used when memory debugging
 * (ENABLE_MDBG) is disabled: thin wrappers around the raw_*()
 * functions with zero-sized debug header and footer.
 */
void *malloc(size_t size)
{
	return raw_malloc(0, 0, size);
}

void free(void *ptr)
{
	raw_free(ptr);
}

void *calloc(size_t nmemb, size_t size)
{
	return raw_calloc(0, 0, nmemb, size);
}

void *realloc(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}

void *memalign(size_t alignment, size_t size)
{
	return raw_memalign(0, 0, alignment, size);
}

/* Without debug headers the payload is the bget buffer itself */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
827 
828 #endif
829 
830 
831 
/*
 * Registers the initial heap region. Must be called exactly once,
 * before any allocation; further regions go through malloc_add_pool().
 */
void malloc_init(void *buf, size_t len)
{
	/* Must not be called twice */
	assert(!malloc_pool);

	malloc_add_pool(buf, len);
}
839 
/*
 * Hands a new memory region to bget (via bpool()) and records it in
 * the malloc_pool array so the iteration/validation helpers find it.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/*
	 * The bookkeeping realloc() below shouldn't be reported as a
	 * leak, so switch to static mode while it is made.
	 * NOTE(review): mdbg_set_mode() is only defined here when
	 * ENABLE_MDBG is set — presumably a stub exists in malloc.h for
	 * other builds; confirm.
	 */
	enum mdbg_mode old_mode = mdbg_set_mode(MDBG_MODE_STATIC);

	/* bget pools must be SizeQuant aligned in start and length */
	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	bpool((void *)start, end - start);

	/*
	 * Grow the pool descriptor array by one entry.
	 * NOTE(review): malloc_pool/malloc_pool_len are updated without
	 * holding the malloc lock — confirm callers serialize this.
	 */
	l = malloc_pool_len + 1;
	p = realloc(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	mdbg_set_mode(old_mode);
}
863 
864 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
865 {
866 	struct bpool_iterator itr;
867 	void *b;
868 	uint8_t *start_buf = buf;
869 	uint8_t *end_buf = start_buf + len;
870 	bool ret = false;
871 
872 	malloc_lock();
873 
874 	raw_malloc_validate_pools();
875 
876 	/* Check for wrapping */
877 	if (start_buf > end_buf)
878 		goto out;
879 
880 	BPOOL_FOREACH(&itr, &b) {
881 		uint8_t *start_b;
882 		uint8_t *end_b;
883 		size_t s;
884 
885 		start_b = get_payload_start_size(b, &s);
886 		end_b = start_b + s;
887 
888 		if (start_buf >= start_b && end_buf <= end_b) {
889 			ret = true;
890 			goto out;
891 		}
892 	}
893 
894 out:
895 	malloc_unlock();
896 
897 	return ret;
898 }
899 
900 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
901 {
902 	uintptr_t buf_start = (uintptr_t) buf;
903 	uintptr_t buf_end = buf_start + len;
904 	size_t n;
905 	bool ret = false;
906 
907 	malloc_lock();
908 
909 	raw_malloc_validate_pools();
910 
911 	for (n = 0; n < malloc_pool_len; n++) {
912 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
913 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
914 
915 		if (buf_start > buf_end || pool_start > pool_end) {
916 			ret = true;	/* Wrapping buffers, shouldn't happen */
917 			goto out;
918 		}
919 
920 		if (buf_end > pool_start || buf_start < pool_end) {
921 			ret = true;
922 			goto out;
923 		}
924 	}
925 
926 out:
927 	malloc_unlock();
928 	return ret;
929 }
930