xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 51ac0e23b5c2b3c84469a0de79c9f027a46d5747)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
102 #include <stdlib.h>
103 #include <stdint.h>
104 #include <stdbool.h>
105 #include <malloc.h>
106 #include "bget.c"		/* this is ugly, but this is bget */
107 #include <util.h>
108 #include <trace.h>
109 
#ifdef __KERNEL__
/* Compiling for TEE Core */
#include <kernel/mutex.h>

/* Single mutex serializing all heap operations and BufStats counters */
static struct mutex malloc_mu = MUTEX_INITIALIZER;

static void malloc_lock(void)
{
	mutex_lock(&malloc_mu);
}

static void malloc_unlock(void)
{
	mutex_unlock(&malloc_mu);
}

#else /*__KERNEL__*/
/* Compiling for TA */

/*
 * No locking when compiling for a TA — the lock/unlock pair are
 * no-ops here (presumably TA heap access is single threaded; confirm
 * against the TA runtime model).
 */
static void malloc_lock(void)
{
}

static void malloc_unlock(void)
{
}
#endif /*__KERNEL__*/
136 
137 #if defined(ENABLE_MDBG)
138 #include <trace.h>
139 #endif
140 
/* Descriptor of one memory region registered with malloc_add_pool() */
struct malloc_pool {
	void *buf;	/* start of the pool */
	size_t len;	/* pool size in bytes */
};

/* Dynamically grown array describing every registered pool */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;	/* number of entries in malloc_pool */
148 
#ifdef BufStats
/* High watermark of totalloc (bget's total-allocated counter) */
static size_t max_alloc_heap;

/*
 * Record the current totalloc value if it exceeds the high watermark.
 * Called after each allocation; caller holds the malloc lock.
 */
static void raw_malloc_save_max_alloced_size(void)
{
	if (totalloc > max_alloc_heap)
		max_alloc_heap = totalloc;
}

/* Reset the allocation high watermark to zero */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}

/* Return the largest amount of heap ever allocated at once */
size_t malloc_get_max_allocated(void)
{
	size_t r;

	malloc_lock();
	r = max_alloc_heap;
	malloc_unlock();
	return r;
}

/* Return the amount of heap currently allocated */
size_t malloc_get_allocated(void)
{
	size_t r;

	malloc_lock();
	r = totalloc;
	malloc_unlock();
	return r;
}

#else /* BufStats */

/* Statistics disabled: all queries report zero and updates are no-ops */
static void raw_malloc_save_max_alloced_size(void)
{
}

void malloc_reset_max_allocated(void)
{
}

size_t malloc_get_max_allocated(void)
{
	return 0;
}

size_t malloc_get_allocated(void)
{
	return 0;
}
#endif /* BufStats */
205 
206 size_t malloc_get_heap_size(void)
207 {
208 	size_t n;
209 	size_t s = 0;
210 
211 	malloc_lock();
212 
213 	for (n = 0; n < malloc_pool_len; n++)
214 		s += malloc_pool[n].len;
215 
216 	malloc_unlock();
217 
218 	return s;
219 }
220 
#ifdef BufValid
/*
 * Run bget's pool validation (bpoolv) on every registered pool.
 * Asserts inside bpoolv() catch heap corruption early.
 */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
/* Validation disabled: no-op */
static void raw_malloc_validate_pools(void)
{
}
#endif
234 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};

/* Position the iterator at the first buffer of the first pool */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
245 
/*
 * Advance the iterator one buffer within the current pool.
 *
 * On success returns true and sets *buf/*len to the payload (past the
 * bhead) and *isfree to whether the buffer is on the free list.
 * Returns false when the pool's end sentinel (ESent) is reached.
 *
 * bget convention: bsize < 0 means allocated, bsize > 0 means free.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload starts just past the allocated-buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	/* Buffers are laid out back to back: next header is bs bytes on */
	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
275 
276 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
277 {
278 	while (true) {
279 		size_t len;
280 		bool isfree;
281 
282 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
283 			if (isfree)
284 				continue;
285 			return true;
286 		}
287 
288 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
289 			return false;
290 
291 		iterator->pool_idx++;
292 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
293 	}
294 }
295 
/*
 * Convenience macro for looping over all allocated (non-free) buffers
 * in every registered pool. bp receives each payload pointer in turn.
 */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)
300 
301 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
302 {
303 	void *ptr;
304 	size_t s = hdr_size + ftr_size + pl_size;
305 
306 	/*
307 	 * Make sure that malloc has correct alignment of returned buffers.
308 	 * The assumption is that uintptr_t will be as wide as the largest
309 	 * required alignment of any type.
310 	 */
311 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
312 
313 	raw_malloc_validate_pools();
314 
315 	/* Check wrapping */
316 	if (s < pl_size)
317 		return NULL;
318 
319 	/* BGET doesn't like 0 sized allocations */
320 	if (!s)
321 		s++;
322 
323 	ptr = bget(s);
324 	raw_malloc_save_max_alloced_size();
325 
326 	return ptr;
327 }
328 
/* Release a buffer previously obtained from raw_malloc()/friends. */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	/* bget's brel() does not accept NULL, so filter it here */
	if (!ptr)
		return;

	brel(ptr);
}
336 
/*
 * Zero-initialized allocation of hdr_size + ftr_size + pl_nmemb * pl_size
 * bytes. Returns NULL on arithmetic overflow or allocation failure.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t pl = 0;
	size_t s = 0;
	void *ptr = NULL;

	raw_malloc_validate_pools();

	/*
	 * Check wrapping. The old test (s < pl_nmemb || s < pl_size) missed
	 * overflow in the multiplication itself, e.g. both factors around
	 * sqrt(SIZE_MAX), so check the product explicitly first.
	 */
	if (pl_size && pl_nmemb > SIZE_MAX / pl_size)
		return NULL;
	pl = pl_nmemb * pl_size;

	s = hdr_size + ftr_size + pl;
	if (s < hdr_size || s < pl)
		return NULL;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	return ptr;
}
358 
359 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
360 		size_t pl_size)
361 {
362 	size_t s = hdr_size + ftr_size + pl_size;
363 	void *p;
364 
365 	/* Check wrapping */
366 	if (s < pl_size)
367 		return NULL;
368 
369 	raw_malloc_validate_pools();
370 
371 	/* BGET doesn't like 0 sized allocations */
372 	if (!s)
373 		s++;
374 
375 	p = bgetr(ptr, s);
376 	raw_malloc_save_max_alloced_size();
377 
378 	return p;
379 }
380 
/*
 * Turn [bf, bf + size) into a free block and link it onto bget's
 * freelist. bn must be the (allocated) block immediately after bf.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list (at the tail) */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
403 
/*
 * Free the region [orig_buf, new_buf) at the front of an allocated
 * buffer, leaving an allocated buffer that starts at new_buf.
 * Used by raw_memalign() to trim over-allocation before the aligned
 * start.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
453 
/*
 * Shrink the allocated buffer at buf down to size bytes (rounded the
 * same way bget() rounds), freeing the tail either as a new free block
 * or by merging it into a following free block.
 * Used by raw_memalign() to trim over-allocation past the aligned end.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	/* b->bsize is negative, so this points past the end of buffer b */
	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
526 
/*
 * Allocate size bytes (plus hdr_size/ftr_size bookkeeping) such that
 * the address hdr_size bytes into the returned buffer is aligned to
 * alignment (a power of two). Over-allocates, then trims the front
 * (brel_before) and tail (brel_after) of the buffer.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	/* Worst case: alignment slack plus room for a free-block header */
	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		return NULL;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Need room for a free-block header in front of p */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);

	raw_malloc_save_max_alloced_size();

	return (void *)b;
}
593 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Return the usable payload size of an allocated bget buffer
 * (total block size minus the bhead/bdhead bookkeeping).
 */
static bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;	/* bsize is negative for allocated buffers */
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
615 
#ifdef ENABLE_MDBG

/*
 * Debug header placed in front of every allocation when malloc
 * debugging is enabled. A footer magic is placed after the payload
 * so both under- and overruns can be detected.
 */
struct mdbg_hdr {
	const char *fname;	/* file that made the allocation */
	uint16_t line;		/* line number of the allocation */
	uint32_t pl_size;	/* payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	uint64_t pad;		/* keep header a multiple of 16 bytes */
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec
630 
/*
 * Size of the footer area for a payload of pl_size bytes: padding up
 * to the next uint32_t boundary plus the uint32_t footer magic itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t rounded = ROUNDUP(pl_size, sizeof(uint32_t));

	return rounded - pl_size + sizeof(uint32_t);
}
637 
638 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
639 {
640 	uint32_t *footer;
641 
642 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
643 			      mdbg_get_ftr_size(hdr->pl_size));
644 	footer--;
645 	return footer;
646 }
647 
/*
 * Fill in the debug header for a (re)allocation and write the footer
 * magic after the payload.
 */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
661 
662 void *mdbg_malloc(const char *fname, int lineno, size_t size)
663 {
664 	struct mdbg_hdr *hdr;
665 
666 	malloc_lock();
667 
668 	/*
669 	 * Check struct mdbg_hdr doesn't get bad alignment.
670 	 * This is required by C standard: the buffer returned from
671 	 * malloc() should be aligned with a fundamental alignment.
672 	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
673 	 */
674 	COMPILE_TIME_ASSERT(
675 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
676 
677 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
678 			  mdbg_get_ftr_size(size), size);
679 	if (hdr) {
680 		mdbg_update_hdr(hdr, fname, lineno, size);
681 		hdr++;
682 	}
683 
684 	malloc_unlock();
685 	return hdr;
686 }
687 
/* Verify both magic words are intact; trips an assert on corruption */
static void assert_header(struct mdbg_hdr *hdr)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
693 
694 static void mdbg_free(void *ptr)
695 {
696 	struct mdbg_hdr *hdr = ptr;
697 
698 	if (hdr) {
699 		hdr--;
700 		assert_header(hdr);
701 		hdr->magic = 0;
702 		*mdbg_get_footer(hdr) = 0;
703 		raw_free(hdr);
704 	}
705 }
706 
/* Public free(): locked wrapper around mdbg_free() */
void free(void *ptr)
{
	malloc_lock();
	mdbg_free(ptr);
	malloc_unlock();
}
713 
714 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
715 {
716 	struct mdbg_hdr *hdr;
717 
718 	malloc_lock();
719 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
720 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
721 	if (hdr) {
722 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
723 		hdr++;
724 	}
725 	malloc_unlock();
726 	return hdr;
727 }
728 
/*
 * Debug realloc() body, caller holds the malloc lock.
 * Validates the existing header (if any), resizes including the debug
 * header/footer, and refreshes the bookkeeping on success.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		/* Step back to the debug header and sanity-check it */
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}
	return hdr;
}
746 
/* Public debug realloc(): locked wrapper around the unlocked body */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *p;

	malloc_lock();
	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock();
	return p;
}

/*
 * Internal-use realloc for this file (e.g. malloc_add_pool()), tagged
 * with this file/line in the debug header.
 */
#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
759 
760 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
761 		size_t size)
762 {
763 	struct mdbg_hdr *hdr;
764 
765 	malloc_lock();
766 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
767 			   alignment, size);
768 	if (hdr) {
769 		mdbg_update_hdr(hdr, fname, lineno, size);
770 		hdr++;
771 	}
772 	malloc_unlock();
773 	return hdr;
774 }
775 
776 
/*
 * Given a raw bget buffer holding a debug allocation, return the
 * caller-visible payload pointer and store its size in *size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The underlying bget buffer must at least hold the payload */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
785 
/*
 * Walk every allocated buffer, asserting header/footer integrity.
 * If bufdump > 0 also log each allocation with its origin file:line.
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;

	malloc_lock();
	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			DMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock();
}
812 
813 #else
814 
815 void *malloc(size_t size)
816 {
817 	void *p;
818 
819 	malloc_lock();
820 	p = raw_malloc(0, 0, size);
821 	malloc_unlock();
822 	return p;
823 }
824 
/* Standard free(): locked wrapper around raw_free() */
void free(void *ptr)
{
	malloc_lock();
	raw_free(ptr);
	malloc_unlock();
}
831 
832 void *calloc(size_t nmemb, size_t size)
833 {
834 	void *p;
835 
836 	malloc_lock();
837 	p = raw_calloc(0, 0, nmemb, size);
838 	malloc_unlock();
839 	return p;
840 }
841 
/* realloc() body for callers that already hold the malloc lock */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}

/* Standard realloc(): locked wrapper around realloc_unlocked() */
void *realloc(void *ptr, size_t size)
{
	void *p;

	malloc_lock();
	p = realloc_unlocked(ptr, size);
	malloc_unlock();
	return p;
}
856 
857 void *memalign(size_t alignment, size_t size)
858 {
859 	void *p;
860 
861 	malloc_lock();
862 	p = raw_memalign(0, 0, alignment, size);
863 	malloc_unlock();
864 	return p;
865 }
866 
/*
 * Without mdbg the raw bget buffer is the payload itself; report its
 * size straight from the bget header.
 */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
872 
873 #endif
874 
/*
 * Register [buf, buf + len) as a heap pool. The range is shrunk to
 * SizeQuant alignment and silently skipped if too small to hold the
 * pool bookkeeping plus bget's two block headers.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Room for one malloc_pool entry (rounded up) + bget's headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	malloc_lock();
	/* Hand the region to bget, then record it in malloc_pool[] */
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	malloc_unlock();
}
906 
/*
 * Return true if [buf, buf + len) lies entirely inside a single
 * allocated heap buffer's payload, false otherwise.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;

	malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	/* Scan every allocated buffer for one that contains the range */
	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock();

	return ret;
}
942 
943 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
944 {
945 	uintptr_t buf_start = (uintptr_t) buf;
946 	uintptr_t buf_end = buf_start + len;
947 	size_t n;
948 	bool ret = false;
949 
950 	malloc_lock();
951 
952 	raw_malloc_validate_pools();
953 
954 	for (n = 0; n < malloc_pool_len; n++) {
955 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
956 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
957 
958 		if (buf_start > buf_end || pool_start > pool_end) {
959 			ret = true;	/* Wrapping buffers, shouldn't happen */
960 			goto out;
961 		}
962 
963 		if (buf_end > pool_start || buf_start < pool_end) {
964 			ret = true;
965 			goto out;
966 		}
967 	}
968 
969 out:
970 	malloc_unlock();
971 	return ret;
972 }
973