xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision 1d171f95db3ea1187f5ff0ae98c49ce400fc8eb6)
1 /*
2  * Copyright (c) 2014, STMicroelectronics International N.V.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  * this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  * this list of conditions and the following disclaimer in the documentation
13  * and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
19  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
20  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
21  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
22  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
23  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
24  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
25  * POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #define PROTOTYPES
29 
30 /*
31  *  BGET CONFIGURATION
32  *  ==================
33  */
34 /* #define BGET_ENABLE_ALL_OPTIONS */
35 #ifdef BGET_ENABLE_OPTION
36 #define TestProg    20000	/* Generate built-in test program
37 				   if defined.  The value specifies
38 				   how many buffer allocation attempts
39 				   the test program should make. */
40 #endif
41 
42 
43 #ifdef __LP64__
44 #define SizeQuant   16
45 #endif
46 #ifdef __ILP32__
47 #define SizeQuant   8
48 #endif
49 				/* Buffer allocation size quantum:
50 				   all buffers allocated are a
51 				   multiple of this size.  This
52 				   MUST be a power of two. */
53 
54 #ifdef BGET_ENABLE_OPTION
55 #define BufDump     1		/* Define this symbol to enable the
56 				   bpoold() function which dumps the
57 				   buffers in a buffer pool. */
58 
59 #define BufValid    1		/* Define this symbol to enable the
60 				   bpoolv() function for validating
61 				   a buffer pool. */
62 
63 #define DumpData    1		/* Define this symbol to enable the
64 				   bufdump() function which allows
65 				   dumping the contents of an allocated
66 				   or free buffer. */
67 
68 #define BufStats    1		/* Define this symbol to enable the
69 				   bstats() function which calculates
70 				   the total free space in the buffer
71 				   pool, the largest available
72 				   buffer, and the total space
73 				   currently allocated. */
74 
75 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
76 				   pattern of garbage to trip up
77 				   miscreants who attempt to use
78 				   pointers into released buffers. */
79 
80 #define BestFit     1		/* Use a best fit algorithm when
81 				   searching for space for an
82 				   allocation request.  This uses
83 				   memory more efficiently, but
84 				   allocation will be much slower. */
85 
86 #define BECtl       1		/* Define this symbol to enable the
87 				   bectl() function for automatic
88 				   pool space control.  */
89 #endif
90 
91 #ifdef MEM_DEBUG
92 #undef NDEBUG
93 #define DumpData    1
94 #define BufValid    1
95 #define FreeWipe    1
96 #endif
97 
98 #ifdef CFG_WITH_STATS
99 #define BufStats    1
100 #endif
101 
#include <compiler.h>
#include <inttypes.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <trace.h>
#include <util.h>
109 
110 #if defined(__KERNEL__)
111 /* Compiling for TEE Core */
112 #include <kernel/asan.h>
113 #include <kernel/mutex.h>
114 
/* Serialize heap access in TEE Core with the global malloc mutex */
static void malloc_lock(void)
{
	mutex_lock(&__malloc_mu);
}
119 
/* Release the global malloc mutex taken by malloc_lock() */
static void malloc_unlock(void)
{
	mutex_unlock(&__malloc_mu);
}
124 
/* Tag [buf, buf + len) as freed heap memory for ASan */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
129 
/* Tag [buf, buf + len) as accessible (allocated) memory for ASan */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}
134 
135 #else /*__KERNEL__*/
136 /* Compiling for TA */
/* No-op in TA builds — NOTE(review): presumably TAs need no heap lock */
static void malloc_lock(void)
{
}
140 
/* No-op counterpart of malloc_lock() for TA builds */
static void malloc_unlock(void)
{
}
144 
/* ASan tagging is only available in TEE Core; no-op for TAs */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}
148 
/* ASan tagging is only available in TEE Core; no-op for TAs */
static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}
152 #endif /*__KERNEL__*/
153 
154 #include "bget.c"		/* this is ugly, but this is bget */
155 
/* One memory region registered with bget via malloc_add_pool() */
struct malloc_pool {
	void *buf;	/* start of pool, rounded up to SizeQuant */
	size_t len;	/* usable pool length in bytes */
};
160 
161 static struct malloc_pool *malloc_pool;
162 static size_t malloc_pool_len;
163 
164 #ifdef BufStats
165 static size_t max_alloc_heap;
166 
167 static void raw_malloc_save_max_alloced_size(void)
168 {
169 	if (totalloc > max_alloc_heap)
170 		max_alloc_heap = totalloc;
171 }
172 
/* Reset the recorded heap allocation high-water mark to zero */
void malloc_reset_max_allocated(void)
{
	malloc_lock();
	max_alloc_heap = 0;
	malloc_unlock();
}
179 
180 size_t malloc_get_max_allocated(void)
181 {
182 	size_t r;
183 
184 	malloc_lock();
185 	r = max_alloc_heap;
186 	malloc_unlock();
187 	return r;
188 }
189 
190 size_t malloc_get_allocated(void)
191 {
192 	size_t r;
193 
194 	malloc_lock();
195 	r = totalloc;
196 	malloc_unlock();
197 	return r;
198 }
199 
200 #else /* BufStats */
201 
/* Stub: buffer statistics compiled out (BufStats undefined) */
static void raw_malloc_save_max_alloced_size(void)
{
}
205 
/* Stub: buffer statistics compiled out (BufStats undefined) */
void malloc_reset_max_allocated(void)
{
}
209 
/* Stub: always 0 when buffer statistics are compiled out */
size_t malloc_get_max_allocated(void)
{
	return 0;
}
214 
/* Stub: always 0 when buffer statistics are compiled out */
size_t malloc_get_allocated(void)
{
	return 0;
}
219 #endif /* BufStats */
220 
221 size_t malloc_get_heap_size(void)
222 {
223 	size_t n;
224 	size_t s = 0;
225 
226 	malloc_lock();
227 
228 	for (n = 0; n < malloc_pool_len; n++)
229 		s += malloc_pool[n].len;
230 
231 	malloc_unlock();
232 
233 	return s;
234 }
235 
236 #ifdef BufValid
237 static void raw_malloc_validate_pools(void)
238 {
239 	size_t n;
240 
241 	for (n = 0; n < malloc_pool_len; n++)
242 		bpoolv(malloc_pool[n].buf);
243 }
244 #else
/* Stub: pool validation compiled out (BufValid undefined) */
static void raw_malloc_validate_pools(void)
{
}
248 #endif
249 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};
254 
255 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
256 {
257 	iterator->pool_idx = 0;
258 	iterator->next_buf = BFH(malloc_pool[0].buf);
259 }
260 
/*
 * Report the buffer at iterator->next_buf and advance the iterator
 * within the current pool. Returns false when the pool's end sentinel
 * (ESent) is reached; otherwise fills in the payload pointer, payload
 * length and whether the buffer is free.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	/* The sentinel header marks the end of a pool */
	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer: bget stores its size negated */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	/* Payload begins right after the plain buffer header */
	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	/* Headers are laid out back to back: step over this buffer */
	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
290 
/*
 * Advance the iterator to the next *allocated* buffer, moving on to
 * the following pool when the current one is exhausted. Returns false
 * once every pool has been walked.
 */
static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			/* Skip free buffers, only report allocated ones */
			if (isfree)
				continue;
			return true;
		}

		/* Current pool done; stop if it was the last one */
		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}
310 
311 /* Convenience macro for looping over all allocated buffers */
312 #define BPOOL_FOREACH(iterator, bp) \
313 		for (bpool_foreach_iterator_init((iterator)); \
314 			bpool_foreach((iterator), (bp));)
315 
316 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size)
317 {
318 	void *ptr;
319 	size_t s = hdr_size + ftr_size + pl_size;
320 
321 	/*
322 	 * Make sure that malloc has correct alignment of returned buffers.
323 	 * The assumption is that uintptr_t will be as wide as the largest
324 	 * required alignment of any type.
325 	 */
326 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
327 
328 	raw_malloc_validate_pools();
329 
330 	/* Check wrapping */
331 	if (s < pl_size)
332 		return NULL;
333 
334 	/* BGET doesn't like 0 sized allocations */
335 	if (!s)
336 		s++;
337 
338 	ptr = bget(s);
339 	raw_malloc_save_max_alloced_size();
340 
341 	return ptr;
342 }
343 
/* Release a buffer obtained from raw_malloc(); NULL is a no-op */
static void raw_free(void *ptr)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr);
}
351 
/*
 * Allocate and zero hdr_size + ftr_size + pl_nmemb * pl_size bytes.
 * Returns NULL if the multiplication or the additions wrap, or when
 * bget is out of memory.
 */
static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		size_t pl_size)
{
	size_t s = 0;
	void *ptr = NULL;

	raw_malloc_validate_pools();

	/*
	 * Check multiplication wrapping first: the old test on the final
	 * sum could miss a wrapped pl_nmemb * pl_size product.
	 */
	if (pl_nmemb && pl_size > SIZE_MAX / pl_nmemb)
		return NULL;

	s = hdr_size + ftr_size + pl_nmemb * pl_size;

	/* Check addition wrapping */
	if (s < hdr_size || s < ftr_size || s < pl_nmemb * pl_size)
		return NULL;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s);
	raw_malloc_save_max_alloced_size();

	return ptr;
}
373 
/*
 * Resize a bget allocation to hdr_size + ftr_size + pl_size bytes.
 * Returns NULL if the size sum wraps or bget cannot satisfy the
 * request.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		size_t pl_size)
{
	size_t total = hdr_size + ftr_size + pl_size;
	void *p = NULL;

	/* Reject a wrapped size sum */
	if (total < pl_size)
		return NULL;

	raw_malloc_validate_pools();

	/* BGET doesn't like 0 sized allocations */
	if (!total)
		total = 1;

	p = bgetr(ptr, total);
	raw_malloc_save_max_alloced_size();

	return p;
}
395 
/*
 * Turn the 'size' bytes at bf — ending exactly at the allocated block
 * bn — into a free block: write its headers, record its size in bn's
 * prevfree field and link it last on the bget free list.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(freelist.ql.blink->ql.flink == &freelist);
	assert(freelist.ql.flink->ql.blink == &freelist);
	bf->ql.flink = &freelist;
	bf->ql.blink = freelist.ql.blink;
	freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
418 
/*
 * Free the region [orig_buf, new_buf) in front of an allocated bget
 * buffer, making new_buf the new start of the allocation. Used by
 * raw_memalign() to return the alignment slack at the front. new_buf
 * must leave room for a free-block header in front of it.
 */
static void brel_before(char *orig_buf, char *new_buf)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b);
	}

#ifdef BufStats
	totalloc -= size;
	assert(totalloc >= 0);
#endif
}
468 
/*
 * Shrink the allocated buffer at buf so that only 'size' payload bytes
 * (rounded the same way bget() rounds requests) stay allocated; the
 * tail is turned into a new free block or merged with the following
 * free block. Used by raw_memalign() to return the tail slack.
 */
static void brel_after(char *buf, bufsize size)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn);
	}

#ifdef BufStats
	totalloc -= free_size;
	assert(totalloc >= 0);
#endif

}
541 
/*
 * Allocate a buffer whose payload (hdr_size bytes into the returned
 * pointer) is aligned to 'alignment' (must be a power of two). The
 * request is over-allocated, then the unused front and tail are given
 * back with brel_before()/brel_after().
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		size_t size)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s);
	if (!b)
		return NULL;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		/* Keep room in front for the free-block header */
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size);

	raw_malloc_save_max_alloced_size();

	return (void *)b;
}
608 
/*
 * Return the usable payload size of a buffer allocated by bget().
 * Most of the stuff in this function is copied from bgetr() in bget.c.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;	/* allocated buffers store a negated size */
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
630 
631 #ifdef ENABLE_MDBG
632 
/*
 * Debug header placed in front of every allocation when mdbg is
 * enabled; a 32-bit footer magic follows the payload (see
 * mdbg_get_footer()).
 */
struct mdbg_hdr {
	const char *fname;	/* allocation site: file name */
	uint16_t line;		/* allocation site: line number */
	uint32_t pl_size;	/* requested payload size in bytes */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	uint64_t pad;		/* keeps sizeof a multiple of 16, see
				   the assert in mdbg_malloc() */
#endif
};
642 
643 #define MDBG_HEADER_MAGIC	0xadadadad
644 #define MDBG_FOOTER_MAGIC	0xecececec
645 
/*
 * Footer area size for a payload of pl_size bytes: padding up to the
 * next uint32_t boundary plus the 32-bit footer magic itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t padded = ROUNDUP(pl_size, sizeof(uint32_t));

	return padded - pl_size + sizeof(uint32_t);
}
652 
/*
 * Return a pointer to the 32-bit footer magic: the last uint32_t of
 * the footer area that follows the payload (padding, if any, sits
 * between payload end and the footer word).
 */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}
662 
663 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
664 		int lineno, size_t pl_size)
665 {
666 	uint32_t *footer;
667 
668 	hdr->fname = fname;
669 	hdr->line = lineno;
670 	hdr->pl_size = pl_size;
671 	hdr->magic = MDBG_HEADER_MAGIC;
672 
673 	footer = mdbg_get_footer(hdr);
674 	*footer = MDBG_FOOTER_MAGIC;
675 }
676 
/*
 * malloc() with mdbg instrumentation: records the allocation site in a
 * hidden header and writes guard magics before and after the payload.
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}

	malloc_unlock();
	return hdr;
}
702 
/* Assert that both guard magics of an allocated buffer are intact */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
708 
709 static void mdbg_free(void *ptr)
710 {
711 	struct mdbg_hdr *hdr = ptr;
712 
713 	if (hdr) {
714 		hdr--;
715 		assert_header(hdr);
716 		hdr->magic = 0;
717 		*mdbg_get_footer(hdr) = 0;
718 		raw_free(hdr);
719 	}
720 }
721 
/* Standard free(); accepts NULL, serialized against other heap users */
void free(void *ptr)
{
	malloc_lock();
	mdbg_free(ptr);
	malloc_unlock();
}
728 
729 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
730 {
731 	struct mdbg_hdr *hdr;
732 
733 	malloc_lock();
734 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
735 			  mdbg_get_ftr_size(nmemb * size), nmemb, size);
736 	if (hdr) {
737 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
738 		hdr++;
739 	}
740 	malloc_unlock();
741 	return hdr;
742 }
743 
/*
 * realloc() with mdbg bookkeeping; caller must hold the malloc lock.
 * A NULL ptr is passed through to raw_realloc() unchanged.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		/* Step back to the debug header and validate it */
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}
	return hdr;
}
761 
/* Locked wrapper around mdbg_realloc_unlocked() */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock();

	return ret;
}
771 
772 #define realloc_unlocked(ptr, size) \
773 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
774 
/* memalign() with mdbg instrumentation; see raw_memalign() */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;

	malloc_lock();
	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* return the payload, not the header */
	}
	malloc_unlock();
	return hdr;
}
790 
791 
/*
 * Given a raw bget buffer holding an mdbg allocation, return the start
 * of the user payload and its recorded size.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	/* The underlying buffer must at least hold the recorded payload */
	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
800 
801 void mdbg_check(int bufdump)
802 {
803 	struct bpool_iterator itr;
804 	void *b;
805 
806 	malloc_lock();
807 	raw_malloc_validate_pools();
808 
809 	BPOOL_FOREACH(&itr, &b) {
810 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
811 
812 		assert_header(hdr);
813 
814 		if (bufdump > 0) {
815 			const char *fname = hdr->fname;
816 
817 			if (!fname)
818 				fname = "unknown";
819 
820 			IMSG("buffer: %d bytes %s:%d\n",
821 				hdr->pl_size, fname, hdr->line);
822 		}
823 	}
824 
825 	malloc_unlock();
826 }
827 
828 #else
829 
/* Standard malloc(), serialized against concurrent heap users */
void *malloc(size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_malloc(0, 0, size);
	malloc_unlock();

	return ret;
}
839 
/* Standard free(); accepts NULL, serialized against other heap users */
void free(void *ptr)
{
	malloc_lock();
	raw_free(ptr);
	malloc_unlock();
}
846 
/* Standard calloc(): zeroed allocation of nmemb * size bytes */
void *calloc(size_t nmemb, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_calloc(0, 0, nmemb, size);
	malloc_unlock();

	return ret;
}
856 
/* realloc() variant for callers already holding the malloc lock */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size);
}
861 
/* Standard realloc(), serialized against concurrent heap users */
void *realloc(void *ptr, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = realloc_unlocked(ptr, size);
	malloc_unlock();

	return ret;
}
871 
/* Allocate size bytes aligned to 'alignment' (power of two) */
void *memalign(size_t alignment, size_t size)
{
	void *ret = NULL;

	malloc_lock();
	ret = raw_memalign(0, 0, alignment, size);
	malloc_unlock();

	return ret;
}
881 
/* Without mdbg the payload is the whole bget buffer */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
887 
888 #endif
889 
/*
 * Register [buf, buf + len) as an additional heap pool. The range is
 * trimmed to SizeQuant alignment, handed to bget via bpool() and
 * recorded in the malloc_pool bookkeeping array.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/*
	 * Smallest pool worth adding: a SizeQuant-rounded
	 * struct malloc_pool plus two bget block headers.
	 */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	malloc_lock();
	/* Tell ASan the whole pool is unallocated memory */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start);
	l = malloc_pool_len + 1;
	/* Grow the bookkeeping array; we already hold the lock */
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
	malloc_pool_len = l;
	malloc_unlock();
}
922 
/*
 * Return true if [buf, buf + len) lies entirely within the payload of
 * a single currently allocated heap buffer.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;

	malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	/* Walk every allocated buffer in every pool */
	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock();

	return ret;
}
958 
959 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
960 {
961 	uintptr_t buf_start = (uintptr_t) buf;
962 	uintptr_t buf_end = buf_start + len;
963 	size_t n;
964 	bool ret = false;
965 
966 	malloc_lock();
967 
968 	raw_malloc_validate_pools();
969 
970 	for (n = 0; n < malloc_pool_len; n++) {
971 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
972 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
973 
974 		if (buf_start > buf_end || pool_start > pool_end) {
975 			ret = true;	/* Wrapping buffers, shouldn't happen */
976 			goto out;
977 		}
978 
979 		if (buf_end > pool_start || buf_start < pool_end) {
980 			ret = true;
981 			goto out;
982 		}
983 	}
984 
985 out:
986 	malloc_unlock();
987 	return ret;
988 }
989