xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision bc879b1765afacd8a2b7673236037181011cabea)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #define PROTOTYPES
30 
31 /*
32  *  BGET CONFIGURATION
33  *  ==================
34  */
35 /* #define BGET_ENABLE_ALL_OPTIONS */
36 #ifdef BGET_ENABLE_OPTION
37 #define TestProg    20000	/* Generate built-in test program
38 				   if defined.  The value specifies
39 				   how many buffer allocation attempts
40 				   the test program should make. */
41 #endif
42 
43 
#if defined(__LP64__)
#define SizeQuant   16
#elif defined(__ILP32__)
#define SizeQuant   8
#else
/* Fail early: otherwise SizeQuant is silently undefined and later
   uses of it produce confusing errors or miscompile. */
#error "Neither __LP64__ nor __ILP32__ is defined, cannot set SizeQuant"
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */
54 
55 #ifdef BGET_ENABLE_OPTION
56 #define BufDump     1		/* Define this symbol to enable the
57 				   bpoold() function which dumps the
58 				   buffers in a buffer pool. */
59 
60 #define BufValid    1		/* Define this symbol to enable the
61 				   bpoolv() function for validating
62 				   a buffer pool. */
63 
64 #define DumpData    1		/* Define this symbol to enable the
65 				   bufdump() function which allows
66 				   dumping the contents of an allocated
67 				   or free buffer. */
68 
69 #define BufStats    1		/* Define this symbol to enable the
70 				   bstats() function which calculates
71 				   the total free space in the buffer
72 				   pool, the largest available
73 				   buffer, and the total space
74 				   currently allocated. */
75 
76 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
77 				   pattern of garbage to trip up
78 				   miscreants who attempt to use
79 				   pointers into released buffers. */
80 
81 #define BestFit     1		/* Use a best fit algorithm when
82 				   searching for space for an
83 				   allocation request.  This uses
84 				   memory more efficiently, but
85 				   allocation will be much slower. */
86 
87 #define BECtl       1		/* Define this symbol to enable the
88 				   bectl() function for automatic
89 				   pool space control.  */
90 #endif
91 
92 #ifdef MEM_DEBUG
93 #undef NDEBUG
94 #define DumpData    1
95 #define BufValid    1
96 #define FreeWipe    1
97 #endif
98 
99 #ifdef CFG_WITH_STATS
100 #define BufStats    1
101 #endif
102 
103 #include <compiler.h>
104 #include <malloc.h>
105 #include <stdbool.h>
106 #include <stdint.h>
107 #include <stdlib.h>
108 #include <string.h>
109 #include <trace.h>
110 #include <util.h>
111 
#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>

/* Take the global heap spinlock with exceptions masked; returns the
 * saved exception state to pass to malloc_unlock(). */
static uint32_t malloc_lock(void)
{
	return cpu_spin_lock_xsave(&__malloc_spinlock);
}

/* Release the heap spinlock and restore the saved exception state. */
static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
}

/* Tag [buf, buf + len) as freed heap memory for ASAN. */
static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

/* Tag [buf, buf + len) as accessible (allocated) memory for ASAN. */
static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

/* memset() variant that bypasses ASAN access checking. */
static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */

/* Locking is a no-op in TAs (NOTE(review): presumably because a TA
 * instance is single threaded — confirm). */
static uint32_t malloc_lock(void)
{
	return 0;
}

static void malloc_unlock(uint32_t exceptions __unused)
{
}

/* No ASAN in TAs: tagging helpers are no-ops. */
static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

/* Without ASAN a plain memset() is sufficient. */
static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/
168 
169 #include "bget.c"		/* this is ugly, but this is bget */
170 
/* One contiguous memory range registered as heap backing store */
struct malloc_pool {
	void *buf;	/* start of the pool */
	size_t len;	/* pool size in bytes */
};

/*
 * The poolset all allocations are served from.  The freelist is
 * initialized as an empty circular list pointing back at itself,
 * which is what bget expects before any pool has been added.
 */
static struct bpoolset malloc_poolset = { .freelist = {
					  {0, 0},
					  {&malloc_poolset.freelist,
					   &malloc_poolset.freelist}}};
/* Dynamic array of all pools added via malloc_add_pool() */
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;
182 
#ifdef BufStats

/* Global allocation statistics, protected by the malloc lock */
static struct malloc_stats mstats;

/*
 * Called at the end of every raw_* allocator with the result pointer.
 * Tracks the high-water mark of allocated bytes and records failed
 * allocations together with the largest requested size that failed.
 */
static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct bpoolset *poolset)
{
	if (poolset->totalloc > mstats.max_allocated)
		mstats.max_allocated = poolset->totalloc;

	if (!p) {
		mstats.num_alloc_fail++;
		if (requested_size > mstats.biggest_alloc_fail) {
			mstats.biggest_alloc_fail = requested_size;
			mstats.biggest_alloc_fail_used = poolset->totalloc;
		}
	}
}

/* Reset all counters; mstats.size (total pool size) is kept. */
void malloc_reset_stats(void)
{
	uint32_t exceptions = malloc_lock();

	mstats.max_allocated = 0;
	mstats.num_alloc_fail = 0;
	mstats.biggest_alloc_fail = 0;
	mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(exceptions);
}

/* Copy a consistent snapshot of the statistics into @stats. */
void malloc_get_stats(struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock();

	memcpy(stats, &mstats, sizeof(*stats));
	stats->allocated = malloc_poolset.totalloc;
	malloc_unlock(exceptions);
}

#else /* BufStats */

/* Stats disabled: hook compiles away to nothing. */
static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused,
				   struct bpoolset *poolset __unused)
{
}

#endif /* BufStats */
230 
#ifdef BufValid
/* Run bget's bpoolv() consistency check on every registered pool
 * (debug builds only; asserts on corruption). */
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
static void raw_malloc_validate_pools(void)
{
}
#endif
244 
/* Cursor for walking every buffer in every registered pool */
struct bpool_iterator {
	struct bfhead *next_buf;	/* next buffer header to visit */
	size_t pool_idx;		/* index into malloc_pool[] */
};
249 
/* Start iteration at the first buffer of the first pool */
static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}
255 
/*
 * Visit the buffer at the iterator's cursor and advance within the
 * current pool.  On success *buf/*len describe the payload (excluding
 * the bhead header) and *isfree tells whether the buffer is on the
 * free list.  Returns false when the pool's end sentinel (ESent) is
 * reached.
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	/* bget stores allocated sizes negated, free sizes positive */
	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
285 
286 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
287 {
288 	while (true) {
289 		size_t len;
290 		bool isfree;
291 
292 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
293 			if (isfree)
294 				continue;
295 			return true;
296 		}
297 
298 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
299 			return false;
300 
301 		iterator->pool_idx++;
302 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
303 	}
304 }
305 
306 /* Convenience macro for looping over all allocated buffers */
307 #define BPOOL_FOREACH(iterator, bp) \
308 		for (bpool_foreach_iterator_init((iterator)); \
309 			bpool_foreach((iterator), (bp));)
310 
/*
 * Allocate hdr_size + ftr_size + pl_size bytes from @poolset.
 * hdr_size/ftr_size reserve room for a caller-managed header/footer
 * (used by the mdbg wrappers); the returned pointer is to the start
 * of the whole buffer, i.e. the header.  Returns NULL on failure.
 */
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct bpoolset *poolset)
{
	void *ptr = NULL;
	size_t s = hdr_size + ftr_size + pl_size;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools();

	/* Check wrapping */
	if (s < pl_size)
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s,  poolset);
out:
	/* Hook runs on both success and failure for statistics */
	raw_malloc_return_hook(ptr, pl_size, poolset);

	return ptr;
}
340 
/* Return @ptr to @poolset; NULL is a no-op, as with free(). */
static void raw_free(void *ptr, struct bpoolset *poolset)
{
	raw_malloc_validate_pools();

	if (!ptr)
		return;

	brel(ptr, poolset);
}
348 
349 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
350 			size_t pl_size, struct bpoolset *poolset)
351 {
352 	size_t s = hdr_size + ftr_size + pl_nmemb * pl_size;
353 	void *ptr = NULL;
354 
355 	raw_malloc_validate_pools();
356 
357 	/* Check wrapping */
358 	if (s < pl_nmemb || s < pl_size)
359 		goto out;
360 
361 	/* BGET doesn't like 0 sized allocations */
362 	if (!s)
363 		s++;
364 
365 	ptr = bgetz(s, poolset);
366 out:
367 	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, poolset);
368 
369 	return ptr;
370 }
371 
/*
 * Resize @ptr to hold hdr_size + ftr_size + pl_size bytes via bgetr()
 * (see bget.c).  Returns NULL on failure or size overflow, in which
 * case the original buffer is not freed.
 */
static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct bpoolset *poolset)
{
	size_t s = hdr_size + ftr_size + pl_size;
	void *p = NULL;

	/* Check wrapping */
	if (s < pl_size)
		goto out;

	raw_malloc_validate_pools();

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, poolset);
out:
	raw_malloc_return_hook(p, pl_size, poolset);

	return p;
}
394 
/*
 * Turn the @size bytes starting at @bf into a free block ending right
 * at the allocated block @bn, and insert it at the tail of @poolset's
 * circular free list.
 */
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn,
			      struct bpoolset *poolset)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
	bf->ql.flink = &poolset->freelist;
	bf->ql.blink = poolset->freelist.ql.blink;
	poolset->freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}
418 
/*
 * Release the range [orig_buf, new_buf) at the front of the allocated
 * buffer starting at orig_buf, making new_buf the new start of the
 * allocation.  The freed front is merged into a preceding free buffer
 * when one exists, otherwise turned into a new free block.  Helper
 * for raw_memalign().
 */
static void brel_before(char *orig_buf, char *new_buf, struct bpoolset *poolset)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= size;
	assert(poolset->totalloc >= 0);
#endif
}
468 
/*
 * Shrink the allocated buffer at @buf so it holds only @size payload
 * bytes, releasing the tail back to @poolset.  Nothing happens if the
 * tail is too small to hold a free block.  Helper for raw_memalign().
 */
static void brel_after(char *buf, bufsize size, struct bpoolset *poolset)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer;
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= free_size;
	assert(poolset->totalloc >= 0);
#endif

}
541 
/*
 * Allocate @size payload bytes such that (buffer + hdr_size) is
 * aligned to @alignment (a power of two).  Over-allocates, then gives
 * back the unaligned front (brel_before) and unused tail (brel_after).
 * Returns NULL on bad alignment, overflow or exhaustion.
 */
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
			  size_t size, struct bpoolset *poolset)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size, poolset);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s, poolset);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p, poolset);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size, poolset);
out:
	raw_malloc_return_hook((void *)b, size, poolset);

	return (void *)b;
}
608 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/* Return the usable payload size of the bget buffer holding @buf */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
630 
631 #ifdef ENABLE_MDBG
632 
/*
 * Debug header placed in front of every allocation when ENABLE_MDBG
 * is set.  Records the allocation site and carries magic values
 * (header magic here, footer magic after the payload) used to detect
 * overruns and invalid frees.
 */
struct mdbg_hdr {
	const char *fname;	/* __FILE__ of the allocation site */
	uint16_t line;		/* __LINE__ of the allocation site */
	uint32_t pl_size;	/* requested payload size */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
#if defined(ARM64)
	uint64_t pad;		/* keep sizeof(struct mdbg_hdr) a multiple of
				   the fundamental alignment, see the
				   COMPILE_TIME_ASSERT in mdbg_malloc() */
#endif
};
642 
643 #define MDBG_HEADER_MAGIC	0xadadadad
644 #define MDBG_FOOTER_MAGIC	0xecececec
645 
646 static size_t mdbg_get_ftr_size(size_t pl_size)
647 {
648 	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
649 
650 	return ftr_pad + sizeof(uint32_t);
651 }
652 
653 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
654 {
655 	uint32_t *footer;
656 
657 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
658 			      mdbg_get_ftr_size(hdr->pl_size));
659 	footer--;
660 	return footer;
661 }
662 
663 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
664 		int lineno, size_t pl_size)
665 {
666 	uint32_t *footer;
667 
668 	hdr->fname = fname;
669 	hdr->line = lineno;
670 	hdr->pl_size = pl_size;
671 	hdr->magic = MDBG_HEADER_MAGIC;
672 
673 	footer = mdbg_get_footer(hdr);
674 	*footer = MDBG_FOOTER_MAGIC;
675 }
676 
/*
 * malloc() with a debug header/footer recording the allocation site
 * (@fname/@lineno).  Returns a pointer to the payload, just past the
 * header, or NULL on failure.
 */
void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* step past the header to the payload */
	}

	malloc_unlock(exceptions);
	return hdr;
}
701 
/* Assert the header and footer magics of @hdr are intact (detects
 * overruns and double/invalid frees in debug builds). */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
707 
708 static void mdbg_free(void *ptr)
709 {
710 	struct mdbg_hdr *hdr = ptr;
711 
712 	if (hdr) {
713 		hdr--;
714 		assert_header(hdr);
715 		hdr->magic = 0;
716 		*mdbg_get_footer(hdr) = 0;
717 		raw_free(hdr, &malloc_poolset);
718 	}
719 }
720 
/* Public free(): validates debug magics before releasing the buffer */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	mdbg_free(ptr);
	malloc_unlock(exceptions);
}
728 
/*
 * calloc() with debug header; payload is zero-initialized.
 * NOTE(review): nmemb * size is computed here before raw_calloc()
 * runs its overflow check, so a wrapping product reaches
 * mdbg_get_ftr_size()/mdbg_update_hdr() — confirm raw_calloc() fails
 * such requests before the wrapped value matters.
 */
void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(nmemb * size), nmemb, size,
			  &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;	/* step past the header to the payload */
	}
	malloc_unlock(exceptions);
	return hdr;
}
744 
/*
 * realloc() backend: validates the existing debug header (if any),
 * resizes through raw_realloc() and re-stamps header/footer.
 * ptr == NULL behaves like malloc().  Caller holds the malloc lock.
 */
static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;	/* step back from payload to the debug header */
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}
762 
/* Locked wrapper around mdbg_realloc_unlocked() */
void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock(exceptions);
	return p;
}
772 
773 #define realloc_unlocked(ptr, size) \
774 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
775 
/* memalign() with debug header; payload is aligned to @alignment */
void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;	/* step past the header to the payload */
	}
	malloc_unlock(exceptions);
	return hdr;
}
791 
792 
/* Given a raw bget buffer (debug header first), return the payload
 * start and store the requested payload size in *size. */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}
/*
 * Walk every allocated buffer in every pool and verify its debug
 * header/footer magics.  If @bufdump > 0, also log each allocation
 * with its size and origin (useful for leak hunting).
 */
void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(exceptions);
}
828 
829 #else
830 
/* Standard malloc(): locked wrapper around raw_malloc() */
void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_malloc(0, 0, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}
840 
/* Standard free(): locked wrapper around raw_free(); NULL is a no-op */
void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	raw_free(ptr, &malloc_poolset);
	malloc_unlock(exceptions);
}
848 
/* Standard calloc(): locked wrapper around raw_calloc() */
void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_calloc(0, 0, nmemb, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}
858 
/* realloc() without taking the lock; caller must hold the malloc lock */
static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size, &malloc_poolset);
}
863 
/* Standard realloc(): locked wrapper around realloc_unlocked() */
void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = realloc_unlocked(ptr, size);
	malloc_unlock(exceptions);
	return p;
}
873 
/* Standard memalign(): locked wrapper around raw_memalign() */
void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_memalign(0, 0, alignment, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}
883 
/* Without debug headers the whole bget buffer is the payload */
static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}
889 
890 #endif
891 
/*
 * Register [buf, buf + len) as heap backing store.  The range is
 * trimmed to SizeQuant alignment, handed to bget via bpool() and
 * recorded in the malloc_pool[] bookkeeping array.  Too-small pools
 * are skipped with a debug message.
 */
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	/* Smallest useful pool: room for a bookkeeping entry (rounded up
	   to SizeQuant) plus two block headers */
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;


	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();

	/* Tell ASAN the new pool is (free) heap memory */
	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &malloc_poolset);
	l = malloc_pool_len + 1;
	/* Grow the bookkeeping array by one entry */
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}
929 
/*
 * Return true if [buf, buf + len) lies entirely inside the payload of
 * a single allocated heap buffer.  Used to validate pointers received
 * from untrusted callers.
 */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		/* The whole queried range must fit in this one buffer */
		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);

	return ret;
}
964 
965 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
966 {
967 	uintptr_t buf_start = (uintptr_t) buf;
968 	uintptr_t buf_end = buf_start + len;
969 	size_t n;
970 	bool ret = false;
971 	uint32_t exceptions = malloc_lock();
972 
973 	raw_malloc_validate_pools();
974 
975 	for (n = 0; n < malloc_pool_len; n++) {
976 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
977 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
978 
979 		if (buf_start > buf_end || pool_start > pool_end) {
980 			ret = true;	/* Wrapping buffers, shouldn't happen */
981 			goto out;
982 		}
983 
984 		if (buf_end > pool_start || buf_start < pool_end) {
985 			ret = true;
986 			goto out;
987 		}
988 	}
989 
990 out:
991 	malloc_unlock(exceptions);
992 	return ret;
993 }
994