1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * 1. Redistributions of source code must retain the above copyright notice,
10  * this list of conditions and the following disclaimer.
11  *
12  * 2. Redistributions in binary form must reproduce the above copyright notice,
13  * this list of conditions and the following disclaimer in the documentation
14  * and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
20  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26  * POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 #define PROTOTYPES
30 
31 /*
32  *  BGET CONFIGURATION
33  *  ==================
34  */
35 /* #define BGET_ENABLE_OPTION */
36 #ifdef BGET_ENABLE_OPTION
37 #define TestProg    20000	/* Generate built-in test program
38 				   if defined.  The value specifies
39 				   how many buffer allocation attempts
40 				   the test program should make. */
41 #endif
42 
44 #ifdef __LP64__
45 #define SizeQuant   16
46 #endif
47 #ifdef __ILP32__
48 #define SizeQuant   8
49 #endif
50 				/* Buffer allocation size quantum:
51 				   all buffers allocated are a
52 				   multiple of this size.  This
53 				   MUST be a power of two. */
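/*
 * Illustrative example (assuming the __LP64__ value above): with
 * SizeQuant 16, a 20-byte request is rounded up to a 32-byte payload
 * before bget adds its own struct bhead header.
 */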
54 
55 #ifdef BGET_ENABLE_OPTION
56 #define BufDump     1		/* Define this symbol to enable the
57 				   bpoold() function which dumps the
58 				   buffers in a buffer pool. */
59 
60 #define BufValid    1		/* Define this symbol to enable the
61 				   bpoolv() function for validating
62 				   a buffer pool. */
63 
64 #define DumpData    1		/* Define this symbol to enable the
65 				   bufdump() function which allows
66 				   dumping the contents of an allocated
67 				   or free buffer. */
68 
69 #define BufStats    1		/* Define this symbol to enable the
70 				   bstats() function which calculates
71 				   the total free space in the buffer
72 				   pool, the largest available
73 				   buffer, and the total space
74 				   currently allocated. */
75 
76 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
77 				   pattern of garbage to trip up
78 				   miscreants who attempt to use
79 				   pointers into released buffers. */
80 
81 #define BestFit     1		/* Use a best fit algorithm when
82 				   searching for space for an
83 				   allocation request.  This uses
84 				   memory more efficiently, but
85 				   allocation will be much slower. */
86 
87 #define BECtl       1		/* Define this symbol to enable the
88 				   bectl() function for automatic
89 				   pool space control.  */
90 #endif
91 
92 #ifdef MEM_DEBUG
93 #undef NDEBUG
94 #define DumpData    1
95 #define BufValid    1
96 #define FreeWipe    1
97 #endif
98 
99 #ifdef CFG_WITH_STATS
100 #define BufStats    1
101 #endif
102 
103 #include <compiler.h>
104 #include <malloc.h>
105 #include <stdbool.h>
106 #include <stdint.h>
107 #include <stdlib.h>
108 #include <string.h>
109 #include <trace.h>
110 #include <util.h>
111 
112 #if defined(__KERNEL__)
113 /* Compiling for TEE Core */
114 #include <kernel/asan.h>
115 #include <kernel/thread.h>
116 #include <kernel/spinlock.h>
117 
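/*
 * The core heap is shared by all threads, so access is serialized
 * with __malloc_spinlock taken via cpu_spin_lock_xsave(), which also
 * masks exceptions. The value returned is the previous exception
 * mask, which malloc_unlock() passes back to be restored.
 */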
118 static uint32_t malloc_lock(void)
119 {
120 	return cpu_spin_lock_xsave(&__malloc_spinlock);
121 }
122 
123 static void malloc_unlock(uint32_t exceptions)
124 {
125 	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
126 }
127 
128 static void tag_asan_free(void *buf, size_t len)
129 {
130 	asan_tag_heap_free(buf, (uint8_t *)buf + len);
131 }
132 
133 static void tag_asan_alloced(void *buf, size_t len)
134 {
135 	asan_tag_access(buf, (uint8_t *)buf + len);
136 }
137 
138 static void *memset_unchecked(void *s, int c, size_t n)
139 {
140 	return asan_memset_unchecked(s, c, n);
141 }
142 
143 #else /*__KERNEL__*/
144 /* Compiling for TA */
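/*
 * A TA instance is single threaded from the TEE's point of view, so
 * there is no concurrent heap access and the lock functions can be
 * no-ops.
 */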
145 static uint32_t malloc_lock(void)
146 {
147 	return 0;
148 }
149 
150 static void malloc_unlock(uint32_t exceptions __unused)
151 {
152 }
153 
154 static void tag_asan_free(void *buf __unused, size_t len __unused)
155 {
156 }
157 
158 static void tag_asan_alloced(void *buf __unused, size_t len __unused)
159 {
160 }
161 
162 static void *memset_unchecked(void *s, int c, size_t n)
163 {
164 	return memset(s, c, n);
165 }
166 
167 #endif /*__KERNEL__*/
168 
169 #include "bget.c"		/* this is ugly, but this is bget */
170 
171 struct malloc_pool {
172 	void *buf;
173 	size_t len;
174 };
175 
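/*
 * The poolset starts with an empty free list: the sentinel bfhead's
 * flink and blink both point back at the list head itself, forming a
 * circular doubly linked list with no members.
 */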
176 static struct bpoolset malloc_poolset = { .freelist = {
177 					  {0, 0},
178 					  {&malloc_poolset.freelist,
179 					   &malloc_poolset.freelist}}};
180 static struct malloc_pool *malloc_pool;
181 static size_t malloc_pool_len;
182 
183 #ifdef BufStats
184 
185 static struct malloc_stats mstats;
186 
187 static void raw_malloc_return_hook(void *p, size_t requested_size,
188 				   struct bpoolset *poolset)
189 {
190 	if (poolset->totalloc > mstats.max_allocated)
191 		mstats.max_allocated = poolset->totalloc;
192 
193 	if (!p) {
194 		mstats.num_alloc_fail++;
195 		if (requested_size > mstats.biggest_alloc_fail) {
196 			mstats.biggest_alloc_fail = requested_size;
197 			mstats.biggest_alloc_fail_used = poolset->totalloc;
198 		}
199 	}
200 }
201 
202 void malloc_reset_stats(void)
203 {
204 	uint32_t exceptions = malloc_lock();
205 
206 	mstats.max_allocated = 0;
207 	mstats.num_alloc_fail = 0;
208 	mstats.biggest_alloc_fail = 0;
209 	mstats.biggest_alloc_fail_used = 0;
210 	malloc_unlock(exceptions);
211 }
212 
213 void malloc_get_stats(struct malloc_stats *stats)
214 {
215 	uint32_t exceptions = malloc_lock();
216 
217 	memcpy(stats, &mstats, sizeof(*stats));
218 	stats->allocated = malloc_poolset.totalloc;
219 	malloc_unlock(exceptions);
220 }
221 
222 #else /* BufStats */
223 
224 static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused,
225 				   struct bpoolset *poolset __unused)
226 {
227 }
228 
229 #endif /* BufStats */
230 
231 #ifdef BufValid
232 static void raw_malloc_validate_pools(void)
233 {
234 	size_t n;
235 
236 	for (n = 0; n < malloc_pool_len; n++)
237 		bpoolv(malloc_pool[n].buf);
238 }
239 #else
240 static void raw_malloc_validate_pools(void)
241 {
242 }
243 #endif
244 
245 struct bpool_iterator {
246 	struct bfhead *next_buf;
247 	size_t pool_idx;
248 };
249 
250 static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
251 {
252 	iterator->pool_idx = 0;
253 	iterator->next_buf = BFH(malloc_pool[0].buf);
254 }
255 
256 static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
257 		size_t *len, bool *isfree)
258 {
259 	struct bfhead *b = iterator->next_buf;
260 	bufsize bs = b->bh.bsize;
261 
262 	if (bs == ESent)
263 		return false;
264 
265 	if (bs < 0) {
266 		/* Allocated buffer */
267 		bs = -bs;
268 
269 		*isfree = false;
270 	} else {
271 		/* Free Buffer */
272 		*isfree = true;
273 
274 		/* Assert that the free list links are intact */
275 		assert(b->ql.blink->ql.flink == b);
276 		assert(b->ql.flink->ql.blink == b);
277 	}
278 
279 	*buf = (uint8_t *)b + sizeof(struct bhead);
280 	*len = bs - sizeof(struct bhead);
281 
282 	iterator->next_buf = BFH((uint8_t *)b + bs);
283 	return true;
284 }
285 
286 static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
287 {
288 	while (true) {
289 		size_t len;
290 		bool isfree;
291 
292 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
293 			if (isfree)
294 				continue;
295 			return true;
296 		}
297 
298 		if ((iterator->pool_idx + 1) >= malloc_pool_len)
299 			return false;
300 
301 		iterator->pool_idx++;
302 		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
303 	}
304 }
305 
306 /* Convenience macro for looping over all allocated buffers */
307 #define BPOOL_FOREACH(iterator, bp) \
308 		for (bpool_foreach_iterator_init((iterator)); \
309 			bpool_foreach((iterator), (bp));)
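/*
 * Usage sketch (hypothetical helper, not part of this file). The
 * caller must hold the malloc lock since the iterator walks live
 * pool metadata:
 *
 *	struct bpool_iterator itr;
 *	void *b;
 *	size_t count = 0;
 *
 *	BPOOL_FOREACH(&itr, &b)
 *		count++;	// b points at an allocated payload
 */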
310 
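/*
 * Layout of a raw_*() allocation: a single bget buffer holding an
 * optional caller header, the payload and an optional caller footer:
 *
 *	| hdr_size | pl_size | ftr_size |
 *	^
 *	pointer returned by raw_malloc()
 *
 * malloc() below passes 0 for both extra sizes while the mdbg_*()
 * wrappers use them for their debug header and footer.
 */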
311 static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
312 			struct bpoolset *poolset)
313 {
314 	void *ptr = NULL;
315 	bufsize s;
316 
317 	/*
318 	 * Make sure that malloc has correct alignment of returned buffers.
319 	 * The assumption is that uintptr_t will be as wide as the largest
320 	 * required alignment of any type.
321 	 */
322 	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));
323 
324 	raw_malloc_validate_pools();
325 
326 	/* Compute total size */
327 	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
328 		goto out;
329 	if (ADD_OVERFLOW(s, ftr_size, &s))
330 		goto out;
331 
332 	/* BGET doesn't like 0 sized allocations */
333 	if (!s)
334 		s++;
335 
336 	ptr = bget(s, poolset);
337 out:
338 	raw_malloc_return_hook(ptr, pl_size, poolset);
339 
340 	return ptr;
341 }
342 
343 static void raw_free(void *ptr, struct bpoolset *poolset)
344 {
345 	raw_malloc_validate_pools();
346 
347 	if (ptr)
348 		brel(ptr, poolset);
349 }
350 
351 static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
352 			size_t pl_size, struct bpoolset *poolset)
353 {
354 	void *ptr = NULL;
355 	bufsize s;
356 
357 	raw_malloc_validate_pools();
358 
359 	/* Compute total size */
360 	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
361 		goto out;
362 	if (ADD_OVERFLOW(s, hdr_size, &s))
363 		goto out;
364 	if (ADD_OVERFLOW(s, ftr_size, &s))
365 		goto out;
366 
367 	/* BGET doesn't like 0 sized allocations */
368 	if (!s)
369 		s++;
370 
371 	ptr = bgetz(s, poolset);
372 out:
373 	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, poolset);
374 
375 	return ptr;
376 }
377 
378 static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
379 			 size_t pl_size, struct bpoolset *poolset)
380 {
381 	void *p = NULL;
382 	bufsize s;
383 
384 	/* Compute total size */
385 	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
386 		goto out;
387 	if (ADD_OVERFLOW(s, ftr_size, &s))
388 		goto out;
389 
390 	raw_malloc_validate_pools();
391 
392 	/* BGET doesn't like 0 sized allocations */
393 	if (!s)
394 		s++;
395 
396 	p = bgetr(ptr, s, poolset);
397 out:
398 	raw_malloc_return_hook(p, pl_size, poolset);
399 
400 	return p;
401 }
402 
403 static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn,
404 			      struct bpoolset *poolset)
405 {
406 	assert(BH((char *)bf + size) == bn);
407 	assert(bn->bsize < 0); /* Next block should be allocated */
408 	/* Next block shouldn't already have free block in front */
409 	assert(bn->prevfree == 0);
410 
411 	/* Create the free buf header */
412 	bf->bh.bsize = size;
413 	bf->bh.prevfree = 0;
414 
415 	/* Update next block to point to the new free buf header */
416 	bn->prevfree = size;
417 
418 	/* Insert the free buffer on the free list */
419 	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
420 	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
421 	bf->ql.flink = &poolset->freelist;
422 	bf->ql.blink = poolset->freelist.ql.blink;
423 	poolset->freelist.ql.blink = bf;
424 	bf->ql.blink->ql.flink = bf;
425 }
426 
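/*
 * Free the range [orig_buf, new_buf) at the front of an allocated
 * buffer: the allocation shrinks to start at new_buf and the bytes in
 * front are either consolidated into a preceding free block or turned
 * into a new one. Used by raw_memalign() to release alignment slack.
 */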
427 static void brel_before(char *orig_buf, char *new_buf, struct bpoolset *poolset)
428 {
429 	struct bfhead *bf;
430 	struct bhead *b;
431 	bufsize size;
432 	bufsize orig_size;
433 
434 	assert(orig_buf < new_buf);
435 	/* There has to be room for the freebuf header */
436 	size = (bufsize)(new_buf - orig_buf);
437 	assert(size >= (SizeQ + sizeof(struct bhead)));
438 
439 	/* Point to head of original buffer */
440 	bf = BFH(orig_buf - sizeof(struct bhead));
441 	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */
442 
443 	/* Point to head of the becoming new allocated buffer */
444 	b = BH(new_buf - sizeof(struct bhead));
445 
446 	if (bf->bh.prevfree != 0) {
447 		/* Previous buffer is free, consolidate with that buffer */
448 		struct bfhead *bfp;
449 
450 		/* Update the previous free buffer */
451 		bfp = BFH((char *)bf - bf->bh.prevfree);
452 		assert(bfp->bh.bsize == bf->bh.prevfree);
453 		bfp->bh.bsize += size;
454 
455 		/* Make a new allocated buffer header */
456 		b->prevfree = bfp->bh.bsize;
457 		/* Make it negative since it's an allocated buffer */
458 		b->bsize = -(orig_size - size);
459 	} else {
460 		/*
461 		 * Previous buffer is allocated, create a new buffer and
462 		 * insert on the free list.
463 		 */
464 
465 		/* Make it negative since it's an allocated buffer */
466 		b->bsize = -(orig_size - size);
467 
468 		create_free_block(bf, size, b, poolset);
469 	}
470 
471 #ifdef BufStats
472 	poolset->totalloc -= size;
473 	assert(poolset->totalloc >= 0);
474 #endif
475 }
476 
477 static void brel_after(char *buf, bufsize size, struct bpoolset *poolset)
478 {
479 	struct bhead *b = BH(buf - sizeof(struct bhead));
480 	struct bhead *bn;
481 	bufsize new_size = size;
482 	bufsize free_size;
483 
484 	/* Select the size in the same way as in bget() */
485 	if (new_size < SizeQ)
486 		new_size = SizeQ;
487 #ifdef SizeQuant
488 #if SizeQuant > 1
489 	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
490 #endif
491 #endif
492 	new_size += sizeof(struct bhead);
493 	assert(new_size <= -b->bsize);
494 
495 	/*
496 	 * Check if there's enough space at the end of the buffer to be
497 	 * able to free anything.
498 	 */
499 	free_size = -b->bsize - new_size;
500 	if (free_size < SizeQ + sizeof(struct bhead))
501 		return;
502 
503 	bn = BH((char *)b - b->bsize);
504 	/*
505 	 * Set the new size of the buffer.
506 	 */
507 	b->bsize = -new_size;
508 	if (bn->bsize > 0) {
509 		/* Next buffer is free, consolidate with that buffer */
510 		struct bfhead *bfn = BFH(bn);
511 		struct bfhead *nbf = BFH((char *)b + new_size);
512 		struct bhead *bnn = BH((char *)bn + bn->bsize);
513 
514 		assert(bfn->bh.prevfree == 0);
515 		assert(bnn->prevfree == bfn->bh.bsize);
516 
517 		/* Construct the new free header */
518 		nbf->bh.prevfree = 0;
519 		nbf->bh.bsize = bfn->bh.bsize + free_size;
520 
521 		/* Update the buffer after this to point to this header */
522 		bnn->prevfree += free_size;
523 
524 		/*
525 		 * Unlink the previous free buffer and link the new free
526 		 * buffer.
527 		 */
528 		assert(bfn->ql.blink->ql.flink == bfn);
529 		assert(bfn->ql.flink->ql.blink == bfn);
530 
531 		/* Assign blink and flink from the old free buffer */
532 		nbf->ql.blink = bfn->ql.blink;
533 		nbf->ql.flink = bfn->ql.flink;
534 
535 		/* Replace the old free buffer with the new one */
536 		nbf->ql.blink->ql.flink = nbf;
537 		nbf->ql.flink->ql.blink = nbf;
538 	} else {
539 		/* Next buffer is allocated, create a new free buffer */
540 		create_free_block(BFH((char *)b + new_size), free_size, bn, poolset);
541 	}
542 
543 #ifdef BufStats
544 	poolset->totalloc -= free_size;
545 	assert(poolset->totalloc >= 0);
546 #endif
547 
548 }
549 
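/*
 * Sketch of the approach (illustrative numbers, no caller header or
 * footer): for alignment 64, bget() is asked for size + 64 + SizeQ +
 * sizeof(struct bhead) bytes. If the returned buffer isn't aligned,
 * the start is moved up to a 64-byte boundary far enough in that the
 * skipped front can be released with brel_before(), and the unneeded
 * tail is then released with brel_after().
 */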
550 static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
551 			  size_t size, struct bpoolset *poolset)
552 {
553 	size_t s;
554 	uintptr_t b;
555 
556 	raw_malloc_validate_pools();
557 
558 	if (!IS_POWER_OF_TWO(alignment))
559 		return NULL;
560 
561 	/*
562 	 * Normal malloc with headers always returns something SizeQuant
563 	 * aligned.
564 	 */
565 	if (alignment <= SizeQuant)
566 		return raw_malloc(hdr_size, ftr_size, size, poolset);
567 
568 	s = hdr_size + ftr_size + alignment + size +
569 	    SizeQ + sizeof(struct bhead);
570 
571 	/* Check for wrapping */
572 	if (s < alignment || s < size)
573 		return NULL;
574 
575 	b = (uintptr_t)bget(s, poolset);
576 	if (!b)
577 		goto out;
578 
579 	if ((b + hdr_size) & (alignment - 1)) {
580 		/*
581 		 * Returned buffer is not aligned as requested if the
582 		 * hdr_size is added. Find an offset into the buffer
583 		 * that is far enough in to the buffer to be able to free
584 		 * what's in front.
585 		 */
586 		uintptr_t p;
587 
588 		/*
589 		 * Find the point where the buffer including supplied
590 		 * header size should start.
591 		 */
592 		p = b + hdr_size + alignment;
593 		p &= ~(alignment - 1);
594 		p -= hdr_size;
595 		if ((p - b) < (SizeQ + sizeof(struct bhead)))
596 			p += alignment;
597 		assert((p + hdr_size + ftr_size + size) <= (b + s));
598 
599 		/* Free the front part of the buffer */
600 		brel_before((void *)b, (void *)p, poolset);
601 
602 		/* Set the new start of the buffer */
603 		b = p;
604 	}
605 
606 	/*
607 	 * Since b is now aligned, release what we don't need at the end of
608 	 * the buffer.
609 	 */
610 	brel_after((void *)b, hdr_size + ftr_size + size, poolset);
611 out:
612 	raw_malloc_return_hook((void *)b, size, poolset);
613 
614 	return (void *)b;
615 }
616 
617 /* Most of the stuff in this function is copied from bgetr() in bget.c */
618 static __maybe_unused bufsize bget_buf_size(void *buf)
619 {
620 	bufsize osize;          /* Old size of buffer */
621 	struct bhead *b;
622 
623 	b = BH(((char *)buf) - sizeof(struct bhead));
624 	osize = -b->bsize;
625 #ifdef BECtl
626 	if (osize == 0) {
627 		/*  Buffer acquired directly through acqfcn. */
628 		struct bdhead *bd;
629 
630 		bd = BDH(((char *)buf) - sizeof(struct bdhead));
631 		osize = bd->tsize - sizeof(struct bdhead);
632 	} else
633 #endif
634 		osize -= sizeof(struct bhead);
635 	assert(osize > 0);
636 	return osize;
637 }
638 
639 #ifdef ENABLE_MDBG
640 
641 struct mdbg_hdr {
642 	const char *fname;
643 	uint16_t line;
644 	uint32_t pl_size;
645 	uint32_t magic;
646 #if defined(ARM64)
647 	uint64_t pad;
648 #endif
649 };
650 
651 #define MDBG_HEADER_MAGIC	0xadadadad
652 #define MDBG_FOOTER_MAGIC	0xecececec
653 
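/*
 * Layout of a debug allocation; the pointer handed to the caller
 * points just past struct mdbg_hdr:
 *
 *	| struct mdbg_hdr | payload (pl_size) | pad | footer magic |
 *
 * The pad rounds the payload up to a uint32_t boundary so that the
 * 32-bit MDBG_FOOTER_MAGIC word at the end is naturally aligned.
 */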
654 static size_t mdbg_get_ftr_size(size_t pl_size)
655 {
656 	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;
657 
658 	return ftr_pad + sizeof(uint32_t);
659 }
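/*
 * For example, pl_size = 5 gives ftr_pad = 3 and a total footer size
 * of 7: three bytes of padding followed by the four byte magic.
 */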
660 
661 static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
662 {
663 	uint32_t *footer;
664 
665 	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
666 			      mdbg_get_ftr_size(hdr->pl_size));
667 	footer--;
668 	return footer;
669 }
670 
671 static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
672 		int lineno, size_t pl_size)
673 {
674 	uint32_t *footer;
675 
676 	hdr->fname = fname;
677 	hdr->line = lineno;
678 	hdr->pl_size = pl_size;
679 	hdr->magic = MDBG_HEADER_MAGIC;
680 
681 	footer = mdbg_get_footer(hdr);
682 	*footer = MDBG_FOOTER_MAGIC;
683 }
684 
685 void *mdbg_malloc(const char *fname, int lineno, size_t size)
686 {
687 	struct mdbg_hdr *hdr;
688 	uint32_t exceptions = malloc_lock();
689 
690 	/*
691 	 * Check that struct mdbg_hdr doesn't break the alignment of
692 	 * the buffer we return: the C standard requires malloc() to
693 	 * return memory aligned for any fundamental type. For ARM32
694 	 * the required alignment is 8 bytes, for ARM64 it is 16.
695 	 */
696 	COMPILE_TIME_ASSERT(
697 		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);
698 
699 	hdr = raw_malloc(sizeof(struct mdbg_hdr),
700 			  mdbg_get_ftr_size(size), size, &malloc_poolset);
701 	if (hdr) {
702 		mdbg_update_hdr(hdr, fname, lineno, size);
703 		hdr++;
704 	}
705 
706 	malloc_unlock(exceptions);
707 	return hdr;
708 }
709 
710 static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
711 {
712 	assert(hdr->magic == MDBG_HEADER_MAGIC);
713 	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
714 }
715 
716 static void mdbg_free(void *ptr)
717 {
718 	struct mdbg_hdr *hdr = ptr;
719 
720 	if (hdr) {
721 		hdr--;
722 		assert_header(hdr);
723 		hdr->magic = 0;
724 		*mdbg_get_footer(hdr) = 0;
725 		raw_free(hdr, &malloc_poolset);
726 	}
727 }
728 
729 void free(void *ptr)
730 {
731 	uint32_t exceptions = malloc_lock();
732 
733 	mdbg_free(ptr);
734 	malloc_unlock(exceptions);
735 }
736 
737 void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
738 {
739 	struct mdbg_hdr *hdr;
740 	uint32_t exceptions = malloc_lock();
741 
742 	hdr = raw_calloc(sizeof(struct mdbg_hdr),
743 			  mdbg_get_ftr_size(nmemb * size), nmemb, size,
744 			  &malloc_poolset);
745 	if (hdr) {
746 		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
747 		hdr++;
748 	}
749 	malloc_unlock(exceptions);
750 	return hdr;
751 }
752 
753 static void *mdbg_realloc_unlocked(const char *fname, int lineno,
754 			    void *ptr, size_t size)
755 {
756 	struct mdbg_hdr *hdr = ptr;
757 
758 	if (hdr) {
759 		hdr--;
760 		assert_header(hdr);
761 	}
762 	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
763 			   mdbg_get_ftr_size(size), size, &malloc_poolset);
764 	if (hdr) {
765 		mdbg_update_hdr(hdr, fname, lineno, size);
766 		hdr++;
767 	}
768 	return hdr;
769 }
770 
771 void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
772 {
773 	void *p;
774 	uint32_t exceptions = malloc_lock();
775 
776 	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
777 	malloc_unlock(exceptions);
778 	return p;
779 }
780 
781 #define realloc_unlocked(ptr, size) \
782 		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))
783 
784 void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
785 		size_t size)
786 {
787 	struct mdbg_hdr *hdr;
788 	uint32_t exceptions = malloc_lock();
789 
790 	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
791 			   alignment, size, &malloc_poolset);
792 	if (hdr) {
793 		mdbg_update_hdr(hdr, fname, lineno, size);
794 		hdr++;
795 	}
796 	malloc_unlock(exceptions);
797 	return hdr;
798 }
799 
801 static void *get_payload_start_size(void *raw_buf, size_t *size)
802 {
803 	struct mdbg_hdr *hdr = raw_buf;
804 
805 	assert(bget_buf_size(hdr) >= hdr->pl_size);
806 	*size = hdr->pl_size;
807 	return hdr + 1;
808 }
809 
810 void mdbg_check(int bufdump)
811 {
812 	struct bpool_iterator itr;
813 	void *b;
814 	uint32_t exceptions = malloc_lock();
815 
816 	raw_malloc_validate_pools();
817 
818 	BPOOL_FOREACH(&itr, &b) {
819 		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;
820 
821 		assert_header(hdr);
822 
823 		if (bufdump > 0) {
824 			const char *fname = hdr->fname;
825 
826 			if (!fname)
827 				fname = "unknown";
828 
829 			IMSG("buffer: %u bytes %s:%d",
830 				hdr->pl_size, fname, hdr->line);
831 		}
832 	}
833 
834 	malloc_unlock(exceptions);
835 }
836 
837 #else
838 
839 void *malloc(size_t size)
840 {
841 	void *p;
842 	uint32_t exceptions = malloc_lock();
843 
844 	p = raw_malloc(0, 0, size, &malloc_poolset);
845 	malloc_unlock(exceptions);
846 	return p;
847 }
848 
849 void free(void *ptr)
850 {
851 	uint32_t exceptions = malloc_lock();
852 
853 	raw_free(ptr, &malloc_poolset);
854 	malloc_unlock(exceptions);
855 }
856 
857 void *calloc(size_t nmemb, size_t size)
858 {
859 	void *p;
860 	uint32_t exceptions = malloc_lock();
861 
862 	p = raw_calloc(0, 0, nmemb, size, &malloc_poolset);
863 	malloc_unlock(exceptions);
864 	return p;
865 }
866 
867 static void *realloc_unlocked(void *ptr, size_t size)
868 {
869 	return raw_realloc(ptr, 0, 0, size, &malloc_poolset);
870 }
871 
872 void *realloc(void *ptr, size_t size)
873 {
874 	void *p;
875 	uint32_t exceptions = malloc_lock();
876 
877 	p = realloc_unlocked(ptr, size);
878 	malloc_unlock(exceptions);
879 	return p;
880 }
881 
882 void *memalign(size_t alignment, size_t size)
883 {
884 	void *p;
885 	uint32_t exceptions = malloc_lock();
886 
887 	p = raw_memalign(0, 0, alignment, size, &malloc_poolset);
888 	malloc_unlock(exceptions);
889 	return p;
890 }
891 
892 static void *get_payload_start_size(void *ptr, size_t *size)
893 {
894 	*size = bget_buf_size(ptr);
895 	return ptr;
896 }
897 
898 #endif
899 
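/*
 * Typical use (sketch with a hypothetical buffer): hand the allocator
 * its backing store. Note that the first call also allocates the
 * malloc_pool bookkeeping array from the pool just added:
 *
 *	static uint8_t heap[64 * 1024] __aligned(16);
 *
 *	malloc_add_pool(heap, sizeof(heap));
 */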
900 void malloc_add_pool(void *buf, size_t len)
901 {
902 	void *p;
903 	size_t l;
904 	uint32_t exceptions;
905 	uintptr_t start = (uintptr_t)buf;
906 	uintptr_t end = start + len;
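	/*
	 * Minimum useful pool size: one SizeQuant-rounded allocation the
	 * size of a struct malloc_pool (presumably sized after the
	 * bookkeeping realloc() below) plus bget's two struct bhead
	 * sentinels.
	 */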
907 	const size_t min_len = ROUNDUP(sizeof(struct malloc_pool), SizeQuant) +
908 				sizeof(struct bhead) * 2;
909 
912 	start = ROUNDUP(start, SizeQuant);
913 	end = ROUNDDOWN(end, SizeQuant);
914 	assert(start < end);
915 
916 	if ((end - start) < min_len) {
917 		DMSG("Skipping too small pool");
918 		return;
919 	}
920 
921 	exceptions = malloc_lock();
922 
923 	tag_asan_free((void *)start, end - start);
924 	bpool((void *)start, end - start, &malloc_poolset);
925 	l = malloc_pool_len + 1;
926 	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
927 	assert(p);
928 	malloc_pool = p;
929 	malloc_pool[malloc_pool_len].buf = (void *)start;
930 	malloc_pool[malloc_pool_len].len = end - start;
931 #ifdef BufStats
932 	mstats.size += malloc_pool[malloc_pool_len].len;
933 #endif
934 	malloc_pool_len = l;
935 	malloc_unlock(exceptions);
936 }
937 
938 bool malloc_buffer_is_within_alloced(void *buf, size_t len)
939 {
940 	struct bpool_iterator itr;
941 	void *b;
942 	uint8_t *start_buf = buf;
943 	uint8_t *end_buf = start_buf + len;
944 	bool ret = false;
945 	uint32_t exceptions = malloc_lock();
946 
947 	raw_malloc_validate_pools();
948 
949 	/* Check for wrapping */
950 	if (start_buf > end_buf)
951 		goto out;
952 
953 	BPOOL_FOREACH(&itr, &b) {
954 		uint8_t *start_b;
955 		uint8_t *end_b;
956 		size_t s;
957 
958 		start_b = get_payload_start_size(b, &s);
959 		end_b = start_b + s;
960 
961 		if (start_buf >= start_b && end_buf <= end_b) {
962 			ret = true;
963 			goto out;
964 		}
965 	}
966 
967 out:
968 	malloc_unlock(exceptions);
969 
970 	return ret;
971 }
972 
973 bool malloc_buffer_overlaps_heap(void *buf, size_t len)
974 {
975 	uintptr_t buf_start = (uintptr_t) buf;
976 	uintptr_t buf_end = buf_start + len;
977 	size_t n;
978 	bool ret = false;
979 	uint32_t exceptions = malloc_lock();
980 
981 	raw_malloc_validate_pools();
982 
983 	for (n = 0; n < malloc_pool_len; n++) {
984 		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
985 		uintptr_t pool_end = pool_start + malloc_pool[n].len;
986 
987 		if (buf_start > buf_end || pool_start > pool_end) {
988 			ret = true;	/* Wrapping buffers, shouldn't happen */
989 			goto out;
990 		}
991 
992 		if (buf_end > pool_start && buf_start < pool_end) {
993 			ret = true;
994 			goto out;
995 		}
996 	}
997 
998 out:
999 	malloc_unlock(exceptions);
1000 	return ret;
1001 }
1002