xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision a97bc4a084f1292c3a2cfd0c4593183b2f873e67)
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#define PROTOTYPES

/*
 *  BGET CONFIGURATION
 *  ==================
 */
/* #define BGET_ENABLE_OPTION */
#ifdef BGET_ENABLE_OPTION
#define TestProg    20000	/* Generate built-in test program
				   if defined.  The value specifies
				   how many buffer allocation attempts
				   the test program should make. */
#endif


#ifdef __LP64__
#define SizeQuant   16
#endif
#ifdef __ILP32__
#define SizeQuant   8
#endif
				/* Buffer allocation size quantum:
				   all buffers allocated are a
				   multiple of this size.  This
				   MUST be a power of two. */

#ifdef BGET_ENABLE_OPTION
#define BufDump     1		/* Define this symbol to enable the
				   bpoold() function which dumps the
				   buffers in a buffer pool. */

#define BufValid    1		/* Define this symbol to enable the
				   bpoolv() function for validating
				   a buffer pool. */

#define DumpData    1		/* Define this symbol to enable the
				   bufdump() function which allows
				   dumping the contents of an allocated
				   or free buffer. */

#define BufStats    1		/* Define this symbol to enable the
				   bstats() function which calculates
				   the total free space in the buffer
				   pool, the largest available
				   buffer, and the total space
				   currently allocated. */

#define FreeWipe    1		/* Wipe free buffers to a guaranteed
				   pattern of garbage to trip up
				   miscreants who attempt to use
				   pointers into released buffers. */

#define BestFit     1		/* Use a best fit algorithm when
				   searching for space for an
				   allocation request.  This uses
				   memory more efficiently, but
				   allocation will be much slower. */

#define BECtl       1		/* Define this symbol to enable the
				   bectl() function for automatic
				   pool space control.  */
#endif

#ifdef MEM_DEBUG
#undef NDEBUG
#define DumpData    1
#define BufValid    1
#define FreeWipe    1
#endif

#ifdef CFG_WITH_STATS
#define BufStats    1
#endif

#include <compiler.h>
#include <malloc.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <trace.h>
#include <util.h>

#if defined(__KERNEL__)
/* Compiling for TEE Core */
#include <kernel/asan.h>
#include <kernel/thread.h>
#include <kernel/spinlock.h>

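/*
 * Serialize all heap access: in the TEE core several threads share the
 * heap, so take the global malloc spinlock with exceptions masked and
 * restore the saved exception mask on unlock.
 */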
static uint32_t malloc_lock(void)
{
	return cpu_spin_lock_xsave(&__malloc_spinlock);
}

static void malloc_unlock(uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&__malloc_spinlock, exceptions);
}

static void tag_asan_free(void *buf, size_t len)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}

static void tag_asan_alloced(void *buf, size_t len)
{
	asan_tag_access(buf, (uint8_t *)buf + len);
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}

#else /*__KERNEL__*/
/* Compiling for TA */
static uint32_t malloc_lock(void)
{
	return 0;
}

static void malloc_unlock(uint32_t exceptions __unused)
{
}

static void tag_asan_free(void *buf __unused, size_t len __unused)
{
}

static void tag_asan_alloced(void *buf __unused, size_t len __unused)
{
}

static void *memset_unchecked(void *s, int c, size_t n)
{
	return memset(s, c, n);
}

#endif /*__KERNEL__*/

#include "bget.c"		/* this is ugly, but this is bget */

struct malloc_pool {
	void *buf;
	size_t len;
};

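/*
 * The poolset's free list starts out as an empty doubly linked list:
 * both links point back at the list head itself, so the free list
 * asserts on ql.flink/ql.blink hold even before the first pool is
 * added.
 */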
static struct bpoolset malloc_poolset = { .freelist = {
					  {0, 0},
					  {&malloc_poolset.freelist,
					   &malloc_poolset.freelist}}};
static struct malloc_pool *malloc_pool;
static size_t malloc_pool_len;

#ifdef BufStats

static struct malloc_stats mstats;

static void raw_malloc_return_hook(void *p, size_t requested_size,
				   struct bpoolset *poolset)
{
	if (poolset->totalloc > mstats.max_allocated)
		mstats.max_allocated = poolset->totalloc;

	if (!p) {
		mstats.num_alloc_fail++;
		if (requested_size > mstats.biggest_alloc_fail) {
			mstats.biggest_alloc_fail = requested_size;
			mstats.biggest_alloc_fail_used = poolset->totalloc;
		}
	}
}

void malloc_reset_stats(void)
{
	uint32_t exceptions = malloc_lock();

	mstats.max_allocated = 0;
	mstats.num_alloc_fail = 0;
	mstats.biggest_alloc_fail = 0;
	mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(exceptions);
}

void malloc_get_stats(struct malloc_stats *stats)
{
	uint32_t exceptions = malloc_lock();

	memcpy(stats, &mstats, sizeof(*stats));
	stats->allocated = malloc_poolset.totalloc;
	malloc_unlock(exceptions);
}

#else /* BufStats */

static void raw_malloc_return_hook(void *p __unused, size_t requested_size __unused,
				   struct bpoolset *poolset __unused)
{
}

#endif /* BufStats */

#ifdef BufValid
static void raw_malloc_validate_pools(void)
{
	size_t n;

	for (n = 0; n < malloc_pool_len; n++)
		bpoolv(malloc_pool[n].buf);
}
#else
static void raw_malloc_validate_pools(void)
{
}
#endif

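/*
 * Iterator state for walking every buffer (allocated and free) in
 * every registered pool: next_buf points at the next bget header to
 * visit and pool_idx selects the entry in malloc_pool[].
 */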
struct bpool_iterator {
	struct bfhead *next_buf;
	size_t pool_idx;
};

static void bpool_foreach_iterator_init(struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(malloc_pool[0].buf);
}

static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}

static bool bpool_foreach(struct bpool_iterator *iterator, void **buf)
{
	while (true) {
		size_t len;
		bool isfree;

		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
			if (isfree)
				continue;
			return true;
		}

		if ((iterator->pool_idx + 1) >= malloc_pool_len)
			return false;

		iterator->pool_idx++;
		iterator->next_buf = BFH(malloc_pool[iterator->pool_idx].buf);
	}
}

/* Convenience macro for looping over all allocated buffers */
#define BPOOL_FOREACH(iterator, bp) \
		for (bpool_foreach_iterator_init((iterator)); \
			bpool_foreach((iterator), (bp));)

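/*
 * The raw_*() functions reserve hdr_size bytes in front of and
 * ftr_size bytes behind the pl_size byte payload, all within a single
 * bget buffer:
 *
 *   [hdr_size][pl_size payload][ftr_size]
 *
 * With ENABLE_MDBG the header and footer carry the debug bookkeeping,
 * otherwise both sizes are 0.
 */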
static void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
			struct bpoolset *poolset)
{
	void *ptr = NULL;
	bufsize s;

	/*
	 * Make sure that malloc has correct alignment of returned buffers.
	 * The assumption is that uintptr_t will be as wide as the largest
	 * required alignment of any type.
	 */
	COMPILE_TIME_ASSERT(SizeQuant >= sizeof(uintptr_t));

	raw_malloc_validate_pools();

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bget(s, poolset);
out:
	raw_malloc_return_hook(ptr, pl_size, poolset);

	return ptr;
}

static void raw_free(void *ptr, struct bpoolset *poolset)
{
	raw_malloc_validate_pools();

	if (ptr)
		brel(ptr, poolset);
}

static void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
			size_t pl_size, struct bpoolset *poolset)
{
	void *ptr = NULL;
	bufsize s;

	raw_malloc_validate_pools();

	/* Compute total size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	ptr = bgetz(s, poolset);
out:
	raw_malloc_return_hook(ptr, pl_nmemb * pl_size, poolset);

	return ptr;
}

static void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
			 size_t pl_size, struct bpoolset *poolset)
{
	void *p = NULL;
	bufsize s;

	/* Compute total size */
	if (ADD_OVERFLOW(pl_size, hdr_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	raw_malloc_validate_pools();

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	p = bgetr(ptr, s, poolset);
out:
	raw_malloc_return_hook(p, pl_size, poolset);

	return p;
}

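/*
 * Turn the "size" bytes starting at bf into a free block sitting
 * immediately in front of the allocated block bn, and link it last
 * on the poolset free list.
 */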
static void create_free_block(struct bfhead *bf, bufsize size, struct bhead *bn,
			      struct bpoolset *poolset)
{
	assert(BH((char *)bf + size) == bn);
	assert(bn->bsize < 0); /* Next block should be allocated */
	/* Next block shouldn't already have free block in front */
	assert(bn->prevfree == 0);

	/* Create the free buf header */
	bf->bh.bsize = size;
	bf->bh.prevfree = 0;

	/* Update next block to point to the new free buf header */
	bn->prevfree = size;

	/* Insert the free buffer on the free list */
	assert(poolset->freelist.ql.blink->ql.flink == &poolset->freelist);
	assert(poolset->freelist.ql.flink->ql.blink == &poolset->freelist);
	bf->ql.flink = &poolset->freelist;
	bf->ql.blink = poolset->freelist.ql.blink;
	poolset->freelist.ql.blink = bf;
	bf->ql.blink->ql.flink = bf;
}

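/*
 * Release the range [orig_buf, new_buf) from the front of an allocated
 * buffer so that new_buf becomes the start of a smaller allocated
 * buffer. The released range is either merged into a preceding free
 * block or turned into a new free block. Used by raw_memalign() to
 * discard the unaligned head of an over-sized allocation.
 */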
static void brel_before(char *orig_buf, char *new_buf, struct bpoolset *poolset)
{
	struct bfhead *bf;
	struct bhead *b;
	bufsize size;
	bufsize orig_size;

	assert(orig_buf < new_buf);
	/* There has to be room for the freebuf header */
	size = (bufsize)(new_buf - orig_buf);
	assert(size >= (SizeQ + sizeof(struct bhead)));

	/* Point to head of original buffer */
	bf = BFH(orig_buf - sizeof(struct bhead));
	orig_size = -bf->bh.bsize; /* negative since it's an allocated buffer */

	/* Point to head of the becoming new allocated buffer */
	b = BH(new_buf - sizeof(struct bhead));

	if (bf->bh.prevfree != 0) {
		/* Previous buffer is free, consolidate with that buffer */
		struct bfhead *bfp;

		/* Update the previous free buffer */
		bfp = BFH((char *)bf - bf->bh.prevfree);
		assert(bfp->bh.bsize == bf->bh.prevfree);
		bfp->bh.bsize += size;

		/* Make a new allocated buffer header */
		b->prevfree = bfp->bh.bsize;
		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);
	} else {
		/*
		 * Previous buffer is allocated, create a new buffer and
		 * insert on the free list.
		 */

		/* Make it negative since it's an allocated buffer */
		b->bsize = -(orig_size - size);

		create_free_block(bf, size, b, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= size;
	assert(poolset->totalloc >= 0);
#endif
}

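/*
 * Shrink the allocated buffer at buf to "size" payload bytes and
 * release the tail, either by merging it into a following free block
 * or by creating a new free block. The tail is only released if it is
 * big enough (at least SizeQ + sizeof(struct bhead)) to hold a free
 * block header.
 */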
static void brel_after(char *buf, bufsize size, struct bpoolset *poolset)
{
	struct bhead *b = BH(buf - sizeof(struct bhead));
	struct bhead *bn;
	bufsize new_size = size;
	bufsize free_size;

	/* Select the size in the same way as in bget() */
	if (new_size < SizeQ)
		new_size = SizeQ;
#ifdef SizeQuant
#if SizeQuant > 1
	new_size = (new_size + (SizeQuant - 1)) & (~(SizeQuant - 1));
#endif
#endif
	new_size += sizeof(struct bhead);
	assert(new_size <= -b->bsize);

	/*
	 * Check if there's enough space at the end of the buffer to be
	 * able to free anything.
	 */
	free_size = -b->bsize - new_size;
	if (free_size < SizeQ + sizeof(struct bhead))
		return;

	bn = BH((char *)b - b->bsize);
	/*
	 * Set the new size of the buffer.
	 */
	b->bsize = -new_size;
	if (bn->bsize > 0) {
		/* Next buffer is free, consolidate with that buffer */
		struct bfhead *bfn = BFH(bn);
		struct bfhead *nbf = BFH((char *)b + new_size);
		struct bhead *bnn = BH((char *)bn + bn->bsize);

		assert(bfn->bh.prevfree == 0);
		assert(bnn->prevfree == bfn->bh.bsize);

		/* Construct the new free header */
		nbf->bh.prevfree = 0;
		nbf->bh.bsize = bfn->bh.bsize + free_size;

		/* Update the buffer after this to point to this header */
		bnn->prevfree += free_size;

		/*
		 * Unlink the previous free buffer and link the new free
		 * buffer.
		 */
		assert(bfn->ql.blink->ql.flink == bfn);
		assert(bfn->ql.flink->ql.blink == bfn);

		/* Assign blink and flink from old free buffer */
		nbf->ql.blink = bfn->ql.blink;
		nbf->ql.flink = bfn->ql.flink;

		/* Replace the old free buffer with the new one */
		nbf->ql.blink->ql.flink = nbf;
		nbf->ql.flink->ql.blink = nbf;
	} else {
		/* New buffer is allocated, create a new free buffer */
		create_free_block(BFH((char *)b + new_size), free_size, bn, poolset);
	}

#ifdef BufStats
	poolset->totalloc -= free_size;
	assert(poolset->totalloc >= 0);
#endif
}

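/*
 * Aligned allocation on top of bget(): over-allocate by alignment plus
 * the minimum free block size, then trim both ends. A sketch of the
 * arithmetic, assuming __LP64__ (SizeQuant 16) and a hypothetical
 * bget() result: with hdr_size 0, alignment 64 and b = 0x1010,
 * p = (b + 64) & ~63 = 0x1040. The 0x30 bytes in front are enough to
 * hold a free block header, so brel_before() releases them and the
 * aligned buffer starts at p; brel_after() then returns the unused
 * tail to the pool.
 */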
static void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
			  size_t size, struct bpoolset *poolset)
{
	size_t s;
	uintptr_t b;

	raw_malloc_validate_pools();

	if (!IS_POWER_OF_TWO(alignment))
		return NULL;

	/*
	 * Normal malloc with headers always returns something SizeQuant
	 * aligned.
	 */
	if (alignment <= SizeQuant)
		return raw_malloc(hdr_size, ftr_size, size, poolset);

	s = hdr_size + ftr_size + alignment + size +
	    SizeQ + sizeof(struct bhead);

	/* Check wrapping */
	if (s < alignment || s < size)
		return NULL;

	b = (uintptr_t)bget(s, poolset);
	if (!b)
		goto out;

	if ((b + hdr_size) & (alignment - 1)) {
		/*
		 * Returned buffer is not aligned as requested if the
		 * hdr_size is added. Find an offset into the buffer
		 * that is far enough in to the buffer to be able to free
		 * what's in front.
		 */
		uintptr_t p;

		/*
		 * Find the point where the buffer including supplied
		 * header size should start.
		 */
		p = b + hdr_size + alignment;
		p &= ~(alignment - 1);
		p -= hdr_size;
		if ((p - b) < (SizeQ + sizeof(struct bhead)))
			p += alignment;
		assert((p + hdr_size + ftr_size + size) <= (b + s));

		/* Free the front part of the buffer */
		brel_before((void *)b, (void *)p, poolset);

		/* Set the new start of the buffer */
		b = p;
	}

	/*
	 * Since b is now aligned, release what we don't need at the end of
	 * the buffer.
	 */
	brel_after((void *)b, hdr_size + ftr_size + size, poolset);
out:
	raw_malloc_return_hook((void *)b, size, poolset);

	return (void *)b;
}

/* Most of the stuff in this function is copied from bgetr() in bget.c */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead);
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}

#ifdef ENABLE_MDBG

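/*
 * With ENABLE_MDBG each allocation is wrapped as
 *
 *   [struct mdbg_hdr][payload][pad to 32-bit][footer magic]
 *
 * The header records where the buffer was allocated and carries a
 * magic word; a 32-bit footer magic follows the payload so that both
 * underruns and overruns can be caught by assert_header().
 */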
struct mdbg_hdr {
	const char *fname;
	uint16_t line;
	uint32_t pl_size;
	uint32_t magic;
#if defined(ARM64)
	uint64_t pad;
#endif
};

#define MDBG_HEADER_MAGIC	0xadadadad
#define MDBG_FOOTER_MAGIC	0xecececec

static size_t mdbg_get_ftr_size(size_t pl_size)
{
	size_t ftr_pad = ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size;

	return ftr_pad + sizeof(uint32_t);
}

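/*
 * Example: for pl_size = 5, mdbg_get_ftr_size() returns
 * ROUNDUP(5, 4) - 5 + 4 = 7, and mdbg_get_footer() below places the
 * 32-bit magic in the last four of those bytes, directly after the
 * padded payload.
 */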
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return footer;
}

static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}

void *mdbg_malloc(const char *fname, int lineno, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	/*
	 * Check struct mdbg_hdr doesn't get bad alignment.
	 * This is required by C standard: the buffer returned from
	 * malloc() should be aligned with a fundamental alignment.
	 * For ARM32, the required alignment is 8. For ARM64, it is 16.
	 */
	COMPILE_TIME_ASSERT(
		(sizeof(struct mdbg_hdr) % (__alignof(uintptr_t) * 2)) == 0);

	hdr = raw_malloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(size), size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}

	malloc_unlock(exceptions);
	return hdr;
}

static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}

static void mdbg_free(void *ptr)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		raw_free(hdr, &malloc_poolset);
	}
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	mdbg_free(ptr);
	malloc_unlock(exceptions);
}

void *mdbg_calloc(const char *fname, int lineno, size_t nmemb, size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_calloc(sizeof(struct mdbg_hdr),
			  mdbg_get_ftr_size(nmemb * size), nmemb, size,
			  &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
	}
	malloc_unlock(exceptions);
	return hdr;
}

static void *mdbg_realloc_unlocked(const char *fname, int lineno,
			    void *ptr, size_t size)
{
	struct mdbg_hdr *hdr = ptr;

	if (hdr) {
		hdr--;
		assert_header(hdr);
	}
	hdr = raw_realloc(hdr, sizeof(struct mdbg_hdr),
			   mdbg_get_ftr_size(size), size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	return hdr;
}

void *mdbg_realloc(const char *fname, int lineno, void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = mdbg_realloc_unlocked(fname, lineno, ptr, size);
	malloc_unlock(exceptions);
	return p;
}

#define realloc_unlocked(ptr, size) \
		mdbg_realloc_unlocked(__FILE__, __LINE__, (ptr), (size))

void *mdbg_memalign(const char *fname, int lineno, size_t alignment,
		size_t size)
{
	struct mdbg_hdr *hdr;
	uint32_t exceptions = malloc_lock();

	hdr = raw_memalign(sizeof(struct mdbg_hdr), mdbg_get_ftr_size(size),
			   alignment, size, &malloc_poolset);
	if (hdr) {
		mdbg_update_hdr(hdr, fname, lineno, size);
		hdr++;
	}
	malloc_unlock(exceptions);
	return hdr;
}

static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	struct mdbg_hdr *hdr = raw_buf;

	assert(bget_buf_size(hdr) >= hdr->pl_size);
	*size = hdr->pl_size;
	return hdr + 1;
}

void mdbg_check(int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	BPOOL_FOREACH(&itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %u bytes %s:%d\n",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(exceptions);
}

#else

void *malloc(size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_malloc(0, 0, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}

void free(void *ptr)
{
	uint32_t exceptions = malloc_lock();

	raw_free(ptr, &malloc_poolset);
	malloc_unlock(exceptions);
}

void *calloc(size_t nmemb, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_calloc(0, 0, nmemb, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}

static void *realloc_unlocked(void *ptr, size_t size)
{
	return raw_realloc(ptr, 0, 0, size, &malloc_poolset);
}

void *realloc(void *ptr, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = realloc_unlocked(ptr, size);
	malloc_unlock(exceptions);
	return p;
}

void *memalign(size_t alignment, size_t size)
{
	void *p;
	uint32_t exceptions = malloc_lock();

	p = raw_memalign(0, 0, alignment, size, &malloc_poolset);
	malloc_unlock(exceptions);
	return p;
}

static void *get_payload_start_size(void *ptr, size_t *size)
{
	*size = bget_buf_size(ptr);
	return ptr;
}

#endif

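/*
 * Register a new chunk of memory with the heap: the range is first
 * trimmed to SizeQuant alignment, handed to bget's bpool() and then
 * recorded in the malloc_pool[] array so the pools can be validated
 * and iterated later.
 */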
void malloc_add_pool(void *buf, size_t len)
{
	void *p;
	size_t l;
	uint32_t exceptions;
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	const size_t min_len = ((sizeof(struct malloc_pool) + (SizeQuant - 1)) &
					(~(SizeQuant - 1))) +
				sizeof(struct bhead) * 2;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);
	assert(start < end);

	if ((end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	exceptions = malloc_lock();

	tag_asan_free((void *)start, end - start);
	bpool((void *)start, end - start, &malloc_poolset);
	l = malloc_pool_len + 1;
	p = realloc_unlocked(malloc_pool, sizeof(struct malloc_pool) * l);
	assert(p);
	malloc_pool = p;
	malloc_pool[malloc_pool_len].buf = (void *)start;
	malloc_pool[malloc_pool_len].len = end - start;
#ifdef BufStats
	mstats.size += malloc_pool[malloc_pool_len].len;
#endif
	malloc_pool_len = l;
	malloc_unlock(exceptions);
}

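/*
 * Return true only if [buf, buf + len) lies entirely inside the
 * payload of a single allocated buffer on the heap.
 */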
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	struct bpool_iterator itr;
	void *b;
	uint8_t *start_buf = buf;
	uint8_t *end_buf = start_buf + len;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	/* Check for wrapping */
	if (start_buf > end_buf)
		goto out;

	BPOOL_FOREACH(&itr, &b) {
		uint8_t *start_b;
		uint8_t *end_b;
		size_t s;

		start_b = get_payload_start_size(b, &s);
		end_b = start_b + s;

		if (start_buf >= start_b && end_buf <= end_b) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);

	return ret;
}

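/*
 * Return true if [buf, buf + len) intersects any registered pool.
 * Two ranges [a, b) and [c, d) overlap exactly when b > c && a < d,
 * which is the test applied to each pool below.
 */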
bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	uintptr_t buf_start = (uintptr_t) buf;
	uintptr_t buf_end = buf_start + len;
	size_t n;
	bool ret = false;
	uint32_t exceptions = malloc_lock();

	raw_malloc_validate_pools();

	for (n = 0; n < malloc_pool_len; n++) {
		uintptr_t pool_start = (uintptr_t)malloc_pool[n].buf;
		uintptr_t pool_end = pool_start + malloc_pool[n].len;

		if (buf_start > buf_end || pool_start > pool_end) {
			ret = true;	/* Wrapping buffers, shouldn't happen */
			goto out;
		}

		if (buf_end > pool_start && buf_start < pool_end) {
			ret = true;
			goto out;
		}
	}

out:
	malloc_unlock(exceptions);
	return ret;
}
979