xref: /optee_os/lib/libutils/isoc/bget_malloc.c (revision b8a0c52c847baf133e08f19f69759eb8a5de1a2c)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright (c) 2014, STMicroelectronics International N.V.
4  * Copyright (c) 2015-2025, Linaro Limited.
5  */
6 
7 #define PROTOTYPES
8 
9 /*
10  *  BGET CONFIGURATION
11  *  ==================
12  */
13 /* #define BGET_ENABLE_ALL_OPTIONS */
14 #ifdef BGET_ENABLE_OPTION
15 #define TestProg    20000	/* Generate built-in test program
16 				   if defined.  The value specifies
17 				   how many buffer allocation attempts
18 				   the test program should make. */
19 #endif
20 
21 
22 #ifdef __LP64__
23 #define SizeQuant   16
24 #endif
25 #ifdef __ILP32__
26 #define SizeQuant   8
27 #endif
28 				/* Buffer allocation size quantum:
29 				   all buffers allocated are a
30 				   multiple of this size.  This
31 				   MUST be a power of two. */
32 
33 #ifdef BGET_ENABLE_OPTION
34 #define BufDump     1		/* Define this symbol to enable the
35 				   bpoold() function which dumps the
36 				   buffers in a buffer pool. */
37 
38 #define BufValid    1		/* Define this symbol to enable the
39 				   bpoolv() function for validating
40 				   a buffer pool. */
41 
42 #define DumpData    1		/* Define this symbol to enable the
43 				   bufdump() function which allows
44 				   dumping the contents of an allocated
45 				   or free buffer. */
46 
47 #define BufStats    1		/* Define this symbol to enable the
48 				   bstats() function which calculates
49 				   the total free space in the buffer
50 				   pool, the largest available
51 				   buffer, and the total space
52 				   currently allocated. */
53 
54 #define FreeWipe    1		/* Wipe free buffers to a guaranteed
55 				   pattern of garbage to trip up
56 				   miscreants who attempt to use
57 				   pointers into released buffers. */
58 
59 #define BestFit     1		/* Use a best fit algorithm when
60 				   searching for space for an
61 				   allocation request.  This uses
62 				   memory more efficiently, but
63 				   allocation will be much slower. */
64 
65 #define BECtl       1		/* Define this symbol to enable the
66 				   bectl() function for automatic
67 				   pool space control.  */
68 #endif
69 
70 #ifdef MEM_DEBUG
71 #undef NDEBUG
72 #define DumpData    1
73 #define BufValid    1
74 #define FreeWipe    1
75 #endif
76 
77 #ifdef CFG_WITH_STATS
78 #define BufStats    1
79 #endif
80 
81 #include <asan.h>
82 #include <compiler.h>
83 #include <config.h>
84 #include <malloc.h>
85 #include <memtag.h>
86 #include <pta_stats.h>
87 #include <stdbool.h>
88 #include <stdint.h>
89 #include <stdlib_ext.h>
90 #include <stdlib.h>
91 #include <string.h>
92 #include <trace.h>
93 #include <util.h>
94 
95 #if defined(__KERNEL__)
96 /* Compiling for TEE Core */
97 #include <kernel/spinlock.h>
98 #include <kernel/unwind.h>
99 #endif
100 #if defined(__KERNEL__)
101 # include <kernel/panic.h>
102 # define bget_panic() panic()
103 #elif defined(__LDELF__)
104 # include <ldelf_syscalls.h>
105 # define bget_panic() _ldelf_panic(2)
106 #else
107 # include <utee_syscalls.h>
108 # define bget_panic() _utee_panic(TEE_ERROR_GENERIC)
109 #endif
110 
/*
 * memset() variant exempted from ASan instrumentation, for use on heap
 * memory whose shadow state is deliberately poisoned (e.g. freed buffers).
 */
static void *memset_unchecked(void *s, int c, size_t n)
{
	return asan_memset_unchecked(s, c, n);
}
115 
/*
 * memcpy() variant exempted from ASan instrumentation, used when copying
 * payloads during realloc where the old buffer may already be untagged.
 */
static __maybe_unused void *memcpy_unchecked(void *dst, const void *src,
					     size_t n)
{
	return asan_memcpy_unchecked(dst, src, n);
}
121 
122 #include "bget.c"		/* this is ugly, but this is bget */
123 
124 struct malloc_pool {
125 	void *buf;
126 	size_t len;
127 };
128 
129 struct malloc_ctx {
130 	struct bpoolset poolset;
131 	struct malloc_pool *pool;
132 	size_t pool_len;
133 #ifdef BufStats
134 	struct pta_stats_alloc mstats;
135 #endif
136 #ifdef __KERNEL__
137 	unsigned int spinlock;
138 #endif
139 };
140 
141 #ifdef __KERNEL__
142 
/*
 * Serialize heap access in TEE core: take the per-context spinlock and
 * mask native interrupts, returning the previous exception state.
 */
static uint32_t malloc_lock(struct malloc_ctx *ctx)
{
	return cpu_spin_lock_xsave(&ctx->spinlock);
}
147 
/* Release the heap spinlock and restore the saved exception state. */
static void malloc_unlock(struct malloc_ctx *ctx, uint32_t exceptions)
{
	cpu_spin_unlock_xrestore(&ctx->spinlock, exceptions);
}
152 
153 #else  /* __KERNEL__ */
154 
/* User mode (TA/ldelf) is single threaded: locking is a no-op. */
static uint32_t malloc_lock(struct malloc_ctx *ctx __unused)
{
	return 0;
}
159 
/* No-op counterpart of the user-mode malloc_lock() stub. */
static void malloc_unlock(struct malloc_ctx *ctx __unused,
			  uint32_t exceptions __unused)
{
}
164 
165 #endif	/* __KERNEL__ */
166 
167 #define DEFINE_CTX(name) struct malloc_ctx name =		\
168 	{ .poolset = { .freelist = { {0, 0},			\
169 			{&name.poolset.freelist,		\
170 			 &name.poolset.freelist}}}}
171 
172 static DEFINE_CTX(malloc_ctx);
173 
174 #ifdef CFG_NS_VIRTUALIZATION
175 static __nex_data DEFINE_CTX(nex_malloc_ctx);
176 #endif
177 
/*
 * Report an out-of-memory event. Only active in TEE core when
 * CFG_CORE_DUMP_OOM is enabled; also dumps the kernel stack so the
 * failing allocation path can be identified. No-op otherwise.
 */
static void print_oom(size_t req_size __maybe_unused, void *ctx __maybe_unused)
{
#if defined(__KERNEL__) && defined(CFG_CORE_DUMP_OOM)
	EMSG("Memory allocation failed: size %zu context %p", req_size, ctx);
	print_kernel_stack();
#endif
}
185 
/* Most of the stuff in this function is copied from bgetr() in bget.c */
/*
 * Return the usable size of an allocated bget buffer @buf (payload plus
 * any part of the header quantum beyond struct bhead). Allocated buffers
 * store a negative bsize in their header, hence the negation.
 */
static __maybe_unused bufsize bget_buf_size(void *buf)
{
	bufsize osize;          /* Old size of buffer */
	struct bhead *b;

	b = BH(((char *)buf) - sizeof(struct bhead));
	osize = -b->bsize;
#ifdef BECtl
	if (osize == 0) {
		/*  Buffer acquired directly through acqfcn. */
		struct bdhead *bd;

		bd = BDH(((char *)buf) - sizeof(struct bdhead));
		osize = bd->tsize - sizeof(struct bdhead) - bd->offs;
	} else
#endif
		osize -= sizeof(struct bhead);
	assert(osize > 0);
	return osize;
}
207 
/*
 * Tag a freshly allocated buffer for MTE or ASan, as configured.
 *
 * @buf            start of the allocation (header included), may be NULL
 * @hdr_size       size of the caller-private header preceding the payload
 * @requested_size payload size the caller asked for
 *
 * Returns the (possibly tag-carrying) pointer to use, or NULL if @buf is
 * NULL. With MTE, whole granules are tagged with a random tag; with ASan,
 * only the requested byte range is unpoisoned.
 */
static void *maybe_tag_buf(uint8_t *buf, size_t hdr_size, size_t requested_size)
{
	if (!buf)
		return NULL;

	COMPILE_TIME_ASSERT(MEMTAG_GRANULE_SIZE <= SizeQuant);

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		/*
		 * MEMTAG needs actual allocated size (>= SizeQuant),
		 * unlike ASan which tags only requested bytes. For
		 * malloc(0), bget allocates SizeQuant, so we pass
		 * MAX(requested_size, SizeQuant) to ensure correct tagging.
		 */
		requested_size = MAX(requested_size, SizeQuant);

		sz = ROUNDUP(requested_size, MEMTAG_GRANULE_SIZE);

		/*
		 * Allocated buffer can be larger than requested when
		 * allocating with memalign(), but we should never tag more
		 * than allocated.
		 */
		assert(bget_buf_size(buf) >= sz + hdr_size);
		return memtag_set_random_tags(buf, sz + hdr_size);
	}

	asan_tag_access(buf, buf + hdr_size + requested_size);

	return buf;
}
241 
/*
 * Undo the tagging applied by maybe_tag_buf() before a buffer is released
 * back to bget. With MTE, resets the granule tags to 0 (and asserts the
 * incoming tag to catch double free early); with ASan, poisons the whole
 * buffer. Returns the untagged pointer, or NULL if @buf is NULL.
 */
static void *maybe_untag_buf(void *buf)
{
	if (!buf)
		return NULL;

	if (MEMTAG_IS_ENABLED) {
		size_t sz = 0;

		memtag_assert_tag(buf); /* Trying to catch double free early */
		sz = bget_buf_size(memtag_strip_tag(buf));
		return memtag_set_tags(buf, sz, 0);
	}

	asan_tag_heap_free(buf, (uint8_t *)buf + bget_buf_size(buf));

	return buf;
}
259 
strip_tag(void * buf)260 static void *strip_tag(void *buf)
261 {
262 	if (MEMTAG_IS_ENABLED)
263 		return memtag_strip_tag(buf);
264 	return buf;
265 }
266 
/* Poison [buf, buf + len) in the ASan shadow; no-op when ASan is off. */
static void tag_asan_free(void *buf __maybe_unused, size_t len __maybe_unused)
{
	asan_tag_heap_free(buf, (uint8_t *)buf + len);
}
271 
272 #ifdef BufStats
273 
/*
 * Common tail of every raw allocation when BufStats is enabled: update
 * high-water mark and failure statistics, report OOM, then tag the
 * buffer for MTE/ASan. Returns the pointer to hand to the caller.
 */
static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (ctx->poolset.totalloc > ctx->mstats.max_allocated)
		ctx->mstats.max_allocated = ctx->poolset.totalloc;

	if (!p) {
		ctx->mstats.num_alloc_fail++;
		print_oom(requested_size, ctx);
		/* Remember the largest failed request and the heap usage then */
		if (requested_size > ctx->mstats.biggest_alloc_fail) {
			ctx->mstats.biggest_alloc_fail = requested_size;
			ctx->mstats.biggest_alloc_fail_used =
				ctx->poolset.totalloc;
		}
	}

	return maybe_tag_buf(p, hdr_size, requested_size);
}
293 
/* Reset the failure/high-water statistics of @ctx under the heap lock. */
static void gen_malloc_reset_stats(struct malloc_ctx *ctx)
{
	uint32_t exceptions = malloc_lock(ctx);

	ctx->mstats.max_allocated = 0;
	ctx->mstats.num_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail = 0;
	ctx->mstats.biggest_alloc_fail_used = 0;
	malloc_unlock(ctx, exceptions);
}
304 
/* Reset allocation statistics of the default heap. */
void malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&malloc_ctx);
}
309 
/* Snapshot the statistics of @ctx into @stats under the heap lock. */
static void gen_malloc_get_stats(struct malloc_ctx *ctx,
				 struct pta_stats_alloc *stats)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_get_stats(ctx, stats);
	malloc_unlock(ctx, exceptions);
}
318 
/* Snapshot allocation statistics of the default heap into @stats. */
void malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&malloc_ctx, stats);
}
323 
324 #else /* BufStats */
325 
/*
 * Common tail of every raw allocation when BufStats is disabled: only
 * OOM reporting and MTE/ASan tagging remain. Returns the pointer to
 * hand to the caller.
 *
 * Fix: removed the stray space before the closing parenthesis of the
 * parameter list ("ctx )"), matching the BufStats variant above.
 */
static void *raw_malloc_return_hook(void *p, size_t hdr_size,
				    size_t requested_size,
				    struct malloc_ctx *ctx)
{
	if (!p)
		print_oom(requested_size, ctx);

	return maybe_tag_buf(p, hdr_size, requested_size);
}
335 
336 #endif /* BufStats */
337 
#ifdef BufValid
/* Run bget's pool consistency check on every registered pool (debug). */
static void raw_malloc_validate_pools(struct malloc_ctx *ctx)
{
	size_t n;

	for (n = 0; n < ctx->pool_len; n++)
		bpoolv(ctx->pool[n].buf);
}
#else
/* Pool validation compiled out when BufValid is not defined. */
static void raw_malloc_validate_pools(struct malloc_ctx *ctx __unused)
{
}
#endif
351 
/* Iteration state for walking all buffers in all pools of a context */
struct bpool_iterator {
	struct bfhead *next_buf;	/* Next buffer header to visit */
	size_t pool_idx;		/* Index into ctx->pool[] */
};
356 
/*
 * Position @iterator at the first buffer of the first pool of @ctx.
 * NOTE(review): assumes at least one pool is registered — confirm callers
 * only iterate after malloc_add_pool().
 */
static void bpool_foreach_iterator_init(struct malloc_ctx *ctx,
					struct bpool_iterator *iterator)
{
	iterator->pool_idx = 0;
	iterator->next_buf = BFH(ctx->pool[0].buf);
}
363 
/*
 * Step to the next buffer within the current pool.
 *
 * On success returns true and sets @buf/@len to the buffer payload and
 * @isfree to whether the buffer is on the free list. Returns false when
 * the pool's end sentinel (ESent) is reached. Negative bsize marks an
 * allocated buffer, positive a free one (bget convention).
 */
static bool bpool_foreach_pool(struct bpool_iterator *iterator, void **buf,
		size_t *len, bool *isfree)
{
	struct bfhead *b = iterator->next_buf;
	bufsize bs = b->bh.bsize;

	if (bs == ESent)
		return false;

	if (bs < 0) {
		/* Allocated buffer */
		bs = -bs;

		*isfree = false;
	} else {
		/* Free Buffer */
		*isfree = true;

		/* Assert that the free list links are intact */
		assert(b->ql.blink->ql.flink == b);
		assert(b->ql.flink->ql.blink == b);
	}

	*buf = (uint8_t *)b + sizeof(struct bhead);
	*len = bs - sizeof(struct bhead);

	iterator->next_buf = BFH((uint8_t *)b + bs);
	return true;
}
393 
bpool_foreach(struct malloc_ctx * ctx,struct bpool_iterator * iterator,void ** buf)394 static bool bpool_foreach(struct malloc_ctx *ctx,
395 			  struct bpool_iterator *iterator, void **buf)
396 {
397 	while (true) {
398 		size_t len;
399 		bool isfree;
400 
401 		if (bpool_foreach_pool(iterator, buf, &len, &isfree)) {
402 			if (isfree)
403 				continue;
404 			return true;
405 		}
406 
407 		if ((iterator->pool_idx + 1) >= ctx->pool_len)
408 			return false;
409 
410 		iterator->pool_idx++;
411 		iterator->next_buf = BFH(ctx->pool[iterator->pool_idx].buf);
412 	}
413 }
414 
415 /* Convenience macro for looping over all allocated buffers */
416 #define BPOOL_FOREACH(ctx, iterator, bp)		      \
417 	for (bpool_foreach_iterator_init((ctx),(iterator));   \
418 	     bpool_foreach((ctx),(iterator), (bp));)
419 
/*
 * Core allocation primitive: malloc/calloc/realloc/memalign in one.
 *
 * @flags     MAF_ZERO_INIT to zero new memory, MAF_NEX handled by callers
 * @ptr       non-NULL for realloc semantics (old buffer copied and freed)
 * @hdr_size  caller-private header placed before the payload
 * @ftr_size  caller-private footer accounted after the payload
 * @alignment payload alignment, must be a non-zero power of two
 * @pl_nmemb/@pl_size  payload element count and size (overflow checked)
 *
 * Returns a tagged pointer to hdr_size bytes followed by the payload, or
 * NULL on failure (the old buffer is left intact in the realloc case).
 * Caller must hold the context lock.
 */
void *raw_malloc_flags(uint32_t flags, void *ptr, size_t hdr_size,
		       size_t ftr_size, size_t alignment, size_t pl_nmemb,
		       size_t pl_size, struct malloc_ctx *ctx)
{
	void *p = NULL;
	bufsize s = 0;

	raw_malloc_validate_pools(ctx);

	if (!alignment || !IS_POWER_OF_TWO(alignment))
		return NULL;

	/* Compute total size, excluding hdr_size */
	if (MUL_OVERFLOW(pl_nmemb, pl_size, &s))
		goto out;
	if (ADD_OVERFLOW(s, ftr_size, &s))
		goto out;

	/* BGET doesn't like 0 sized allocations */
	if (!s)
		s++;

	/* For realloc (ptr set) zeroing of the tail is done manually below */
	if ((flags & MAF_ZERO_INIT) && !ptr)
		p = bgetz(alignment, hdr_size, s, &ctx->poolset);
	else
		p = bget(alignment, hdr_size, s, &ctx->poolset);

	if (p && ptr) {
		/* Realloc: copy old contents, then release the old buffer */
		void *old_ptr = maybe_untag_buf(ptr);
		bufsize old_sz = bget_buf_size(old_ptr);
		bufsize new_sz = s + hdr_size;

		if (old_sz < new_sz) {
			memcpy_unchecked(p, old_ptr, old_sz);
			if (flags & MAF_ZERO_INIT)
				memset_unchecked((uint8_t *)p + old_sz, 0,
						 new_sz - old_sz);
		} else {
			memcpy_unchecked(p, old_ptr, new_sz);
		}

		brel(old_ptr, &ctx->poolset, false /*!wipe*/);
	}
out:
	/*
	 * pl_nmemb * pl_size may wrap here if MUL_OVERFLOW triggered above,
	 * but then p is NULL and the product is only used for reporting.
	 */
	return raw_malloc_return_hook(p, hdr_size, pl_nmemb * pl_size, ctx);
}
466 
/* Aligned allocation of pl_size bytes with caller header/footer space. */
void *raw_memalign(size_t hdr_size, size_t ftr_size, size_t alignment,
		   size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, alignment,
				1, pl_size, ctx);
}
473 
/* Plain allocation (alignment 1) with caller header/footer space. */
void *raw_malloc(size_t hdr_size, size_t ftr_size, size_t pl_size,
		 struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, NULL, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}
480 
/*
 * Release @ptr back to the pool, untagging it first. NULL is a no-op.
 * When @wipe is set the buffer contents are cleared by brel().
 */
void raw_free(void *ptr, struct malloc_ctx *ctx, bool wipe)
{
	raw_malloc_validate_pools(ctx);

	if (ptr)
		brel(maybe_untag_buf(ptr), &ctx->poolset, wipe);
}
488 
/* Zero-initialized array allocation; nmemb * size is overflow-checked. */
void *raw_calloc(size_t hdr_size, size_t ftr_size, size_t pl_nmemb,
		 size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_ZERO_INIT, NULL, hdr_size, ftr_size, 1,
				pl_nmemb, pl_size, ctx);
}
495 
/* Reallocation: old buffer at @ptr is copied and freed on success. */
void *raw_realloc(void *ptr, size_t hdr_size, size_t ftr_size,
		  size_t pl_size, struct malloc_ctx *ctx)
{
	return raw_malloc_flags(MAF_NULL, ptr, hdr_size, ftr_size, 1, 1,
				pl_size, ctx);
}
502 
/*
 * Debug header placed before each allocation when malloc debugging
 * (ENABLE_MDBG) is on; a 32-bit footer magic follows the payload to
 * catch overruns. Size must be a multiple of BGET_HDR_QUANTUM (checked
 * by a static_assert in mem_alloc_unlocked()).
 */
struct mdbg_hdr {
	const char *fname;	/* Source file of the allocation site */
	uint16_t line;		/* Source line of the allocation site */
#ifdef __LP64__
	uint64_t pad;		/* Padding to keep the quantum multiple */
#endif
	uint32_t pl_size;	/* Payload size requested by the caller */
	uint32_t magic;		/* MDBG_HEADER_MAGIC while allocated */
};
512 
513 #define MDBG_HEADER_MAGIC	0xadadadad
514 #define MDBG_FOOTER_MAGIC	0xecececec
515 
/*
 * Footer size for a payload of @pl_size bytes: padding up to the next
 * uint32_t boundary plus the 32-bit footer magic itself.
 */
static size_t mdbg_get_ftr_size(size_t pl_size)
{
	return ROUNDUP(pl_size, sizeof(uint32_t)) - pl_size + sizeof(uint32_t);
}
522 
/*
 * Locate the 32-bit footer magic of the allocation described by @hdr:
 * the last uint32_t of the footer area that follows the payload.
 */
static uint32_t *mdbg_get_footer(struct mdbg_hdr *hdr)
{
	uint32_t *footer;

	footer = (uint32_t *)((uint8_t *)(hdr + 1) + hdr->pl_size +
			      mdbg_get_ftr_size(hdr->pl_size));
	footer--;
	return strip_tag(footer);
}
532 
/* Fill in the debug header and write the footer magic for an allocation. */
static void mdbg_update_hdr(struct mdbg_hdr *hdr, const char *fname,
		int lineno, size_t pl_size)
{
	uint32_t *footer;

	hdr->fname = fname;
	hdr->line = lineno;
	hdr->pl_size = pl_size;
	hdr->magic = MDBG_HEADER_MAGIC;

	footer = mdbg_get_footer(hdr);
	*footer = MDBG_FOOTER_MAGIC;
}
546 
/* Assert that header and footer magics are intact (overrun/corruption). */
static void assert_header(struct mdbg_hdr *hdr __maybe_unused)
{
	assert(hdr->magic == MDBG_HEADER_MAGIC);
	assert(*mdbg_get_footer(hdr) == MDBG_FOOTER_MAGIC);
}
552 
/*
 * Allocation front end, lock already held by the caller.
 *
 * When ENABLE_MDBG is on, wraps the payload in a struct mdbg_hdr plus
 * footer magic and records @fname/@lineno; otherwise @fname/@lineno are
 * unused. @ptr non-NULL selects realloc semantics. Returns a pointer to
 * the payload (past any debug header) or NULL.
 */
static void *mem_alloc_unlocked(uint32_t flags, void *ptr, size_t alignment,
				size_t nmemb, size_t size, const char *fname,
				int lineno, struct malloc_ctx *ctx)
{
	struct mdbg_hdr *hdr = NULL;
	size_t ftr_size = 0;
	size_t hdr_size = 0;

	/*
	 * Check struct mdbg_hdr works with BGET_HDR_QUANTUM.
	 */
	static_assert((sizeof(struct mdbg_hdr) % BGET_HDR_QUANTUM) == 0);

	if (IS_ENABLED2(ENABLE_MDBG)) {
		if (ptr) {
			/* Back up from payload to the debug header */
			hdr = ptr;
			hdr--;
			assert_header(hdr);
		}
		ftr_size = mdbg_get_ftr_size(nmemb * size);
		hdr_size = sizeof(struct mdbg_hdr);
		ptr = hdr;
	}

	ptr = raw_malloc_flags(flags, ptr, hdr_size, ftr_size, alignment, nmemb,
			       size, ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		hdr = ptr;
		mdbg_update_hdr(hdr, fname, lineno, nmemb * size);
		hdr++;
		ptr = hdr;
	}

	return ptr;
}
589 
/*
 * Select the heap context for @flags: the nexus heap for MAF_NEX when
 * NS virtualization is enabled, the default heap otherwise.
 */
static struct malloc_ctx *get_ctx(uint32_t flags __maybe_unused)
{
#ifdef CFG_NS_VIRTUALIZATION
	if (flags & MAF_NEX)
		return &nex_malloc_ctx;
#endif
	return &malloc_ctx;
}
598 
/* Locked wrapper around mem_alloc_unlocked() on the context for @flags. */
static void *mem_alloc(uint32_t flags, void *ptr, size_t alignment,
		       size_t nmemb, size_t size, const char *fname, int lineno)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;
	void *p = NULL;

	exceptions = malloc_lock(ctx);
	p = mem_alloc_unlocked(flags, ptr, alignment, nmemb, size, fname,
			       lineno, ctx);
	malloc_unlock(ctx, exceptions);

	return p;
}
613 
/*
 * Free @ptr on the heap selected by @flags (MAF_NEX for the nexus heap,
 * MAF_FREE_WIPE to clear the contents). With ENABLE_MDBG the debug
 * header is verified and its magics are cleared to catch double free.
 * NULL is a no-op.
 */
void free_flags(uint32_t flags, void *ptr)
{
	struct malloc_ctx *ctx = get_ctx(flags);
	uint32_t exceptions = 0;

	exceptions = malloc_lock(ctx);

	if (IS_ENABLED2(ENABLE_MDBG) && ptr) {
		struct mdbg_hdr *hdr = ptr;

		hdr--;
		assert_header(hdr);
		hdr->magic = 0;
		*mdbg_get_footer(hdr) = 0;
		ptr = hdr;
	}

	raw_free(ptr, ctx, flags & MAF_FREE_WIPE);

	malloc_unlock(ctx, exceptions);
}
635 
/*
 * Given the raw start of an allocation, return the user-visible payload
 * pointer and store its size in @size, accounting for the debug header
 * when ENABLE_MDBG is on.
 */
static void *get_payload_start_size(void *raw_buf, size_t *size)
{
	if (IS_ENABLED2(ENABLE_MDBG)) {
		struct mdbg_hdr *hdr = raw_buf;

		assert(bget_buf_size(hdr) >= hdr->pl_size);
		*size = hdr->pl_size;
		return hdr + 1;
	}

	*size = bget_buf_size(raw_buf);
	return raw_buf;
}
649 
650 /* For use in raw_malloc_add_pool() below */
651 #define realloc_unlocked(ctx, ptr, size)                                      \
652 	mem_alloc_unlocked(MAF_NULL, (ptr), 1, 1, (size), __FILE__, __LINE__, \
653 			   (ctx))
654 
655 #ifdef ENABLE_MDBG
/* Entry point the malloc debug macros redirect allocations to. */
void *__mdbg_alloc(uint32_t flags, void *ptr, size_t alignment, size_t nmemb,
		   size_t size, const char *fname, int lineno)
{
	return mem_alloc(flags, ptr, alignment, nmemb, size, fname, lineno);
}
661 
/*
 * Verify header/footer magics of every allocated buffer in @ctx; with
 * @bufdump > 0 also log each allocation's size and origin.
 */
static void gen_mdbg_check(struct malloc_ctx *ctx, int bufdump)
{
	struct bpool_iterator itr;
	void *b;
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_validate_pools(ctx);

	BPOOL_FOREACH(ctx, &itr, &b) {
		struct mdbg_hdr *hdr = (struct mdbg_hdr *)b;

		assert_header(hdr);

		if (bufdump > 0) {
			const char *fname = hdr->fname;

			if (!fname)
				fname = "unknown";

			IMSG("buffer: %d bytes %s:%d",
				hdr->pl_size, fname, hdr->line);
		}
	}

	malloc_unlock(ctx, exceptions);
}
688 
/* Check (and optionally dump) all allocations of the default heap. */
void mdbg_check(int bufdump)
{
	gen_mdbg_check(&malloc_ctx, bufdump);
}
693 #endif
694 
695 /*
696  * If malloc debug is enabled, malloc() and friends are redirected by macros
697  * to __mdbg_alloc() etc.
698  * We still want to export the standard entry points in case they are referenced
699  * by the application, either directly or via external libraries.
700  */
701 
#undef malloc
/* Standard malloc() entry point on the default heap. */
void *malloc(size_t size)
{
	return mem_alloc(MAF_NULL, NULL, 1, 1, size, __FILE__, __LINE__);
}
707 
#undef malloc_flags
/* Flag-driven allocation: MAF_* flags, optional realloc @ptr, alignment. */
void *malloc_flags(uint32_t flags, void *ptr, size_t alignment, size_t size)
{
	return mem_alloc(flags, ptr, alignment, 1, size, __FILE__, __LINE__);
}
713 
#undef calloc
/* Standard calloc() entry point; nmemb * size is overflow-checked. */
void *calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_ZERO_INIT, NULL, 1, nmemb, size, __FILE__,
			 __LINE__);
}
720 
#undef realloc
/* Standard realloc() entry point; old buffer kept intact on failure. */
void *realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NULL, ptr, 1, 1, size, __FILE__, __LINE__);
}
726 
#undef memalign
/* memalign() entry point; alignment checked in raw_malloc_flags(). */
void *memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
			 __LINE__);
}
733 
734 #if __STDC_VERSION__ >= 201112L
735 #undef aligned_alloc
aligned_alloc(size_t alignment,size_t size)736 void *aligned_alloc(size_t alignment, size_t size)
737 {
738 	if (size % alignment)
739 		return NULL;
740 
741 	return mem_alloc(MAF_NULL, NULL, alignment, 1, size, __FILE__,
742 			 __LINE__);
743 }
744 #endif /* __STDC_VERSION__ */
745 
/* Standard free() on the default heap; NULL is a no-op. */
void free(void *ptr)
{
	free_flags(MAF_NULL, ptr);
}
750 
/* free() variant that clears the buffer contents before release. */
void free_wipe(void *ptr)
{
	free_flags(MAF_FREE_WIPE, ptr);
}
755 
/* Locked wrapper around raw_malloc_add_pool(). */
static void gen_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);

	raw_malloc_add_pool(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
}
763 
/* Locked wrapper around raw_malloc_buffer_is_within_alloced(). */
static bool gen_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
						void *buf, size_t len)
{
	uint32_t exceptions = malloc_lock(ctx);
	bool ret = false;

	ret = raw_malloc_buffer_is_within_alloced(ctx, buf, len);
	malloc_unlock(ctx, exceptions);

	return ret;
}
775 
/* Locked wrapper around raw_malloc_buffer_overlaps_heap(). */
static bool gen_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
					    void *buf, size_t len)
{
	bool ret = false;
	uint32_t exceptions = malloc_lock(ctx);

	ret = raw_malloc_buffer_overlaps_heap(ctx, buf, len);
	malloc_unlock(ctx, exceptions);
	return ret;
}
786 
/* Size callers must reserve for an opaque struct malloc_ctx. */
size_t raw_malloc_get_ctx_size(void)
{
	return sizeof(struct malloc_ctx);
}
791 
/* Zero a context and make its free list an empty circular list. */
void raw_malloc_init_ctx(struct malloc_ctx *ctx)
{
	memset(ctx, 0, sizeof(*ctx));
	ctx->poolset.freelist.ql.flink = &ctx->poolset.freelist;
	ctx->poolset.freelist.ql.blink = &ctx->poolset.freelist;
}
798 
/*
 * Register [buf, buf + len) as a heap pool of @ctx.
 *
 * The range is aligned inward to SizeQuant; pools too small to hold the
 * bget bookkeeping (or, for the first pool, smaller than
 * MALLOC_INITIAL_POOL_MIN_SIZE) are silently skipped with a debug
 * message. ASan shadow is mapped and the range poisoned before handing
 * it to bget. The pool table itself is grown with an allocation from the
 * heap being extended, which is why the first pool has a minimum size.
 */
void raw_malloc_add_pool(struct malloc_ctx *ctx, void *buf, size_t len)
{
	const size_t min_len = sizeof(struct bhead) + sizeof(struct bfhead);
	uintptr_t start = (uintptr_t)buf;
	uintptr_t end = start + len;
	void *p = NULL;
	size_t l = 0;
	int rc = 0;

	start = ROUNDUP(start, SizeQuant);
	end = ROUNDDOWN(end, SizeQuant);

	if (start > end || (end - start) < min_len) {
		DMSG("Skipping too small pool");
		return;
	}

	/* First pool requires a bigger size */
	if (!ctx->pool_len && (end - start) < MALLOC_INITIAL_POOL_MIN_SIZE) {
		DMSG("Skipping too small initial pool");
		return;
	}
	rc = asan_user_map_shadow((void *)start, (void *)end,
				  ASAN_REG_MEM_POOL);
	if (rc) {
		EMSG("Failed to map ASAN shadow memory");
		bget_panic();
	}
	tag_asan_free((void *)start, end - start);

	bpool((void *)start, end - start, &ctx->poolset);
	l = ctx->pool_len + 1;
	p = realloc_unlocked(ctx, ctx->pool, sizeof(struct malloc_pool) * l);
	assert(p);
	ctx->pool = p;
	ctx->pool[ctx->pool_len].buf = (void *)start;
	ctx->pool[ctx->pool_len].len = end - start;
#ifdef BufStats
	ctx->mstats.size += ctx->pool[ctx->pool_len].len;
#endif
	ctx->pool_len = l;
}
841 
raw_malloc_buffer_overlaps_heap(struct malloc_ctx * ctx,void * buf,size_t len)842 bool raw_malloc_buffer_overlaps_heap(struct malloc_ctx *ctx,
843 				     void *buf, size_t len)
844 {
845 	uintptr_t buf_start = (uintptr_t)strip_tag(buf);
846 	uintptr_t buf_end = buf_start + len;
847 	size_t n = 0;
848 
849 	raw_malloc_validate_pools(ctx);
850 
851 	for (n = 0; n < ctx->pool_len; n++) {
852 		uintptr_t pool_start = (uintptr_t)strip_tag(ctx->pool[n].buf);
853 		uintptr_t pool_end = pool_start + ctx->pool[n].len;
854 
855 		if (buf_start > buf_end || pool_start > pool_end)
856 			return true;	/* Wrapping buffers, shouldn't happen */
857 
858 		if ((buf_start >= pool_start && buf_start < pool_end) ||
859 		    (buf_end > pool_start && buf_end < pool_end))
860 			return true;
861 	}
862 
863 	return false;
864 }
865 
/*
 * Return true if [buf, buf + len) lies entirely within the payload of a
 * single currently allocated buffer of @ctx. Used to validate externally
 * supplied pointers. Walks every allocated buffer, so cost is linear in
 * heap population.
 */
bool raw_malloc_buffer_is_within_alloced(struct malloc_ctx *ctx,
					 void *buf, size_t len)
{
	struct bpool_iterator itr = { };
	void *b = NULL;
	uint8_t *start_buf = strip_tag(buf);
	uint8_t *end_buf = start_buf + len;

	raw_malloc_validate_pools(ctx);

	/* Check for wrapping */
	if (start_buf > end_buf)
		return false;

	BPOOL_FOREACH(ctx, &itr, &b) {
		uint8_t *start_b = NULL;
		uint8_t *end_b = NULL;
		size_t s = 0;

		start_b = strip_tag(get_payload_start_size(b, &s));
		end_b = start_b + s;
		if (start_buf >= start_b && end_buf <= end_b)
			return true;
	}

	return false;
}
893 
894 #ifdef CFG_WITH_STATS
/* Copy the stored stats of @ctx and add the live poolset counters. */
void raw_malloc_get_stats(struct malloc_ctx *ctx, struct pta_stats_alloc *stats)
{
	memcpy_unchecked(stats, &ctx->mstats, sizeof(*stats));
	stats->allocated = ctx->poolset.totalloc;
	stats->free2_sum = ctx->poolset.free2_sum;
}
901 #endif
902 
/* Add a memory pool to the default heap. */
void malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&malloc_ctx, buf, len);
}
907 
/* True if [buf, buf+len) is inside one allocation of the default heap. */
bool malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&malloc_ctx, buf, len);
}
912 
/* True if [buf, buf+len) overlaps any pool of the default heap. */
bool malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&malloc_ctx, buf, len);
}
917 
918 #ifdef CFG_NS_VIRTUALIZATION
919 
920 #ifndef ENABLE_MDBG
921 
/* malloc() on the nexus (virtualization-shared) heap. */
void *nex_malloc(size_t size)
{
	return mem_alloc(MAF_NEX, NULL, 1, 1, size, __FILE__, __LINE__);
}
926 
/* calloc() on the nexus heap. */
void *nex_calloc(size_t nmemb, size_t size)
{
	return mem_alloc(MAF_NEX | MAF_ZERO_INIT, NULL, 1, nmemb, size,
			 __FILE__, __LINE__);
}
932 
/* realloc() on the nexus heap. */
void *nex_realloc(void *ptr, size_t size)
{
	return mem_alloc(MAF_NEX, ptr, 1, 1, size, __FILE__, __LINE__);
}
937 
/* memalign() on the nexus heap. */
void *nex_memalign(size_t alignment, size_t size)
{
	return mem_alloc(MAF_NEX, NULL, alignment, 1, size, __FILE__, __LINE__);
}
942 
943 #else  /* ENABLE_MDBG */
944 
/* Check (and optionally dump) all allocations of the nexus heap. */
void nex_mdbg_check(int bufdump)
{
	gen_mdbg_check(&nex_malloc_ctx, bufdump);
}
949 
950 #endif	/* ENABLE_MDBG */
951 
/* free() on the nexus heap; NULL is a no-op. */
void nex_free(void *ptr)
{
	free_flags(MAF_NEX, ptr);
}
956 
/* Add a memory pool to the nexus heap. */
void nex_malloc_add_pool(void *buf, size_t len)
{
	gen_malloc_add_pool(&nex_malloc_ctx, buf, len);
}
961 
/* True if [buf, buf+len) is inside one allocation of the nexus heap. */
bool nex_malloc_buffer_is_within_alloced(void *buf, size_t len)
{
	return gen_malloc_buffer_is_within_alloced(&nex_malloc_ctx, buf, len);
}
966 
/* True if [buf, buf+len) overlaps any pool of the nexus heap. */
bool nex_malloc_buffer_overlaps_heap(void *buf, size_t len)
{
	return gen_malloc_buffer_overlaps_heap(&nex_malloc_ctx, buf, len);
}
971 
972 #ifdef BufStats
973 
/* Reset allocation statistics of the nexus heap. */
void nex_malloc_reset_stats(void)
{
	gen_malloc_reset_stats(&nex_malloc_ctx);
}
978 
/* Snapshot allocation statistics of the nexus heap into @stats. */
void nex_malloc_get_stats(struct pta_stats_alloc *stats)
{
	gen_malloc_get_stats(&nex_malloc_ctx, stats);
}
983 
984 #endif
985 
986 #endif
987