xref: /OK3568_Linux_fs/u-boot/lib/sysmem.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
4  */
5 
6 #include <common.h>
7 #include <bidram.h>
8 #include <sysmem.h>
9 #include <lmb.h>
10 #include <malloc.h>
11 #include <asm/io.h>
12 
13 DECLARE_GLOBAL_DATA_PTR;
14 
15 #define SYSMEM_MAGIC		0x4D454D53	/* "SMEM" */
16 
17 #define LMB_ALLOC_ANYWHERE	0		/* sync with lmb.c */
18 #define SYSMEM_ALLOC_NO_ALIGN	1
19 #define SYSMEM_ALLOC_ANYWHERE	2
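/*
 * Note on the two sysmem sentinels above, as they are used later in this
 * file: SYSMEM_ALLOC_ANYWHERE (2) is passed in place of a real "base" to
 * request an allocation at any address and is mapped to LMB_ALLOC_ANYWHERE,
 * while SYSMEM_ALLOC_NO_ALIGN (1) is passed as the "align" argument when only
 * the exact base matters, since 1-byte alignment is effectively a no-op.
 */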
20 
21 #define SYSMEM_I(fmt, args...)	printf("Sysmem: "fmt, ##args)
22 #define SYSMEM_W(fmt, args...)	printf("Sysmem Warn: "fmt, ##args)
23 #define SYSMEM_E(fmt, args...)	printf("Sysmem Error: "fmt, ##args)
24 #define SYSMEM_D(fmt, args...)	 debug("Sysmem Debug: "fmt, ##args)
25 
26 struct memcheck {
27 	uint32_t magic;
28 };
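/*
 * Canary used by the overflow checks below. For F_OFC regions the allocator
 * grows the request by sizeof(struct memcheck) and writes the magic right
 * after the caller's requested size (tail canary); for F_HOFC regions the
 * magic is written into the word just below the region base (head canary).
 * Rough layout:
 *
 *   F_OFC:            base [ caller data ........ ][ magic ]
 *   F_HOFC:  [ magic ] base [ caller data ........ ]
 *
 * sysmem_dump() and sysmem_overflow_check() report an overflow when the magic
 * no longer reads back as SYSMEM_MAGIC.
 */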
29 
30 /* Global for platform, must live in the .data section */
31 struct sysmem plat_sysmem __section(".data") = {
32 	.has_initf = false,
33 	.has_initr = false,
34 };
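/*
 * Keeping this in .data lets sysmem work both before and after relocation:
 * pre-relocation U-Boot code cannot rely on BSS, so the has_initf/has_initr
 * flags need storage that is writable in both phases.
 */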
35 
36 bool sysmem_has_init(void)
37 {
38 	return gd->flags & GD_FLG_RELOC ?
39 	       plat_sysmem.has_initr : plat_sysmem.has_initf;
40 }
41 
42 static inline int sysmem_is_overlap(phys_addr_t base1, phys_size_t size1,
43 				    phys_addr_t base2, phys_size_t size2)
44 {
45 	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
46 }
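/*
 * Half-open interval test: [base1, base1 + size1) and [base2, base2 + size2)
 * overlap iff each base lies below the other region's end. For example,
 * (base 0x1000, size 0x1000) and (base 0x1800, size 0x1000) overlap, while
 * (base 0x1000, size 0x1000) and (base 0x2000, size 0x1000) only touch and
 * are not treated as overlapping.
 */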
47 
48 static inline int sysmem_is_sub_region(struct memblock *sub,
49 				       struct memblock *main)
50 {
51 	if (!sub || !main)
52 		return false;
53 
54 	return ((sub->base >= main->base) &&
55 		(sub->base + sub->size <= main->base + main->size));
56 }
57 
58 void sysmem_dump(void)
59 {
60 	struct sysmem *sysmem = &plat_sysmem;
61 	struct lmb *lmb = &sysmem->lmb;
62 	struct memblock *mem;
63 	struct memcheck *check;
64 	struct list_head *node;
65 	ulong memory_size = 0;
66 	ulong reserved_size = 0;
67 	ulong allocated_size = 0;
68 	bool overflow = false;
69 	ulong i;
70 
71 	if (!sysmem_has_init())
72 		return;
73 
74 	printf("\nsysmem_dump_all:\n");
75 
76 	/* Memory pool */
77 	printf("    --------------------------------------------------------------------\n");
78 	for (i = 0; i < lmb->memory.cnt; i++) {
79 		memory_size += lmb->memory.region[i].size;
80 		printf("    memory.rgn[%ld].addr     = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
81 		       (ulong)lmb->memory.region[i].base,
82 		       (ulong)lmb->memory.region[i].base +
83 		       (ulong)lmb->memory.region[i].size,
84 		       (ulong)lmb->memory.region[i].size);
85 	}
86 	printf("\n    memory.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
87 	       (ulong)memory_size,
88 	       SIZE_MB((ulong)memory_size),
89 	       SIZE_KB((ulong)memory_size));
90 
91 	/* Allocated */
92 	i = 0;
93 	printf("    --------------------------------------------------------------------\n");
94 	list_for_each(node, &sysmem->allocated_head) {
95 		mem = list_entry(node, struct memblock, node);
96 		allocated_size += mem->size;
97 		if (mem->attr.flags & F_OFC) {
98 			check = (struct memcheck *)
99 				(mem->base + mem->size - sizeof(*check));
100 			overflow = (check->magic != SYSMEM_MAGIC);
101 		} else if (mem->attr.flags & F_HOFC) {
102 			check = (struct memcheck *)
103 				(mem->base - sizeof(*check));
104 			overflow = (check->magic != SYSMEM_MAGIC);
105 		} else {
106 			overflow = false;
107 		}
108 
109 		printf("    allocated.rgn[%ld].name  = \"%s\" %s %s\n",
110 		       i, mem->attr.name, overflow ? "	   <Overflow!>" : "",
111 		       mem->orig_base != mem->base ? "<*>" : "");
112 		printf("		    .addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
113 		       (ulong)mem->orig_base,
114 		       (ulong)(mem->orig_base + mem->size),
115 		       (ulong)mem->size);
116 		i++;
117 	}
118 
119 	/* Kernel 'reserved-memory' */
120 	i = 0;
121 	printf("\n");
122 	list_for_each(node, &sysmem->kmem_resv_head) {
123 		mem = list_entry(node, struct memblock, node);
124 		allocated_size += mem->size;
125 		printf("    kmem-resv.rgn[%ld].name  = \"%s\" %s\n",
126 		       i, mem->attr.name,
127 		       mem->orig_base != mem->base ? "<*>" : "");
128 		printf("		    .addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
129 		       (ulong)mem->orig_base,
130 		       (ulong)(mem->orig_base + mem->size),
131 		       (ulong)mem->size);
132 		i++;
133 	}
134 
135 	printf("\n    framework malloc_r     = %3d MiB",
136 	       SIZE_MB(CONFIG_SYS_MALLOC_LEN));
137 	printf("\n    framework malloc_f     = %3d KiB\n",
138 	       SIZE_KB(CONFIG_SYS_MALLOC_F_LEN));
139 
140 	printf("\n    allocated.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
141 	       (ulong)allocated_size,
142 	       SIZE_MB((ulong)allocated_size),
143 	       SIZE_KB((ulong)allocated_size));
144 
145 	/* LMB core reserved */
146 	printf("    --------------------------------------------------------------------\n");
147 	reserved_size = 0;
148 	for (i = 0; i < lmb->reserved.cnt; i++) {
149 		reserved_size += lmb->reserved.region[i].size;
150 		printf("    LMB.allocated[%ld].addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
151 		       (ulong)lmb->reserved.region[i].base,
152 		       (ulong)lmb->reserved.region[i].base +
153 		       (ulong)lmb->reserved.region[i].size,
154 		       (ulong)lmb->reserved.region[i].size);
155 	}
156 
157 	printf("\n    reserved.core.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
158 	       (ulong)reserved_size,
159 	       SIZE_MB((ulong)reserved_size),
160 	       SIZE_KB((ulong)reserved_size));
161 	printf("    --------------------------------------------------------------------\n\n");
162 }
163 
164 void sysmem_overflow_check(void)
165 {
166 	struct sysmem *sysmem = &plat_sysmem;
167 	struct list_head *node, *knode;
168 	struct memcheck *check;
169 	struct memblock *kmem;
170 	struct memblock *smem;
171 	struct memblock *rmem;
172 	int overflow = 0, overlap = 0;
173 
174 	if (!sysmem_has_init())
175 		return;
176 
177 #ifdef CONFIG_BIDRAM
178 	/*
179 	 * Check whether kernel 'reserved-memory' regions overlap with invisible regions
180 	 *
181 	 * Here we only print a warning message when such an overlap is found
182 	 */
183 	list_for_each(knode, &sysmem->kmem_resv_head) {
184 		kmem = list_entry(knode, struct memblock, node);
185 		rmem = bidram_reserved_is_overlap(kmem->base, kmem->size);
186 		if (rmem) {
187 			const char *alias;
188 			int i, dump = 1;
189 
190 			/*
191 			 * Ignore sub-regions of an invisible region,
192 			 * e.g. ramoops inside SHM.
193 			 */
194 			alias = rmem->attr.alias[0];
195 			if (alias && sysmem_is_sub_region(kmem, rmem)) {
196 				for (i = 0; i < ALIAS_COUNT_MAX; i++, alias++) {
197 					alias = rmem->attr.alias[i];
198 					if (!alias)
199 						continue;
200 					if (!strncasecmp(kmem->attr.name, alias,
201 							 strlen(alias))) {
202 						dump = 0;
203 						break;
204 					}
205 				}
206 			}
207 
208 			if (dump)
209 				SYSMEM_W("kernel 'reserved-memory' \"%s\" (0x%08lx - 0x%08lx) "
210 					 "overlaps with [invisible] \"%s\" (0x%08lx - 0x%08lx)\n",
211 					 kmem->attr.name, (ulong)kmem->base,
212 					 (ulong)(kmem->base + kmem->size),
213 					 rmem->attr.name, (ulong)rmem->base,
214 					 (ulong)(rmem->base + rmem->size));
215 		}
216 	}
217 #endif
218 
219 	list_for_each(node, &sysmem->allocated_head) {
220 		smem = list_entry(node, struct memblock, node);
221 		/*
222 		 * Check whether kernel 'reserved-memory' regions overlap with sysmem allocated regions
223 		 */
224 		list_for_each(knode, &sysmem->kmem_resv_head) {
225 			kmem = list_entry(knode, struct memblock, node);
226 			if (sysmem_is_overlap(smem->base, smem->size,
227 					      kmem->base, kmem->size)) {
228 				if (smem->attr.flags & F_KMEM_CAN_OVERLAP)
229 					continue;
230 
231 				overlap = 1;
232 				SYSMEM_W("kernel 'reserved-memory' \"%s\" (0x%08lx - 0x%08lx) "
233 					 "overlaps with \"%s\" (0x%08lx - 0x%08lx)\n",
234 					 kmem->attr.name, (ulong)kmem->base,
235 					 (ulong)(kmem->base + kmem->size),
236 					 smem->attr.name, (ulong)smem->base,
237 					 (ulong)(smem->base + smem->size));
238 			}
239 		}
240 
241 		/*
242 		 * Check sysmem allocated regions for overflow.
243 		 */
244 		if (smem->attr.flags & F_OFC) {
245 			check = (struct memcheck *)
246 				(smem->base + smem->size - sizeof(*check));
247 			overflow = (check->magic != SYSMEM_MAGIC);
248 		} else if (smem->attr.flags & F_HOFC) {
249 			check = (struct memcheck *)
250 				(smem->base - sizeof(*check));
251 			overflow = (check->magic != SYSMEM_MAGIC);
252 		} else {
253 			overflow = 0;
254 		}
255 
256 		if (overflow) {
257 			SYSMEM_E("Found an overflowed region!\n");
258 			break;
259 		}
260 	}
261 
262 	if (overflow || overlap)
263 		sysmem_dump();
264 }
265 
266 static int sysmem_add(phys_addr_t base, phys_size_t size)
267 {
268 	struct sysmem *sysmem = &plat_sysmem;
269 	int ret;
270 
271 	if (!size)
272 		return -EINVAL;
273 
274 	ret = lmb_add(&sysmem->lmb, base, size);
275 	if (ret < 0)
276 		SYSMEM_E("Failed to add sysmem at 0x%08lx for 0x%08lx size\n",
277 			 (ulong)base, (ulong)size);
278 
279 	return (ret >= 0) ? 0 : ret;
280 }
281 
282 static const char *sysmem_alias2name(const char *name, int *id)
283 {
284 	const char *alias;
285 	int i, j;
286 	int match = 0;
287 
288 	for (i = 0; i < MEM_MAX; i++) {
289 		/* Primary name */
290 		if (mem_attr[i].name && !strcasecmp(mem_attr[i].name, name)) {
291 			match = 1;
292 			goto finish;
293 		}
294 
295 		/* Alias name */
296 		alias = mem_attr[i].alias[0];
297 		if (!alias)
298 			continue;
299 
300 		for (j = 0; j < ALIAS_COUNT_MAX; j++) {
301 			alias = mem_attr[i].alias[j];
302 			if (alias && !strcasecmp(alias, name)) {
303 				match = 1;
304 				goto finish;
305 			}
306 		}
307 	}
308 
309 finish:
310 	if (match) {
311 		*id = i;
312 		return mem_attr[i].name;
313 	}
314 
315 	return name;
316 }
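/*
 * Illustration with hypothetical table entries (the real names live in
 * mem_attr[]): if mem_attr[MEM_AVB].name were "avb" with alias "android_avb",
 * then sysmem_alias2name("ANDROID_AVB", &id) would return "avb" and set id to
 * MEM_AVB; matching is case-insensitive. A name that matches neither a
 * primary name nor an alias is returned unchanged and *id is left as passed.
 */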
317 
318 static void *sysmem_alloc_align_base(enum memblk_id id,
319 				     const char *mem_name,
320 				     phys_addr_t base,
321 				     phys_size_t size,
322 				     ulong align)
323 {
324 	struct sysmem *sysmem = &plat_sysmem;
325 	struct memblk_attr attr;
326 	struct memblock *mem;
327 	struct memcheck *check;
328 	struct list_head *node;
329 	const char *name;
330 	phys_addr_t paddr;
331 	phys_addr_t alloc_base;
332 	phys_size_t alloc_size;
333 	phys_addr_t orig_base = base;
334 
335 	if (!sysmem_has_init())
336 		goto out;
337 
338 	if (id == MEM_BY_NAME || id == MEM_KMEM_RESERVED) {
339 		if (!mem_name) {
340 			SYSMEM_E("NULL name for alloc sysmem\n");
341 			goto out;
342 		}
343 
344 		/* Find: name, id and attr by outer mem_name & id */
345 		name = sysmem_alias2name(mem_name, (int *)&id);
346 		attr = mem_attr[id];
347 		if (!attr.name)
348 			attr.name = strdup(name);
349 
350 		/* Always make kernel 'reserved-memory' alloc successfully */
351 		if (id == MEM_KMEM_RESERVED) {
352 			struct memblock *mem;
353 
354 			mem = malloc(sizeof(*mem));
355 			if (!mem) {
356 				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
357 				return mem;
358 			}
359 
360 			attr.flags |= F_KMEM_RESERVED;
361 			mem->orig_base = orig_base;
362 			mem->base = base;
363 			mem->size = size;
364 			mem->attr = attr;
365 			sysmem->kmem_resv_cnt++;
366 			list_add_tail(&mem->node, &sysmem->kmem_resv_head);
367 
368 			return (void *)base;
369 		}
370 	} else if (id > MEM_UNK && id < MEM_MAX) {
371 		attr = mem_attr[id];
372 		name = attr.name;
373 
374 		/*
375 		 * Special handling for the Android AVB alloc (anywhere).
376 		 *
377 		 * Fix up the base to sit just below the U-Boot stack; adding
378 		 * some extra space (4 KB) makes it safer.
379 		 */
380 		if (attr.flags & F_HIGHEST_MEM) {
381 			base = gd->start_addr_sp -
382 					CONFIG_SYS_STACK_SIZE - size - 0x1000;
383 
384 		/*
385 		 * Address 0x0 is usually requested for the 32-bit uncompressed
386 		 * kernel, and this alloc action is just a peek.
387 		 *
388 		 * Because the LMB core doesn't support allocating at address
389 		 * 0x0, we have to move the memblk up by a few bytes.
390 		 *
391 		 * ARCH_DMA_MINALIGN seems a good choice.
392 		 */
393 		} else if (!base) {
394 			base += ARCH_DMA_MINALIGN;
395 		} else if (base < gd->bd->bi_dram[0].start) {
396 			/*
397 			 * On Rockchip platforms:
398 			 *
399 			 * So far, F_IGNORE_INVISIBLE is used for the uncompressed
400 			 * kernel alloc; on ARMv8 running in AArch32 mode, the
401 			 * ATF is still AArch64 and occupies 0~1MB, with shmem at 1~2MB.
402 			 * So ignore the part of the region that overlaps with them.
403 			 */
404 			if (attr.flags & F_IGNORE_INVISIBLE) {
405 				base = gd->bd->bi_dram[0].start;
406 			} else {
407 				SYSMEM_E("Failed to alloc invisible sub region 0x%08lx - 0x%08lx "
408 					 "of \"%s\" at 0x%08lx - 0x%08lx\n",
409 					 (ulong)base, (ulong)gd->bd->bi_dram[0].start,
410 					 name, (ulong)base, (ulong)(base + size));
411 				goto out;
412 			}
413 		}
414 	} else {
415 		SYSMEM_E("Unsupport memblk id %d for alloc sysmem\n", id);
416 		goto out;
417 	}
418 
419 	if (!size) {
420 		SYSMEM_E("\"%s\" size is 0 for alloc sysmem\n", name);
421 		goto out;
422 	}
423 
424 	/*
425 	 * Some modules use sysmem_alloc() to allocate a region for a storage
426 	 * read/write buffer, which should be cacheline aligned, e.g. AVB.
427 	 *
428 	 * Align the base down to the cacheline size if it is not aligned,
429 	 * otherwise the tail of the region may overflow.
430 	 */
431 	if (attr.flags & F_CACHELINE_ALIGN &&
432 	    !IS_ALIGNED(base, ARCH_DMA_MINALIGN)) {
433 		base = ALIGN(base, ARCH_DMA_MINALIGN);
434 		base -= ARCH_DMA_MINALIGN;
435 	}
436 
437 	if (base != SYSMEM_ALLOC_ANYWHERE && !IS_ALIGNED(base, 4)) {
438 		SYSMEM_E("\"%s\" base=0x%08lx is not 4-byte aligned\n",
439 			 name, (ulong)base);
440 		goto out;
441 	}
442 
443 	/* Must be sizeof(long) byte aligned */
444 	size = ALIGN(size, sizeof(long));
445 
446 	SYSMEM_D("Enter alloc: \"%s\" 0x%08lx - 0x%08lx\n",
447 		 name, (ulong)base, (ulong)(base + size));
448 
449 	/* Already allocated ? */
450 	list_for_each(node, &sysmem->allocated_head) {
451 		mem = list_entry(node, struct memblock, node);
452 		SYSMEM_D("Has allcated: %s, 0x%08lx - 0x%08lx\n",
453 			 mem->attr.name, (ulong)mem->base,
454 			 (ulong)(mem->base + mem->size));
455 		if (!strcmp(mem->attr.name, name)) {
456 			/* Allow double alloc for same but smaller region */
457 			if (mem->base <= base && mem->size >= size)
458 				return (void *)base;
459 
460 			SYSMEM_E("Failed to double alloc existing \"%s\"\n", name);
461 			goto out;
462 		} else if (sysmem_is_overlap(mem->base, mem->size, base, size)) {
463 			SYSMEM_E("\"%s\" (0x%08lx - 0x%08lx) alloc "
464 				 "overlaps with existing \"%s\" (0x%08lx - "
465 				 "0x%08lx)\n",
466 				 name, (ulong)base, (ulong)(base + size),
467 				 mem->attr.name, (ulong)mem->base,
468 				 (ulong)(mem->base + mem->size));
469 			goto out;
470 		}
471 	}
472 
473 	/* Add overflow check magic ? */
474 	if (attr.flags & F_OFC)
475 		alloc_size = size + sizeof(*check);
476 	else
477 		alloc_size = size;
478 
479 	/* Alloc anywhere ? */
480 	if (base == SYSMEM_ALLOC_ANYWHERE)
481 		alloc_base = LMB_ALLOC_ANYWHERE;
482 	else
483 		alloc_base = base + alloc_size;	/* LMB allocates downward from this address */
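	/*
	 * Worked example of the align-down trick (assuming no extra alignment,
	 * i.e. align == SYSMEM_ALLOC_NO_ALIGN): for base 0x200000 and
	 * alloc_size 0x1000, alloc_base becomes 0x201000; lmb_alloc_base()
	 * then allocates 0x1000 bytes downward from that limit and lands the
	 * region exactly at 0x200000, which is verified against "base" below.
	 */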
484 
485 	SYSMEM_D("DO alloc... base: 0x%08lx\n", (ulong)alloc_base);
486 
487 	paddr = lmb_alloc_base(&sysmem->lmb, alloc_size, align, alloc_base);
488 	if (paddr) {
489 		if ((paddr == base) || (base == SYSMEM_ALLOC_ANYWHERE)) {
490 			mem = malloc(sizeof(*mem));
491 			if (!mem) {
492 				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
493 				goto out;
494 			}
495 			/* Record original base for dump */
496 			if (attr.flags & F_HIGHEST_MEM)
497 				mem->orig_base = base;
498 			else
499 				mem->orig_base = orig_base;
500 
501 			mem->base = paddr;
502 			mem->size = alloc_size;
503 			mem->attr = attr;
504 			sysmem->allocated_cnt++;
505 			list_add_tail(&mem->node, &sysmem->allocated_head);
506 
507 			/* Add overflow check magic */
508 			if (mem->attr.flags & F_OFC) {
509 				check = (struct memcheck *)(paddr + size);
510 				check->magic = SYSMEM_MAGIC;
511 			} else if (mem->attr.flags & F_HOFC) {
512 				check = (struct memcheck *)(paddr - sizeof(*check));
513 				check->magic = SYSMEM_MAGIC;
514 			}
515 		} else {
516 			SYSMEM_E("Failed to alloc \"%s\" expect at 0x%08lx - 0x%08lx "
517 				 "but at 0x%08lx - 0x%08lx\n",
518 				 name, (ulong)base, (ulong)(base + size),
519 				 (ulong)paddr, (ulong)(paddr + size));
520 			/* Free the region we didn't want */
521 			if (lmb_free(&sysmem->lmb, paddr, alloc_size) < 0)
522 				SYSMEM_E("Failed to free \"%s\"\n", name);
523 
524 			goto out;
525 		}
526 	} else {
527 		SYSMEM_E("Failed to alloc \"%s\" at 0x%08lx - 0x%08lx\n",
528 			 name, (ulong)base, (ulong)(base + size));
529 		goto out;
530 	}
531 
532 	SYSMEM_D("Exit alloc: \"%s\", paddr=0x%08lx, size=0x%08lx, align=0x%x, anywhere=%d\n",
533 		 name, (ulong)paddr, (ulong)size, (u32)align, !base);
534 
535 	return (void *)paddr;
536 
537 out:
538 	/*
539 	 * Why base + sizeof(ulong)?
540 	 * It is not a standard way to handle the case where the input base
541 	 * is 0: since 0 equals NULL, but we don't want to return NULL for an
542 	 * alloc that is treated as successful, returning any non-NULL value is fine.
543 	 *
544 	 * When does it happen?
545 	 * A 32-bit platform may alloc the region for the uncompressed kernel
546 	 * at address 0.
547 	 */
548 	if (base == 0)
549 		base = base + sizeof(ulong);
550 
551 	return (attr.flags & (F_IGNORE_INVISIBLE | F_NO_FAIL_DUMP)) ?
552 			(void *)base : NULL;
553 }
554 
555 void *sysmem_alloc(enum memblk_id id, phys_size_t size)
556 {
557 	void *paddr;
558 
559 	paddr = sysmem_alloc_align_base(id,
560 					NULL,
561 					SYSMEM_ALLOC_ANYWHERE,
562 					size,
563 					ARCH_DMA_MINALIGN);
564 	if (!paddr)
565 		sysmem_dump();
566 
567 	return paddr;
568 }
569 
570 void *sysmem_alloc_by_name(const char *name, phys_size_t size)
571 {
572 	void *paddr;
573 
574 	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
575 					name,
576 					SYSMEM_ALLOC_ANYWHERE,
577 					size,
578 					ARCH_DMA_MINALIGN);
579 	if (!paddr)
580 		sysmem_dump();
581 
582 	return paddr;
583 }
584 
585 void *sysmem_alloc_base(enum memblk_id id, phys_addr_t base, phys_size_t size)
586 {
587 	void *paddr;
588 
589 	paddr = sysmem_alloc_align_base(id,
590 					NULL,
591 					base,
592 					size,
593 					SYSMEM_ALLOC_NO_ALIGN);
594 	if (!paddr)
595 		sysmem_dump();
596 
597 	return paddr;
598 }
599 
600 void *sysmem_alloc_base_by_name(const char *name,
601 				phys_addr_t base, phys_size_t size)
602 {
603 	void *paddr;
604 
605 	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
606 					name,
607 					base,
608 					size,
609 					SYSMEM_ALLOC_NO_ALIGN);
610 	if (!paddr)
611 		sysmem_dump();
612 
613 	return paddr;
614 }
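/*
 * Minimal usage sketch of the by-name allocators above (hypothetical caller;
 * the region name, base and size are for illustration only):
 *
 *	void *buf;
 *
 *	buf = sysmem_alloc_base_by_name("fdt", 0x08300000, SZ_1M);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	sysmem_free((phys_addr_t)buf);
 */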
615 
616 void *sysmem_fdt_reserve_alloc_base(const char *name,
617 				    phys_addr_t base, phys_size_t size)
618 {
619 	void *paddr;
620 
621 	paddr = sysmem_alloc_align_base(MEM_KMEM_RESERVED,
622 					name,
623 					base,
624 					size,
625 					SYSMEM_ALLOC_NO_ALIGN);
626 	if (!paddr)
627 		sysmem_dump();
628 
629 	return paddr;
630 }
631 
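/*
 * Probe for a free region of "size" bytes just below the U-Boot stack (with a
 * small gap): the region is allocated from LMB and then immediately freed
 * again, so only the address is returned and nothing stays reserved. The
 * "sysmem_search" command below uses this as a hint for a usable scratch
 * address.
 */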
632 ulong sysmem_alloc_temporary_mem(phys_size_t size)
633 {
634 	struct sysmem *sysmem = &plat_sysmem;
635 	phys_addr_t alloc_base;
636 	phys_addr_t paddr;
637 	phys_addr_t base;
638 	int ret;
639 
640 	if (!sysmem_has_init())
641 		return 0;
642 
643 	base = (gd->start_addr_sp - CONFIG_SYS_STACK_SIZE - 0x2000) - size;
644 
645 	/* LMB allocates downward from this address */
646 	alloc_base = base + size;
647 	paddr = __lmb_alloc_base(&sysmem->lmb, size, SZ_1K, alloc_base);
648 	if (paddr) {
649 	/* If the free fails, return 0 */
650 		ret = lmb_free(&sysmem->lmb, paddr, size);
651 		if (ret < 0) {
652 			SYSMEM_E("Can't free at 0x%08lx - 0x%08lx, ret=%d\n",
653 				 (ulong)paddr, (ulong)(paddr + size), ret);
654 			return 0;
655 		}
656 	}
657 
658 	return paddr;
659 }
660 
661 int sysmem_free(phys_addr_t base)
662 {
663 	struct sysmem *sysmem = &plat_sysmem;
664 	struct memblock *mem;
665 	struct list_head *node;
666 	int ret, found = 0;
667 
668 	if (!sysmem_has_init())
669 		return -ENOSYS;
670 
671 	/* Find the existing region */
672 	list_for_each(node, &sysmem->allocated_head) {
673 		mem = list_entry(node, struct memblock, node);
674 		if (mem->base == base || mem->orig_base == base) {
675 			found = 1;
676 			break;
677 		}
678 	}
679 
680 	if (!found) {
681 		SYSMEM_E("Failed to free no allocated sysmem at 0x%08lx\n",
682 			 (ulong)base);
683 		return -EINVAL;
684 	}
685 
686 	ret = lmb_free(&sysmem->lmb, mem->base, mem->size);
687 	if (ret >= 0) {
688 		SYSMEM_D("Free: \"%s\" 0x%08lx - 0x%08lx\n",
689 			 mem->attr.name, (ulong)mem->base,
690 			 (ulong)(mem->base + mem->size));
691 		sysmem->allocated_cnt--;
692 		list_del(&mem->node);
693 		free(mem);
694 	} else {
695 		SYSMEM_E("Failed to free \"%s\" at 0x%08lx\n",
696 			 mem->attr.name, (ulong)base);
697 	}
698 
699 	return (ret >= 0) ? 0 : ret;
700 }
701 
702 int sysmem_initr(void)
703 {
704 	return sysmem_init();
705 }
706 
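/*
 * Build the initial sysmem view of DRAM. A rough sketch of the reservations
 * made below (the exact layout is board dependent):
 *
 *   gd->ram_top
 *        |  MEM_UBOOT - framework reservations done by 'reserve_xxx()'
 *   gd->start_addr_sp
 *        |  MEM_STACK - U-Boot stack, CONFIG_SYS_STACK_SIZE bytes
 *   gd->start_addr_sp - CONFIG_SYS_STACK_SIZE
 *        |  remaining DRAM banks stay available for sysmem allocations
 */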
707 int sysmem_init(void)
708 {
709 	struct sysmem *sysmem = &plat_sysmem;
710 	phys_addr_t mem_start;
711 	phys_size_t mem_size;
712 	int ret;
713 
714 	lmb_init(&sysmem->lmb);
715 	INIT_LIST_HEAD(&sysmem->allocated_head);
716 	INIT_LIST_HEAD(&sysmem->kmem_resv_head);
717 	sysmem->allocated_cnt = 0;
718 	sysmem->kmem_resv_cnt = 0;
719 
720 	if (gd->flags & GD_FLG_RELOC) {
721 		sysmem->has_initr = true;
722 	} else {
723 		SYSMEM_I("init\n");
724 		sysmem->has_initf = true;
725 	}
726 
727 	/* Add all available system memory */
728 #ifdef CONFIG_NR_DRAM_BANKS
729 	int i;
730 
731 	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
732 		if (!gd->bd->bi_dram[i].size)
733 			continue;
734 
735 		ret = sysmem_add(gd->bd->bi_dram[i].start,
736 				 gd->bd->bi_dram[i].size);
737 		if (ret) {
738 			SYSMEM_E("Failed to add sysmem from bi_dram[%d]\n", i);
739 			goto fail;
740 		}
741 	}
742 #else
743 	mem_start = env_get_bootm_low();
744 	mem_size = env_get_bootm_size();
745 	ret = sysmem_add(mem_start, mem_size);
746 	if (ret) {
747 		SYSMEM_E("Failed to add sysmem from bootm_low/size\n");
748 		goto fail;
749 	}
750 #endif
751 	/* Reserved for board */
752 	ret = board_sysmem_reserve(sysmem);
753 	if (ret) {
754 		SYSMEM_E("Failed to reserve sysmem for board\n");
755 		goto fail;
756 	}
757 
758 	/* Reserved for the U-Boot framework: 'reserve_xxx()' */
759 	mem_start = gd->start_addr_sp;
760 	mem_size = gd->ram_top - mem_start;
761 	if (!sysmem_alloc_base(MEM_UBOOT, mem_start, mem_size)) {
762 		SYSMEM_E("Failed to reserve sysmem for U-Boot framework\n");
763 		ret = -ENOMEM;
764 		goto fail;
765 	}
766 
767 	/* Reserved for U-Boot stack */
768 	mem_start = gd->start_addr_sp - CONFIG_SYS_STACK_SIZE;
769 	mem_size = CONFIG_SYS_STACK_SIZE;
770 	if (!sysmem_alloc_base(MEM_STACK, mem_start, mem_size)) {
771 		SYSMEM_E("Failed to reserve sysmem for stack\n");
772 		ret = -ENOMEM;
773 		goto fail;
774 	}
775 
776 	return 0;
777 
778 fail:
779 	if (ret && !(gd->flags & GD_FLG_RELOC)) {
780 		sysmem_dump();
781 		SYSMEM_W("Maybe malloc size %d MiB is too large?\n\n",
782 			 SIZE_MB(CONFIG_SYS_MALLOC_LEN));
783 	}
784 
785 	return ret;
786 }
787 
788 __weak int board_sysmem_reserve(struct sysmem *sysmem)
789 {
790 	/* Please define a platform-specific board_sysmem_reserve() */
791 	return 0;
792 }
793 
794 static int do_sysmem_dump(cmd_tbl_t *cmdtp, int flag,
795 			  int argc, char *const argv[])
796 {
797 	sysmem_dump();
798 	return 0;
799 }
800 
801 static int do_sysmem_search(cmd_tbl_t *cmdtp, int flag,
802 			    int argc, char *const argv[])
803 {
804 	ulong addr, size;
805 
806 	if (argc != 2)
807 		return CMD_RET_USAGE;
808 
809 	size = simple_strtoul(argv[1], NULL, 16);
810 	if (!size)
811 		return CMD_RET_USAGE;
812 
813 	addr = sysmem_alloc_temporary_mem(size);
814 	if (!addr) {
815 		SYSMEM_I("No available region with size 0x%08lx\n", size);
816 	} else {
817 		SYSMEM_I("Available region at address: 0x%08lx\n",addr);
818 	}
819 	env_set_hex("smem_addr", addr);
820 
821 	return 0;
822 }
823 
824 U_BOOT_CMD(
825 	sysmem_dump, 1, 1, do_sysmem_dump,
826 	"Dump sysmem layout",
827 	""
828 );
829 
830 U_BOOT_CMD(
831 	sysmem_search, 2, 1, do_sysmem_search,
832 	"Search a available sysmem region",
833 	"<size in hex>"
834 );
835
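/*
 * Illustrative shell usage of the commands above (output shortened; the real
 * dump depends on the board):
 *
 *   => sysmem_dump
 *   => sysmem_search 100000
 *   Sysmem: Available region at address: 0x........
 *   => printenv smem_addr
 *
 * sysmem_search takes the size in hex and also stores the found address in
 * the "smem_addr" environment variable.
 */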