// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <bidram.h>
#include <sysmem.h>
#include <lmb.h>
#include <malloc.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

#define SYSMEM_MAGIC		0x4D454D53	/* "SMEM" */

#define LMB_ALLOC_ANYWHERE	0		/* sync with lmb.c */
#define SYSMEM_ALLOC_NO_ALIGN	1
#define SYSMEM_ALLOC_ANYWHERE	2

#define SYSMEM_I(fmt, args...)	printf("Sysmem: "fmt, ##args)
#define SYSMEM_W(fmt, args...)	printf("Sysmem Warn: "fmt, ##args)
#define SYSMEM_E(fmt, args...)	printf("Sysmem Error: "fmt, ##args)
#define SYSMEM_D(fmt, args...)	 debug("Sysmem Debug: "fmt, ##args)

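/*
 * Overflow-check guard word: planted just past (F_OFC) or just before
 * (F_HOFC) an allocated region, and verified against SYSMEM_MAGIC by
 * sysmem_dump() and sysmem_overflow_check().
 */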
struct memcheck {
	uint32_t magic;
};

/* Global for the platform, must be in the data section */
struct sysmem plat_sysmem __section(".data") = {
	.has_initf = false,
	.has_initr = false,
};

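/* Report whether sysmem has been initialized for the current stage (pre- or post-relocation) */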
bool sysmem_has_init(void)
{
	return gd->flags & GD_FLG_RELOC ?
	       plat_sysmem.has_initr : plat_sysmem.has_initf;
}

static inline int sysmem_is_overlap(phys_addr_t base1, phys_size_t size1,
				    phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static inline int sysmem_is_sub_region(struct memblock *sub,
				       struct memblock *main)
{
	if (!sub || !main)
		return false;

	return ((sub->base >= main->base) &&
		(sub->base + sub->size <= main->base + main->size));
}

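/*
 * Dump the whole sysmem layout: memory pool, allocated regions, kernel
 * 'reserved-memory' regions and the LMB core reservations.
 */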
void sysmem_dump(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct lmb *lmb = &sysmem->lmb;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	ulong memory_size = 0;
	ulong reserved_size = 0;
	ulong allocated_size = 0;
	bool overflow = false;
	ulong i;

	if (!sysmem_has_init())
		return;

	printf("\nsysmem_dump_all:\n");

	/* Memory pool */
	printf("    --------------------------------------------------------------------\n");
	for (i = 0; i < lmb->memory.cnt; i++) {
		memory_size += lmb->memory.region[i].size;
		printf("    memory.rgn[%ld].addr     = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->memory.region[i].base,
		       (ulong)lmb->memory.region[i].base +
		       (ulong)lmb->memory.region[i].size,
		       (ulong)lmb->memory.region[i].size);
	}
	printf("\n    memory.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)memory_size,
	       SIZE_MB((ulong)memory_size),
	       SIZE_KB((ulong)memory_size));

	/* Allocated */
	i = 0;
	printf("    --------------------------------------------------------------------\n");
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		if (mem->attr.flags & F_OFC) {
			check = (struct memcheck *)
				(mem->base + mem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (mem->attr.flags & F_HOFC) {
			check = (struct memcheck *)
				(mem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = false;
		}

		printf("    allocated.rgn[%ld].name  = \"%s\" %s %s\n",
		       i, mem->attr.name, overflow ? "	   <Overflow!>" : "",
		       mem->orig_base != mem->base ? "<*>" : "");
		printf("		    .addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	/* Kernel 'reserved-memory' */
	i = 0;
	printf("\n");
	list_for_each(node, &sysmem->kmem_resv_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		printf("    kmem-resv.rgn[%ld].name  = \"%s\" %s\n",
		       i, mem->attr.name,
		       mem->orig_base != mem->base ? "<*>" : "");
		printf("		    .addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	printf("\n    framework malloc_r     = %3d MiB",
	       SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	printf("\n    framework malloc_f     = %3d KiB\n",
	       SIZE_KB(CONFIG_SYS_MALLOC_F_LEN));

	printf("\n    allocated.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)allocated_size,
	       SIZE_MB((ulong)allocated_size),
	       SIZE_KB((ulong)allocated_size));

	/* LMB core reserved */
	printf("    --------------------------------------------------------------------\n");
	reserved_size = 0;
	for (i = 0; i < lmb->reserved.cnt; i++) {
		reserved_size += lmb->reserved.region[i].size;
		printf("    LMB.allocated[%ld].addr  = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->reserved.region[i].base,
		       (ulong)lmb->reserved.region[i].base +
		       (ulong)lmb->reserved.region[i].size,
		       (ulong)lmb->reserved.region[i].size);
	}

	printf("\n    reserved.core.total	   = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)reserved_size,
	       SIZE_MB((ulong)reserved_size),
	       SIZE_KB((ulong)reserved_size));
	printf("    --------------------------------------------------------------------\n\n");
}

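/*
 * Check every allocated region for guard-word corruption and check kernel
 * 'reserved-memory' regions for overlap with sysmem allocations (and, with
 * CONFIG_BIDRAM, with invisible regions); dump the layout if anything is wrong.
 */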
void sysmem_overflow_check(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct list_head *node, *knode;
	struct memcheck *check;
	struct memblock *kmem;
	struct memblock *smem;
	struct memblock *rmem;
	int overflow = 0, overlap = 0;

	if (!sysmem_has_init())
		return;

#ifdef CONFIG_BIDRAM
	/*
	 * Check whether kernel 'reserved-memory' overlaps with invisible
	 * regions.
	 *
	 * Here we only print a warning message when it overlaps with an
	 * invisible region.
	 */
	list_for_each(knode, &sysmem->kmem_resv_head) {
		kmem = list_entry(knode, struct memblock, node);
		rmem = bidram_reserved_is_overlap(kmem->base, kmem->size);
		if (rmem) {
			const char *alias;
			int i, dump = 1;

			/*
			 * Ignore sub-regions of an invisible region,
			 * e.g. ramoops within SHM.
			 */
			alias = rmem->attr.alias[0];
			if (alias && sysmem_is_sub_region(kmem, rmem)) {
				for (i = 0; i < ALIAS_COUNT_MAX; i++, alias++) {
					alias = rmem->attr.alias[i];
					if (!alias)
						continue;
					if (!strncasecmp(kmem->attr.name, alias,
							 strlen(alias))) {
						dump = 0;
						break;
					}
				}
			}

			if (dump)
				SYSMEM_W("kernel 'reserved-memory' \"%s\"(0x%08lx - 0x%08lx) "
					 "overlaps with [invisible] \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 rmem->attr.name, (ulong)rmem->base,
					 (ulong)(rmem->base + rmem->size));
		}
	}
#endif

	list_for_each(node, &sysmem->allocated_head) {
		smem = list_entry(node, struct memblock, node);
		/*
		 * Check whether kernel 'reserved-memory' overlaps with sysmem
		 * allocated regions.
		 */
		list_for_each(knode, &sysmem->kmem_resv_head) {
			kmem = list_entry(knode, struct memblock, node);
			if (sysmem_is_overlap(smem->base, smem->size,
					      kmem->base, kmem->size)) {
				if (smem->attr.flags & F_KMEM_CAN_OVERLAP)
					continue;

				overlap = 1;
				SYSMEM_W("kernel 'reserved-memory' \"%s\"(0x%08lx - 0x%08lx) "
					 "overlaps with \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 smem->attr.name, (ulong)smem->base,
					 (ulong)(smem->base + smem->size));
			}
		}

		/*
		 * Check whether any sysmem allocated region has overflowed.
		 */
		if (smem->attr.flags & F_OFC) {
			check = (struct memcheck *)
				(smem->base + smem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (smem->attr.flags & F_HOFC) {
			check = (struct memcheck *)
				(smem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = 0;
		}

		if (overflow) {
			SYSMEM_E("Found a region overflow!\n");
			break;
		}
	}

	if (overflow || overlap)
		sysmem_dump();
}

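/* Register a bank of available memory with the underlying LMB pool */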
static int sysmem_add(phys_addr_t base, phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	int ret;

	if (!size)
		return -EINVAL;

	ret = lmb_add(&sysmem->lmb, base, size);
	if (ret < 0)
		SYSMEM_E("Failed to add sysmem at 0x%08lx for 0x%08lx size\n",
			 (ulong)base, (ulong)size);

	return (ret >= 0) ? 0 : ret;
}

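/*
 * Map an alias (or the primary name itself) to the primary name in mem_attr[]
 * and return the matching id; an unknown name is returned unchanged.
 */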
static const char *sysmem_alias2name(const char *name, int *id)
{
	const char *alias;
	int i, j;
	int match = 0;

	for (i = 0; i < MEM_MAX; i++) {
		/* Primary name */
		if (mem_attr[i].name && !strcasecmp(mem_attr[i].name, name)) {
			match = 1;
			goto finish;
		}

		/* Alias name */
		alias = mem_attr[i].alias[0];
		if (!alias)
			continue;

		for (j = 0; j < ALIAS_COUNT_MAX; j++) {
			alias = mem_attr[i].alias[j];
			if (alias && !strcasecmp(alias, name)) {
				match = 1;
				goto finish;
			}
		}
	}

finish:
	if (match) {
		*id = i;
		return mem_attr[i].name;
	}

	return name;
}

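/*
 * Core allocator: reserve 'size' bytes at 'base' (or anywhere, when base is
 * SYSMEM_ALLOC_ANYWHERE) from the LMB pool with the requested alignment,
 * record the region on the allocated list and plant the overflow-check guard
 * word when the attr flags ask for it. Returns the allocated address, or
 * normally NULL on failure.
 */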
static void *sysmem_alloc_align_base(enum memblk_id id,
				     const char *mem_name,
				     phys_addr_t base,
				     phys_size_t size,
				     ulong align)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblk_attr attr;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	const char *name;
	phys_addr_t paddr;
	phys_addr_t alloc_base;
	phys_size_t alloc_size;
	phys_addr_t orig_base = base;

	if (!sysmem_has_init())
		goto out;

	if (id == MEM_BY_NAME || id == MEM_KMEM_RESERVED) {
		if (!mem_name) {
			SYSMEM_E("NULL name for alloc sysmem\n");
			goto out;
		}

		/* Find: name, id and attr by the outer mem_name & id */
		name = sysmem_alias2name(mem_name, (int *)&id);
		attr = mem_attr[id];
		if (!attr.name)
			attr.name = strdup(name);

		/* Always let a kernel 'reserved-memory' alloc succeed */
		if (id == MEM_KMEM_RESERVED) {
			struct memblock *mem;

			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				return mem;
			}

			attr.flags |= F_KMEM_RESERVED;
			mem->orig_base = orig_base;
			mem->base = base;
			mem->size = size;
			mem->attr = attr;
			sysmem->kmem_resv_cnt++;
			list_add_tail(&mem->node, &sysmem->kmem_resv_head);

			return (void *)base;
		}
	} else if (id > MEM_UNK && id < MEM_MAX) {
		attr = mem_attr[id];
		name = attr.name;

		/*
		 * Special handling for the Android AVB alloc (anywhere).
		 *
		 * Fix up the base and place it right below the U-Boot stack;
		 * adding some extra space (4 KB) should be safer.
		 */
		if (attr.flags & F_HIGHEST_MEM) {
			base = gd->start_addr_sp -
					CONFIG_SYS_STACK_SIZE - size - 0x1000;

		/*
		 * Address 0x0 is usually requested by a 32-bit uncompressed
		 * kernel, and this alloc is just a peek.
		 *
		 * Since the LMB core doesn't support allocating at address
		 * 0x0, we have to shift the memblock by a few bytes;
		 * ARCH_DMA_MINALIGN is a good choice.
		 */
		} else if (!base) {
			base += ARCH_DMA_MINALIGN;
		} else if (base < gd->bd->bi_dram[0].start) {
			/*
			 * On the Rockchip platform:
			 *
			 * So far we use F_IGNORE_INVISIBLE for the uncompressed
			 * kernel alloc, and for ARMv8 with AArch32 mode enabled
			 * the ATF is still AArch64 and occupies 0~1 MB with
			 * shmem at 1~2 MB. So ignore the region that overlaps
			 * with them.
			 */
			if (attr.flags & F_IGNORE_INVISIBLE) {
				base = gd->bd->bi_dram[0].start;
			} else {
				SYSMEM_E("Failed to alloc invisible sub region 0x%08lx - 0x%08lx "
					 "of \"%s\" at 0x%08lx - 0x%08lx\n",
					 (ulong)base, (ulong)gd->bd->bi_dram[0].start,
					 name, (ulong)base, (ulong)(base + size));
				goto out;
			}
		}
	} else {
		SYSMEM_E("Unsupported memblk id %d for alloc sysmem\n", id);
		goto out;
	}

	if (!size) {
		SYSMEM_E("\"%s\" size is 0 for alloc sysmem\n", name);
		goto out;
	}

	/*
	 * Some modules use sysmem_alloc() to allocate a region for a storage
	 * read/write buffer, which should be aligned to the cacheline size,
	 * e.g. AVB.
	 *
	 * Align the base down to the cacheline size if it is not aligned,
	 * otherwise the tail of the region may overflow.
	 */
	if (attr.flags & F_CACHELINE_ALIGN &&
	    !IS_ALIGNED(base, ARCH_DMA_MINALIGN)) {
		base = ALIGN(base, ARCH_DMA_MINALIGN);
		base -= ARCH_DMA_MINALIGN;
	}

	if (base != SYSMEM_ALLOC_ANYWHERE && !IS_ALIGNED(base, 4)) {
		SYSMEM_E("\"%s\" base=0x%08lx is not 4-byte aligned\n",
			 name, (ulong)base);
		goto out;
	}

	/* Must be sizeof(long)-byte aligned */
	size = ALIGN(size, sizeof(long));

	SYSMEM_D("Enter alloc: \"%s\" 0x%08lx - 0x%08lx\n",
		 name, (ulong)base, (ulong)(base + size));

	/* Already allocated ? */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		SYSMEM_D("Already allocated: %s, 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		if (!strcmp(mem->attr.name, name)) {
			/* Allow double alloc for the same but smaller region */
			if (mem->base <= base && mem->size >= size)
				return (void *)base;

			SYSMEM_E("Failed to double alloc for existing \"%s\"\n", name);
			goto out;
		} else if (sysmem_is_overlap(mem->base, mem->size, base, size)) {
			SYSMEM_E("\"%s\" (0x%08lx - 0x%08lx) alloc "
				 "overlaps with existing \"%s\" (0x%08lx - "
				 "0x%08lx)\n",
				 name, (ulong)base, (ulong)(base + size),
				 mem->attr.name, (ulong)mem->base,
				 (ulong)(mem->base + mem->size));
			goto out;
		}
	}

	/* Add overflow check magic ? */
	if (attr.flags & F_OFC)
		alloc_size = size + sizeof(*check);
	else
		alloc_size = size;

	/* Alloc anywhere ? */
	if (base == SYSMEM_ALLOC_ANYWHERE)
		alloc_base = LMB_ALLOC_ANYWHERE;
	else
		alloc_base = base + alloc_size;	/* LMB allocates by aligning down */

	SYSMEM_D("DO alloc... base: 0x%08lx\n", (ulong)alloc_base);

	paddr = lmb_alloc_base(&sysmem->lmb, alloc_size, align, alloc_base);
	if (paddr) {
		if ((paddr == base) || (base == SYSMEM_ALLOC_ANYWHERE)) {
			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				goto out;
			}
			/* Record the original base for dump */
			if (attr.flags & F_HIGHEST_MEM)
				mem->orig_base = base;
			else
				mem->orig_base = orig_base;

			mem->base = paddr;
			mem->size = alloc_size;
			mem->attr = attr;
			sysmem->allocated_cnt++;
			list_add_tail(&mem->node, &sysmem->allocated_head);

			/* Add overflow check magic */
			if (mem->attr.flags & F_OFC) {
				check = (struct memcheck *)(paddr + size);
				check->magic = SYSMEM_MAGIC;
			} else if (mem->attr.flags & F_HOFC) {
				check = (struct memcheck *)(paddr - sizeof(*check));
				check->magic = SYSMEM_MAGIC;
			}
		} else {
			SYSMEM_E("Failed to alloc \"%s\" expected at 0x%08lx - 0x%08lx "
				 "but at 0x%08lx - 0x%08lx\n",
				 name, (ulong)base, (ulong)(base + size),
				 (ulong)paddr, (ulong)(paddr + size));
			/* Free the region we didn't want allocated */
			if (lmb_free(&sysmem->lmb, paddr, alloc_size) < 0)
				SYSMEM_E("Failed to free \"%s\"\n", name);

			goto out;
		}
	} else {
		SYSMEM_E("Failed to alloc \"%s\" at 0x%08lx - 0x%08lx\n",
			 name, (ulong)base, (ulong)(base + size));
		goto out;
	}

	SYSMEM_D("Exit alloc: \"%s\", paddr=0x%08lx, size=0x%08lx, align=0x%x, anywhere=%d\n",
		 name, (ulong)paddr, (ulong)size, (u32)align, !base);

	return (void *)paddr;

out:
	/*
	 * Why base + sizeof(ulong) ?
	 *
	 * It is not a standard way to handle the case where the input base
	 * is 0: 0 equals NULL, but we don't want to return NULL on a
	 * successful alloc, so returning any non-NULL value is okay.
	 *
	 * When does this happen? A 32-bit platform may alloc the region for
	 * the uncompressed kernel at address 0.
	 */
	if (base == 0)
		base = base + sizeof(ulong);

	return (attr.flags & (F_IGNORE_INVISIBLE | F_NO_FAIL_DUMP)) ?
			(void *)base : NULL;
}

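/* Allocate a region for a known memblk id at any address, cacheline aligned; dump the layout on failure */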
void *sysmem_alloc(enum memblk_id id, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					ARCH_DMA_MINALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_by_name(const char *name, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
					name,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					ARCH_DMA_MINALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base(enum memblk_id id, phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base_by_name(const char *name,
				phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_fdt_reserve_alloc_base(const char *name,
				    phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_KMEM_RESERVED,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

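/*
 * Probe for a free region of 'size' bytes below the U-Boot stack: the trial
 * allocation is released immediately, so only the address is returned
 * (0 if nothing suitable is available).
 */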
ulong sysmem_alloc_temporary_mem(phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t alloc_base;
	phys_addr_t paddr;
	phys_addr_t base;
	int ret;

	if (!sysmem_has_init())
		return 0;

	base = (gd->start_addr_sp - CONFIG_SYS_STACK_SIZE - 0x2000) - size;

	/* LMB allocates by aligning down */
	alloc_base = base + size;
	paddr = __lmb_alloc_base(&sysmem->lmb, size, SZ_1K, alloc_base);
	if (paddr) {
		/* If the free fails, return 0 */
		ret = lmb_free(&sysmem->lmb, paddr, size);
		if (ret < 0) {
			SYSMEM_E("Can't free at 0x%08lx - 0x%08lx, ret=%d\n",
				 (ulong)paddr, (ulong)(paddr + size), ret);
			return 0;
		}
	}

	return paddr;
}

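/*
 * Free a region previously allocated through sysmem, looked up by its base
 * (or original base) address, and drop it from the allocated list.
 */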
int sysmem_free(phys_addr_t base)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblock *mem;
	struct list_head *node;
	int ret, found = 0;

	if (!sysmem_has_init())
		return -ENOSYS;

	/* Find the existing allocation */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		if (mem->base == base || mem->orig_base == base) {
			found = 1;
			break;
		}
	}

	if (!found) {
		SYSMEM_E("Failed to free unallocated sysmem at 0x%08lx\n",
			 (ulong)base);
		return -EINVAL;
	}

	ret = lmb_free(&sysmem->lmb, mem->base, mem->size);
	if (ret >= 0) {
		SYSMEM_D("Free: \"%s\" 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		sysmem->allocated_cnt--;
		list_del(&mem->node);
		free(mem);
	} else {
		SYSMEM_E("Failed to free \"%s\" at 0x%08lx\n",
			 mem->attr.name, (ulong)base);
	}

	return (ret >= 0) ? 0 : ret;
}

int sysmem_initr(void)
{
	return sysmem_init();
}

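/*
 * Initialize the sysmem pool from the DRAM banks (or bootm_low/bootm_size),
 * then reserve the board-specific regions, the U-Boot framework area above
 * gd->start_addr_sp and the U-Boot stack.
 */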
int sysmem_init(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t mem_start;
	phys_size_t mem_size;
	int ret;

	lmb_init(&sysmem->lmb);
	INIT_LIST_HEAD(&sysmem->allocated_head);
	INIT_LIST_HEAD(&sysmem->kmem_resv_head);
	sysmem->allocated_cnt = 0;
	sysmem->kmem_resv_cnt = 0;

	if (gd->flags & GD_FLG_RELOC) {
		sysmem->has_initr = true;
	} else {
		SYSMEM_I("init\n");
		sysmem->has_initf = true;
	}

	/* Add all available system memory */
#ifdef CONFIG_NR_DRAM_BANKS
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (!gd->bd->bi_dram[i].size)
			continue;

		ret = sysmem_add(gd->bd->bi_dram[i].start,
				 gd->bd->bi_dram[i].size);
		if (ret) {
			SYSMEM_E("Failed to add sysmem from bi_dram[%d]\n", i);
			goto fail;
		}
	}
#else
	mem_start = env_get_bootm_low();
	mem_size = env_get_bootm_size();
	ret = sysmem_add(mem_start, mem_size);
	if (ret) {
		SYSMEM_E("Failed to add sysmem from bootm_low/size\n");
		goto fail;
	}
#endif
	/* Reserved for the board */
	ret = board_sysmem_reserve(sysmem);
	if (ret) {
		SYSMEM_E("Failed to reserve sysmem for board\n");
		goto fail;
	}

	/* Reserved for the U-Boot framework: 'reserve_xxx()' */
	mem_start = gd->start_addr_sp;
	mem_size = gd->ram_top - mem_start;
	if (!sysmem_alloc_base(MEM_UBOOT, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for U-Boot framework\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* Reserved for the U-Boot stack */
	mem_start = gd->start_addr_sp - CONFIG_SYS_STACK_SIZE;
	mem_size = CONFIG_SYS_STACK_SIZE;
	if (!sysmem_alloc_base(MEM_STACK, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for stack\n");
		ret = -ENOMEM;
		goto fail;
	}

	return 0;

fail:
	if (ret && !(gd->flags & GD_FLG_RELOC)) {
		sysmem_dump();
		SYSMEM_W("Maybe malloc size %d MiB is too large?\n\n",
			 SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	}

	return ret;
}

__weak int board_sysmem_reserve(struct sysmem *sysmem)
{
	/* Please define a platform-specific board_sysmem_reserve() */
	return 0;
}

static int do_sysmem_dump(cmd_tbl_t *cmdtp, int flag,
			  int argc, char *const argv[])
{
	sysmem_dump();
	return 0;
}

static int do_sysmem_search(cmd_tbl_t *cmdtp, int flag,
			    int argc, char *const argv[])
{
	ulong addr, size;

	if (argc != 2)
		return CMD_RET_USAGE;

	size = simple_strtoul(argv[1], NULL, 16);
	if (!size)
		return CMD_RET_USAGE;

	addr = sysmem_alloc_temporary_mem(size);
	if (!addr)
		SYSMEM_I("No available region with size 0x%08lx\n", size);
	else
		SYSMEM_I("Available region at address: 0x%08lx\n", addr);

	env_set_hex("smem_addr", addr);

	return 0;
}

U_BOOT_CMD(
	sysmem_dump, 1, 1, do_sysmem_dump,
	"Dump sysmem layout",
	""
);

U_BOOT_CMD(
	sysmem_search, 2, 1, do_sysmem_search,
	"Search an available sysmem region",
	"<size in hex>"
);