// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <bidram.h>
#include <sysmem.h>
#include <lmb.h>
#include <malloc.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

#define SYSMEM_MAGIC		0x4D454D53	/* "SMEM" */

#define LMB_ALLOC_ANYWHERE	0	/* sync with lmb.c */
#define SYSMEM_ALLOC_NO_ALIGN	1
#define SYSMEM_ALLOC_ANYWHERE	2

#define SYSMEM_I(fmt, args...)	printf("Sysmem: "fmt, ##args)
#define SYSMEM_W(fmt, args...)	printf("Sysmem Warn: "fmt, ##args)
#define SYSMEM_E(fmt, args...)	printf("Sysmem Error: "fmt, ##args)
#define SYSMEM_D(fmt, args...)	debug("Sysmem Debug: "fmt, ##args)

struct memcheck {
	uint32_t magic;
};
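
/*
 * Layout sketch (illustrative, derived from the F_OFC/F_HOFC handling
 * below): a tail overflow check (F_OFC) keeps the magic in the last
 * sizeof(struct memcheck) bytes of the allocated block, a head check
 * (F_HOFC) keeps it right before the block:
 *
 *   F_OFC:   [base ........ data ........][magic]
 *   F_HOFC:  [magic][base ........ data ........]
 *
 * sysmem_dump() and sysmem_overflow_check() report an overflow whenever
 * the magic no longer reads back as SYSMEM_MAGIC.
 */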

/* Global for platform, must be in the data section */
struct sysmem plat_sysmem __section(".data") = {
	.has_initf = false,
	.has_initr = false,
};

bool sysmem_has_init(void)
{
	return gd->flags & GD_FLG_RELOC ?
			plat_sysmem.has_initr : plat_sysmem.has_initf;
}

static inline int sysmem_is_overlap(phys_addr_t base1, phys_size_t size1,
				    phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
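
/*
 * Example (illustrative): [0x1000, 0x2000) and [0x1800, 0x2800) overlap
 * because 0x1000 < 0x2800 and 0x1800 < 0x2000, while [0x1000, 0x2000) and
 * [0x2000, 0x3000) do not, since the end addresses are exclusive.
 */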

static inline int sysmem_is_sub_region(struct memblock *sub,
				       struct memblock *main)
{
	if (!sub || !main)
		return false;

	return ((sub->base >= main->base) &&
		(sub->base + sub->size <= main->base + main->size));
}

void sysmem_dump(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct lmb *lmb = &sysmem->lmb;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	ulong memory_size = 0;
	ulong reserved_size = 0;
	ulong allocated_size = 0;
	bool overflow = false;
	ulong i;

	if (!sysmem_has_init())
		return;

	printf("\nsysmem_dump_all:\n");

	/* Memory pool */
	printf(" --------------------------------------------------------------------\n");
	for (i = 0; i < lmb->memory.cnt; i++) {
		memory_size += lmb->memory.region[i].size;
		printf(" memory.rgn[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->memory.region[i].base,
		       (ulong)lmb->memory.region[i].base +
		       (ulong)lmb->memory.region[i].size,
		       (ulong)lmb->memory.region[i].size);
	}
	printf("\n memory.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)memory_size,
	       SIZE_MB((ulong)memory_size),
	       SIZE_KB((ulong)memory_size));

	/* Allocated */
	i = 0;
	printf(" --------------------------------------------------------------------\n");
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		if (mem->attr.flags & F_OFC) {
			check = (struct memcheck *)
				(mem->base + mem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (mem->attr.flags & F_HOFC) {
			check = (struct memcheck *)
				(mem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = false;
		}

		printf(" allocated.rgn[%ld].name = \"%s\" %s %s\n",
		       i, mem->attr.name, overflow ? " <Overflow!>" : "",
		       mem->orig_base != mem->base ? "<*>" : "");
		printf(" .addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	/* Kernel 'reserved-memory' */
	i = 0;
	printf("\n");
	list_for_each(node, &sysmem->kmem_resv_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		printf(" kmem-resv.rgn[%ld].name = \"%s\" %s\n",
		       i, mem->attr.name,
		       mem->orig_base != mem->base ? "<*>" : "");
		printf(" .addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	printf("\n framework malloc_r = %3d MiB",
	       SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	printf("\n framework malloc_f = %3d KiB\n",
	       SIZE_KB(CONFIG_SYS_MALLOC_F_LEN));

	printf("\n allocated.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)allocated_size,
	       SIZE_MB((ulong)allocated_size),
	       SIZE_KB((ulong)allocated_size));

	/* LMB core reserved */
	printf(" --------------------------------------------------------------------\n");
	reserved_size = 0;
	for (i = 0; i < lmb->reserved.cnt; i++) {
		reserved_size += lmb->reserved.region[i].size;
		printf(" LMB.allocated[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->reserved.region[i].base,
		       (ulong)lmb->reserved.region[i].base +
		       (ulong)lmb->reserved.region[i].size,
		       (ulong)lmb->reserved.region[i].size);
	}

	printf("\n reserved.core.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)reserved_size,
	       SIZE_MB((ulong)reserved_size),
	       SIZE_KB((ulong)reserved_size));
	printf(" --------------------------------------------------------------------\n\n");
}

void sysmem_overflow_check(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct list_head *node, *knode;
	struct memcheck *check;
	struct memblock *kmem;
	struct memblock *smem;
	struct memblock *rmem;
	int overflow = 0, overlap = 0;

	if (!sysmem_has_init())
		return;

#ifdef CONFIG_BIDRAM
	/*
	 * Check whether kernel 'reserved-memory' overlaps with invisible
	 * regions.
	 *
	 * Here we only print a warning message when a region overlaps with
	 * an invisible region.
	 */
	list_for_each(knode, &sysmem->kmem_resv_head) {
		kmem = list_entry(knode, struct memblock, node);
		rmem = bidram_reserved_is_overlap(kmem->base, kmem->size);
		if (rmem) {
			const char *alias;
			int i, dump = 1;

			/*
			 * Ignore sub regions of an invisible region,
			 * e.g. ramoops within SHM.
			 */
			alias = rmem->attr.alias[0];
			if (alias && sysmem_is_sub_region(kmem, rmem)) {
				for (i = 0; i < ALIAS_COUNT_MAX; i++, alias++) {
					alias = rmem->attr.alias[i];
					if (!alias)
						continue;
					if (!strncasecmp(kmem->attr.name, alias,
							 strlen(alias))) {
						dump = 0;
						break;
					}
				}
			}

			if (dump)
				SYSMEM_W("kernel 'reserved-memory' \"%s\" (0x%08lx - 0x%08lx) "
					 "overlaps with [invisible] \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 rmem->attr.name, (ulong)rmem->base,
					 (ulong)(rmem->base + rmem->size));
		}
	}
#endif

	list_for_each(node, &sysmem->allocated_head) {
		smem = list_entry(node, struct memblock, node);
		/*
		 * Check whether kernel 'reserved-memory' overlaps with
		 * sysmem allocated regions.
		 */
		list_for_each(knode, &sysmem->kmem_resv_head) {
			kmem = list_entry(knode, struct memblock, node);
			if (sysmem_is_overlap(smem->base, smem->size,
					      kmem->base, kmem->size)) {
				if (smem->attr.flags & F_KMEM_CAN_OVERLAP)
					continue;

				overlap = 1;
				SYSMEM_W("kernel 'reserved-memory' \"%s\" (0x%08lx - 0x%08lx) "
					 "overlaps with \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 smem->attr.name, (ulong)smem->base,
					 (ulong)(smem->base + smem->size));
			}
		}

		/*
		 * Check whether sysmem allocated regions overflow.
		 */
		if (smem->attr.flags & F_OFC) {
			check = (struct memcheck *)
				(smem->base + smem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (smem->attr.flags & F_HOFC) {
			check = (struct memcheck *)
				(smem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = 0;
		}

		if (overflow) {
			SYSMEM_E("Found a region overflow!\n");
			break;
		}
	}

	if (overflow || overlap)
		sysmem_dump();
}

static int sysmem_add(phys_addr_t base, phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	int ret;

	if (!size)
		return -EINVAL;

	ret = lmb_add(&sysmem->lmb, base, size);
	if (ret < 0)
		SYSMEM_E("Failed to add sysmem at 0x%08lx with size 0x%08lx\n",
			 (ulong)base, (ulong)size);

	return (ret >= 0) ? 0 : ret;
}

static const char *sysmem_alias2name(const char *name, int *id)
{
	const char *alias;
	int i, j;
	int match = 0;

	for (i = 0; i < MEM_MAX; i++) {
		/* Primary name */
		if (mem_attr[i].name && !strcasecmp(mem_attr[i].name, name)) {
			match = 1;
			goto finish;
		}

		/* Alias name */
		alias = mem_attr[i].alias[0];
		if (!alias)
			continue;

		for (j = 0; j < ALIAS_COUNT_MAX; j++) {
			alias = mem_attr[i].alias[j];
			if (alias && !strcasecmp(alias, name)) {
				match = 1;
				goto finish;
			}
		}
	}

finish:
	if (match) {
		*id = i;
		return mem_attr[i].name;
	}

	return name;
}
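
/*
 * Illustrative example (the mem_attr[] entries are platform defined, the
 * names below are hypothetical): if mem_attr[MEM_FDT].name is "fdt" and
 * "fdt_blob" is listed in its alias[] array, sysmem_alias2name("fdt_blob",
 * &id) returns "fdt" and sets id to MEM_FDT; an unknown name is returned
 * unchanged and *id is left untouched.
 */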

static void *sysmem_alloc_align_base(enum memblk_id id,
				     const char *mem_name,
				     phys_addr_t base,
				     phys_size_t size,
				     ulong align)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblk_attr attr;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	const char *name;
	phys_addr_t paddr;
	phys_addr_t alloc_base;
	phys_size_t alloc_size;
	phys_addr_t orig_base = base;

	if (!sysmem_has_init())
		goto out;

	/* If out of the management range, just return */
	if (base != SYSMEM_ALLOC_ANYWHERE && base < CONFIG_SYS_SDRAM_BASE)
		return (void *)base;
	if ((base + size >= CONFIG_SYS_SDRAM_BASE + SDRAM_MAX_SIZE) &&
	    (base + size <= SZ_4G))
		return (void *)base;

	if (id == MEM_BY_NAME || id == MEM_KMEM_RESERVED) {
		if (!mem_name) {
			SYSMEM_E("NULL name for alloc sysmem\n");
			goto out;
		}

		/* Find name, id and attr from the outer mem_name & id */
		name = sysmem_alias2name(mem_name, (int *)&id);
		attr = mem_attr[id];
		if (!attr.name)
			attr.name = strdup(name);

		/* Always make kernel 'reserved-memory' alloc succeed */
		if (id == MEM_KMEM_RESERVED) {
			struct memblock *mem;

			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				return mem;
			}

			attr.flags |= F_KMEM_RESERVED;
			mem->orig_base = orig_base;
			mem->base = base;
			mem->size = size;
			mem->attr = attr;
			sysmem->kmem_resv_cnt++;
			list_add_tail(&mem->node, &sysmem->kmem_resv_head);

			return (void *)base;
		}
	} else if (id > MEM_UNK && id < MEM_MAX) {
		attr = mem_attr[id];
		name = attr.name;

		/*
		 * Special handling for the Android AVB alloc (anywhere).
		 *
		 * Fix up the base and place it right after the U-Boot stack;
		 * adding some extra space (4 KiB) is safer.
		 */
		if (attr.flags & F_HIGHEST_MEM) {
			base = gd->start_addr_sp -
				CONFIG_SYS_STACK_SIZE - size - 0x1000;

		/*
		 * The 0x0 address is usually allocated by a 32-bit
		 * uncompressed kernel and this alloc action is just a peek.
		 *
		 * Because the LMB core doesn't support allocation at the 0x0
		 * address, we have to shift the memblock by a few bytes.
		 *
		 * ARCH_DMA_MINALIGN is a reasonable choice.
		 */
		} else if (!base) {
			base += ARCH_DMA_MINALIGN;
		} else if (base < gd->bd->bi_dram[0].start) {
			/*
			 * On the Rockchip platform:
			 *
			 * So far we use F_IGNORE_INVISIBLE for the uncompressed
			 * kernel alloc, and for ARMv8 with AArch32 mode enabled
			 * the ATF is still AArch64 and occupies 0~1MB, with shmem
			 * at 1~2MB. So ignore the region that overlaps with them.
			 */
			if (attr.flags & F_IGNORE_INVISIBLE) {
				base = gd->bd->bi_dram[0].start;
			} else {
				SYSMEM_E("Failed to alloc invisible sub region 0x%08lx - 0x%08lx "
					 "of \"%s\" at 0x%08lx - 0x%08lx\n",
					 (ulong)base, (ulong)gd->bd->bi_dram[0].start,
					 name, (ulong)base, (ulong)(base + size));
				goto out;
			}
		}
	} else {
		SYSMEM_E("Unsupported memblk id %d for alloc sysmem\n", id);
		goto out;
	}

	if (!size) {
		SYSMEM_E("\"%s\" size is 0 for alloc sysmem\n", name);
		goto out;
	}

	/*
	 * Some modules use sysmem_alloc() to alloc a region for a storage
	 * read/write buffer, which should be aligned to the cacheline size,
	 * e.g. AVB.
	 *
	 * Align the base down to the cacheline size if it is not aligned,
	 * otherwise the tail of the region may overflow.
	 */
	if (attr.flags & F_CACHELINE_ALIGN &&
	    !IS_ALIGNED(base, ARCH_DMA_MINALIGN)) {
		base = ALIGN(base, ARCH_DMA_MINALIGN);
		base -= ARCH_DMA_MINALIGN;
	}
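
	/*
	 * Worked example (illustrative; ARCH_DMA_MINALIGN is assumed to be
	 * 64 here): base = 0x1010 -> ALIGN() rounds up to 0x1040, then the
	 * subtraction gives 0x1000, i.e. the base is effectively aligned
	 * down to the previous cacheline boundary.
	 */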

	if (base != SYSMEM_ALLOC_ANYWHERE && !IS_ALIGNED(base, 4)) {
		SYSMEM_E("\"%s\" base=0x%08lx is not 4-byte aligned\n",
			 name, (ulong)base);
		goto out;
	}

	/* Must be sizeof(long)-byte aligned */
	size = ALIGN(size, sizeof(long));

	SYSMEM_D("Enter alloc: \"%s\" 0x%08lx - 0x%08lx\n",
		 name, (ulong)base, (ulong)(base + size));

	/* Already allocated ? */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		SYSMEM_D("Has allocated: %s, 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		if (!strcmp(mem->attr.name, name)) {
			/* Allow double alloc for the same but smaller region */
			if (mem->base <= base && mem->size >= size)
				return (void *)base;

			SYSMEM_E("Failed to double alloc for existing \"%s\"\n", name);
			goto out;
		} else if (sysmem_is_overlap(mem->base, mem->size, base, size)) {
			if (attr.flags & F_FAIL_WARNING)
				SYSMEM_W("**Maybe** \"%s\" (0x%08lx - 0x%08lx) alloc "
					 "overlaps with existing \"%s\" (0x%08lx - "
					 "0x%08lx)\n",
					 name, (ulong)base, (ulong)(base + size),
					 mem->attr.name, (ulong)mem->base,
					 (ulong)(mem->base + mem->size));

			else
				SYSMEM_E("\"%s\" (0x%08lx - 0x%08lx) alloc "
					 "overlaps with existing \"%s\" (0x%08lx - "
					 "0x%08lx)\n",
					 name, (ulong)base, (ulong)(base + size),
					 mem->attr.name, (ulong)mem->base,
					 (ulong)(mem->base + mem->size));
			goto out;
		}
	}

	/* Add overflow check magic ? */
	if (attr.flags & F_OFC)
		alloc_size = size + sizeof(*check);
	else
		alloc_size = size;

	/* Alloc anywhere ? */
	if (base == SYSMEM_ALLOC_ANYWHERE)
		alloc_base = LMB_ALLOC_ANYWHERE;
	else
		alloc_base = base + alloc_size; /* LMB allocates downward (align-down) */

	SYSMEM_D("DO alloc... base: 0x%08lx\n", (ulong)alloc_base);

	paddr = lmb_alloc_base(&sysmem->lmb, alloc_size, align, alloc_base);
	if (paddr) {
		if ((paddr == base) || (base == SYSMEM_ALLOC_ANYWHERE)) {
			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				goto out;
			}
			/* Record the original base for dump */
			if (attr.flags & F_HIGHEST_MEM)
				mem->orig_base = base;
			else
				mem->orig_base = orig_base;

			mem->base = paddr;
			mem->size = alloc_size;
			mem->attr = attr;
			sysmem->allocated_cnt++;
			list_add_tail(&mem->node, &sysmem->allocated_head);

			/* Add overflow check magic */
			if (mem->attr.flags & F_OFC) {
				check = (struct memcheck *)(paddr + size);
				check->magic = SYSMEM_MAGIC;
			} else if (mem->attr.flags & F_HOFC) {
				check = (struct memcheck *)(paddr - sizeof(*check));
				check->magic = SYSMEM_MAGIC;
			}
		} else {
			SYSMEM_E("Failed to alloc \"%s\" expected at 0x%08lx - 0x%08lx "
				 "but at 0x%08lx - 0x%08lx\n",
				 name, (ulong)base, (ulong)(base + size),
				 (ulong)paddr, (ulong)(paddr + size));
			/* Free the allocated region that we don't want */
			if (lmb_free(&sysmem->lmb, paddr, alloc_size) < 0)
				SYSMEM_E("Failed to free \"%s\"\n", name);

			goto out;
		}
	} else {
		SYSMEM_E("Failed to alloc \"%s\" at 0x%08lx - 0x%08lx\n",
			 name, (ulong)base, (ulong)(base + size));
		goto out;
	}

	SYSMEM_D("Exit alloc: \"%s\", paddr=0x%08lx, size=0x%08lx, align=0x%x, anywhere=%d\n",
		 name, (ulong)paddr, (ulong)size, (u32)align, !base);

	return (void *)paddr;

out:
	/*
	 * Why base + sizeof(ulong)?
	 *
	 * It's not a standard way to handle the case where the input base
	 * is 0. Because 0 equals NULL, and we don't want to return NULL on
	 * a successful alloc, returning any non-NULL value is okay.
	 *
	 * When does this happen?
	 * A 32-bit platform may alloc a region for the uncompressed kernel
	 * at address 0.
	 */
	if (base == 0)
		base = base + sizeof(ulong);

	return (attr.flags & (F_IGNORE_INVISIBLE | F_NO_FAIL_DUMP)) ?
			(void *)base : NULL;
}

void *sysmem_alloc(enum memblk_id id, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					ARCH_DMA_MINALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_by_name(const char *name, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
					name,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					ARCH_DMA_MINALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base(enum memblk_id id, phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base_by_name(const char *name,
				phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_BY_NAME,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_fdt_reserve_alloc_base(const char *name,
				    phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEM_KMEM_RESERVED,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

ulong sysmem_alloc_temporary_mem(phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t alloc_base;
	phys_addr_t paddr;
	phys_addr_t base;
	int ret;

	if (!sysmem_has_init())
		return 0;

	base = (gd->start_addr_sp - CONFIG_SYS_STACK_SIZE - 0x2000) - size;

	/* LMB allocates downward (align-down) */
	alloc_base = base + size;
	paddr = __lmb_alloc_base(&sysmem->lmb, size, SZ_1K, alloc_base);
	if (paddr) {
		/* If the free fails, return 0 */
		ret = lmb_free(&sysmem->lmb, paddr, size);
		if (ret < 0) {
			SYSMEM_E("Can't free at 0x%08lx - 0x%08lx, ret=%d\n",
				 (ulong)paddr, (ulong)(paddr + size), ret);
			return 0;
		}
	}

	return paddr;
}
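
/*
 * Note: the allocation above is released again right away, so the address
 * returned by sysmem_alloc_temporary_mem() is only a hint that the region
 * below the U-Boot stack is currently free; it is not a reservation, and
 * callers such as the "sysmem_search" command below must use it before
 * anything else claims that range.
 */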

int sysmem_free(phys_addr_t base)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblock *mem;
	struct list_head *node;
	int ret, found = 0;

	if (!sysmem_has_init())
		return -ENOSYS;

	/* Find the existing region */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		if (mem->base == base || mem->orig_base == base) {
			found = 1;
			break;
		}
	}

	if (!found) {
		SYSMEM_E("Failed to free unallocated sysmem at 0x%08lx\n",
			 (ulong)base);
		return -EINVAL;
	}

	ret = lmb_free(&sysmem->lmb, mem->base, mem->size);
	if (ret >= 0) {
		SYSMEM_D("Free: \"%s\" 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		sysmem->allocated_cnt--;
		list_del(&mem->node);
		free(mem);
	} else {
		SYSMEM_E("Failed to free \"%s\" at 0x%08lx\n",
			 mem->attr.name, (ulong)base);
	}

	return (ret >= 0) ? 0 : ret;
}
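
/*
 * Usage sketch (illustrative only; the "demo-buf" name, base address and
 * size are hypothetical):
 *
 *	void *buf = sysmem_alloc_base_by_name("demo-buf", 0x04000000, SZ_1M);
 *
 *	if (buf) {
 *		... use the region ...
 *		sysmem_free((phys_addr_t)buf);
 *	}
 *
 * A failed allocation already calls sysmem_dump(), so the caller only has
 * to check the returned pointer.
 */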

int sysmem_initr(void)
{
	return sysmem_init();
}

int sysmem_init(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t mem_start;
	phys_size_t mem_size;
	int ret;

	lmb_init(&sysmem->lmb);
	INIT_LIST_HEAD(&sysmem->allocated_head);
	INIT_LIST_HEAD(&sysmem->kmem_resv_head);
	sysmem->allocated_cnt = 0;
	sysmem->kmem_resv_cnt = 0;

	if (gd->flags & GD_FLG_RELOC) {
		sysmem->has_initr = true;
	} else {
		SYSMEM_I("init\n");
		sysmem->has_initf = true;
	}

	/* Add all available system memory */
#ifdef CONFIG_NR_DRAM_BANKS
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (!gd->bd->bi_dram[i].size)
			continue;

		ret = sysmem_add(gd->bd->bi_dram[i].start,
				 gd->bd->bi_dram[i].size);
		if (ret) {
			SYSMEM_E("Failed to add sysmem from bi_dram[%d]\n", i);
			goto fail;
		}
	}
#else
	mem_start = env_get_bootm_low();
	mem_size = env_get_bootm_size();
	ret = sysmem_add(mem_start, mem_size);
	if (ret) {
		SYSMEM_E("Failed to add sysmem from bootm_low/size\n");
		goto fail;
	}
#endif
	/* Reserved for the board */
	ret = board_sysmem_reserve(sysmem);
	if (ret) {
		SYSMEM_E("Failed to reserve sysmem for board\n");
		goto fail;
	}

	/* Reserved for the U-Boot framework: 'reserve_xxx()' */
	mem_start = gd->start_addr_sp;
#ifdef CONFIG_PRAM
	mem_size = gd->ram_top - (CONFIG_PRAM * 1024) - mem_start;
#else
	mem_size = gd->ram_top - mem_start;
#endif
	if (!sysmem_alloc_base(MEM_UBOOT, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for U-Boot framework\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* Reserved for the U-Boot stack */
	mem_start = gd->start_addr_sp - CONFIG_SYS_STACK_SIZE;
	mem_size = CONFIG_SYS_STACK_SIZE;
	if (!sysmem_alloc_base(MEM_STACK, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for stack\n");
		ret = -ENOMEM;
		goto fail;
	}

	return 0;

fail:
	if (ret && !(gd->flags & GD_FLG_RELOC)) {
		sysmem_dump();
		SYSMEM_W("Maybe the malloc size %d MiB is too large?\n\n",
			 SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	}

	return ret;
}

__weak int board_sysmem_reserve(struct sysmem *sysmem)
{
	/* Please define a platform-specific board_sysmem_reserve() */
	return 0;
}

static int do_sysmem_dump(cmd_tbl_t *cmdtp, int flag,
			  int argc, char *const argv[])
{
	sysmem_dump();
	return 0;
}

static int do_sysmem_search(cmd_tbl_t *cmdtp, int flag,
			    int argc, char *const argv[])
{
	ulong addr, size;

	if (argc != 2)
		return CMD_RET_USAGE;

	size = simple_strtoul(argv[1], NULL, 16);
	if (!size)
		return CMD_RET_USAGE;

	addr = sysmem_alloc_temporary_mem(size);
	if (!addr) {
		SYSMEM_I("No available region with size 0x%08lx\n", size);
	} else {
		SYSMEM_I("Available region at address: 0x%08lx\n", addr);
	}
	env_set_hex("smem_addr", addr);

	return 0;
}

U_BOOT_CMD(
	sysmem_dump, 1, 1, do_sysmem_dump,
	"Dump sysmem layout",
	""
);

U_BOOT_CMD(
	sysmem_search, 2, 1, do_sysmem_search,
	"Search for an available sysmem region",
	"<size in hex>"
);
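
/*
 * Illustrative console session (addresses are examples only):
 *
 *   => sysmem_search 100000
 *   Sysmem: Available region at address: 0x7df00000
 *   => echo ${smem_addr}
 *   7df00000
 *   => sysmem_dump
 */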