// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <common.h>
#include <bidram.h>
#include <sysmem.h>
#include <lmb.h>
#include <malloc.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

#define SYSMEM_MAGIC		0x4D454D53	/* "SMEM" */

#define LMB_ALLOC_ANYWHERE	0	/* sync with lmb.c */
#define SYSMEM_ALLOC_NO_ALIGN	1
#define SYSMEM_ALLOC_ANYWHERE	2

#define SYSMEM_I(fmt, args...)	printf("Sysmem: "fmt, ##args)
#define SYSMEM_W(fmt, args...)	printf("Sysmem Warn: "fmt, ##args)
#define SYSMEM_E(fmt, args...)	printf("Sysmem Error: "fmt, ##args)
#define SYSMEM_D(fmt, args...)	debug("Sysmem Debug: "fmt, ##args)

/*
 * Overflow-check canary: placed right after (M_ATTR_OFC) or right before
 * (M_ATTR_HOFC) an allocated region. A clobbered magic indicates overflow.
 */
struct memcheck {
	uint32_t magic;
};

/* Global for the platform, must be placed in the .data section */
struct sysmem plat_sysmem __section(".data") = {
	.has_initf = false,
	.has_initr = false,
};

bool sysmem_has_init(void)
{
	return gd->flags & GD_FLG_RELOC ?
	       plat_sysmem.has_initr : plat_sysmem.has_initf;
}

static inline int sysmem_is_overlap(phys_addr_t base1, phys_size_t size1,
				    phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static inline int sysmem_is_sub_region(struct memblock *sub,
				       struct memblock *main)
{
	if (!sub || !main)
		return false;

	return ((sub->base >= main->base) &&
		(sub->base + sub->size <= main->base + main->size));
}

void sysmem_dump(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct lmb *lmb = &sysmem->lmb;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	ulong memory_size = 0;
	ulong reserved_size = 0;
	ulong allocated_size = 0;
	bool overflow = false;
	ulong i;

	if (!sysmem_has_init())
		return;

	printf("\nsysmem_dump_all:\n");

	/* Memory pool */
	printf(" --------------------------------------------------------------------\n");
	for (i = 0; i < lmb->memory.cnt; i++) {
		memory_size += lmb->memory.region[i].size;
		printf(" memory.rgn[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->memory.region[i].base,
		       (ulong)lmb->memory.region[i].base +
		       (ulong)lmb->memory.region[i].size,
		       (ulong)lmb->memory.region[i].size);
	}
	printf("\n memory.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)memory_size,
	       SIZE_MB((ulong)memory_size),
	       SIZE_KB((ulong)memory_size));

	/* Allocated */
	i = 0;
	printf(" --------------------------------------------------------------------\n");
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		if (mem->attr.flags & M_ATTR_OFC) {
			check = (struct memcheck *)
				(mem->base + mem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (mem->attr.flags & M_ATTR_HOFC) {
			check = (struct memcheck *)
				(mem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = false;
		}

		printf(" allocated.rgn[%ld].name = \"%s\" %s %s\n",
		       i, mem->attr.name, overflow ? " <Overflow!>" : "",
		       mem->orig_base != mem->base ? "<*>" : "");
		printf(" .addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	/* Kernel 'reserved-memory' */
	i = 0;
	printf("\n");
	list_for_each(node, &sysmem->kmem_resv_head) {
		mem = list_entry(node, struct memblock, node);
		allocated_size += mem->size;
		printf(" kmem-resv.rgn[%ld].name = \"%s\" %s\n",
		       i, mem->attr.name,
		       mem->orig_base != mem->base ? "<*>" : "");
		printf(" .addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n",
		       (ulong)mem->orig_base,
		       (ulong)(mem->orig_base + mem->size),
		       (ulong)mem->size);
		i++;
	}

	printf("\n framework malloc_r = %3d MiB",
	       SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	printf("\n framework malloc_f = %3d KiB\n",
	       SIZE_KB(CONFIG_SYS_MALLOC_F_LEN));

	printf("\n allocated.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)allocated_size,
	       SIZE_MB((ulong)allocated_size),
	       SIZE_KB((ulong)allocated_size));

	/* LMB core reserved */
	printf(" --------------------------------------------------------------------\n");
	reserved_size = 0;
	for (i = 0; i < lmb->reserved.cnt; i++) {
		reserved_size += lmb->reserved.region[i].size;
		printf(" LMB.allocated[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i,
		       (ulong)lmb->reserved.region[i].base,
		       (ulong)lmb->reserved.region[i].base +
		       (ulong)lmb->reserved.region[i].size,
		       (ulong)lmb->reserved.region[i].size);
	}

	printf("\n reserved.core.total = 0x%08lx (%ld MiB. %ld KiB)\n",
	       (ulong)reserved_size,
	       SIZE_MB((ulong)reserved_size),
	       SIZE_KB((ulong)reserved_size));
	printf(" --------------------------------------------------------------------\n\n");
}

void sysmem_overflow_check(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct list_head *node, *knode;
	struct memcheck *check;
	struct memblock *kmem;
	struct memblock *smem;
	struct memblock *rmem;
	int overflow = 0, overlap = 0;

	if (!sysmem_has_init())
		return;

#ifdef CONFIG_BIDRAM
	/*
	 * Check whether any kernel 'reserved-memory' region overlaps with an
	 * invisible region.
	 *
	 * Here we only print a warning message when it overlaps with an
	 * invisible region.
	 */
	list_for_each(knode, &sysmem->kmem_resv_head) {
		kmem = list_entry(knode, struct memblock, node);
		rmem = bidram_reserved_is_overlap(kmem->base, kmem->size);
		if (rmem) {
			const char *alias;
			int i, dump = 1;

			/*
			 * Ignore sub regions of the invisible region,
			 * e.g. ramoops within the SHM region.
			 */
			alias = rmem->attr.alias[0];
			if (alias && sysmem_is_sub_region(kmem, rmem)) {
				for (i = 0; i < ALIAS_COUNT_MAX; i++) {
					alias = rmem->attr.alias[i];
					if (!alias)
						continue;
					if (!strncasecmp(kmem->attr.name, alias,
							 strlen(alias))) {
						dump = 0;
						break;
					}
				}
			}

			if (dump)
				SYSMEM_W("kernel 'reserved-memory' \"%s\"(0x%08lx - 0x%08lx) "
					 "overlaps with [invisible] \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 rmem->attr.name, (ulong)rmem->base,
					 (ulong)(rmem->base + rmem->size));
		}
	}
#endif

	list_for_each(node, &sysmem->allocated_head) {
		smem = list_entry(node, struct memblock, node);
		/*
		 * Check whether any kernel 'reserved-memory' region overlaps
		 * with the sysmem allocated regions.
		 */
		list_for_each(knode, &sysmem->kmem_resv_head) {
			kmem = list_entry(knode, struct memblock, node);
			if (sysmem_is_overlap(smem->base, smem->size,
					      kmem->base, kmem->size)) {
				if (smem->attr.flags & M_ATTR_KMEM_CAN_OVERLAP)
					continue;

				overlap = 1;
				SYSMEM_W("kernel 'reserved-memory' \"%s\"(0x%08lx - 0x%08lx) "
					 "overlaps with \"%s\" (0x%08lx - 0x%08lx)\n",
					 kmem->attr.name, (ulong)kmem->base,
					 (ulong)(kmem->base + kmem->size),
					 smem->attr.name, (ulong)smem->base,
					 (ulong)(smem->base + smem->size));
			}
		}

		/*
		 * Check the sysmem allocated regions for overflow.
		 */
		if (smem->attr.flags & M_ATTR_OFC) {
			check = (struct memcheck *)
				(smem->base + smem->size - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else if (smem->attr.flags & M_ATTR_HOFC) {
			check = (struct memcheck *)
				(smem->base - sizeof(*check));
			overflow = (check->magic != SYSMEM_MAGIC);
		} else {
			overflow = 0;
		}

		if (overflow) {
			SYSMEM_E("Found a region overflow!\n");
			break;
		}
	}

	if (overflow || overlap)
		sysmem_dump();
}

static int sysmem_add(phys_addr_t base, phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	int ret;

	if (!size)
		return -EINVAL;

	ret = lmb_add(&sysmem->lmb, base, size);
	if (ret < 0)
		SYSMEM_E("Failed to add sysmem at 0x%08lx for 0x%08lx size\n",
			 (ulong)base, (ulong)size);

	return (ret >= 0) ?
		0 : ret;
}

static const char *sysmem_alias2name(const char *name, int *id)
{
	const char *alias;
	int i, j;
	int match = 0;

	for (i = 0; i < MEMBLK_ID_MAX; i++) {
		/* Primary name */
		if (mem_attr[i].name && !strcasecmp(mem_attr[i].name, name)) {
			match = 1;
			goto finish;
		}

		/* Alias name */
		alias = mem_attr[i].alias[0];
		if (!alias)
			continue;

		for (j = 0; j < ALIAS_COUNT_MAX; j++) {
			alias = mem_attr[i].alias[j];
			if (alias && !strcasecmp(alias, name)) {
				match = 1;
				goto finish;
			}
		}
	}

finish:
	if (match) {
		*id = i;
		return mem_attr[i].name;
	}

	return name;
}

static void *sysmem_alloc_align_base(enum memblk_id id,
				     const char *mem_name,
				     phys_addr_t base,
				     phys_size_t size,
				     ulong align)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblk_attr attr;
	struct memblock *mem;
	struct memcheck *check;
	struct list_head *node;
	const char *name;
	phys_addr_t paddr;
	phys_addr_t alloc_base;
	phys_size_t alloc_size;
	phys_addr_t orig_base = base;

	if (!sysmem_has_init())
		goto out;

	if (id == MEMBLK_ID_BY_NAME || id == MEMBLK_ID_KMEM_RESERVED) {
		if (!mem_name) {
			SYSMEM_E("NULL name for alloc sysmem\n");
			goto out;
		}

		/* Find: name, id and attr from the outer mem_name & id */
		name = sysmem_alias2name(mem_name, (int *)&id);
		attr = mem_attr[id];
		if (!attr.name)
			attr.name = strdup(name);

		/* Always make the kernel 'reserved-memory' alloc succeed */
		if (id == MEMBLK_ID_KMEM_RESERVED) {
			struct memblock *mem;

			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				return mem;
			}

			attr.flags |= M_ATTR_KMEM_RESERVED;
			mem->orig_base = orig_base;
			mem->base = base;
			mem->size = size;
			mem->attr = attr;
			sysmem->kmem_resv_cnt++;
			list_add_tail(&mem->node, &sysmem->kmem_resv_head);

			return (void *)base;
		}
	} else if (id > MEMBLK_ID_UNK && id < MEMBLK_ID_MAX) {
		attr = mem_attr[id];
		name = attr.name;

		/*
		 * Special handling for the Android AVB alloc (anywhere).
		 *
		 * Fix up the base and place it right after the U-Boot stack;
		 * adding some extra space (4 KiB) may be safer.
		 */
		if ((id == MEMBLK_ID_AVB_ANDROID) &&
		    (base == SYSMEM_ALLOC_ANYWHERE)) {
			base = gd->start_addr_sp -
				CONFIG_SYS_STACK_SIZE - size - 0x1000;

		} else if (base <= gd->bd->bi_dram[0].start) {
			/*
			 * On the Rockchip platform:
			 *
			 * So far, M_ATTR_IGNORE_INVISIBLE is only used for the
			 * uncompressed kernel alloc. For ARMv8 with AArch32
			 * mode enabled, the ATF is still AArch64 and occupies
			 * 0~1MB, and shmem occupies 1~2MB. So ignore the part
			 * of the region that overlaps with them.
			 */
			if (attr.flags & M_ATTR_IGNORE_INVISIBLE) {
				base = gd->bd->bi_dram[0].start;
			} else {
				SYSMEM_E("Failed to alloc invisible sub region 0x%08lx - 0x%08lx "
					 "of \"%s\" at 0x%08lx - 0x%08lx\n",
					 (ulong)base, (ulong)gd->bd->bi_dram[0].start,
					 name, (ulong)base, (ulong)(base + size));
				goto out;
			}
		}
	} else {
		SYSMEM_E("Unsupported memblk id %d for alloc sysmem\n", id);
		goto out;
	}

	if (!size) {
		SYSMEM_E("\"%s\" size is 0 for alloc sysmem\n", name);
		goto out;
	}

	/*
	 * Some modules use sysmem_alloc() to alloc a region for a storage
	 * read/write buffer, which should be aligned to the cacheline size,
	 * e.g. AVB.
	 *
	 * Align the base down to the cacheline size if it is not aligned,
	 * otherwise the tail of the region may overflow.
	 */
	if (attr.flags & M_ATTR_CACHELINE_ALIGN &&
	    !IS_ALIGNED(base, ARCH_DMA_MINALIGN)) {
		base = ALIGN(base, ARCH_DMA_MINALIGN);
		base -= ARCH_DMA_MINALIGN;
	}

	if (base != SYSMEM_ALLOC_ANYWHERE && !IS_ALIGNED(base, 4)) {
		SYSMEM_E("\"%s\" base=0x%08lx is not 4-byte aligned\n",
			 name, (ulong)base);
		goto out;
	}

	/* Must be sizeof(long) byte aligned */
	size = ALIGN(size, sizeof(long));

	SYSMEM_D("Enter alloc: \"%s\" 0x%08lx - 0x%08lx\n",
		 name, (ulong)base, (ulong)(base + size));

	/* Already allocated ? */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		SYSMEM_D("Has allocated: %s, 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		if (!strcmp(mem->attr.name, name)) {
			/* Allow double alloc of the same, but smaller, region */
			if (mem->base <= base && mem->size >= size)
				return (void *)base;

			SYSMEM_E("Failed to double alloc for existing \"%s\"\n", name);
			goto out;
		} else if (sysmem_is_overlap(mem->base, mem->size, base, size)) {
			SYSMEM_E("\"%s\" (0x%08lx - 0x%08lx) alloc "
				 "overlaps with existing \"%s\" (0x%08lx - "
				 "0x%08lx)\n",
				 name, (ulong)base, (ulong)(base + size),
				 mem->attr.name, (ulong)mem->base,
				 (ulong)(mem->base + mem->size));
			goto out;
		}
	}

	/* Add overflow check magic ? */
	if (attr.flags & M_ATTR_OFC)
		alloc_size = size + sizeof(*check);
	else
		alloc_size = size;

	/* Alloc anywhere ? */
	if (base == SYSMEM_ALLOC_ANYWHERE)
		alloc_base = LMB_ALLOC_ANYWHERE;
	else
		alloc_base = base + alloc_size;	/* LMB is an align-down alloc mechanism */

	paddr = lmb_alloc_base(&sysmem->lmb, alloc_size, align, alloc_base);
	if (paddr) {
		if ((paddr == base) || (base == SYSMEM_ALLOC_ANYWHERE)) {
			mem = malloc(sizeof(*mem));
			if (!mem) {
				SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name);
				goto out;
			}

			/* Record the original base for dump */
			mem->orig_base = orig_base;
			mem->base = paddr;
			mem->size = alloc_size;
			mem->attr = attr;
			sysmem->allocated_cnt++;
			list_add_tail(&mem->node, &sysmem->allocated_head);

			/* Add the overflow check magic */
			if (mem->attr.flags & M_ATTR_OFC) {
				check = (struct memcheck *)(paddr + size);
				check->magic = SYSMEM_MAGIC;
			} else if (mem->attr.flags & M_ATTR_HOFC) {
				check = (struct memcheck *)(paddr - sizeof(*check));
				check->magic = SYSMEM_MAGIC;
			}
		} else {
			SYSMEM_E("Failed to alloc \"%s\", expected at 0x%08lx - 0x%08lx "
				 "but at 0x%08lx - 0x%08lx\n",
				 name, (ulong)base, (ulong)(base + size),
				 (ulong)paddr, (ulong)(paddr + size));
			/* Free the allocated region that we don't want */
			if (lmb_free(&sysmem->lmb, paddr, alloc_size) < 0)
				SYSMEM_E("Failed to free \"%s\"\n", name);

			goto out;
		}
	} else {
		SYSMEM_E("Failed to alloc \"%s\" at 0x%08lx - 0x%08lx\n",
			 name, (ulong)base, (ulong)(base + size));
		goto out;
	}

	SYSMEM_D("Exit alloc: \"%s\", paddr=0x%08lx, size=0x%08lx, align=0x%x, anywhere=%d\n",
		 name, (ulong)paddr, (ulong)size, (u32)align, !base);

	return (void *)paddr;

out:
	/*
	 * Why base + sizeof(ulong) ?
	 * This is not a standard way to handle the case where the input base is 0.
	 * Because 0 equals NULL, and we don't want to return NULL when the
	 * alloc succeeds, so returning any non-NULL value is okay.
	 *
	 * When does this happen ?
	 * A 32-bit platform may alloc the region for the uncompressed kernel
	 * at address 0.
	 */
	if (base == 0)
		base = base + sizeof(ulong);

	return (attr.flags & M_ATTR_IGNORE_INVISIBLE) ? (void *)base : NULL;
}

void *sysmem_alloc(enum memblk_id id, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_by_name(const char *name, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEMBLK_ID_BY_NAME,
					name,
					SYSMEM_ALLOC_ANYWHERE,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base(enum memblk_id id, phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(id,
					NULL,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_alloc_base_by_name(const char *name,
				phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEMBLK_ID_BY_NAME,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

void *sysmem_fdt_reserve_alloc_base(const char *name,
				    phys_addr_t base, phys_size_t size)
{
	void *paddr;

	paddr = sysmem_alloc_align_base(MEMBLK_ID_KMEM_RESERVED,
					name,
					base,
					size,
					SYSMEM_ALLOC_NO_ALIGN);
	if (!paddr)
		sysmem_dump();

	return paddr;
}

/*
 * Check whether the region [base, base + size) can be allocated by doing a
 * trial LMB alloc and freeing it again immediately on success.
 */
bool sysmem_can_alloc(phys_size_t base, phys_size_t size)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t alloc_base;
	phys_addr_t paddr;
	int ret;

	if (!sysmem_has_init())
		return false;

	/* LMB is an align-down alloc mechanism */
	alloc_base = base + size;
	paddr = __lmb_alloc_base(&sysmem->lmb,
				 size,
				 SYSMEM_ALLOC_NO_ALIGN,
				 alloc_base);
	if (paddr) {
		/* If the free fails, return false */
		ret = lmb_free(&sysmem->lmb, base, size);
		if (ret < 0) {
			SYSMEM_E("Can't free at 0x%08lx - 0x%08lx, ret=%d\n",
				 (ulong)base, (ulong)(base + size), ret);
			return false;
		}
	} else {
		SYSMEM_D("Can't alloc at 0x%08lx - 0x%08lx\n",
			 (ulong)base, (ulong)(base + size));
	}

	return (paddr == base) ?
		true : false;
}

int sysmem_free(phys_addr_t base)
{
	struct sysmem *sysmem = &plat_sysmem;
	struct memblock *mem;
	struct list_head *node;
	int ret, found = 0;

	if (!sysmem_has_init())
		return -ENOSYS;

	/* Find an existing allocated region */
	list_for_each(node, &sysmem->allocated_head) {
		mem = list_entry(node, struct memblock, node);
		if (mem->base == base || mem->orig_base == base) {
			found = 1;
			break;
		}
	}

	if (!found) {
		SYSMEM_E("Failed to free not-allocated sysmem at 0x%08lx\n",
			 (ulong)base);
		return -EINVAL;
	}

	ret = lmb_free(&sysmem->lmb, mem->base, mem->size);
	if (ret >= 0) {
		SYSMEM_D("Free: \"%s\" 0x%08lx - 0x%08lx\n",
			 mem->attr.name, (ulong)mem->base,
			 (ulong)(mem->base + mem->size));
		sysmem->allocated_cnt--;
		list_del(&mem->node);
		free(mem);
	} else {
		SYSMEM_E("Failed to free \"%s\" at 0x%08lx\n",
			 mem->attr.name, (ulong)base);
	}

	return (ret >= 0) ? 0 : ret;
}

int sysmem_initr(void)
{
	return sysmem_init();
}

int sysmem_init(void)
{
	struct sysmem *sysmem = &plat_sysmem;
	phys_addr_t mem_start;
	phys_size_t mem_size;
	int ret;

	lmb_init(&sysmem->lmb);
	INIT_LIST_HEAD(&sysmem->allocated_head);
	INIT_LIST_HEAD(&sysmem->kmem_resv_head);
	sysmem->allocated_cnt = 0;
	sysmem->kmem_resv_cnt = 0;

	if (gd->flags & GD_FLG_RELOC) {
		sysmem->has_initr = true;
	} else {
		SYSMEM_I("init\n");
		sysmem->has_initf = true;
	}

	/* Add all available system memory */
#ifdef CONFIG_NR_DRAM_BANKS
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		if (!gd->bd->bi_dram[i].size)
			continue;

		ret = sysmem_add(gd->bd->bi_dram[i].start,
				 gd->bd->bi_dram[i].size);
		if (ret) {
			SYSMEM_E("Failed to add sysmem from bi_dram[%d]\n", i);
			goto fail;
		}
	}
#else
	mem_start = env_get_bootm_low();
	mem_size = env_get_bootm_size();
	ret = sysmem_add(mem_start, mem_size);
	if (ret) {
		SYSMEM_E("Failed to add sysmem from bootm_low/size\n");
		goto fail;
	}
#endif
	/* Reserved for the board */
	ret = board_sysmem_reserve(sysmem);
	if (ret) {
		SYSMEM_E("Failed to reserve sysmem for board\n");
		goto fail;
	}

	/* Reserved for the U-Boot framework: 'reserve_xxx()' */
	mem_start = gd->start_addr_sp;
	mem_size = gd->ram_top - mem_start;
	if (!sysmem_alloc_base(MEMBLK_ID_UBOOT, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for U-Boot framework\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* Reserved for the U-Boot stack */
	mem_start = gd->start_addr_sp - CONFIG_SYS_STACK_SIZE;
	mem_size = CONFIG_SYS_STACK_SIZE;
	if (!sysmem_alloc_base(MEMBLK_ID_STACK, mem_start, mem_size)) {
		SYSMEM_E("Failed to reserve sysmem for stack\n");
		ret = -ENOMEM;
		goto fail;
	}

	return 0;

fail:
	if (ret && !(gd->flags & GD_FLG_RELOC)) {
		sysmem_dump();
		SYSMEM_W("Maybe malloc size %d MiB is too large?\n\n",
			 SIZE_MB(CONFIG_SYS_MALLOC_LEN));
	}

	return ret;
}

__weak int board_sysmem_reserve(struct sysmem *sysmem)
{
	/* Please define a platform-specific board_sysmem_reserve() */
	return 0;
}

static int do_dump_sysmem(cmd_tbl_t *cmdtp, int flag,
			  int argc, char *const argv[])
{
	sysmem_dump();
	return 0;
}

U_BOOT_CMD(
	dump_sysmem, 1, 1, do_dump_sysmem,
	"Dump sysmem layout",
	""
);
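/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * a caller typically reserves a fixed region through the public helpers
 * above and releases it with sysmem_free() once done. The region name
 * "fdt", the base address and the size below are hypothetical example
 * values, not something this file defines or requires.
 *
 *	void *buf;
 *
 *	buf = sysmem_alloc_base_by_name("fdt", 0x08300000, 0x100000);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	... use the region ...
 *
 *	sysmem_free((phys_addr_t)buf);
 */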