1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2019 Fuzhou Rockchip Electronics Co., Ltd 4 */ 5 6 #include <common.h> 7 #include <sysmem.h> 8 #include <lmb.h> 9 #include <malloc.h> 10 #include <asm/io.h> 11 12 DECLARE_GLOBAL_DATA_PTR; 13 14 #define SYSMEM_MAGIC 0x4D454D53 /* "SMEM" */ 15 #define SYSMEM_ALLOC_ANYWHERE 0 16 #define SYSMEM_ALLOC_NO_ALIGN 1 17 18 #define SYSMEM_I(fmt, args...) printf("Sysmem: "fmt, ##args) 19 #define SYSMEM_W(fmt, args...) printf("Sysmem Warn: "fmt, ##args) 20 #define SYSMEM_E(fmt, args...) printf("Sysmem Error: "fmt, ##args) 21 #define SYSMEM_D(fmt, args...) debug("Sysmem Debug: "fmt, ##args) 22 23 struct memcheck { 24 uint32_t magic; 25 }; 26 27 /* Global for platform, must in data section */ 28 struct sysmem plat_sysmem __section(".data") = { 29 .has_initf = false, 30 .has_initr = false, 31 }; 32 33 bool sysmem_has_init(void) 34 { 35 return gd->flags & GD_FLG_RELOC ? 36 plat_sysmem.has_initr : plat_sysmem.has_initf; 37 } 38 39 void sysmem_dump(void) 40 { 41 struct sysmem *sysmem = &plat_sysmem; 42 struct lmb *lmb = &sysmem->lmb; 43 struct memblock *mem; 44 struct memcheck *check; 45 struct list_head *node; 46 ulong memory_size = 0; 47 ulong reserved_size = 0; 48 ulong allocated_size = 0; 49 bool overflow = false; 50 ulong i; 51 52 if (!sysmem_has_init()) 53 return; 54 55 printf("\nsysmem_dump_all:\n"); 56 57 /* Memory pool */ 58 printf(" --------------------------------------------------------------------\n"); 59 for (i = 0; i < lmb->memory.cnt; i++) { 60 memory_size += lmb->memory.region[i].size; 61 printf(" memory.rgn[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i, 62 (ulong)lmb->memory.region[i].base, 63 (ulong)lmb->memory.region[i].base + 64 (ulong)lmb->memory.region[i].size, 65 (ulong)lmb->memory.region[i].size); 66 } 67 printf("\n memory.total = 0x%08lx (%ld MiB. %ld KiB)\n", 68 (ulong)memory_size, 69 SIZE_MB((ulong)memory_size), 70 SIZE_KB((ulong)memory_size)); 71 72 /* Allocated */ 73 i = 0; 74 printf(" --------------------------------------------------------------------\n"); 75 list_for_each(node, &sysmem->allocated_head) { 76 mem = list_entry(node, struct memblock, node); 77 allocated_size += mem->size; 78 if (mem->attr.flags & M_ATTR_OFC) { 79 check = (struct memcheck *) 80 (mem->base + mem->size - sizeof(*check)); 81 overflow = (check->magic != SYSMEM_MAGIC); 82 } else if (mem->attr.flags & M_ATTR_HOFC) { 83 check = (struct memcheck *) 84 (mem->base - sizeof(*check)); 85 overflow = (check->magic != SYSMEM_MAGIC); 86 } else { 87 overflow = false; 88 } 89 90 printf(" allocated.rgn[%ld].name = \"%s\" %s\n", 91 i, mem->attr.name, overflow ? " <Overflow!>" : ""); 92 printf(" .addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", 93 (ulong)mem->base, (ulong)(mem->base + mem->size), 94 (ulong)mem->size); 95 i++; 96 } 97 98 printf("\n malloc_r: %d MiB, malloc_f: %d KiB\n", 99 SIZE_MB(CONFIG_SYS_MALLOC_LEN), SIZE_KB(CONFIG_SYS_MALLOC_F_LEN)); 100 printf("\n allocated.total = 0x%08lx (%ld MiB. %ld KiB)\n", 101 (ulong)allocated_size, 102 SIZE_MB((ulong)allocated_size), 103 SIZE_KB((ulong)allocated_size)); 104 105 /* LMB core reserved */ 106 printf(" --------------------------------------------------------------------\n"); 107 reserved_size = 0; 108 for (i = 0; i < lmb->reserved.cnt; i++) { 109 reserved_size += lmb->reserved.region[i].size; 110 printf(" LMB.reserved[%ld].addr = 0x%08lx - 0x%08lx (size: 0x%08lx)\n", i, 111 (ulong)lmb->reserved.region[i].base, 112 (ulong)lmb->reserved.region[i].base + 113 (ulong)lmb->reserved.region[i].size, 114 (ulong)lmb->reserved.region[i].size); 115 } 116 117 printf("\n reserved.core.total = 0x%08lx (%ld MiB. %ld KiB)\n", 118 (ulong)reserved_size, 119 SIZE_MB((ulong)reserved_size), 120 SIZE_KB((ulong)reserved_size)); 121 printf(" --------------------------------------------------------------------\n\n"); 122 } 123 124 static inline int sysmem_is_overlap(phys_addr_t base1, phys_size_t size1, 125 phys_addr_t base2, phys_size_t size2) 126 { 127 return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); 128 } 129 130 static int sysmem_add(phys_addr_t base, phys_size_t size) 131 { 132 struct sysmem *sysmem = &plat_sysmem; 133 int ret; 134 135 if (!size) 136 return -EINVAL; 137 138 ret = lmb_add(&sysmem->lmb, base, size); 139 if (ret < 0) 140 SYSMEM_E("Failed to add sysmem at 0x%08lx for 0x%08lx size\n", 141 (ulong)base, (ulong)size); 142 143 return (ret >= 0) ? 0 : ret; 144 } 145 146 static const char *sysmem_alias2name(const char *name, int *id) 147 { 148 const char *alias; 149 int n, i, j; 150 int match = 0; 151 152 for (i = 0; i < MEMBLK_ID_MAX; i++) { 153 /* Pirmary name */ 154 if (mem_attr[i].name && !strcasecmp(mem_attr[i].name, name)) { 155 match = 1; 156 goto finish; 157 } 158 159 /* Alias name */ 160 alias = mem_attr[i].alias[0]; 161 if (!alias) 162 continue; 163 164 n = ARRAY_SIZE(mem_attr[i].alias); 165 for (j = 0; j < n; j++, alias++) { 166 if (alias && !strcasecmp(alias, name)) { 167 match = 1; 168 goto finish; 169 } 170 } 171 } 172 173 finish: 174 if (match) { 175 *id = i; 176 return mem_attr[i].name; 177 } 178 179 return name; 180 } 181 182 static void *sysmem_alloc_align_base(enum memblk_id id, 183 const char *mem_name, 184 phys_addr_t base, 185 phys_size_t size, 186 ulong align) 187 { 188 struct sysmem *sysmem = &plat_sysmem; 189 struct memblk_attr attr; 190 struct memblock *mem; 191 struct memcheck *check; 192 struct list_head *node; 193 const char *name; 194 phys_addr_t paddr; 195 phys_addr_t alloc_base; 196 phys_size_t alloc_size; 197 phys_addr_t bank_base; 198 phys_size_t bank_size; 199 bool req_overlap = false; /* Only for kernel reserved-memory */ 200 int i; 201 202 if (!sysmem_has_init()) 203 goto out; 204 205 if (id == MEMBLK_ID_BY_NAME || id == MEMBLK_ID_FDT_RESV) { 206 if (!mem_name) { 207 SYSMEM_E("NULL name for alloc sysmem\n"); 208 goto out; 209 } else if (id == MEMBLK_ID_FDT_RESV) { 210 211 /* 212 * Allow fdt reserved memory to overlap with the region 213 * only used in U-Boot, like: stack, fastboot, u-boot... 214 * these regions are marked as M_ATTR_OVERLAP in flags. 215 * 216 * Here we check whether it overlaps with others, if 217 * so, set req_overlap as true. 218 */ 219 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { 220 if (!gd->bd->bi_dram[i].size) 221 continue; 222 223 bank_base = gd->bd->bi_dram[i].start; 224 bank_size = gd->bd->bi_dram[i].size; 225 if (sysmem_is_overlap(base, size, 226 bank_base, bank_size)) { 227 req_overlap = true; 228 break; 229 } 230 } 231 232 /* 233 * If this request region is out size of all available 234 * region, ignore and return success. 235 */ 236 if (!req_overlap) 237 return (void *)base; 238 } 239 240 /* Find name, id and attr by outer mem_name */ 241 name = sysmem_alias2name(mem_name, (int *)&id); 242 attr = mem_attr[id]; 243 if (!attr.name) 244 attr.name = strdup(name); 245 } else if (id > MEMBLK_ID_UNK && id < MEMBLK_ID_MAX) { 246 attr = mem_attr[id]; 247 name = attr.name; 248 249 /* 250 * Fixup base and place right after U-Boot stack, adding a lot 251 * of space(4KB) maybe safer. 252 */ 253 if ((id == MEMBLK_ID_AVB_ANDROID) && 254 (base == SYSMEM_ALLOC_ANYWHERE)) 255 base = gd->start_addr_sp - 256 CONFIG_SYS_STACK_SIZE - size - 0x1000; 257 } else { 258 SYSMEM_E("Unsupport memblk id %d for alloc sysmem\n", id); 259 goto out; 260 } 261 262 if (!size) { 263 SYSMEM_E("\"%s\" size is 0 for alloc sysmem\n", name); 264 goto out; 265 } 266 267 if (!IS_ALIGNED(base, 4)) { 268 SYSMEM_E("\"%s\" base=0x%08lx is not 4-byte aligned\n", 269 name, (ulong)base); 270 goto out; 271 } 272 273 /* Must be 4-byte aligned */ 274 size = ALIGN(size, 4); 275 276 SYSMEM_D("Enter alloc: \"%s\" 0x%08lx - 0x%08lx\n", 277 name, (ulong)base, (ulong)(base + size)); 278 279 /* Already allocated ? */ 280 list_for_each(node, &sysmem->allocated_head) { 281 mem = list_entry(node, struct memblock, node); 282 SYSMEM_D("Has allcated: %s, 0x%08lx - 0x%08lx\n", 283 mem->attr.name, (ulong)mem->base, 284 (ulong)(mem->base + mem->size)); 285 if (!strcmp(mem->attr.name, name)) { 286 /* Allow double alloc for same but smaller region */ 287 if (mem->base <= base && mem->size >= size) 288 return (void *)base; 289 290 SYSMEM_E("Failed to double alloc for existence \"%s\"\n", name); 291 goto out; 292 } else if (sysmem_is_overlap(mem->base, mem->size, base, size)) { 293 /* 294 * If this new alloc region expects overlap and the old 295 * region is also allowed to be overlap, just do reserve. 296 */ 297 if (req_overlap && mem->attr.flags & M_ATTR_OVERLAP) { 298 if (lmb_reserve(&sysmem->lmb, base, size)) 299 SYSMEM_E("Failed to overlap alloc \"%s\" " 300 "at 0x%08lx - 0x%08lx\n", 301 name, (ulong)base, 302 (ulong)(base + size)); 303 return (void *)base; 304 } 305 306 SYSMEM_E("\"%s\" (0x%08lx - 0x%08lx) alloc is " 307 "overlap with existence \"%s\" (0x%08lx - " 308 "0x%08lx)\n", 309 name, (ulong)base, (ulong)(base + size), 310 mem->attr.name, (ulong)mem->base, 311 (ulong)(mem->base + mem->size)); 312 goto out; 313 } 314 } 315 316 /* Add overflow check magic ? */ 317 if (attr.flags & M_ATTR_OFC) 318 alloc_size = size + sizeof(*check); 319 else 320 alloc_size = size; 321 322 /* Alloc anywhere ? */ 323 if (base == SYSMEM_ALLOC_ANYWHERE) 324 alloc_base = base; 325 else 326 alloc_base = base + alloc_size; /* LMB is align down alloc mechanism */ 327 328 paddr = lmb_alloc_base(&sysmem->lmb, alloc_size, align, alloc_base); 329 if (paddr) { 330 if ((paddr == base) || (base == SYSMEM_ALLOC_ANYWHERE)) { 331 mem = malloc(sizeof(*mem)); 332 if (!mem) { 333 SYSMEM_E("No memory for \"%s\" alloc sysmem\n", name); 334 goto out; 335 } 336 337 mem->base = paddr; 338 mem->size = alloc_size; 339 mem->attr = attr; 340 sysmem->allocated_cnt++; 341 list_add_tail(&mem->node, &sysmem->allocated_head); 342 343 /* Add overflow check magic */ 344 if (mem->attr.flags & M_ATTR_OFC) { 345 check = (struct memcheck *)(paddr + size); 346 check->magic = SYSMEM_MAGIC; 347 } else if (mem->attr.flags & M_ATTR_HOFC) { 348 check = (struct memcheck *)(paddr - sizeof(*check)); 349 check->magic = SYSMEM_MAGIC; 350 } 351 } else { 352 SYSMEM_E("Failed to alloc \"%s\" expect at 0x%08lx - 0x%08lx " 353 "but at 0x%08lx - x%08lx\n", 354 name, (ulong)base, (ulong)(base + size), 355 (ulong)paddr, (ulong)(paddr + size)); 356 /* Free what we don't want allocated region */ 357 if (lmb_free(&sysmem->lmb, paddr, alloc_size) < 0) 358 SYSMEM_E("Failed to free \"%s\"\n", name); 359 360 goto out; 361 } 362 } else { 363 SYSMEM_E("Failed to alloc \"%s\" at 0x%08lx - 0x%08lx\n", 364 name, (ulong)base, (ulong)(base + size)); 365 goto out; 366 } 367 368 SYSMEM_D("Exit alloc: \"%s\", paddr=0x%08lx, size=0x%08lx, align=0x%x, anywhere=%d\n", 369 name, (ulong)paddr, (ulong)size, (u32)align, !base); 370 371 return (void *)paddr; 372 373 out: 374 return (attr.flags & M_ATTR_PEEK) ? (void *)base : NULL; 375 } 376 377 void *sysmem_alloc(enum memblk_id id, phys_size_t size) 378 { 379 void *paddr; 380 381 paddr = sysmem_alloc_align_base(id, 382 NULL, 383 SYSMEM_ALLOC_ANYWHERE, 384 size, 385 SYSMEM_ALLOC_NO_ALIGN); 386 if (!paddr) 387 sysmem_dump(); 388 389 return paddr; 390 } 391 392 void *sysmem_alloc_base(enum memblk_id id, phys_addr_t base, phys_size_t size) 393 { 394 void *paddr; 395 396 paddr = sysmem_alloc_align_base(id, 397 NULL, 398 base, 399 size, 400 SYSMEM_ALLOC_NO_ALIGN); 401 if (!paddr) 402 sysmem_dump(); 403 404 return paddr; 405 } 406 407 void *sysmem_alloc_base_by_name(const char *name, 408 phys_addr_t base, phys_size_t size) 409 { 410 void *paddr; 411 412 paddr = sysmem_alloc_align_base(MEMBLK_ID_BY_NAME, 413 name, 414 base, 415 size, 416 SYSMEM_ALLOC_NO_ALIGN); 417 if (!paddr) 418 sysmem_dump(); 419 420 return paddr; 421 } 422 423 void *sysmem_fdt_reserve_alloc_base(const char *name, 424 phys_addr_t base, phys_size_t size) 425 { 426 void *paddr; 427 428 paddr = sysmem_alloc_align_base(MEMBLK_ID_FDT_RESV, 429 name, 430 base, 431 size, 432 SYSMEM_ALLOC_NO_ALIGN); 433 if (!paddr) 434 sysmem_dump(); 435 436 return paddr; 437 } 438 439 bool sysmem_can_alloc(phys_size_t base, phys_size_t size) 440 { 441 struct sysmem *sysmem = &plat_sysmem; 442 phys_addr_t alloc_base; 443 phys_addr_t paddr; 444 int ret; 445 446 if (!sysmem_has_init()) 447 return false; 448 449 /* LMB is align down alloc mechanism */ 450 alloc_base = base + size; 451 paddr = __lmb_alloc_base(&sysmem->lmb, 452 size, 453 SYSMEM_ALLOC_NO_ALIGN, 454 alloc_base); 455 if (paddr) { 456 /* If free failed, return false */ 457 ret = lmb_free(&sysmem->lmb, base, size); 458 if (ret < 0) { 459 SYSMEM_E("Can't free at 0x%08lx - 0x%08lx, ret=%d\n", 460 (ulong)base, (ulong)(base + size), ret); 461 return false; 462 } 463 } else { 464 SYSMEM_D("Can't alloc at 0x%08lx - 0x%08lx\n", 465 (ulong)base, (ulong)(base + size)); 466 } 467 468 return (paddr == base) ? true : false; 469 } 470 471 int sysmem_free(phys_addr_t base) 472 { 473 struct sysmem *sysmem = &plat_sysmem; 474 struct memblock *mem; 475 struct list_head *node; 476 int ret, found = 0; 477 478 if (!sysmem_has_init()) 479 return -ENOSYS; 480 481 /* Find existence */ 482 list_for_each(node, &sysmem->allocated_head) { 483 mem = list_entry(node, struct memblock, node); 484 if (mem->base == base) { 485 found = 1; 486 break; 487 } 488 } 489 490 if (!found) { 491 SYSMEM_E("Failed to free no allocated sysmem at 0x%08lx\n", 492 (ulong)base); 493 return -EINVAL; 494 } 495 496 ret = lmb_free(&sysmem->lmb, mem->base, mem->size); 497 if (ret >= 0) { 498 SYSMEM_D("Free: \"%s\" 0x%08lx - 0x%08lx\n", 499 mem->attr.name, (ulong)mem->base, 500 (ulong)(mem->base + mem->size)); 501 sysmem->allocated_cnt--; 502 list_del(&mem->node); 503 free(mem); 504 } else { 505 SYSMEM_E("Failed to free \"%s\" at 0x%08lx\n", 506 mem->attr.name, (ulong)base); 507 } 508 509 return (ret >= 0) ? 0 : ret; 510 } 511 512 int sysmem_initr(void) 513 { 514 return sysmem_init(); 515 } 516 517 int sysmem_init(void) 518 { 519 struct sysmem *sysmem = &plat_sysmem; 520 phys_addr_t mem_start; 521 phys_size_t mem_size; 522 int ret; 523 524 lmb_init(&sysmem->lmb); 525 INIT_LIST_HEAD(&sysmem->allocated_head); 526 sysmem->allocated_cnt = 0; 527 if (gd->flags & GD_FLG_RELOC) { 528 sysmem->has_initr = true; 529 } else { 530 SYSMEM_I("init\n"); 531 sysmem->has_initf = true; 532 } 533 534 /* Add all available system memory */ 535 #ifdef CONFIG_NR_DRAM_BANKS 536 int i; 537 538 for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) { 539 if (!gd->bd->bi_dram[i].size) 540 continue; 541 542 ret = sysmem_add(gd->bd->bi_dram[i].start, 543 gd->bd->bi_dram[i].size); 544 if (ret) { 545 SYSMEM_E("Failed to add sysmem from bi_dram[%d]\n", i); 546 goto fail; 547 } 548 } 549 #else 550 mem_start = env_get_bootm_low(); 551 mem_size = env_get_bootm_size(); 552 ret = sysmem_add(mem_start, mem_size); 553 if (ret) { 554 SYSMEM_E("Failed to add sysmem from bootm_low/size\n"); 555 goto fail; 556 } 557 #endif 558 /* Reserved for board */ 559 ret = board_sysmem_reserve(sysmem); 560 if (ret) { 561 SYSMEM_E("Failed to reserve sysmem for board\n"); 562 goto fail; 563 } 564 565 /* Reserved for U-boot framework: 'reserve_xxx()' */ 566 mem_start = gd->start_addr_sp; 567 mem_size = gd->ram_top - mem_start; 568 if (!sysmem_alloc_base(MEMBLK_ID_UBOOT, mem_start, mem_size)) { 569 SYSMEM_E("Failed to reserve sysmem for U-Boot framework\n"); 570 ret = -ENOMEM; 571 goto fail; 572 } 573 574 /* Reserved for U-Boot stack */ 575 mem_start = gd->start_addr_sp - CONFIG_SYS_STACK_SIZE; 576 mem_size = CONFIG_SYS_STACK_SIZE; 577 if (!sysmem_alloc_base(MEMBLK_ID_STACK, mem_start, mem_size)) { 578 SYSMEM_E("Failed to reserve sysmem for stack\n"); 579 ret = -ENOMEM; 580 goto fail; 581 } 582 583 return 0; 584 585 fail: 586 if (ret && !(gd->flags & GD_FLG_RELOC)) { 587 sysmem_dump(); 588 SYSMEM_W("Maybe malloc size %d MiB is too large?\n\n", 589 SIZE_MB(CONFIG_SYS_MALLOC_LEN)); 590 } 591 592 return ret; 593 } 594 595 __weak int board_sysmem_reserve(struct sysmem *sysmem) 596 { 597 /* please define platform specific board_sysmem_reserve() */ 598 return 0; 599 } 600 601 static int do_dump_sysmem(cmd_tbl_t *cmdtp, int flag, 602 int argc, char *const argv[]) 603 { 604 sysmem_dump(); 605 return 0; 606 } 607 608 U_BOOT_CMD( 609 dump_sysmem, 1, 1, do_dump_sysmem, 610 "Dump sysmem layout", 611 "" 612 ); 613