/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
		   mtd->erasesize - 1) >> mtd->erasesize_shift);
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* must not run past blk_cnt */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table fail\n");
		}
	}
}

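/*
 * mtd_blk_map_fit() - pre-build the bad-block map for a FIT image region.
 *
 * Descriptive comment added for clarity: with CONFIG_SPL_FIT enabled, the
 * FIT /totalsize node gives the image length, and the region starting at
 * sector * 512 is fed to mtd_blk_map_table_init() so later reads of the
 * FIT are redirected around bad blocks. One extra erase block of margin
 * is included in the mapped length.
 */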
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Can not find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9,
					   totalsize + (size_t)mtd->erasesize))
			debug("Map block table fail.\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}

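/*
 * mtd_map_write() - erase-and-write with bad-block handling.
 *
 * Descriptive comment added for clarity: this mirrors mtd_map_read().
 * Each erase-block-sized chunk is redirected through the block map (or,
 * without a map entry, bad blocks are skipped in place), the target block
 * is erased when the write reaches its start, and the data is written
 * with mtd_write(). The starting offset must be page-aligned.
 */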
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n",
		       pos, len);
		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}

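/*
 * mtd_part_parse() - build a kernel "mtdparts=" string from the GPT.
 *
 * Descriptive comment added for clarity: the partitions on the boot
 * device are walked and emitted as size@offset(name) entries, e.g.
 * (layout and names purely illustrative):
 *
 *   mtdparts=spi-nand0:0x400000@0x100000(boot),0x1000000@0x500000(rootfs)
 *
 * When a PART_SYSTEM partition exists, "ubi.mtd=<n> root=ubi0:rootfs" is
 * also appended to bootargs. The caller owns the returned buffer.
 */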
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s",
			 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Fail to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info) &&
		    (info.size + info.start + 33) == dev_desc->lba) {
			if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
				/* NOR uses a 64KB erase block (kernel view) and
				 * the GPT reserves only 33 sectors for the last
				 * partition; a user program could otherwise
				 * erase the backup GPT, so reserve one block.
				 */
				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
					 (int)(size_t)(info.size -
					 (info.size - 1) %
					 (0x10000 >> 9) - 1) << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			} else {
				/* NAND flash is erased by block and the GPT
				 * reserves only 33 sectors for the last
				 * partition; a user program could otherwise
				 * erase the backup GPT, so reserve one block.
				 */
				snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
					 (int)(size_t)(info.size -
					 (info.size - 1) %
					 (mtd->erasesize >> 9) - 1) << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		/* clear the DMA-prepare hint once the transfer is done */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

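/*
 * The write path below is only built when MTD_WRITE is enabled.
 *
 * Descriptive comment added for clarity: for BLK_MTD_CONT_WRITE the data
 * is streamed straight through mtd_map_write(). Otherwise mtd_dwrite()
 * performs a read-modify-write: the enclosing erase blocks are read into
 * a bounce buffer via the bad-block map, patched with the caller's data,
 * and written back, so writes that are not erase-block aligned do not
 * clobber neighbouring data.
 */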
#if CONFIG_IS_ENABLED(MTD_WRITE)
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
					 ~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Fail to malloc!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return 0;
}
#endif

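/*
 * mtd_blk_probe() - bind the MTD device behind a blk_desc.
 *
 * Descriptive comment added for clarity: fills in the vendor, product
 * and revision strings and derives the usable LBA count. For NAND-class
 * devices the blocks reserved for the bad block table at the end of the
 * device, plus any trailing bad blocks, are excluded from desc->lba.
 */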
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the first good block from the end;
		 * it marks the end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					    (mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
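/*
 * Usage sketch through the generic blk API (illustrative only; assumes a
 * bound mtd_blk device and a suitably sized buffer):
 *
 *	struct blk_desc *desc;
 *
 *	desc = blk_get_devnum_by_type(IF_TYPE_MTD, BLK_MTD_SPI_NAND);
 *	if (desc)
 *		blk_dread(desc, 0, 8, buf);	// 8 x 512-byte sectors via mtd_dread()
 */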