/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
		   mtd->erasesize - 1) >> mtd->erasesize_shift);
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* The range is already initialized */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The scan must not cross blk_cnt */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_SHIFT)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}
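
/*
 * Illustrative note (added commentary, not from the original source):
 * with physical block 5 marked bad, the scan in mtd_blk_map_table_init()
 * would fill the table as
 *
 *   logical block:   3  4  5  6 ...
 *   physical block:  3  4  6  7 ...
 *
 * so get_mtd_blk_map_address() transparently redirects any access inside
 * logical block 5 to physical block 6, while logical blocks pushed past
 * the end of the scanned range are tagged MTD_BLK_TABLE_BLOCK_SHIFT and
 * fall back to on-the-fly bad block skipping in the read/write paths.
 */
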
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9,
					   totalsize + (size_t)mtd->erasesize))
			debug("Map block table failed.\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No map table entry: skip bad blocks on the fly */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
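
/*
 * Illustrative note (added commentary, not from the original source):
 * mtd_map_read() above works in erase-block-sized chunks so each chunk
 * can be remapped independently. Reading 256 KiB from offset 0x21000
 * with 128 KiB (0x20000) blocks issues three mtd_read() calls:
 *
 *   0x21000..0x3ffff (0x1f000 bytes, tail of block 1)
 *   0x40000..0x5ffff (0x20000 bytes, all of block 2)
 *   0x60000..0x60fff (0x1000 bytes, head of block 3)
 *
 * A bad physical block in the middle therefore only shifts the chunks
 * that follow it.
 */
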
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase each block before the first write into it */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 &truncated_write_size, p_buffer);
		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}

static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos = %llx, len = %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) {
			pr_debug("attempt to erase a bad/reserved block @%llx\n",
				 pos);
			pos += mtd->erasesize;
			continue;
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = pos;
		ei.len = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
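
/*
 * Illustrative note (added commentary; the names and numbers are made
 * up): for a boot device called "spi-nand0" with GPT partitions,
 * mtd_part_parse() below builds a kernel command line fragment such as
 *
 *   mtdparts=spi-nand0:0x400000@0x200000(uboot),0x7000000@0x600000(rootfs)
 *
 * with sizes and offsets in bytes (the code shifts 512-byte sectors left
 * by 9). When a "system" partition is found it also appends something
 * like "ubi.mtd=2 root=ubi0:rootfs" to bootargs.
 */
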
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, sizeof(mtd_root_part_info),
			 "%s%d %s", MTD_ROOT_PART_NUM, p - 1,
			 MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info) &&
		    (info.size + info.start + 33) == dev_desc->lba) {
			if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
				/*
				 * The kernel erases NOR in 64KB blocks, but
				 * the GPT reserves only 33 sectors after the
				 * last partition, so a user program erasing
				 * the last block would wipe the backup GPT
				 * table. Reserve one erase block to protect
				 * it.
				 */
				snprintf(mtd_part_info_p, data_len - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)(info.size -
						       (info.size - 1) %
						       (0x10000 >> 9) - 1) << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			} else {
				/*
				 * NAND is erased by its native block size and
				 * the GPT likewise reserves only 33 sectors
				 * for the last partition, so reserve one
				 * erase block here as well to keep the backup
				 * GPT table intact.
				 */
				snprintf(mtd_part_info_p, data_len - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)(info.size -
						       (info.size - 1) %
						       (mtd->erasesize >> 9) - 1) << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		/* Clear the DMA-prepare hint once the transfer is done */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}
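
/*
 * Illustrative note (added commentary, not from the original source):
 * unless BLK_MTD_CONT_WRITE is requested, mtd_dwrite() below performs a
 * read-modify-write over whole erase blocks. Writing 4 KiB at offset
 * 0x21000 with 128 KiB blocks reads back all of 0x20000..0x3ffff,
 * patches the 4 KiB at offset 0x1000 within the bounce buffer, then
 * erases and rewrites the full block.
 */
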
#if CONFIG_IS_ENABLED(MTD_WRITE)
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
					 ~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			/* Read-modify-write the affected erase blocks */
			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}
}

ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return 0;
}
#endif
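
/*
 * Illustrative note (added commentary; the numbers are made up):
 * mtd_blk_probe() below trims desc->lba so the block device ends before
 * the bad-block-table area kept at the end of NAND. If the first good
 * block scanning back from the end is i blocks in, the capacity becomes
 * (mtd->size - i * mtd->erasesize) / 512 sectors; e.g. i = 4 on a
 * 128 MiB device with 128 KiB blocks gives 0x3fc00 sectors instead of
 * 0x40000.
 */
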
static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the last good block counting from the end;
		 * it marks the end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					    (mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
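
/*
 * Usage sketch (added commentary, hypothetical): once this driver has
 * probed, regular blk uclass calls are routed through the hooks above,
 * e.g.
 *
 *   struct blk_desc *desc =
 *           blk_get_devnum_by_type(IF_TYPE_MTD, BLK_MTD_SPI_NAND);
 *   if (desc)
 *           blk_dread(desc, 0, 8, buffer);   read 8 sectors (4 KiB)
 *
 * which lands in mtd_dread() with bad blocks remapped transparently.
 */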