/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <image.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME_UBIFS	"root=ubi0:rootfs"
#define MTD_ROOT_PART_NAME_SQUASHFS	"root=/dev/ubiblock0_0"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc,
			   loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = (u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* Already mapped, nothing to do */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The scan must not go beyond blk_cnt */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc,
					   info.start << 9,
					   info.size << 9)) {
			pr_debug("mtd block map table init failed\n");
		}
	}
}
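/*
 * Illustrative sketch of the remap table semantics (assumed geometry,
 * not taken from real hardware): with physical block 2 marked bad,
 * mapping a 4-block range starting at block 0 yields
 *
 *	mtd_map_blk_table[0] = 0	logical block 0 -> physical block 0
 *	mtd_map_blk_table[1] = 1	logical block 1 -> physical block 1
 *	mtd_map_blk_table[2] = 3	bad block 2 is skipped
 *	mtd_map_blk_table[3] = MTD_BLK_TABLE_BLOCK_SHIFT (no good block left)
 *
 * so get_mtd_blk_map_address() can translate a logical offset into a
 * good-block offset with a single table lookup instead of rescanning
 * the bad block table on every access.
 */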
void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit)
{
	struct mtd_info *mtd = NULL;
	int totalsize = 0;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

#ifdef CONFIG_SPL_FIT
	if (fit_get_totalsize(fit, &totalsize))
		debug("Cannot find /totalsize node.\n");
#endif
	if (mtd && totalsize) {
		if (mtd_blk_map_table_init(desc, sector << 9,
					   totalsize + (size_t)mtd->erasesize))
			debug("Failed to map block table.\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
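/*
 * Minimal usage sketch for mtd_map_read() (assumed sizes, illustrative
 * only): read 1MiB from byte offset 0x200000, transparently remapping
 * or skipping bad blocks:
 *
 *	size_t len = 0x100000;
 *	u_char *buf = malloc(len);
 *
 *	if (buf && !mtd_map_read(mtd, 0x200000, &len, NULL, mtd->size, buf))
 *		printf("read %zu bytes\n", len);
 *
 * On error, *length is reduced by the amount left unread, so the
 * caller can tell how much data actually arrived.
 */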
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase the block before writing its first page */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 (size_t *)(&truncated_write_size), p_buffer);
		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}

static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset,
					size_t length)
{
	struct erase_info ei;
	loff_t pos, len;
	int ret;

	pos = offset;
	len = length;

	if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) {
		pr_err("Attempt to erase non block-aligned data, pos = %llx, len = %llx\n",
		       pos, len);

		return -EINVAL;
	}

	while (len) {
		loff_t mapped_offset;

		mapped_offset = pos;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, pos) ||
			    mtd_block_isreserved(mtd, pos)) {
				pr_debug("attempt to erase a bad/reserved block @%llx\n",
					 pos);
				pos += mtd->erasesize;
				continue;
			}
		}

		memset(&ei, 0, sizeof(struct erase_info));
		ei.addr = mapped_offset;
		ei.len = mtd->erasesize;
		ret = mtd_erase(mtd, &ei);
		if (ret) {
			pr_err("map_erase error %d while erasing %llx\n", ret,
			       pos);
			return ret;
		}

		pos += mtd->erasesize;
		len -= mtd->erasesize;
	}

	return 0;
}
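/*
 * Illustrative sketch of the erase alignment rule (assumed 128KiB
 * erase block, i.e. erasesize = 0x20000):
 *
 *	mtd_map_erase(mtd, 0x40000, 0x40000);	erases blocks 2 and 3
 *	mtd_map_erase(mtd, 0x41000, 0x1000);	fails with -EINVAL
 *
 * The second call fails because neither the position nor the length
 * is erase-block aligned; mtd_map_erase() rejects such requests
 * up front instead of silently rounding.
 */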
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[40] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		if (strstr(env_get("bootargs"), "rootfstype=squashfs"))
			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_SQUASHFS);
		else
			snprintf(mtd_root_part_info, ARRAY_SIZE(mtd_root_part_info), "%s%d %s",
				 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME_UBIFS);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD,
		 dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/* A partition with the grow tag in the parameter file is resized */
			if ((info.size + info.start + 64) >= dev_desc->lba) {
				if (dev_desc->devnum == BLK_MTD_SPI_NOR) {
					/*
					 * The kernel erases NOR in 64KB blocks, but
					 * the GPT only reserves 33 sectors after the
					 * last partition. A user program could
					 * therefore erase the backup GPT, so reserve
					 * one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (0x10000 >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				} else {
					/*
					 * NAND flash is erased per block, but the GPT
					 * only reserves 33 sectors after the last
					 * partition. A user program could therefore
					 * erase the backup GPT, so reserve one block.
					 */
					snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
						 (int)(size_t)(info.size -
						 (info.size - 1) %
						 (mtd->erasesize >> 9) - 1) << 9,
						 (int)(size_t)info.start << 9,
						 info.name);
					break;
				}
			} else {
				snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
					 "0x%x@0x%x(%s)",
					 (int)(size_t)info.size << 9,
					 (int)(size_t)info.start << 9,
					 info.name);
				break;
			}
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
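/*
 * Example of the kernel command line fragment this builds, for a
 * hypothetical layout (device name and partition sizes are invented
 * for illustration, not from real hardware):
 *
 *	mtdparts=spi-nand0:0x200000@0x100000(uboot),0x400000@0x300000(boot),0x7800000@0x700000(rootfs)
 *
 * Sizes and offsets are in bytes (sector counts shifted left by 9),
 * and the trailing grow partition is shortened by one erase block to
 * protect the backup GPT. When a PART_SYSTEM partition exists,
 * "ubi.mtd=<n> root=..." is also appended to bootargs.
 */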
ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		/* Set the DMA prepare flag for the transfer, clear it after */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
		return 0;
	} else {
		return 0;
	}
}
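/*
 * Usage sketch via the generic block layer (illustrative, assuming an
 * MTD boot device is present): read 16 sectors from LBA 0x800 of the
 * boot device into a buffer:
 *
 *	struct blk_desc *desc = rockchip_get_bootdev();
 *	u8 buf[16 * 512];
 *
 *	if (desc && blk_dread(desc, 0x800, 16, buf) == 16)
 *		printf("read OK\n");
 *
 * blk_dread() dispatches to mtd_dread() through mtd_blk_ops for
 * IF_TYPE_MTD devices; the return value is the number of sectors
 * read, or 0 on failure.
 */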
#if CONFIG_IS_ENABLED(MTD_WRITE)
ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			/* Expand the request to erase-block boundaries */
			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
					 ~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			/* Read-modify-write: preserve data around the target range */
			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}

ulong mtd_derase(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t len = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		ret = mtd_map_erase(mtd, off, len);
		if (ret)
			return ret;
	} else {
		return 0;
	}

	return 0;
}
#endif

static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd;
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	if (strncmp(mtd->name, "nand", 4) == 0)
		memcpy(desc->product, "rk-nand", strlen("rk-nand"));
	else
		memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the last good block at the end of the device;
		 * it marks the usable end LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					    (mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#if CONFIG_IS_ENABLED(MTD_WRITE)
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
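/*
 * End-to-end usage sketch (illustrative, assuming a NAND or SPI NAND
 * boot device): a boot-flow caller typically locates the MTD block
 * device, builds the bad-block map for its partitions, and passes the
 * generated mtdparts string to the kernel:
 *
 *	struct blk_desc *desc = rockchip_get_bootdev();
 *	char *parts;
 *
 *	if (desc) {
 *		mtd_blk_map_partitions(desc);
 *		parts = mtd_part_parse();
 *		if (parts)
 *			env_update("bootargs", parts);
 *	}
 */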