1 /* 2 * (C) Copyright 2019 Rockchip Electronics Co., Ltd 3 * 4 * SPDX-License-Identifier: GPL-2.0+ 5 */ 6 7 #include <common.h> 8 #include <blk.h> 9 #include <boot_rkimg.h> 10 #include <dm.h> 11 #include <errno.h> 12 #include <image.h> 13 #include <linux/log2.h> 14 #include <malloc.h> 15 #include <nand.h> 16 #include <part.h> 17 #include <spi.h> 18 #include <dm/device-internal.h> 19 #include <linux/mtd/spi-nor.h> 20 #ifdef CONFIG_NAND 21 #include <linux/mtd/nand.h> 22 #endif 23 24 // #define MTD_BLK_VERBOSE 25 26 #define MTD_PART_NAND_HEAD "mtdparts=" 27 #define MTD_PART_INFO_MAX_SIZE 512 28 #define MTD_SINGLE_PART_INFO_MAX_SIZE 40 29 30 #define MTD_BLK_TABLE_BLOCK_UNKNOWN (-2) 31 #define MTD_BLK_TABLE_BLOCK_SHIFT (-1) 32 33 static int *mtd_map_blk_table; 34 35 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG) 36 static loff_t usbplug_dummy_partition_write_last_addr; 37 static loff_t usbplug_dummy_partition_write_seek; 38 static loff_t usbplug_dummy_partition_read_last_addr; 39 static loff_t usbplug_dummy_partition_read_seek; 40 #endif 41 42 int mtd_blk_map_table_init(struct blk_desc *desc, 43 loff_t offset, 44 size_t length) 45 { 46 u32 blk_total, blk_begin, blk_cnt; 47 struct mtd_info *mtd = NULL; 48 int i, j; 49 50 if (!desc) 51 return -ENODEV; 52 53 switch (desc->devnum) { 54 case BLK_MTD_NAND: 55 case BLK_MTD_SPI_NAND: 56 mtd = desc->bdev->priv; 57 break; 58 default: 59 break; 60 } 61 62 if (!mtd) { 63 return -ENODEV; 64 } else { 65 blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift; 66 if (!mtd_map_blk_table) { 67 mtd_map_blk_table = (int *)malloc(blk_total * sizeof(int)); 68 if (!mtd_map_blk_table) 69 return -ENOMEM; 70 for (i = 0; i < blk_total; i++) 71 mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN; 72 } 73 74 blk_begin = (u32)offset >> mtd->erasesize_shift; 75 blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length + \ 76 mtd->erasesize - 1) >> mtd->erasesize_shift); 77 if (blk_begin >= blk_total) { 78 pr_err("map table blk begin[%d] overflow\n", blk_begin); 79 return -EINVAL; 80 } 81 if ((blk_begin + blk_cnt) > blk_total) 82 blk_cnt = blk_total - blk_begin; 83 84 if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN) 85 return 0; 86 87 j = 0; 88 /* should not across blk_cnt */ 89 for (i = 0; i < blk_cnt; i++) { 90 if (j >= blk_cnt) 91 mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT; 92 for (; j < blk_cnt; j++) { 93 if (!mtd_block_isbad(mtd, (blk_begin + j) << mtd->erasesize_shift)) { 94 mtd_map_blk_table[blk_begin + i] = blk_begin + j; 95 j++; 96 if (j == blk_cnt) 97 j++; 98 break; 99 } 100 } 101 } 102 103 return 0; 104 } 105 } 106 107 static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off) 108 { 109 bool mapped; 110 loff_t offset = *off; 111 size_t block_offset = offset & (mtd->erasesize - 1); 112 113 mapped = false; 114 if (!mtd_map_blk_table || 115 mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] == 116 MTD_BLK_TABLE_BLOCK_UNKNOWN || 117 mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] == 118 0xffffffff) 119 return mapped; 120 121 mapped = true; 122 *off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >> 123 mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset); 124 125 return mapped; 126 } 127 128 void mtd_blk_map_partitions(struct blk_desc *desc) 129 { 130 disk_partition_t info; 131 int i, ret; 132 133 if (!desc) 134 return; 135 136 if (desc->if_type != IF_TYPE_MTD) 137 return; 138 139 for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) { 140 ret = part_get_info(desc, i, &info); 141 if (ret != 0) 142 break; 143 144 if (mtd_blk_map_table_init(desc, 145 info.start << 9, 146 info.size << 9)) { 147 pr_debug("mtd block map table fail\n"); 148 } 149 } 150 } 151 152 void mtd_blk_map_fit(struct blk_desc *desc, ulong sector, void *fit) 153 { 154 struct mtd_info *mtd = NULL; 155 int totalsize = 0; 156 157 if (desc->if_type != IF_TYPE_MTD) 158 return; 159 160 if (desc->devnum == BLK_MTD_NAND) { 161 #if defined(CONFIG_NAND) 162 mtd = dev_get_priv(desc->bdev->parent); 163 #endif 164 } else if (desc->devnum == BLK_MTD_SPI_NAND) { 165 #if defined(CONFIG_MTD_SPI_NAND) 166 mtd = desc->bdev->priv; 167 #endif 168 } 169 170 #ifdef CONFIG_SPL_FIT 171 if (fit_get_totalsize(fit, &totalsize)) 172 debug("Can not find /totalsize node.\n"); 173 #endif 174 if (mtd && totalsize) { 175 if (mtd_blk_map_table_init(desc, sector << 9, totalsize + (size_t)mtd->erasesize)) 176 debug("Map block table fail.\n"); 177 } 178 } 179 180 static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset, 181 size_t *length, size_t *actual, 182 loff_t lim, u_char *buffer) 183 { 184 size_t left_to_read = *length; 185 u_char *p_buffer = buffer; 186 int rval; 187 188 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG) 189 if (usbplug_dummy_partition_read_last_addr != offset) 190 usbplug_dummy_partition_read_seek = 0; 191 usbplug_dummy_partition_read_last_addr = offset + left_to_read; 192 offset += usbplug_dummy_partition_read_seek; 193 #endif 194 195 while (left_to_read > 0) { 196 size_t block_offset = offset & (mtd->erasesize - 1); 197 size_t read_length; 198 loff_t mapped_offset; 199 200 if (offset >= mtd->size) 201 return 0; 202 203 mapped_offset = offset; 204 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { 205 if (mtd_block_isbad(mtd, mapped_offset & 206 ~(mtd->erasesize - 1))) { 207 printf("Skipping bad block 0x%08x in read\n", 208 (u32)(offset & ~(mtd->erasesize - 1))); 209 offset += mtd->erasesize - block_offset; 210 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG) 211 usbplug_dummy_partition_read_seek += mtd->erasesize; 212 #endif 213 continue; 214 } 215 } 216 217 if (left_to_read < (mtd->erasesize - block_offset)) 218 read_length = left_to_read; 219 else 220 read_length = mtd->erasesize - block_offset; 221 222 rval = mtd_read(mtd, mapped_offset, read_length, &read_length, 223 p_buffer); 224 if (rval && rval != -EUCLEAN) { 225 printf("NAND read from offset %x failed %d\n", 226 (u32)offset, rval); 227 *length -= left_to_read; 228 return rval; 229 } 230 231 left_to_read -= read_length; 232 offset += read_length; 233 p_buffer += read_length; 234 } 235 236 return 0; 237 } 238 239 static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset, 240 size_t *length, size_t *actual, 241 loff_t lim, u_char *buffer, int flags) 242 { 243 int rval = 0, blocksize; 244 size_t left_to_write = *length; 245 u_char *p_buffer = buffer; 246 struct erase_info ei; 247 248 blocksize = mtd->erasesize; 249 250 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG) 251 if (usbplug_dummy_partition_write_last_addr != offset) 252 usbplug_dummy_partition_write_seek = 0; 253 usbplug_dummy_partition_write_last_addr = offset + left_to_write; 254 offset += usbplug_dummy_partition_write_seek; 255 #endif 256 257 /* 258 * nand_write() handles unaligned, partial page writes. 259 * 260 * We allow length to be unaligned, for convenience in 261 * using the $filesize variable. 262 * 263 * However, starting at an unaligned offset makes the 264 * semantics of bad block skipping ambiguous (really, 265 * you should only start a block skipping access at a 266 * partition boundary). So don't try to handle that. 267 */ 268 if ((offset & (mtd->writesize - 1)) != 0) { 269 printf("Attempt to write non page-aligned data\n"); 270 *length = 0; 271 return -EINVAL; 272 } 273 274 while (left_to_write > 0) { 275 size_t block_offset = offset & (mtd->erasesize - 1); 276 size_t write_size, truncated_write_size; 277 loff_t mapped_offset; 278 279 if (offset >= mtd->size) 280 return 0; 281 282 mapped_offset = offset; 283 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { 284 if (mtd_block_isbad(mtd, mapped_offset & 285 ~(mtd->erasesize - 1))) { 286 printf("Skipping bad block 0x%08x in write\n", 287 (u32)(offset & ~(mtd->erasesize - 1))); 288 offset += mtd->erasesize - block_offset; 289 #if CONFIG_IS_ENABLED(SUPPORT_USBPLUG) 290 usbplug_dummy_partition_write_seek += mtd->erasesize; 291 #endif 292 continue; 293 } 294 } 295 296 if (!(mapped_offset & mtd->erasesize_mask)) { 297 memset(&ei, 0, sizeof(struct erase_info)); 298 ei.addr = mapped_offset; 299 ei.len = mtd->erasesize; 300 rval = mtd_erase(mtd, &ei); 301 if (rval) { 302 pr_info("error %d while erasing %llx\n", rval, 303 mapped_offset); 304 return rval; 305 } 306 } 307 308 if (left_to_write < (blocksize - block_offset)) 309 write_size = left_to_write; 310 else 311 write_size = blocksize - block_offset; 312 313 truncated_write_size = write_size; 314 rval = mtd_write(mtd, mapped_offset, truncated_write_size, 315 (size_t *)(&truncated_write_size), p_buffer); 316 317 offset += write_size; 318 p_buffer += write_size; 319 320 if (rval != 0) { 321 printf("NAND write to offset %llx failed %d\n", 322 offset, rval); 323 *length -= left_to_write; 324 return rval; 325 } 326 327 left_to_write -= write_size; 328 } 329 330 return 0; 331 } 332 333 static __maybe_unused int mtd_map_erase(struct mtd_info *mtd, loff_t offset, 334 size_t length) 335 { 336 struct erase_info ei; 337 loff_t pos, len; 338 int ret; 339 340 pos = offset; 341 len = length; 342 343 if ((pos & mtd->erasesize_mask) || (len & mtd->erasesize_mask)) { 344 pr_err("Attempt to erase non block-aligned data, pos= %llx, len= %llx\n", 345 pos, len); 346 347 return -EINVAL; 348 } 349 350 while (len) { 351 loff_t mapped_offset; 352 353 mapped_offset = pos; 354 if (!get_mtd_blk_map_address(mtd, &mapped_offset)) { 355 if (mtd_block_isbad(mtd, pos) || mtd_block_isreserved(mtd, pos)) { 356 pr_debug("attempt to erase a bad/reserved block @%llx\n", 357 pos); 358 pos += mtd->erasesize; 359 continue; 360 } 361 } 362 363 memset(&ei, 0, sizeof(struct erase_info)); 364 ei.addr = mapped_offset; 365 ei.len = mtd->erasesize; 366 ret = mtd_erase(mtd, &ei); 367 if (ret) { 368 pr_err("map_erase error %d while erasing %llx\n", ret, 369 pos); 370 return ret; 371 } 372 373 pos += mtd->erasesize; 374 len -= mtd->erasesize; 375 } 376 377 return 0; 378 } 379 380 char *mtd_part_parse(struct blk_desc *dev_desc) 381 { 382 char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0}; 383 u32 length, data_len = MTD_PART_INFO_MAX_SIZE; 384 disk_partition_t info; 385 char *mtd_part_info_p; 386 struct mtd_info *mtd; 387 char *mtd_part_info; 388 int ret; 389 int p; 390 391 #ifndef CONFIG_SPL_BUILD 392 dev_desc = rockchip_get_bootdev(); 393 #endif 394 if (!dev_desc) 395 return NULL; 396 397 mtd = (struct mtd_info *)dev_desc->bdev->priv; 398 if (!mtd) 399 return NULL; 400 401 mtd_part_info = (char *)calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char)); 402 if (!mtd_part_info) { 403 printf("%s: Fail to malloc!", __func__); 404 return NULL; 405 } 406 407 mtd_part_info_p = mtd_part_info; 408 snprintf(mtd_part_info_p, data_len - 1, "%s%s:", 409 MTD_PART_NAND_HEAD, 410 dev_desc->product); 411 data_len -= strlen(mtd_part_info_p); 412 mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p); 413 414 for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) { 415 ret = part_get_info(dev_desc, p, &info); 416 if (ret) 417 break; 418 419 debug("name is %s, start addr is %x\n", info.name, 420 (int)(size_t)info.start); 421 422 snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)", 423 (int)(size_t)info.size << 9, 424 (int)(size_t)info.start << 9, 425 info.name); 426 snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1, 427 "0x%x@0x%x(%s)", 428 (int)(size_t)info.size << 9, 429 (int)(size_t)info.start << 9, 430 info.name); 431 strcat(mtd_part_info, ","); 432 if (part_get_info(dev_desc, p + 1, &info)) { 433 /* Partition with grow tag in parameter will be resized */ 434 if ((info.size + info.start + 64) >= dev_desc->lba) { 435 if (dev_desc->devnum == BLK_MTD_SPI_NOR) { 436 /* Nor is 64KB erase block(kernel) and gpt table just 437 * resserve 33 sectors for the last partition. This 438 * will erase the backup gpt table by user program, 439 * so reserve one block. 440 */ 441 snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)", 442 (int)(size_t)(info.size - 443 (info.size - 1) % 444 (0x10000 >> 9) - 1) << 9, 445 (int)(size_t)info.start << 9, 446 info.name); 447 break; 448 } else { 449 /* Nand flash is erased by block and gpt table just 450 * resserve 33 sectors for the last partition. This 451 * will erase the backup gpt table by user program, 452 * so reserve one block. 453 */ 454 snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)", 455 (int)(size_t)(info.size - 456 (info.size - 1) % 457 (mtd->erasesize >> 9) - 1) << 9, 458 (int)(size_t)info.start << 9, 459 info.name); 460 break; 461 } 462 } else { 463 snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1, 464 "0x%x@0x%x(%s)", 465 (int)(size_t)info.size << 9, 466 (int)(size_t)info.start << 9, 467 info.name); 468 break; 469 } 470 } 471 length = strlen(mtd_part_info_temp); 472 data_len -= length; 473 mtd_part_info_p = mtd_part_info_p + length + 1; 474 memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE); 475 } 476 477 return mtd_part_info; 478 } 479 480 ulong mtd_dread(struct udevice *udev, lbaint_t start, 481 lbaint_t blkcnt, void *dst) 482 { 483 struct blk_desc *desc = dev_get_uclass_platdata(udev); 484 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD) 485 loff_t off = (loff_t)(start * 512); 486 size_t rwsize = blkcnt * 512; 487 #endif 488 struct mtd_info *mtd; 489 int ret = 0; 490 #ifdef MTD_BLK_VERBOSE 491 ulong us = 1; 492 #endif 493 494 if (!desc) 495 return ret; 496 497 mtd = desc->bdev->priv; 498 if (!mtd) 499 return 0; 500 501 if (blkcnt == 0) 502 return 0; 503 504 #ifdef MTD_BLK_VERBOSE 505 us = get_ticks(); 506 #endif 507 if (desc->devnum == BLK_MTD_NAND) { 508 ret = mtd_map_read(mtd, off, &rwsize, 509 NULL, mtd->size, 510 (u_char *)(dst)); 511 if (!ret) 512 ret = blkcnt; 513 } else if (desc->devnum == BLK_MTD_SPI_NAND) { 514 ret = mtd_map_read(mtd, off, &rwsize, 515 NULL, mtd->size, 516 (u_char *)(dst)); 517 if (!ret) 518 ret = blkcnt; 519 } else if (desc->devnum == BLK_MTD_SPI_NOR) { 520 #if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD) 521 struct spi_nor *nor = (struct spi_nor *)mtd->priv; 522 struct spi_slave *spi = nor->spi; 523 size_t retlen_nor; 524 525 if (desc->op_flag == BLK_PRE_RW) 526 spi->mode |= SPI_DMA_PREPARE; 527 ret = mtd_read(mtd, off, rwsize, &retlen_nor, dst); 528 if (desc->op_flag == BLK_PRE_RW) 529 spi->mode &= ~SPI_DMA_PREPARE; 530 531 if (retlen_nor == rwsize) 532 ret = blkcnt; 533 #endif 534 } 535 #ifdef MTD_BLK_VERBOSE 536 us = (get_ticks() - us) / 24UL; 537 pr_err("mtd dread %s %lx %lx cost %ldus: %ldMB/s\n\n", mtd->name, start, blkcnt, us, (blkcnt / 2) / ((us + 999) / 1000)); 538 #else 539 pr_debug("mtd dread %s %lx %lx\n\n", mtd->name, start, blkcnt); 540 #endif 541 542 return ret; 543 } 544 545 #if CONFIG_IS_ENABLED(MTD_WRITE) 546 ulong mtd_dwrite(struct udevice *udev, lbaint_t start, 547 lbaint_t blkcnt, const void *src) 548 { 549 struct blk_desc *desc = dev_get_uclass_platdata(udev); 550 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD) 551 loff_t off = (loff_t)(start * 512); 552 size_t rwsize = blkcnt * 512; 553 #endif 554 struct mtd_info *mtd; 555 int ret = 0; 556 557 if (!desc) 558 return ret; 559 560 mtd = desc->bdev->priv; 561 if (!mtd) 562 return 0; 563 564 pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt); 565 566 if (blkcnt == 0) 567 return 0; 568 569 if (desc->devnum == BLK_MTD_NAND || 570 desc->devnum == BLK_MTD_SPI_NAND || 571 desc->devnum == BLK_MTD_SPI_NOR) { 572 if (desc->op_flag & BLK_MTD_CONT_WRITE) { 573 ret = mtd_map_write(mtd, off, &rwsize, 574 NULL, mtd->size, 575 (u_char *)(src), 0); 576 if (!ret) 577 return blkcnt; 578 else 579 return 0; 580 } else { 581 lbaint_t off_aligned, alinged; 582 size_t rwsize_aligned; 583 u8 *p_buf; 584 585 alinged = off & mtd->erasesize_mask; 586 off_aligned = off - alinged; 587 rwsize_aligned = rwsize + alinged; 588 rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) & 589 ~(mtd->erasesize - 1); 590 591 p_buf = malloc(rwsize_aligned); 592 if (!p_buf) { 593 printf("%s: Fail to malloc!", __func__); 594 return 0; 595 } 596 597 ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned, 598 NULL, mtd->size, 599 (u_char *)(p_buf)); 600 if (ret) { 601 free(p_buf); 602 return 0; 603 } 604 605 memcpy(p_buf + alinged, src, rwsize); 606 607 ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned, 608 NULL, mtd->size, 609 (u_char *)(p_buf), 0); 610 free(p_buf); 611 if (!ret) 612 return blkcnt; 613 else 614 return 0; 615 } 616 } else { 617 return 0; 618 } 619 620 return 0; 621 } 622 623 ulong mtd_derase(struct udevice *udev, lbaint_t start, 624 lbaint_t blkcnt) 625 { 626 struct blk_desc *desc = dev_get_uclass_platdata(udev); 627 #if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD) 628 loff_t off = (loff_t)(start * 512); 629 size_t len = blkcnt * 512; 630 #endif 631 struct mtd_info *mtd; 632 int ret = 0; 633 634 if (!desc) 635 return ret; 636 637 mtd = desc->bdev->priv; 638 if (!mtd) 639 return 0; 640 641 pr_debug("mtd derase %s %lx %lx\n", mtd->name, start, blkcnt); 642 643 if (blkcnt == 0) 644 return 0; 645 646 if (desc->devnum == BLK_MTD_NAND || 647 desc->devnum == BLK_MTD_SPI_NAND || 648 desc->devnum == BLK_MTD_SPI_NOR) { 649 ret = mtd_map_erase(mtd, off, len); 650 if (ret) 651 return ret; 652 } else { 653 return 0; 654 } 655 656 return blkcnt; 657 } 658 #endif 659 660 static int mtd_blk_probe(struct udevice *udev) 661 { 662 struct mtd_info *mtd; 663 struct blk_desc *desc = dev_get_uclass_platdata(udev); 664 int ret, i = 0; 665 666 mtd = dev_get_uclass_priv(udev->parent); 667 if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) { 668 #ifndef CONFIG_SPL_BUILD 669 mtd = dev_get_priv(udev->parent); 670 #endif 671 } 672 673 /* Fill mtd devices information */ 674 if (is_power_of_2(mtd->erasesize)) 675 mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 676 else 677 mtd->erasesize_shift = 0; 678 679 if (is_power_of_2(mtd->writesize)) 680 mtd->writesize_shift = ffs(mtd->writesize) - 1; 681 else 682 mtd->writesize_shift = 0; 683 684 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1; 685 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1; 686 687 desc->bdev->priv = mtd; 688 sprintf(desc->vendor, "0x%.4x", 0x2207); 689 if (strncmp(mtd->name, "nand", 4) == 0) 690 memcpy(desc->product, "rk-nand", strlen("rk-nand")); 691 else 692 memcpy(desc->product, mtd->name, strlen(mtd->name)); 693 memcpy(desc->revision, "V1.00", sizeof("V1.00")); 694 if (mtd->type == MTD_NANDFLASH) { 695 #ifdef CONFIG_NAND 696 if (desc->devnum == BLK_MTD_NAND) 697 i = NAND_BBT_SCAN_MAXBLOCKS; 698 else if (desc->devnum == BLK_MTD_SPI_NAND) 699 i = NANDDEV_BBT_SCAN_MAXBLOCKS; 700 #endif 701 702 /* 703 * Find the first useful block in the end, 704 * and it is the end lba of the nand storage. 705 */ 706 for (; i < (mtd->size / mtd->erasesize); i++) { 707 ret = mtd_block_isbad(mtd, 708 mtd->size - mtd->erasesize * (i + 1)); 709 if (!ret) { 710 desc->lba = (mtd->size >> 9) - 711 (mtd->erasesize >> 9) * i; 712 break; 713 } 714 } 715 } else { 716 desc->lba = mtd->size >> 9; 717 } 718 719 debug("MTD: desc->lba is %lx\n", desc->lba); 720 721 return 0; 722 } 723 724 static const struct blk_ops mtd_blk_ops = { 725 .read = mtd_dread, 726 #if CONFIG_IS_ENABLED(MTD_WRITE) 727 .write = mtd_dwrite, 728 .erase = mtd_derase, 729 #endif 730 }; 731 732 U_BOOT_DRIVER(mtd_blk) = { 733 .name = "mtd_blk", 734 .id = UCLASS_BLK, 735 .ops = &mtd_blk_ops, 736 .probe = mtd_blk_probe, 737 }; 738