/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>
#ifdef CONFIG_NAND
#include <linux/mtd/nand.h>
#endif

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

/*
 * Logical -> physical erase-block remap table. Entry i holds the index
 * of the physical block backing logical block i, MTD_BLK_TABLE_BLOCK_SHIFT
 * if the block was shifted out by bad-block skipping, or
 * MTD_BLK_TABLE_BLOCK_UNKNOWN if the region has not been scanned yet.
 */
static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc, loff_t offset,
			   size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	switch (desc->devnum) {
	case BLK_MTD_NAND:
	case BLK_MTD_SPI_NAND:
		mtd = desc->bdev->priv;
		break;
	default:
		break;
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = (u32)((offset & mtd->erasesize_mask) + length +
			mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* Already scanned; each region is filled in at most once. */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The mapping must not cross the blk_cnt boundary. */
	for (i = 0; i < blk_cnt; i++) {
		/* Ran out of good blocks: mark the remainder as shifted. */
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] = MTD_BLK_TABLE_BLOCK_SHIFT;
		/* Assign the next good physical block to logical block i. */
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (loff_t)(blk_begin + j) <<
					     mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] = blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	/* 0xffffffff is (int)-1, i.e. MTD_BLK_TABLE_BLOCK_SHIFT. */
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret)
			continue;

		/* Partition start/size are in 512-byte sectors. */
		if (mtd_blk_map_table_init(desc, info.start << 9,
					   info.size << 9))
			pr_debug("mtd block map table fail\n");
	}
}
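/*
 * Illustrative debug sketch (not part of the original driver): dump the
 * logical -> physical erase-block mapping that mtd_blk_map_table_init()
 * built for a region. The helper name mtd_blk_dump_map() is made up for
 * this example; negative entries are the UNKNOWN/SHIFT markers above.
 */
static __maybe_unused void mtd_blk_dump_map(struct mtd_info *mtd,
					    loff_t offset, size_t length)
{
	u32 first = (u32)offset >> mtd->erasesize_shift;
	u32 cnt = (u32)(length + mtd->erasesize - 1) >> mtd->erasesize_shift;
	u32 i;

	if (!mtd_map_blk_table)
		return;

	for (i = first; i < first + cnt; i++)
		printf("logical blk %u -> physical blk %d\n", i,
		       mtd_map_blk_table[i]);
}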
static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			/* No remap table entry: skip bad blocks on the fly. */
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		/* -EUCLEAN only flags corrected bit-flips; the data is good. */
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
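/*
 * Illustrative sketch (not part of the original driver): read one whole
 * partition through the bad-block-aware path above. part_get_info()
 * reports start/size in 512-byte sectors, matching the shifts used
 * elsewhere in this file; read_partition_example() is a made-up name.
 */
static __maybe_unused int read_partition_example(struct blk_desc *desc,
						 int partnum, void *buf)
{
	struct mtd_info *mtd = desc->bdev->priv;
	disk_partition_t info;
	size_t len;

	if (part_get_info(desc, partnum, &info))
		return -ENOENT;

	len = (size_t)info.size << 9;
	return mtd_map_read(mtd, (loff_t)info.start << 9, &len, NULL,
			    mtd->size, (u_char *)buf);
}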
static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase each block before the first write that touches it. */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n", rval,
					mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 &truncated_write_size, p_buffer);
		if (rval) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}
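/*
 * Illustrative sketch (not part of the original driver): write an image
 * that is not erase-block sized by padding its tail, so mtd_map_write()
 * above always programs whole blocks. Assumes offset is erase-block
 * aligned; write_padded_example() is a made-up name.
 */
static __maybe_unused int write_padded_example(struct mtd_info *mtd,
					       loff_t offset, const void *img,
					       size_t img_len)
{
	size_t padded = (img_len + mtd->erasesize - 1) &
			~((size_t)mtd->erasesize - 1);
	u8 *buf = calloc(1, padded);	/* zero-fill the padding tail */
	int ret;

	if (!buf)
		return -ENOMEM;

	memcpy(buf, img, img_len);
	ret = mtd_map_write(mtd, offset, &padded, NULL, mtd->size, buf, 0);
	free(buf);

	return ret;
}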
char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	/* Point the kernel at the UBI rootfs if a system partition exists. */
	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, sizeof(mtd_root_part_info),
			 "%s%d %s", MTD_ROOT_PART_NUM, p - 1,
			 MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: failed to allocate!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD, dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p += strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/*
			 * NAND flash is erased by block, but the GPT only
			 * reserves 33 sectors after the last partition; a
			 * user program erasing the last block would wipe
			 * the backup GPT, so hold back one full block.
			 */
			snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
					       (info.size - 1) %
					       (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p += length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize, NULL, mtd->size,
				   (u_char *)dst);
		return ret ? 0 : blkcnt;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		/* Clear the DMA-prepare hint once the transfer is done. */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize, NULL,
					    mtd->size, (u_char *)src, 0);
			return ret ? 0 : blkcnt;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			/*
			 * Unaligned write: read back the surrounding erase
			 * blocks, merge the new data in, and write the whole
			 * aligned region back (read-modify-write).
			 */
			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
					 ~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: failed to allocate!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size, (u_char *)p_buf);
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size, (u_char *)p_buf, 0);
			free(p_buf);
			return ret ? 0 : blkcnt;
		}
	} else {
		return 0;
	}
}
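/*
 * Illustrative sketch (not part of the original driver): the same I/O
 * through the generic blk uclass, which dispatches to mtd_dread() via
 * mtd_blk_ops below; this is the path commands like "blk read" take.
 * blk_read_example() is a made-up name.
 */
static __maybe_unused int blk_read_example(struct blk_desc *desc)
{
	u8 buf[512];

	/* Read one 512-byte sector from LBA 0 of the MTD blk device. */
	if (blk_dread(desc, 0, 1, buf) != 1)
		return -EIO;

	return 0;
}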
ulong mtd_derase(struct udevice *udev, lbaint_t start, lbaint_t blkcnt)
{
	/* Not implemented */
	return 0;
}

static int mtd_blk_probe(struct udevice *udev)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	struct mtd_info *mtd;
	int ret, i = 0;

	mtd = dev_get_uclass_priv(udev->parent);
	if (mtd->type == MTD_NANDFLASH && desc->devnum == BLK_MTD_NAND) {
#ifndef CONFIG_SPL_BUILD
		mtd = dev_get_priv(udev->parent);
#endif
	}

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
#ifdef CONFIG_NAND
		/* Skip the blocks reserved for the bad block table (BBT). */
		if (desc->devnum == BLK_MTD_NAND)
			i = NAND_BBT_SCAN_MAXBLOCKS;
		else if (desc->devnum == BLK_MTD_SPI_NAND)
			i = NANDDEV_BBT_SCAN_MAXBLOCKS;
#endif

		/*
		 * Find the first good block searching backward from the
		 * end; it marks the last usable LBA of the NAND storage.
		 */
		for (; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					    (mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
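/*
 * Illustrative sketch (not part of the original driver): print the kernel
 * command line fragment built by mtd_part_parse(). The partition names in
 * the sample output are hypothetical, and mtd_blk_show_parts() is a
 * made-up name. The returned buffer is calloc'ed, so the caller frees it.
 */
static __maybe_unused void mtd_blk_show_parts(void)
{
	/* e.g. "mtdparts=spi-nand0:0x100000@0x0(uboot),0x2000000@0x100000(rootfs)" */
	char *parts = mtd_part_parse();

	if (parts) {
		printf("%s\n", parts);
		free(parts);
	}
}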