/*
 * (C) Copyright 2019 Rockchip Electronics Co., Ltd
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <blk.h>
#include <boot_rkimg.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <nand.h>
#include <part.h>
#include <spi.h>
#include <dm/device-internal.h>
#include <linux/mtd/spi-nor.h>

#define MTD_PART_NAND_HEAD		"mtdparts="
#define MTD_ROOT_PART_NUM		"ubi.mtd="
#define MTD_ROOT_PART_NAME		"root=ubi0:rootfs"
#define MTD_PART_INFO_MAX_SIZE		512
#define MTD_SINGLE_PART_INFO_MAX_SIZE	40

#define MTD_BLK_TABLE_BLOCK_UNKNOWN	(-2)
#define MTD_BLK_TABLE_BLOCK_SHIFT	(-1)

static int *mtd_map_blk_table;

int mtd_blk_map_table_init(struct blk_desc *desc, loff_t offset, size_t length)
{
	u32 blk_total, blk_begin, blk_cnt;
	struct mtd_info *mtd = NULL;
	int i, j;

	if (!desc)
		return -ENODEV;

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND)
		mtd = dev_get_priv(desc->bdev->parent);
#endif
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
#if defined(CONFIG_MTD_SPI_NAND)
		mtd = desc->bdev->priv;
#endif
	}

	if (!mtd)
		return -ENODEV;

	blk_total = (mtd->size + mtd->erasesize - 1) >> mtd->erasesize_shift;
	if (!mtd_map_blk_table) {
		mtd_map_blk_table = malloc(blk_total * sizeof(int));
		if (!mtd_map_blk_table)
			return -ENOMEM;
		for (i = 0; i < blk_total; i++)
			mtd_map_blk_table[i] = MTD_BLK_TABLE_BLOCK_UNKNOWN;
	}

	blk_begin = (u32)offset >> mtd->erasesize_shift;
	blk_cnt = ((u32)((offset & mtd->erasesize_mask) + length +
		   mtd->erasesize - 1) >> mtd->erasesize_shift);
	if (blk_begin >= blk_total) {
		pr_err("map table blk begin[%d] overflow\n", blk_begin);
		return -EINVAL;
	}
	if ((blk_begin + blk_cnt) > blk_total)
		blk_cnt = blk_total - blk_begin;

	/* Already initialized for this range. */
	if (mtd_map_blk_table[blk_begin] != MTD_BLK_TABLE_BLOCK_UNKNOWN)
		return 0;

	j = 0;
	/* The scan must not run past blk_cnt. */
	for (i = 0; i < blk_cnt; i++) {
		if (j >= blk_cnt)
			mtd_map_blk_table[blk_begin + i] =
				MTD_BLK_TABLE_BLOCK_SHIFT;
		for (; j < blk_cnt; j++) {
			if (!mtd_block_isbad(mtd, (blk_begin + j) <<
					     mtd->erasesize_shift)) {
				mtd_map_blk_table[blk_begin + i] =
					blk_begin + j;
				j++;
				if (j == blk_cnt)
					j++;
				break;
			}
		}
	}

	return 0;
}

static bool get_mtd_blk_map_address(struct mtd_info *mtd, loff_t *off)
{
	bool mapped;
	loff_t offset = *off;
	size_t block_offset = offset & (mtd->erasesize - 1);

	mapped = false;
	if (!mtd_map_blk_table ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    MTD_BLK_TABLE_BLOCK_UNKNOWN ||
	    mtd_map_blk_table[(u64)offset >> mtd->erasesize_shift] ==
	    0xffffffff)
		return mapped;

	mapped = true;
	*off = (loff_t)(((u32)mtd_map_blk_table[(u64)offset >>
		mtd->erasesize_shift] << mtd->erasesize_shift) + block_offset);

	return mapped;
}

void mtd_blk_map_partitions(struct blk_desc *desc)
{
	disk_partition_t info;
	int i, ret;

	if (!desc)
		return;

	if (desc->if_type != IF_TYPE_MTD)
		return;

	for (i = 1; i < MAX_SEARCH_PARTITIONS; i++) {
		ret = part_get_info(desc, i, &info);
		if (ret != 0)
			continue;

		if (mtd_blk_map_table_init(desc, info.start << 9,
					   info.size << 9))
			pr_debug("mtd block map table init failed\n");
	}
}

static __maybe_unused int mtd_map_read(struct mtd_info *mtd, loff_t offset,
				       size_t *length, size_t *actual,
				       loff_t lim, u_char *buffer)
{
	size_t left_to_read = *length;
	u_char *p_buffer = buffer;
	int rval;

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = mtd_read(mtd, mapped_offset, read_length, &read_length,
				p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
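
/*
 * Example (not built): a minimal sketch of how the remap table behaves,
 * guarded by a hypothetical MTD_BLK_EXAMPLE macro that no config defines.
 * mtd_blk_map_table_init() records, for each logical erase block of a
 * partition, the next good physical block; get_mtd_blk_map_address()
 * then redirects offsets through that table so reads and writes silently
 * skip bad blocks.
 */
#ifdef MTD_BLK_EXAMPLE
static void mtd_blk_example_remap(struct mtd_info *mtd)
{
	/* Logical offset: start of erase block 4. */
	loff_t off = (loff_t)4 << mtd->erasesize_shift;

	/*
	 * If block 4 was marked bad when the table was built, entry 4
	 * points at the next good block (e.g. 5), so 'off' comes back
	 * advanced by one erase block.
	 */
	if (get_mtd_blk_map_address(mtd, &off))
		printf("block 4 remapped to block %lld\n",
		       (long long)(off >> mtd->erasesize_shift));
}
#endif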

static __maybe_unused int mtd_map_write(struct mtd_info *mtd, loff_t offset,
					size_t *length, size_t *actual,
					loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	u_char *p_buffer = buffer;
	struct erase_info ei;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;
		loff_t mapped_offset;

		if (offset >= mtd->size)
			return 0;

		mapped_offset = offset;
		if (!get_mtd_blk_map_address(mtd, &mapped_offset)) {
			if (mtd_block_isbad(mtd, mapped_offset &
					    ~(mtd->erasesize - 1))) {
				printf("Skipping bad block 0x%08llx\n",
				       offset & ~(mtd->erasesize - 1));
				offset += mtd->erasesize - block_offset;
				continue;
			}
		}

		/* Erase each block before the first write into it. */
		if (!(mapped_offset & mtd->erasesize_mask)) {
			memset(&ei, 0, sizeof(struct erase_info));
			ei.addr = mapped_offset;
			ei.len = mtd->erasesize;
			rval = mtd_erase(mtd, &ei);
			if (rval) {
				pr_info("error %d while erasing %llx\n",
					rval, mapped_offset);
				return rval;
			}
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
		rval = mtd_write(mtd, mapped_offset, truncated_write_size,
				 &truncated_write_size, p_buffer);
		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		offset += write_size;
		p_buffer += write_size;
		left_to_write -= write_size;
	}

	return 0;
}
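
/*
 * Example (not built, hypothetical MTD_BLK_EXAMPLE guard): mtd_map_write()
 * erases a block the moment the write pointer crosses onto its boundary,
 * so a caller only has to guarantee a page-aligned start offset; block
 * erase, remapping and bad-block skipping all happen inside.
 */
#ifdef MTD_BLK_EXAMPLE
static int mtd_blk_example_write_block(struct mtd_info *mtd, u_char *buf)
{
	/* One full erase block at offset 0; erased and rewritten inside. */
	size_t len = mtd->erasesize;

	return mtd_map_write(mtd, 0, &len, NULL, mtd->size, buf, 0);
}
#endif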

char *mtd_part_parse(void)
{
	char mtd_part_info_temp[MTD_SINGLE_PART_INFO_MAX_SIZE] = {0};
	u32 length, data_len = MTD_PART_INFO_MAX_SIZE;
	char mtd_root_part_info[30] = {0};
	struct blk_desc *dev_desc;
	disk_partition_t info;
	char *mtd_part_info_p;
	struct mtd_info *mtd;
	char *mtd_part_info;
	int ret;
	int p;

	dev_desc = rockchip_get_bootdev();
	if (!dev_desc)
		return NULL;

	mtd = (struct mtd_info *)dev_desc->bdev->priv;
	if (!mtd)
		return NULL;

	p = part_get_info_by_name(dev_desc, PART_SYSTEM, &info);
	if (p > 0) {
		snprintf(mtd_root_part_info, 30, "%s%d %s",
			 MTD_ROOT_PART_NUM, p - 1, MTD_ROOT_PART_NAME);
		env_update("bootargs", mtd_root_part_info);
	}

	mtd_part_info = calloc(MTD_PART_INFO_MAX_SIZE, sizeof(char));
	if (!mtd_part_info) {
		printf("%s: Failed to malloc!\n", __func__);
		return NULL;
	}

	mtd_part_info_p = mtd_part_info;
	snprintf(mtd_part_info_p, data_len - 1, "%s%s:",
		 MTD_PART_NAND_HEAD, dev_desc->product);
	data_len -= strlen(mtd_part_info_p);
	mtd_part_info_p = mtd_part_info_p + strlen(mtd_part_info_p);

	for (p = 1; p < MAX_SEARCH_PARTITIONS; p++) {
		ret = part_get_info(dev_desc, p, &info);
		if (ret)
			break;

		debug("name is %s, start addr is %x\n", info.name,
		      (int)(size_t)info.start);

		snprintf(mtd_part_info_p, data_len - 1, "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		snprintf(mtd_part_info_temp, MTD_SINGLE_PART_INFO_MAX_SIZE - 1,
			 "0x%x@0x%x(%s)",
			 (int)(size_t)info.size << 9,
			 (int)(size_t)info.start << 9,
			 info.name);
		strcat(mtd_part_info, ",");
		if (part_get_info(dev_desc, p + 1, &info)) {
			/*
			 * NAND flash is erased by block, and the GPT
			 * reserves only 33 sectors for the backup table at
			 * the end of the last partition. A user program
			 * erasing the tail of that partition would destroy
			 * the backup GPT, so hold back one full erase block.
			 */
			snprintf(mtd_part_info_p, data_len - 1,
				 "0x%x@0x%x(%s)",
				 (int)(size_t)(info.size -
					       (info.size - 1) %
					       (mtd->erasesize >> 9) - 1) << 9,
				 (int)(size_t)info.start << 9,
				 info.name);
			break;
		}
		length = strlen(mtd_part_info_temp);
		data_len -= length;
		mtd_part_info_p = mtd_part_info_p + length + 1;
		memset(mtd_part_info_temp, 0, MTD_SINGLE_PART_INFO_MAX_SIZE);
	}

	return mtd_part_info;
}
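
/*
 * Example (not built, hypothetical MTD_BLK_EXAMPLE guard): typical use of
 * mtd_part_parse(). The mtdparts string below is illustrative only; real
 * names, sizes and offsets come from the GPT on the boot device, with
 * byte values derived as LBA << 9:
 *
 *   mtdparts=spi-nand0:0x400000@0x100000(boot),0x1000000@0x500000(rootfs)
 */
#ifdef MTD_BLK_EXAMPLE
static void mtd_blk_example_mtdparts(void)
{
	char *parts = mtd_part_parse();

	if (parts) {
		/* Hand the partition layout to the kernel command line. */
		env_update("bootargs", parts);
		free(parts);
	}
}
#endif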

ulong mtd_dread(struct udevice *udev, lbaint_t start,
		lbaint_t blkcnt, void *dst)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	if (blkcnt == 0)
		return 0;

	pr_debug("mtd dread %s %lx %lx\n", mtd->name, start, blkcnt);

	if (desc->devnum == BLK_MTD_NAND) {
#if defined(CONFIG_NAND) && !defined(CONFIG_SPL_BUILD)
		mtd = dev_get_priv(udev->parent);
		if (!mtd)
			return 0;

		ret = nand_read_skip_bad(mtd, off, &rwsize,
					 NULL, mtd->size,
					 (u_char *)(dst));
#else
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
#endif
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NAND) {
		ret = mtd_map_read(mtd, off, &rwsize,
				   NULL, mtd->size,
				   (u_char *)(dst));
		if (!ret)
			return blkcnt;
		else
			return 0;
	} else if (desc->devnum == BLK_MTD_SPI_NOR) {
#if defined(CONFIG_SPI_FLASH_MTD) || defined(CONFIG_SPL_BUILD)
		struct spi_nor *nor = (struct spi_nor *)mtd->priv;
		struct spi_slave *spi = nor->spi;
		size_t retlen_nor;

		if (desc->op_flag == BLK_PRE_RW)
			spi->mode |= SPI_DMA_PREPARE;
		mtd_read(mtd, off, rwsize, &retlen_nor, dst);
		/* Clear the DMA-prepare flag again once the read is done. */
		if (desc->op_flag == BLK_PRE_RW)
			spi->mode &= ~SPI_DMA_PREPARE;

		if (retlen_nor == rwsize)
			return blkcnt;
		else
#endif
			return 0;
	} else {
		return 0;
	}
}

ulong mtd_dwrite(struct udevice *udev, lbaint_t start,
		 lbaint_t blkcnt, const void *src)
{
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
#if defined(CONFIG_NAND) || defined(CONFIG_MTD_SPI_NAND) || defined(CONFIG_SPI_FLASH_MTD)
	loff_t off = (loff_t)(start * 512);
	size_t rwsize = blkcnt * 512;
#endif
	struct mtd_info *mtd;
	int ret = 0;

	if (!desc)
		return ret;

	mtd = desc->bdev->priv;
	if (!mtd)
		return 0;

	pr_debug("mtd dwrite %s %lx %lx\n", mtd->name, start, blkcnt);

	if (blkcnt == 0)
		return 0;

	if (desc->devnum == BLK_MTD_NAND ||
	    desc->devnum == BLK_MTD_SPI_NAND ||
	    desc->devnum == BLK_MTD_SPI_NOR) {
		if (desc->op_flag == BLK_MTD_CONT_WRITE) {
			ret = mtd_map_write(mtd, off, &rwsize,
					    NULL, mtd->size,
					    (u_char *)(src), 0);
			if (!ret)
				return blkcnt;
			else
				return 0;
		} else {
			lbaint_t off_aligned, aligned;
			size_t rwsize_aligned;
			u8 *p_buf;

			/*
			 * Widen the request to erase-block boundaries, then
			 * read-modify-write the whole span.
			 */
			aligned = off & mtd->erasesize_mask;
			off_aligned = off - aligned;
			rwsize_aligned = rwsize + aligned;
			rwsize_aligned = (rwsize_aligned + mtd->erasesize - 1) &
					 ~(mtd->erasesize - 1);

			p_buf = malloc(rwsize_aligned);
			if (!p_buf) {
				printf("%s: Failed to malloc!\n", __func__);
				return 0;
			}

			ret = mtd_map_read(mtd, off_aligned, &rwsize_aligned,
					   NULL, mtd->size,
					   (u_char *)(p_buf));
			if (ret) {
				free(p_buf);
				return 0;
			}

			memcpy(p_buf + aligned, src, rwsize);

			ret = mtd_map_write(mtd, off_aligned, &rwsize_aligned,
					    NULL, mtd->size,
					    (u_char *)(p_buf), 0);
			free(p_buf);
			if (!ret)
				return blkcnt;
			else
				return 0;
		}
	} else {
		return 0;
	}

	return 0;
}
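
/*
 * Example (not built, hypothetical MTD_BLK_EXAMPLE guard): a write through
 * the block interface that is not erase-block aligned. mtd_dwrite() widens
 * it: the surrounding erase block(s) are read out, the 512-byte sectors
 * are patched in, and the whole span is erased and written back.
 */
#ifdef MTD_BLK_EXAMPLE
static ulong mtd_blk_example_unaligned_write(struct blk_desc *desc)
{
	u8 sector[512] = { 0 };

	/* One sector at LBA 3 lands mid-block and triggers the RMW path. */
	return blk_dwrite(desc, 3, 1, sector);
}
#endif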

ulong mtd_derase(struct udevice *udev, lbaint_t start, lbaint_t blkcnt)
{
	/* Not implemented */
	return 0;
}

static int mtd_blk_probe(struct udevice *udev)
{
	struct mtd_info *mtd = dev_get_uclass_priv(udev->parent);
	struct blk_desc *desc = dev_get_uclass_platdata(udev);
	int ret, i;

	desc->bdev->priv = mtd;
	sprintf(desc->vendor, "0x%.4x", 0x2207);
	memcpy(desc->product, mtd->name, strlen(mtd->name));
	memcpy(desc->revision, "V1.00", sizeof("V1.00"));
	if (mtd->type == MTD_NANDFLASH) {
		if (desc->devnum == BLK_MTD_NAND)
			mtd = dev_get_priv(udev->parent);
		/*
		 * Scan backwards for the last good block; it marks the
		 * end of the usable NAND storage and sets the device lba.
		 */
		for (i = 0; i < (mtd->size / mtd->erasesize); i++) {
			ret = mtd_block_isbad(mtd,
					      mtd->size - mtd->erasesize * (i + 1));
			if (!ret) {
				desc->lba = (mtd->size >> 9) -
					    (mtd->erasesize >> 9) * i;
				break;
			}
		}
	} else {
		desc->lba = mtd->size >> 9;
	}

	debug("MTD: desc->lba is %lx\n", desc->lba);

	return 0;
}

static const struct blk_ops mtd_blk_ops = {
	.read	= mtd_dread,
#ifndef CONFIG_SPL_BUILD
	.write	= mtd_dwrite,
	.erase	= mtd_derase,
#endif
};

U_BOOT_DRIVER(mtd_blk) = {
	.name		= "mtd_blk",
	.id		= UCLASS_BLK,
	.ops		= &mtd_blk_ops,
	.probe		= mtd_blk_probe,
};
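
/*
 * Example (not built, hypothetical MTD_BLK_EXAMPLE guard): how a caller
 * might reach this driver through the generic block layer. The devnum
 * constants (BLK_MTD_NAND / BLK_MTD_SPI_NAND / BLK_MTD_SPI_NOR) select
 * which flash backend mtd_dread() routes the request to.
 */
#ifdef MTD_BLK_EXAMPLE
static int mtd_blk_example_read(void *dst)
{
	struct blk_desc *desc =
		blk_get_devnum_by_type(IF_TYPE_MTD, BLK_MTD_SPI_NAND);

	if (!desc)
		return -ENODEV;

	/* 64 sectors from LBA 0, via the bad-block-aware read path. */
	return blk_dread(desc, 0, 64, dst) == 64 ? 0 : -EIO;
}
#endif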