// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;

static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
					   const struct nand_page_io_req *req,
					   u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/*
	 * The plane number is passed in the MSB just above the column
	 * address, e.g. bit 12 for a 2048-byte page (fls(2048) == 12).
	 */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}
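/*
 * Illustrative usage of spinand_upd_cfg() (this mirrors what
 * spinand_ecc_enable() and spinand_init_quad_enable() do later in this
 * file): the mask selects the bits to touch, val holds their new value,
 * and every other CFG bit keeps its cached value.
 *
 *	// turn the on-die ECC engine on, then off again
 *	ret = spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE);
 *	...
 *	ret = spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0);
 */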
/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}
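/*
 * Note on the QE (quad enable) bit handled above: it only needs to be set
 * when one of the selected cache op templates actually moves data on four
 * lines, e.g. a READ FROM CACHE x4 (0x6b) or PROG LOAD x4 (0x32) variant.
 * Chips that do not advertise SPINAND_HAS_QE_BIT either have no such gate
 * or keep quad mode permanently available, so spinand_init_quad_enable()
 * leaves them untouched.
 */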
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
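/*
 * A worked example of the chunked cache transfers above and below (the
 * controller limit is hypothetical): with a 2048-byte page, a 64-byte OOB
 * area and a controller capped at 512 bytes per transfer,
 * spi_mem_adjust_op_size() clamps op.data.nbytes to 512, so the loop
 * issues five cache ops at columns 0, 512, 1024, 1536 and 2048, the last
 * one moving the remaining 64 OOB bytes.
 */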
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);

	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
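/*
 * Both PROG EXECUTE and BLOCK ERASE follow the same pattern at their call
 * sites in this file: issue a WRITE ENABLE first
 * (spinand_write_enable_op()), then the command itself, then poll the
 * status register until STATUS_BUSY clears (spinand_wait() below), and
 * finally check STATUS_PROG_FAILED or STATUS_ERASE_FAILED in the returned
 * status byte.
 */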
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_BUSY bit has cleared since our
	 * last check.
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(0, spinand->scratchbuf,
						 SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}
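/*
 * Note on bitflip accounting in the read path below: when the chip only
 * reports "has bitflips" without a count, spinand_check_ecc_status()
 * returns eccreq.strength, the worst case. That value feeds max_bitflips,
 * which is meant to push the result past mtd->bitflip_threshold so that
 * wear-leveling layers (e.g. UBI) scrub the block early rather than too
 * late.
 */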
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { 0, 0 };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
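/*
 * Bad block bookkeeping above follows the classic NAND convention: the
 * first two OOB bytes act as the bad block marker (BBM).
 * spinand_isbad() reads them with MTD_OPS_RAW (no OOB layout translation)
 * and treats anything other than 0xff 0xff as bad; spinand_markbad()
 * writes two 0x00 bytes back, again in raw mode, so the marker is not
 * rewritten through an ECC-managed layout.
 */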
static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
	&gigadevice_spinand_manufacturer,
	&macronix_spinand_manufacturer,
	&micron_spinand_manufacturer,
	&toshiba_spinand_manufacturer,
	&winbond_spinand_manufacturer,
	&dosilicon_spinand_manufacturer,
	&esmt_spinand_manufacturer,
	&xtx_spinand_manufacturer,
};

static int spinand_manufacturer_detect(struct spinand_device *spinand)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		ret = spinand_manufacturers[i]->ops->detect(spinand);
		if (ret > 0) {
			spinand->manufacturer = spinand_manufacturers[i];
			return 0;
		} else if (ret < 0) {
			return ret;
		}
	}

	return -ENOTSUPP;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		return spinand->manufacturer->ops->cleanup(spinand);
}
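/*
 * spinand_select_op_variant() below walks the variant list in order and
 * returns the first op the controller can execute for a full page + OOB
 * transfer (possibly split into several chunks). Manufacturer drivers
 * conventionally list variants fastest first (e.g. quad before dual
 * before single I/O), so the first match is also the best one available.
 */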
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @devid: device ID read from the chip
 *
 * Should be used by SPI NAND manufacturer drivers when they want to find a
 * match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size, u8 devid)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (devid != info->devid)
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_read_id_op(spinand, spinand->id.data);
	if (ret)
		return ret;

	spinand->id.len = SPINAND_MAX_ID_LEN;

	ret = spinand_manufacturer_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}
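/*
 * Fallback OOB layout used when a chip provides no ECC layout of its own:
 * no bytes are reserved for ECC, bytes 0-1 are kept for the bad block
 * marker and everything else is handed to the user. Note that the
 * hardcoded 62-byte free length assumes a 64-byte OOB area.
 */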
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};
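/*
 * Bring-up sequence implemented below: reset the chip, read and match its
 * ID, allocate DMA-safe page/OOB buffers, snapshot the per-die CFG
 * registers, set up quad mode, disable OTP access (clear CFG_OTP_ENABLE),
 * run the manufacturer init hook, unlock all blocks on every die, then
 * register the device with the generic NAND core and MTD.
 */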
static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
				   nanddev_per_page_oobsize(nand),
				   GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	nand->bbt.option = NANDDEV_BBT_USE_FLASH;
	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	/*
	 * Use the chip's ECC OOB layout when one is provided; otherwise fall
	 * back to a layout that leaves the whole OOB area (minus the BBM)
	 * available to the user.
	 */
	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nand->eccreq.strength;
	mtd->ecc_step_size = nand->eccreq.step_size;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_bind(struct udevice *udev)
{
	int ret = 0;

#ifdef CONFIG_MTD_BLK
	struct udevice *bdev;

	ret = blk_create_devicef(udev, "mtd_blk", "blk", IF_TYPE_MTD,
				 1, 512, 0, &bdev);
	if (ret)
		printf("Cannot create block device\n");
#endif
	return ret;
}

static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};
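/*
 * A minimal, illustrative device tree node that binds to this driver via
 * the "spi-nand" compatible string; the controller label, chip select and
 * frequency are assumptions:
 *
 *	&spi0 {
 *		flash@0 {
 *			compatible = "spi-nand";
 *			reg = <0>;
 *			spi-max-frequency = <50000000>;
 *		};
 *	};
 */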
#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.bind = spinand_bind,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};