1 /* 2 * Copyright (C) 2014 Panasonic Corporation 3 * Copyright (C) 2013-2014, Altera Corporation <www.altera.com> 4 * Copyright (C) 2009-2010, Intel Corporation and its suppliers. 5 * 6 * SPDX-License-Identifier: GPL-2.0+ 7 */ 8 9 #include <dm.h> 10 #include <nand.h> 11 #include <linux/bitfield.h> 12 #include <linux/dma-direction.h> 13 #include <linux/errno.h> 14 #include <linux/io.h> 15 #include <linux/mtd/mtd.h> 16 #include <linux/mtd/rawnand.h> 17 18 #include "denali.h" 19 20 static dma_addr_t dma_map_single(void *dev, void *ptr, size_t size, 21 enum dma_data_direction dir) 22 { 23 unsigned long addr = (unsigned long)ptr; 24 25 size = ALIGN(size, ARCH_DMA_MINALIGN); 26 27 if (dir == DMA_FROM_DEVICE) 28 invalidate_dcache_range(addr, addr + size); 29 else 30 flush_dcache_range(addr, addr + size); 31 32 return addr; 33 } 34 35 static void dma_unmap_single(void *dev, dma_addr_t addr, size_t size, 36 enum dma_data_direction dir) 37 { 38 size = ALIGN(size, ARCH_DMA_MINALIGN); 39 40 if (dir != DMA_TO_DEVICE) 41 invalidate_dcache_range(addr, addr + size); 42 } 43 44 static int dma_mapping_error(void *dev, dma_addr_t addr) 45 { 46 return 0; 47 } 48 49 #define DENALI_NAND_NAME "denali-nand" 50 51 /* for Indexed Addressing */ 52 #define DENALI_INDEXED_CTRL 0x00 53 #define DENALI_INDEXED_DATA 0x10 54 55 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ 56 #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ 57 #define DENALI_MAP10 (2 << 26) /* high-level control plane */ 58 #define DENALI_MAP11 (3 << 26) /* direct controller access */ 59 60 /* MAP11 access cycle type */ 61 #define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ 62 #define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ 63 #define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ 64 65 /* MAP10 commands */ 66 #define DENALI_ERASE 0x01 67 68 #define DENALI_BANK(denali) ((denali)->active_bank << 24) 69 70 #define DENALI_INVALID_BANK -1 71 #define DENALI_NR_BANKS 4 72 73 static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) 74 { 75 return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); 76 } 77 78 /* 79 * Direct Addressing - the slave address forms the control information (command 80 * type, bank, block, and page address). The slave data is the actual data to 81 * be transferred. This mode requires 28 bits of address region allocated. 82 */ 83 static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) 84 { 85 return ioread32(denali->host + addr); 86 } 87 88 static void denali_direct_write(struct denali_nand_info *denali, u32 addr, 89 u32 data) 90 { 91 iowrite32(data, denali->host + addr); 92 } 93 94 /* 95 * Indexed Addressing - address translation module intervenes in passing the 96 * control information. This mode reduces the required address range. The 97 * control information and transferred data are latched by the registers in 98 * the translation module. 99 */ 100 static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) 101 { 102 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 103 return ioread32(denali->host + DENALI_INDEXED_DATA); 104 } 105 106 static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, 107 u32 data) 108 { 109 iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); 110 iowrite32(data, denali->host + DENALI_INDEXED_DATA); 111 } 112 113 /* 114 * Use the configuration feature register to determine the maximum number of 115 * banks that the hardware supports. 116 */ 117 static void denali_detect_max_banks(struct denali_nand_info *denali) 118 { 119 uint32_t features = ioread32(denali->reg + FEATURES); 120 121 denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); 122 123 /* the encoding changed from rev 5.0 to 5.1 */ 124 if (denali->revision < 0x0501) 125 denali->max_banks <<= 1; 126 } 127 128 static void __maybe_unused denali_enable_irq(struct denali_nand_info *denali) 129 { 130 int i; 131 132 for (i = 0; i < DENALI_NR_BANKS; i++) 133 iowrite32(U32_MAX, denali->reg + INTR_EN(i)); 134 iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); 135 } 136 137 static void __maybe_unused denali_disable_irq(struct denali_nand_info *denali) 138 { 139 int i; 140 141 for (i = 0; i < DENALI_NR_BANKS; i++) 142 iowrite32(0, denali->reg + INTR_EN(i)); 143 iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); 144 } 145 146 static void denali_clear_irq(struct denali_nand_info *denali, 147 int bank, uint32_t irq_status) 148 { 149 /* write one to clear bits */ 150 iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); 151 } 152 153 static void denali_clear_irq_all(struct denali_nand_info *denali) 154 { 155 int i; 156 157 for (i = 0; i < DENALI_NR_BANKS; i++) 158 denali_clear_irq(denali, i, U32_MAX); 159 } 160 161 static void __denali_check_irq(struct denali_nand_info *denali) 162 { 163 uint32_t irq_status; 164 int i; 165 166 for (i = 0; i < DENALI_NR_BANKS; i++) { 167 irq_status = ioread32(denali->reg + INTR_STATUS(i)); 168 denali_clear_irq(denali, i, irq_status); 169 170 if (i != denali->active_bank) 171 continue; 172 173 denali->irq_status |= irq_status; 174 } 175 } 176 177 static void denali_reset_irq(struct denali_nand_info *denali) 178 { 179 denali->irq_status = 0; 180 denali->irq_mask = 0; 181 } 182 183 static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, 184 uint32_t irq_mask) 185 { 186 unsigned long time_left = 1000000; 187 188 while (time_left) { 189 __denali_check_irq(denali); 190 191 if (irq_mask & denali->irq_status) 192 return denali->irq_status; 193 udelay(1); 194 time_left--; 195 } 196 197 if (!time_left) { 198 dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", 199 irq_mask); 200 return 0; 201 } 202 203 return denali->irq_status; 204 } 205 206 static uint32_t denali_check_irq(struct denali_nand_info *denali) 207 { 208 __denali_check_irq(denali); 209 210 return denali->irq_status; 211 } 212 213 static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) 214 { 215 struct denali_nand_info *denali = mtd_to_denali(mtd); 216 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 217 int i; 218 219 for (i = 0; i < len; i++) 220 buf[i] = denali->host_read(denali, addr); 221 } 222 223 static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 224 { 225 struct denali_nand_info *denali = mtd_to_denali(mtd); 226 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 227 int i; 228 229 for (i = 0; i < len; i++) 230 denali->host_write(denali, addr, buf[i]); 231 } 232 233 static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) 234 { 235 struct denali_nand_info *denali = mtd_to_denali(mtd); 236 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 237 uint16_t *buf16 = (uint16_t *)buf; 238 int i; 239 240 for (i = 0; i < len / 2; i++) 241 buf16[i] = denali->host_read(denali, addr); 242 } 243 244 static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, 245 int len) 246 { 247 struct denali_nand_info *denali = mtd_to_denali(mtd); 248 u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); 249 const uint16_t *buf16 = (const uint16_t *)buf; 250 int i; 251 252 for (i = 0; i < len / 2; i++) 253 denali->host_write(denali, addr, buf16[i]); 254 } 255 256 static uint8_t denali_read_byte(struct mtd_info *mtd) 257 { 258 uint8_t byte; 259 260 denali_read_buf(mtd, &byte, 1); 261 262 return byte; 263 } 264 265 static void denali_write_byte(struct mtd_info *mtd, uint8_t byte) 266 { 267 denali_write_buf(mtd, &byte, 1); 268 } 269 270 static uint16_t denali_read_word(struct mtd_info *mtd) 271 { 272 uint16_t word; 273 274 denali_read_buf16(mtd, (uint8_t *)&word, 2); 275 276 return word; 277 } 278 279 static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) 280 { 281 struct denali_nand_info *denali = mtd_to_denali(mtd); 282 uint32_t type; 283 284 if (ctrl & NAND_CLE) 285 type = DENALI_MAP11_CMD; 286 else if (ctrl & NAND_ALE) 287 type = DENALI_MAP11_ADDR; 288 else 289 return; 290 291 /* 292 * Some commands are followed by chip->dev_ready or chip->waitfunc. 293 * irq_status must be cleared here to catch the R/B# interrupt later. 294 */ 295 if (ctrl & NAND_CTRL_CHANGE) 296 denali_reset_irq(denali); 297 298 denali->host_write(denali, DENALI_BANK(denali) | type, dat); 299 } 300 301 static int denali_dev_ready(struct mtd_info *mtd) 302 { 303 struct denali_nand_info *denali = mtd_to_denali(mtd); 304 305 return !!(denali_check_irq(denali) & INTR__INT_ACT); 306 } 307 308 static int denali_check_erased_page(struct mtd_info *mtd, 309 struct nand_chip *chip, uint8_t *buf, 310 unsigned long uncor_ecc_flags, 311 unsigned int max_bitflips) 312 { 313 uint8_t *ecc_code = chip->buffers->ecccode; 314 int ecc_steps = chip->ecc.steps; 315 int ecc_size = chip->ecc.size; 316 int ecc_bytes = chip->ecc.bytes; 317 int i, ret, stat; 318 319 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, 320 chip->ecc.total); 321 if (ret) 322 return ret; 323 324 for (i = 0; i < ecc_steps; i++) { 325 if (!(uncor_ecc_flags & BIT(i))) 326 continue; 327 328 stat = nand_check_erased_ecc_chunk(buf, ecc_size, 329 ecc_code, ecc_bytes, 330 NULL, 0, 331 chip->ecc.strength); 332 if (stat < 0) { 333 mtd->ecc_stats.failed++; 334 } else { 335 mtd->ecc_stats.corrected += stat; 336 max_bitflips = max_t(unsigned int, max_bitflips, stat); 337 } 338 339 buf += ecc_size; 340 ecc_code += ecc_bytes; 341 } 342 343 return max_bitflips; 344 } 345 346 static int denali_hw_ecc_fixup(struct mtd_info *mtd, 347 struct denali_nand_info *denali, 348 unsigned long *uncor_ecc_flags) 349 { 350 struct nand_chip *chip = mtd_to_nand(mtd); 351 int bank = denali->active_bank; 352 uint32_t ecc_cor; 353 unsigned int max_bitflips; 354 355 ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); 356 ecc_cor >>= ECC_COR_INFO__SHIFT(bank); 357 358 if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { 359 /* 360 * This flag is set when uncorrectable error occurs at least in 361 * one ECC sector. We can not know "how many sectors", or 362 * "which sector(s)". We need erase-page check for all sectors. 363 */ 364 *uncor_ecc_flags = GENMASK(chip->ecc.steps - 1, 0); 365 return 0; 366 } 367 368 max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); 369 370 /* 371 * The register holds the maximum of per-sector corrected bitflips. 372 * This is suitable for the return value of the ->read_page() callback. 373 * Unfortunately, we can not know the total number of corrected bits in 374 * the page. Increase the stats by max_bitflips. (compromised solution) 375 */ 376 mtd->ecc_stats.corrected += max_bitflips; 377 378 return max_bitflips; 379 } 380 381 static int denali_sw_ecc_fixup(struct mtd_info *mtd, 382 struct denali_nand_info *denali, 383 unsigned long *uncor_ecc_flags, uint8_t *buf) 384 { 385 unsigned int ecc_size = denali->nand.ecc.size; 386 unsigned int bitflips = 0; 387 unsigned int max_bitflips = 0; 388 uint32_t err_addr, err_cor_info; 389 unsigned int err_byte, err_sector, err_device; 390 uint8_t err_cor_value; 391 unsigned int prev_sector = 0; 392 uint32_t irq_status; 393 394 denali_reset_irq(denali); 395 396 do { 397 err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); 398 err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); 399 err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); 400 401 err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); 402 err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, 403 err_cor_info); 404 err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, 405 err_cor_info); 406 407 /* reset the bitflip counter when crossing ECC sector */ 408 if (err_sector != prev_sector) 409 bitflips = 0; 410 411 if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { 412 /* 413 * Check later if this is a real ECC error, or 414 * an erased sector. 415 */ 416 *uncor_ecc_flags |= BIT(err_sector); 417 } else if (err_byte < ecc_size) { 418 /* 419 * If err_byte is larger than ecc_size, means error 420 * happened in OOB, so we ignore it. It's no need for 421 * us to correct it err_device is represented the NAND 422 * error bits are happened in if there are more than 423 * one NAND connected. 424 */ 425 int offset; 426 unsigned int flips_in_byte; 427 428 offset = (err_sector * ecc_size + err_byte) * 429 denali->devs_per_cs + err_device; 430 431 /* correct the ECC error */ 432 flips_in_byte = hweight8(buf[offset] ^ err_cor_value); 433 buf[offset] ^= err_cor_value; 434 mtd->ecc_stats.corrected += flips_in_byte; 435 bitflips += flips_in_byte; 436 437 max_bitflips = max(max_bitflips, bitflips); 438 } 439 440 prev_sector = err_sector; 441 } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); 442 443 /* 444 * Once handle all ECC errors, controller will trigger an 445 * ECC_TRANSACTION_DONE interrupt. 446 */ 447 irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); 448 if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) 449 return -EIO; 450 451 return max_bitflips; 452 } 453 454 static void denali_setup_dma64(struct denali_nand_info *denali, 455 dma_addr_t dma_addr, int page, int write) 456 { 457 uint32_t mode; 458 const int page_count = 1; 459 460 mode = DENALI_MAP10 | DENALI_BANK(denali) | page; 461 462 /* DMA is a three step process */ 463 464 /* 465 * 1. setup transfer type, interrupt when complete, 466 * burst len = 64 bytes, the number of pages 467 */ 468 denali->host_write(denali, mode, 469 0x01002000 | (64 << 16) | (write << 8) | page_count); 470 471 /* 2. set memory low address */ 472 denali->host_write(denali, mode, lower_32_bits(dma_addr)); 473 474 /* 3. set memory high address */ 475 denali->host_write(denali, mode, upper_32_bits(dma_addr)); 476 } 477 478 static void denali_setup_dma32(struct denali_nand_info *denali, 479 dma_addr_t dma_addr, int page, int write) 480 { 481 uint32_t mode; 482 const int page_count = 1; 483 484 mode = DENALI_MAP10 | DENALI_BANK(denali); 485 486 /* DMA is a four step process */ 487 488 /* 1. setup transfer type and # of pages */ 489 denali->host_write(denali, mode | page, 490 0x2000 | (write << 8) | page_count); 491 492 /* 2. set memory high address bits 23:8 */ 493 denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); 494 495 /* 3. set memory low address bits 23:8 */ 496 denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); 497 498 /* 4. interrupt when complete, burst len = 64 bytes */ 499 denali->host_write(denali, mode | 0x14000, 0x2400); 500 } 501 502 static int denali_pio_read(struct denali_nand_info *denali, void *buf, 503 size_t size, int page, int raw) 504 { 505 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 506 uint32_t *buf32 = (uint32_t *)buf; 507 uint32_t irq_status, ecc_err_mask; 508 int i; 509 510 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 511 ecc_err_mask = INTR__ECC_UNCOR_ERR; 512 else 513 ecc_err_mask = INTR__ECC_ERR; 514 515 denali_reset_irq(denali); 516 517 for (i = 0; i < size / 4; i++) 518 *buf32++ = denali->host_read(denali, addr); 519 520 irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); 521 if (!(irq_status & INTR__PAGE_XFER_INC)) 522 return -EIO; 523 524 if (irq_status & INTR__ERASED_PAGE) 525 memset(buf, 0xff, size); 526 527 return irq_status & ecc_err_mask ? -EBADMSG : 0; 528 } 529 530 static int denali_pio_write(struct denali_nand_info *denali, 531 const void *buf, size_t size, int page, int raw) 532 { 533 u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; 534 const uint32_t *buf32 = (uint32_t *)buf; 535 uint32_t irq_status; 536 int i; 537 538 denali_reset_irq(denali); 539 540 for (i = 0; i < size / 4; i++) 541 denali->host_write(denali, addr, *buf32++); 542 543 irq_status = denali_wait_for_irq(denali, 544 INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); 545 if (!(irq_status & INTR__PROGRAM_COMP)) 546 return -EIO; 547 548 return 0; 549 } 550 551 static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, 552 size_t size, int page, int raw, int write) 553 { 554 if (write) 555 return denali_pio_write(denali, buf, size, page, raw); 556 else 557 return denali_pio_read(denali, buf, size, page, raw); 558 } 559 560 static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, 561 size_t size, int page, int raw, int write) 562 { 563 dma_addr_t dma_addr; 564 uint32_t irq_mask, irq_status, ecc_err_mask; 565 enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; 566 int ret = 0; 567 568 dma_addr = dma_map_single(denali->dev, buf, size, dir); 569 if (dma_mapping_error(denali->dev, dma_addr)) { 570 dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); 571 return denali_pio_xfer(denali, buf, size, page, raw, write); 572 } 573 574 if (write) { 575 /* 576 * INTR__PROGRAM_COMP is never asserted for the DMA transfer. 577 * We can use INTR__DMA_CMD_COMP instead. This flag is asserted 578 * when the page program is completed. 579 */ 580 irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; 581 ecc_err_mask = 0; 582 } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { 583 irq_mask = INTR__DMA_CMD_COMP; 584 ecc_err_mask = INTR__ECC_UNCOR_ERR; 585 } else { 586 irq_mask = INTR__DMA_CMD_COMP; 587 ecc_err_mask = INTR__ECC_ERR; 588 } 589 590 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 591 /* 592 * The ->setup_dma() hook kicks DMA by using the data/command 593 * interface, which belongs to a different AXI port from the 594 * register interface. Read back the register to avoid a race. 595 */ 596 ioread32(denali->reg + DMA_ENABLE); 597 598 denali_reset_irq(denali); 599 denali->setup_dma(denali, dma_addr, page, write); 600 601 irq_status = denali_wait_for_irq(denali, irq_mask); 602 if (!(irq_status & INTR__DMA_CMD_COMP)) 603 ret = -EIO; 604 else if (irq_status & ecc_err_mask) 605 ret = -EBADMSG; 606 607 iowrite32(0, denali->reg + DMA_ENABLE); 608 609 dma_unmap_single(denali->dev, dma_addr, size, dir); 610 611 if (irq_status & INTR__ERASED_PAGE) 612 memset(buf, 0xff, size); 613 614 return ret; 615 } 616 617 static int denali_data_xfer(struct denali_nand_info *denali, void *buf, 618 size_t size, int page, int raw, int write) 619 { 620 iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); 621 iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, 622 denali->reg + TRANSFER_SPARE_REG); 623 624 if (denali->dma_avail) 625 return denali_dma_xfer(denali, buf, size, page, raw, write); 626 else 627 return denali_pio_xfer(denali, buf, size, page, raw, write); 628 } 629 630 static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, 631 int page, int write) 632 { 633 struct denali_nand_info *denali = mtd_to_denali(mtd); 634 unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0; 635 unsigned int rnd_cmd = write ? NAND_CMD_RNDIN : NAND_CMD_RNDOUT; 636 int writesize = mtd->writesize; 637 int oobsize = mtd->oobsize; 638 uint8_t *bufpoi = chip->oob_poi; 639 int ecc_steps = chip->ecc.steps; 640 int ecc_size = chip->ecc.size; 641 int ecc_bytes = chip->ecc.bytes; 642 int oob_skip = denali->oob_skip_bytes; 643 size_t size = writesize + oobsize; 644 int i, pos, len; 645 646 /* BBM at the beginning of the OOB area */ 647 chip->cmdfunc(mtd, start_cmd, writesize, page); 648 if (write) 649 chip->write_buf(mtd, bufpoi, oob_skip); 650 else 651 chip->read_buf(mtd, bufpoi, oob_skip); 652 bufpoi += oob_skip; 653 654 /* OOB ECC */ 655 for (i = 0; i < ecc_steps; i++) { 656 pos = ecc_size + i * (ecc_size + ecc_bytes); 657 len = ecc_bytes; 658 659 if (pos >= writesize) 660 pos += oob_skip; 661 else if (pos + len > writesize) 662 len = writesize - pos; 663 664 chip->cmdfunc(mtd, rnd_cmd, pos, -1); 665 if (write) 666 chip->write_buf(mtd, bufpoi, len); 667 else 668 chip->read_buf(mtd, bufpoi, len); 669 bufpoi += len; 670 if (len < ecc_bytes) { 671 len = ecc_bytes - len; 672 chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1); 673 if (write) 674 chip->write_buf(mtd, bufpoi, len); 675 else 676 chip->read_buf(mtd, bufpoi, len); 677 bufpoi += len; 678 } 679 } 680 681 /* OOB free */ 682 len = oobsize - (bufpoi - chip->oob_poi); 683 chip->cmdfunc(mtd, rnd_cmd, size - len, -1); 684 if (write) 685 chip->write_buf(mtd, bufpoi, len); 686 else 687 chip->read_buf(mtd, bufpoi, len); 688 } 689 690 static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 691 uint8_t *buf, int oob_required, int page) 692 { 693 struct denali_nand_info *denali = mtd_to_denali(mtd); 694 int writesize = mtd->writesize; 695 int oobsize = mtd->oobsize; 696 int ecc_steps = chip->ecc.steps; 697 int ecc_size = chip->ecc.size; 698 int ecc_bytes = chip->ecc.bytes; 699 void *tmp_buf = denali->buf; 700 int oob_skip = denali->oob_skip_bytes; 701 size_t size = writesize + oobsize; 702 int ret, i, pos, len; 703 704 ret = denali_data_xfer(denali, tmp_buf, size, page, 1, 0); 705 if (ret) 706 return ret; 707 708 /* Arrange the buffer for syndrome payload/ecc layout */ 709 if (buf) { 710 for (i = 0; i < ecc_steps; i++) { 711 pos = i * (ecc_size + ecc_bytes); 712 len = ecc_size; 713 714 if (pos >= writesize) 715 pos += oob_skip; 716 else if (pos + len > writesize) 717 len = writesize - pos; 718 719 memcpy(buf, tmp_buf + pos, len); 720 buf += len; 721 if (len < ecc_size) { 722 len = ecc_size - len; 723 memcpy(buf, tmp_buf + writesize + oob_skip, 724 len); 725 buf += len; 726 } 727 } 728 } 729 730 if (oob_required) { 731 uint8_t *oob = chip->oob_poi; 732 733 /* BBM at the beginning of the OOB area */ 734 memcpy(oob, tmp_buf + writesize, oob_skip); 735 oob += oob_skip; 736 737 /* OOB ECC */ 738 for (i = 0; i < ecc_steps; i++) { 739 pos = ecc_size + i * (ecc_size + ecc_bytes); 740 len = ecc_bytes; 741 742 if (pos >= writesize) 743 pos += oob_skip; 744 else if (pos + len > writesize) 745 len = writesize - pos; 746 747 memcpy(oob, tmp_buf + pos, len); 748 oob += len; 749 if (len < ecc_bytes) { 750 len = ecc_bytes - len; 751 memcpy(oob, tmp_buf + writesize + oob_skip, 752 len); 753 oob += len; 754 } 755 } 756 757 /* OOB free */ 758 len = oobsize - (oob - chip->oob_poi); 759 memcpy(oob, tmp_buf + size - len, len); 760 } 761 762 return 0; 763 } 764 765 static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 766 int page) 767 { 768 denali_oob_xfer(mtd, chip, page, 0); 769 770 return 0; 771 } 772 773 static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 774 int page) 775 { 776 struct denali_nand_info *denali = mtd_to_denali(mtd); 777 int status; 778 779 denali_reset_irq(denali); 780 781 denali_oob_xfer(mtd, chip, page, 1); 782 783 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 784 status = chip->waitfunc(mtd, chip); 785 786 return status & NAND_STATUS_FAIL ? -EIO : 0; 787 } 788 789 static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 790 uint8_t *buf, int oob_required, int page) 791 { 792 struct denali_nand_info *denali = mtd_to_denali(mtd); 793 unsigned long uncor_ecc_flags = 0; 794 int stat = 0; 795 int ret; 796 797 ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); 798 if (ret && ret != -EBADMSG) 799 return ret; 800 801 if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) 802 stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); 803 else if (ret == -EBADMSG) 804 stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); 805 806 if (stat < 0) 807 return stat; 808 809 if (uncor_ecc_flags) { 810 ret = denali_read_oob(mtd, chip, page); 811 if (ret) 812 return ret; 813 814 stat = denali_check_erased_page(mtd, chip, buf, 815 uncor_ecc_flags, stat); 816 } 817 818 return stat; 819 } 820 821 static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 822 const uint8_t *buf, int oob_required, int page) 823 { 824 struct denali_nand_info *denali = mtd_to_denali(mtd); 825 int writesize = mtd->writesize; 826 int oobsize = mtd->oobsize; 827 int ecc_steps = chip->ecc.steps; 828 int ecc_size = chip->ecc.size; 829 int ecc_bytes = chip->ecc.bytes; 830 void *tmp_buf = denali->buf; 831 int oob_skip = denali->oob_skip_bytes; 832 size_t size = writesize + oobsize; 833 int i, pos, len; 834 835 /* 836 * Fill the buffer with 0xff first except the full page transfer. 837 * This simplifies the logic. 838 */ 839 if (!buf || !oob_required) 840 memset(tmp_buf, 0xff, size); 841 842 /* Arrange the buffer for syndrome payload/ecc layout */ 843 if (buf) { 844 for (i = 0; i < ecc_steps; i++) { 845 pos = i * (ecc_size + ecc_bytes); 846 len = ecc_size; 847 848 if (pos >= writesize) 849 pos += oob_skip; 850 else if (pos + len > writesize) 851 len = writesize - pos; 852 853 memcpy(tmp_buf + pos, buf, len); 854 buf += len; 855 if (len < ecc_size) { 856 len = ecc_size - len; 857 memcpy(tmp_buf + writesize + oob_skip, buf, 858 len); 859 buf += len; 860 } 861 } 862 } 863 864 if (oob_required) { 865 const uint8_t *oob = chip->oob_poi; 866 867 /* BBM at the beginning of the OOB area */ 868 memcpy(tmp_buf + writesize, oob, oob_skip); 869 oob += oob_skip; 870 871 /* OOB ECC */ 872 for (i = 0; i < ecc_steps; i++) { 873 pos = ecc_size + i * (ecc_size + ecc_bytes); 874 len = ecc_bytes; 875 876 if (pos >= writesize) 877 pos += oob_skip; 878 else if (pos + len > writesize) 879 len = writesize - pos; 880 881 memcpy(tmp_buf + pos, oob, len); 882 oob += len; 883 if (len < ecc_bytes) { 884 len = ecc_bytes - len; 885 memcpy(tmp_buf + writesize + oob_skip, oob, 886 len); 887 oob += len; 888 } 889 } 890 891 /* OOB free */ 892 len = oobsize - (oob - chip->oob_poi); 893 memcpy(tmp_buf + size - len, oob, len); 894 } 895 896 return denali_data_xfer(denali, tmp_buf, size, page, 1, 1); 897 } 898 899 static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 900 const uint8_t *buf, int oob_required, int page) 901 { 902 struct denali_nand_info *denali = mtd_to_denali(mtd); 903 904 return denali_data_xfer(denali, (void *)buf, mtd->writesize, 905 page, 0, 1); 906 } 907 908 static void denali_select_chip(struct mtd_info *mtd, int chip) 909 { 910 struct denali_nand_info *denali = mtd_to_denali(mtd); 911 912 denali->active_bank = chip; 913 } 914 915 static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) 916 { 917 struct denali_nand_info *denali = mtd_to_denali(mtd); 918 uint32_t irq_status; 919 920 /* R/B# pin transitioned from low to high? */ 921 irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); 922 923 return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; 924 } 925 926 static int denali_erase(struct mtd_info *mtd, int page) 927 { 928 struct denali_nand_info *denali = mtd_to_denali(mtd); 929 uint32_t irq_status; 930 931 denali_reset_irq(denali); 932 933 denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, 934 DENALI_ERASE); 935 936 /* wait for erase to complete or failure to occur */ 937 irq_status = denali_wait_for_irq(denali, 938 INTR__ERASE_COMP | INTR__ERASE_FAIL); 939 940 return irq_status & INTR__ERASE_COMP ? 0 : NAND_STATUS_FAIL; 941 } 942 943 static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, 944 const struct nand_data_interface *conf) 945 { 946 struct denali_nand_info *denali = mtd_to_denali(mtd); 947 const struct nand_sdr_timings *timings; 948 unsigned long t_x, mult_x; 949 int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; 950 int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; 951 int addr_2_data_mask; 952 uint32_t tmp; 953 954 timings = nand_get_sdr_timings(conf); 955 if (IS_ERR(timings)) 956 return PTR_ERR(timings); 957 958 /* clk_x period in picoseconds */ 959 t_x = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); 960 if (!t_x) 961 return -EINVAL; 962 963 /* 964 * The bus interface clock, clk_x, is phase aligned with the core clock. 965 * The clk_x is an integral multiple N of the core clk. The value N is 966 * configured at IP delivery time, and its available value is 4, 5, 6. 967 */ 968 mult_x = DIV_ROUND_CLOSEST_ULL(denali->clk_x_rate, denali->clk_rate); 969 if (mult_x < 4 || mult_x > 6) 970 return -EINVAL; 971 972 if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) 973 return 0; 974 975 /* tREA -> ACC_CLKS */ 976 acc_clks = DIV_ROUND_UP(timings->tREA_max, t_x); 977 acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); 978 979 tmp = ioread32(denali->reg + ACC_CLKS); 980 tmp &= ~ACC_CLKS__VALUE; 981 tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); 982 iowrite32(tmp, denali->reg + ACC_CLKS); 983 984 /* tRWH -> RE_2_WE */ 985 re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_x); 986 re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); 987 988 tmp = ioread32(denali->reg + RE_2_WE); 989 tmp &= ~RE_2_WE__VALUE; 990 tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); 991 iowrite32(tmp, denali->reg + RE_2_WE); 992 993 /* tRHZ -> RE_2_RE */ 994 re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_x); 995 re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); 996 997 tmp = ioread32(denali->reg + RE_2_RE); 998 tmp &= ~RE_2_RE__VALUE; 999 tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); 1000 iowrite32(tmp, denali->reg + RE_2_RE); 1001 1002 /* 1003 * tCCS, tWHR -> WE_2_RE 1004 * 1005 * With WE_2_RE properly set, the Denali controller automatically takes 1006 * care of the delay; the driver need not set NAND_WAIT_TCCS. 1007 */ 1008 we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), t_x); 1009 we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); 1010 1011 tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); 1012 tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; 1013 tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); 1014 iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); 1015 1016 /* tADL -> ADDR_2_DATA */ 1017 1018 /* for older versions, ADDR_2_DATA is only 6 bit wide */ 1019 addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1020 if (denali->revision < 0x0501) 1021 addr_2_data_mask >>= 1; 1022 1023 addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_x); 1024 addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); 1025 1026 tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); 1027 tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; 1028 tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); 1029 iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); 1030 1031 /* tREH, tWH -> RDWR_EN_HI_CNT */ 1032 rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), 1033 t_x); 1034 rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); 1035 1036 tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); 1037 tmp &= ~RDWR_EN_HI_CNT__VALUE; 1038 tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); 1039 iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); 1040 1041 /* tRP, tWP -> RDWR_EN_LO_CNT */ 1042 rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), t_x); 1043 rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), 1044 t_x); 1045 rdwr_en_lo_hi = max_t(int, rdwr_en_lo_hi, mult_x); 1046 rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); 1047 rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); 1048 1049 tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); 1050 tmp &= ~RDWR_EN_LO_CNT__VALUE; 1051 tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); 1052 iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); 1053 1054 /* tCS, tCEA -> CS_SETUP_CNT */ 1055 cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_x) - rdwr_en_lo, 1056 (int)DIV_ROUND_UP(timings->tCEA_max, t_x) - acc_clks, 1057 0); 1058 cs_setup = min_t(int, cs_setup, CS_SETUP_CNT__VALUE); 1059 1060 tmp = ioread32(denali->reg + CS_SETUP_CNT); 1061 tmp &= ~CS_SETUP_CNT__VALUE; 1062 tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); 1063 iowrite32(tmp, denali->reg + CS_SETUP_CNT); 1064 1065 return 0; 1066 } 1067 1068 static void denali_reset_banks(struct denali_nand_info *denali) 1069 { 1070 u32 irq_status; 1071 int i; 1072 1073 for (i = 0; i < denali->max_banks; i++) { 1074 denali->active_bank = i; 1075 1076 denali_reset_irq(denali); 1077 1078 iowrite32(DEVICE_RESET__BANK(i), 1079 denali->reg + DEVICE_RESET); 1080 1081 irq_status = denali_wait_for_irq(denali, 1082 INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); 1083 if (!(irq_status & INTR__INT_ACT)) 1084 break; 1085 } 1086 1087 dev_dbg(denali->dev, "%d chips connected\n", i); 1088 denali->max_banks = i; 1089 } 1090 1091 static void denali_hw_init(struct denali_nand_info *denali) 1092 { 1093 /* 1094 * The REVISION register may not be reliable. Platforms are allowed to 1095 * override it. 1096 */ 1097 if (!denali->revision) 1098 denali->revision = swab16(ioread32(denali->reg + REVISION)); 1099 1100 /* 1101 * tell driver how many bit controller will skip before writing 1102 * ECC code in OOB. This is normally used for bad block marker 1103 */ 1104 denali->oob_skip_bytes = CONFIG_NAND_DENALI_SPARE_AREA_SKIP_BYTES; 1105 iowrite32(denali->oob_skip_bytes, denali->reg + SPARE_AREA_SKIP_BYTES); 1106 denali_detect_max_banks(denali); 1107 iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); 1108 iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); 1109 1110 iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); 1111 } 1112 1113 int denali_calc_ecc_bytes(int step_size, int strength) 1114 { 1115 /* BCH code. Denali requires ecc.bytes to be multiple of 2 */ 1116 return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; 1117 } 1118 EXPORT_SYMBOL(denali_calc_ecc_bytes); 1119 1120 static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, 1121 struct denali_nand_info *denali) 1122 { 1123 int oobavail = mtd->oobsize - denali->oob_skip_bytes; 1124 int ret; 1125 1126 /* 1127 * If .size and .strength are already set (usually by DT), 1128 * check if they are supported by this controller. 1129 */ 1130 if (chip->ecc.size && chip->ecc.strength) 1131 return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); 1132 1133 /* 1134 * We want .size and .strength closest to the chip's requirement 1135 * unless NAND_ECC_MAXIMIZE is requested. 1136 */ 1137 if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { 1138 ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); 1139 if (!ret) 1140 return 0; 1141 } 1142 1143 /* Max ECC strength is the last thing we can do */ 1144 return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); 1145 } 1146 1147 static struct nand_ecclayout nand_oob; 1148 1149 static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, 1150 struct mtd_oob_region *oobregion) 1151 { 1152 struct denali_nand_info *denali = mtd_to_denali(mtd); 1153 struct nand_chip *chip = mtd_to_nand(mtd); 1154 1155 if (section) 1156 return -ERANGE; 1157 1158 oobregion->offset = denali->oob_skip_bytes; 1159 oobregion->length = chip->ecc.total; 1160 1161 return 0; 1162 } 1163 1164 static int denali_ooblayout_free(struct mtd_info *mtd, int section, 1165 struct mtd_oob_region *oobregion) 1166 { 1167 struct denali_nand_info *denali = mtd_to_denali(mtd); 1168 struct nand_chip *chip = mtd_to_nand(mtd); 1169 1170 if (section) 1171 return -ERANGE; 1172 1173 oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; 1174 oobregion->length = mtd->oobsize - oobregion->offset; 1175 1176 return 0; 1177 } 1178 1179 static const struct mtd_ooblayout_ops denali_ooblayout_ops = { 1180 .ecc = denali_ooblayout_ecc, 1181 .free = denali_ooblayout_free, 1182 }; 1183 1184 static int denali_multidev_fixup(struct denali_nand_info *denali) 1185 { 1186 struct nand_chip *chip = &denali->nand; 1187 struct mtd_info *mtd = nand_to_mtd(chip); 1188 1189 /* 1190 * Support for multi device: 1191 * When the IP configuration is x16 capable and two x8 chips are 1192 * connected in parallel, DEVICES_CONNECTED should be set to 2. 1193 * In this case, the core framework knows nothing about this fact, 1194 * so we should tell it the _logical_ pagesize and anything necessary. 1195 */ 1196 denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); 1197 1198 /* 1199 * On some SoCs, DEVICES_CONNECTED is not auto-detected. 1200 * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 1201 */ 1202 if (denali->devs_per_cs == 0) { 1203 denali->devs_per_cs = 1; 1204 iowrite32(1, denali->reg + DEVICES_CONNECTED); 1205 } 1206 1207 if (denali->devs_per_cs == 1) 1208 return 0; 1209 1210 if (denali->devs_per_cs != 2) { 1211 dev_err(denali->dev, "unsupported number of devices %d\n", 1212 denali->devs_per_cs); 1213 return -EINVAL; 1214 } 1215 1216 /* 2 chips in parallel */ 1217 mtd->size <<= 1; 1218 mtd->erasesize <<= 1; 1219 mtd->writesize <<= 1; 1220 mtd->oobsize <<= 1; 1221 chip->chipsize <<= 1; 1222 chip->page_shift += 1; 1223 chip->phys_erase_shift += 1; 1224 chip->bbt_erase_shift += 1; 1225 chip->chip_shift += 1; 1226 chip->pagemask <<= 1; 1227 chip->ecc.size <<= 1; 1228 chip->ecc.bytes <<= 1; 1229 chip->ecc.strength <<= 1; 1230 denali->oob_skip_bytes <<= 1; 1231 1232 return 0; 1233 } 1234 1235 int denali_init(struct denali_nand_info *denali) 1236 { 1237 struct nand_chip *chip = &denali->nand; 1238 struct mtd_info *mtd = nand_to_mtd(chip); 1239 u32 features = ioread32(denali->reg + FEATURES); 1240 int ret; 1241 1242 denali_hw_init(denali); 1243 1244 denali_clear_irq_all(denali); 1245 1246 denali_reset_banks(denali); 1247 1248 denali->active_bank = DENALI_INVALID_BANK; 1249 1250 chip->flash_node = dev_of_offset(denali->dev); 1251 /* Fallback to the default name if DT did not give "label" property */ 1252 if (!mtd->name) 1253 mtd->name = "denali-nand"; 1254 1255 chip->select_chip = denali_select_chip; 1256 chip->read_byte = denali_read_byte; 1257 chip->write_byte = denali_write_byte; 1258 chip->read_word = denali_read_word; 1259 chip->cmd_ctrl = denali_cmd_ctrl; 1260 chip->dev_ready = denali_dev_ready; 1261 chip->waitfunc = denali_waitfunc; 1262 1263 if (features & FEATURES__INDEX_ADDR) { 1264 denali->host_read = denali_indexed_read; 1265 denali->host_write = denali_indexed_write; 1266 } else { 1267 denali->host_read = denali_direct_read; 1268 denali->host_write = denali_direct_write; 1269 } 1270 1271 /* clk rate info is needed for setup_data_interface */ 1272 if (denali->clk_x_rate) 1273 chip->setup_data_interface = denali_setup_data_interface; 1274 1275 ret = nand_scan_ident(mtd, denali->max_banks, NULL); 1276 if (ret) 1277 return ret; 1278 1279 if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) 1280 denali->dma_avail = 1; 1281 1282 if (denali->dma_avail) { 1283 chip->buf_align = ARCH_DMA_MINALIGN; 1284 if (denali->caps & DENALI_CAP_DMA_64BIT) 1285 denali->setup_dma = denali_setup_dma64; 1286 else 1287 denali->setup_dma = denali_setup_dma32; 1288 } else { 1289 chip->buf_align = 4; 1290 } 1291 1292 chip->options |= NAND_USE_BOUNCE_BUFFER; 1293 chip->bbt_options |= NAND_BBT_USE_FLASH; 1294 chip->bbt_options |= NAND_BBT_NO_OOB; 1295 denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; 1296 1297 /* no subpage writes on denali */ 1298 chip->options |= NAND_NO_SUBPAGE_WRITE; 1299 1300 ret = denali_ecc_setup(mtd, chip, denali); 1301 if (ret) { 1302 dev_err(denali->dev, "Failed to setup ECC settings.\n"); 1303 return ret; 1304 } 1305 1306 dev_dbg(denali->dev, 1307 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", 1308 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); 1309 1310 iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | 1311 FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), 1312 denali->reg + ECC_CORRECTION); 1313 iowrite32(mtd->erasesize / mtd->writesize, 1314 denali->reg + PAGES_PER_BLOCK); 1315 iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, 1316 denali->reg + DEVICE_WIDTH); 1317 iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, 1318 denali->reg + TWO_ROW_ADDR_CYCLES); 1319 iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); 1320 iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); 1321 1322 iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); 1323 iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); 1324 /* chip->ecc.steps is set by nand_scan_tail(); not available here */ 1325 iowrite32(mtd->writesize / chip->ecc.size, 1326 denali->reg + CFG_NUM_DATA_BLOCKS); 1327 1328 mtd_set_ooblayout(mtd, &denali_ooblayout_ops); 1329 1330 nand_oob.eccbytes = denali->nand.ecc.bytes; 1331 denali->nand.ecc.layout = &nand_oob; 1332 1333 if (chip->options & NAND_BUSWIDTH_16) { 1334 chip->read_buf = denali_read_buf16; 1335 chip->write_buf = denali_write_buf16; 1336 } else { 1337 chip->read_buf = denali_read_buf; 1338 chip->write_buf = denali_write_buf; 1339 } 1340 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; 1341 chip->ecc.read_page = denali_read_page; 1342 chip->ecc.read_page_raw = denali_read_page_raw; 1343 chip->ecc.write_page = denali_write_page; 1344 chip->ecc.write_page_raw = denali_write_page_raw; 1345 chip->ecc.read_oob = denali_read_oob; 1346 chip->ecc.write_oob = denali_write_oob; 1347 chip->erase = denali_erase; 1348 1349 ret = denali_multidev_fixup(denali); 1350 if (ret) 1351 return ret; 1352 1353 /* 1354 * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not 1355 * use devm_kmalloc() because the memory allocated by devm_ does not 1356 * guarantee DMA-safe alignment. 1357 */ 1358 denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); 1359 if (!denali->buf) 1360 return -ENOMEM; 1361 1362 ret = nand_scan_tail(mtd); 1363 if (ret) 1364 goto free_buf; 1365 1366 ret = nand_register(0, mtd); 1367 if (ret) { 1368 dev_err(denali->dev, "Failed to register MTD: %d\n", ret); 1369 goto free_buf; 1370 } 1371 return 0; 1372 1373 free_buf: 1374 kfree(denali->buf); 1375 1376 return ret; 1377 } 1378