// SPDX-License-Identifier: GPL-2.0
/*
 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
 *
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 *
 * Synced from Linux v4.19
 */

#include <common.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/math64.h>
#include <linux/sizes.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/spi-nor.h>
#include <spi-mem.h>
#include <spi.h>

#include "sf_internal.h"

/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */

#define HZ					CONFIG_SYS_HZ

#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)

static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op *op,
				  void *buf)
{
	if (op->data.dir == SPI_MEM_DATA_IN)
		op->data.buf.in = buf;
	else
		op->data.buf.out = buf;
	return spi_mem_exec_op(nor->spi, op);
}

static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(len, NULL, 1));
	int ret;

	ret = spi_nor_read_write_reg(nor, &op, val);
	if (ret < 0)
		dev_dbg(nor->dev, "error %d reading %x\n", ret, code);

	return ret;
}

static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_OUT(len, NULL, 1));

	return spi_nor_read_write_reg(nor, &op, buf);
}

static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
				 u_char *buf)
{
	struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
				   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
				   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
				   SPI_MEM_OP_DATA_IN(len, buf, 1));
	size_t remaining = len;
	int ret;

	/* get transfer protocols. */
	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
	op.dummy.buswidth = op.addr.buswidth;
	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);

	/* convert the dummy cycles to the number of bytes */
	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;

	while (remaining) {
		op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
		ret = spi_mem_adjust_op_size(nor->spi, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(nor->spi, &op);
		if (ret)
			return ret;

		op.addr.val += op.data.nbytes;
		remaining -= op.data.nbytes;
		op.data.buf.in += op.data.nbytes;
	}

	return len;
}

static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
				  const u_char *buf)
{
	struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
				   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_OUT(len, buf, 1));
	int ret;

	/* get transfer protocols.
*/ 122 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto); 123 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto); 124 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto); 125 126 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) 127 op.addr.nbytes = 0; 128 129 ret = spi_mem_adjust_op_size(nor->spi, &op); 130 if (ret) 131 return ret; 132 op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes; 133 134 ret = spi_mem_exec_op(nor->spi, &op); 135 if (ret) 136 return ret; 137 138 return op.data.nbytes; 139 } 140 141 /* 142 * Read the status register, returning its value in the location 143 * Return the status register value. 144 * Returns negative if error occurred. 145 */ 146 static int read_sr(struct spi_nor *nor) 147 { 148 int ret; 149 u8 val; 150 151 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1); 152 if (ret < 0) { 153 pr_debug("error %d reading SR\n", (int)ret); 154 return ret; 155 } 156 157 return val; 158 } 159 160 /* 161 * Read the flag status register, returning its value in the location 162 * Return the status register value. 163 * Returns negative if error occurred. 164 */ 165 static int read_fsr(struct spi_nor *nor) 166 { 167 int ret; 168 u8 val; 169 170 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1); 171 if (ret < 0) { 172 pr_debug("error %d reading FSR\n", ret); 173 return ret; 174 } 175 176 return val; 177 } 178 179 /* 180 * Read configuration register, returning its value in the 181 * location. Return the configuration register value. 182 * Returns negative if error occurred. 183 */ 184 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) 185 static int read_cr(struct spi_nor *nor) 186 { 187 int ret; 188 u8 val; 189 190 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1); 191 if (ret < 0) { 192 dev_dbg(nor->dev, "error %d reading CR\n", ret); 193 return ret; 194 } 195 196 return val; 197 } 198 #endif 199 200 /* 201 * Write status register 1 byte 202 * Returns negative if error occurred. 203 */ 204 static int write_sr(struct spi_nor *nor, u8 val) 205 { 206 nor->cmd_buf[0] = val; 207 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1); 208 } 209 210 /* 211 * Set write enable latch with Write Enable command. 212 * Returns negative if error occurred. 213 */ 214 static int write_enable(struct spi_nor *nor) 215 { 216 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); 217 } 218 219 /* 220 * Send write disable instruction to the chip. 221 */ 222 static int write_disable(struct spi_nor *nor) 223 { 224 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0); 225 } 226 227 static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) 228 { 229 return mtd->priv; 230 } 231 232 #ifndef CONFIG_SPI_FLASH_BAR 233 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size) 234 { 235 size_t i; 236 237 for (i = 0; i < size; i++) 238 if (table[i][0] == opcode) 239 return table[i][1]; 240 241 /* No conversion found, keep input op code. 
*/ 242 return opcode; 243 } 244 245 static u8 spi_nor_convert_3to4_read(u8 opcode) 246 { 247 static const u8 spi_nor_3to4_read[][2] = { 248 { SPINOR_OP_READ, SPINOR_OP_READ_4B }, 249 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B }, 250 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B }, 251 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, 252 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, 253 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, 254 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B }, 255 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B }, 256 257 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, 258 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, 259 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, 260 }; 261 262 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, 263 ARRAY_SIZE(spi_nor_3to4_read)); 264 } 265 266 static u8 spi_nor_convert_3to4_program(u8 opcode) 267 { 268 static const u8 spi_nor_3to4_program[][2] = { 269 { SPINOR_OP_PP, SPINOR_OP_PP_4B }, 270 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B }, 271 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B }, 272 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B }, 273 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B }, 274 }; 275 276 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program, 277 ARRAY_SIZE(spi_nor_3to4_program)); 278 } 279 280 static u8 spi_nor_convert_3to4_erase(u8 opcode) 281 { 282 static const u8 spi_nor_3to4_erase[][2] = { 283 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B }, 284 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B }, 285 { SPINOR_OP_SE, SPINOR_OP_SE_4B }, 286 }; 287 288 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase, 289 ARRAY_SIZE(spi_nor_3to4_erase)); 290 } 291 292 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor, 293 const struct flash_info *info) 294 { 295 /* Do some manufacturer fixups first */ 296 switch (JEDEC_MFR(info)) { 297 case SNOR_MFR_SPANSION: 298 /* No small sector erase for 4-byte command set */ 299 nor->erase_opcode = SPINOR_OP_SE; 300 nor->mtd.erasesize = info->sector_size; 301 break; 302 303 default: 304 break; 305 } 306 307 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode); 308 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode); 309 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode); 310 } 311 #endif /* !CONFIG_SPI_FLASH_BAR */ 312 313 /* Enable/disable 4-byte addressing mode. */ 314 static int set_4byte(struct spi_nor *nor, const struct flash_info *info, 315 int enable) 316 { 317 int status; 318 bool need_wren = false; 319 u8 cmd; 320 321 switch (JEDEC_MFR(info)) { 322 case SNOR_MFR_ST: 323 case SNOR_MFR_MICRON: 324 /* Some Micron need WREN command; all will accept it */ 325 need_wren = true; 326 case SNOR_MFR_MACRONIX: 327 case SNOR_MFR_WINBOND: 328 if (need_wren) 329 write_enable(nor); 330 331 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B; 332 status = nor->write_reg(nor, cmd, NULL, 0); 333 if (need_wren) 334 write_disable(nor); 335 336 if (!status && !enable && 337 JEDEC_MFR(info) == SNOR_MFR_WINBOND) { 338 /* 339 * On Winbond W25Q256FV, leaving 4byte mode causes 340 * the Extended Address Register to be set to 1, so all 341 * 3-byte-address reads come from the second 16M. 342 * We must clear the register to enable normal behavior. 
343 */ 344 write_enable(nor); 345 nor->cmd_buf[0] = 0; 346 nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1); 347 write_disable(nor); 348 } 349 350 return status; 351 default: 352 /* Spansion style */ 353 nor->cmd_buf[0] = enable << 7; 354 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1); 355 } 356 } 357 358 static int spi_nor_sr_ready(struct spi_nor *nor) 359 { 360 int sr = read_sr(nor); 361 362 if (sr < 0) 363 return sr; 364 365 if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) { 366 if (sr & SR_E_ERR) 367 dev_dbg(nor->dev, "Erase Error occurred\n"); 368 else 369 dev_dbg(nor->dev, "Programming Error occurred\n"); 370 371 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0); 372 return -EIO; 373 } 374 375 return !(sr & SR_WIP); 376 } 377 378 static int spi_nor_fsr_ready(struct spi_nor *nor) 379 { 380 int fsr = read_fsr(nor); 381 382 if (fsr < 0) 383 return fsr; 384 385 if (fsr & (FSR_E_ERR | FSR_P_ERR)) { 386 if (fsr & FSR_E_ERR) 387 dev_err(nor->dev, "Erase operation failed.\n"); 388 else 389 dev_err(nor->dev, "Program operation failed.\n"); 390 391 if (fsr & FSR_PT_ERR) 392 dev_err(nor->dev, 393 "Attempted to modify a protected sector.\n"); 394 395 nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0); 396 return -EIO; 397 } 398 399 return fsr & FSR_READY; 400 } 401 402 static int spi_nor_ready(struct spi_nor *nor) 403 { 404 int sr, fsr; 405 406 sr = spi_nor_sr_ready(nor); 407 if (sr < 0) 408 return sr; 409 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1; 410 if (fsr < 0) 411 return fsr; 412 return sr && fsr; 413 } 414 415 /* 416 * Service routine to read status register until ready, or timeout occurs. 417 * Returns non-zero if error. 418 */ 419 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, 420 unsigned long timeout) 421 { 422 unsigned long timebase; 423 int ret; 424 425 timebase = get_timer(0); 426 427 while (get_timer(timebase) < timeout) { 428 ret = spi_nor_ready(nor); 429 if (ret < 0) 430 return ret; 431 if (ret) 432 return 0; 433 } 434 435 dev_err(nor->dev, "flash operation timed out\n"); 436 437 return -ETIMEDOUT; 438 } 439 440 static int spi_nor_wait_till_ready(struct spi_nor *nor) 441 { 442 return spi_nor_wait_till_ready_with_timeout(nor, 443 DEFAULT_READY_WAIT_JIFFIES); 444 } 445 446 #ifdef CONFIG_SPI_FLASH_BAR 447 /* 448 * This "clean_bar" is necessary in a situation when one was accessing 449 * spi flash memory > 16 MiB by using Bank Address Register's BA24 bit. 450 * 451 * After it the BA24 bit shall be cleared to allow access to correct 452 * memory region after SW reset (by calling "reset" command). 453 * 454 * Otherwise, the BA24 bit may be left set and then after reset, the 455 * ROM would read/write/erase SPL from 16 MiB * bank_sel address. 
456 */ 457 static int clean_bar(struct spi_nor *nor) 458 { 459 u8 cmd, bank_sel = 0; 460 461 if (nor->bank_curr == 0) 462 return 0; 463 cmd = nor->bank_write_cmd; 464 nor->bank_curr = 0; 465 write_enable(nor); 466 467 return nor->write_reg(nor, cmd, &bank_sel, 1); 468 } 469 470 static int write_bar(struct spi_nor *nor, u32 offset) 471 { 472 u8 cmd, bank_sel; 473 int ret; 474 475 bank_sel = offset / SZ_16M; 476 if (bank_sel == nor->bank_curr) 477 goto bar_end; 478 479 cmd = nor->bank_write_cmd; 480 write_enable(nor); 481 ret = nor->write_reg(nor, cmd, &bank_sel, 1); 482 if (ret < 0) { 483 debug("SF: fail to write bank register\n"); 484 return ret; 485 } 486 487 bar_end: 488 nor->bank_curr = bank_sel; 489 return nor->bank_curr; 490 } 491 492 static int read_bar(struct spi_nor *nor, const struct flash_info *info) 493 { 494 u8 curr_bank = 0; 495 int ret; 496 497 switch (JEDEC_MFR(info)) { 498 case SNOR_MFR_SPANSION: 499 nor->bank_read_cmd = SPINOR_OP_BRRD; 500 nor->bank_write_cmd = SPINOR_OP_BRWR; 501 break; 502 default: 503 nor->bank_read_cmd = SPINOR_OP_RDEAR; 504 nor->bank_write_cmd = SPINOR_OP_WREAR; 505 } 506 507 ret = nor->read_reg(nor, nor->bank_read_cmd, 508 &curr_bank, 1); 509 if (ret) { 510 debug("SF: fail to read bank addr register\n"); 511 return ret; 512 } 513 nor->bank_curr = curr_bank; 514 515 return 0; 516 } 517 #endif 518 519 /* 520 * Initiate the erasure of a single sector 521 */ 522 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr) 523 { 524 struct spi_mem_op op = 525 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1), 526 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1), 527 SPI_MEM_OP_NO_DUMMY, 528 SPI_MEM_OP_NO_DATA); 529 530 if (nor->erase) 531 return nor->erase(nor, addr); 532 533 /* 534 * Default implementation, if driver doesn't have a specialized HW 535 * control 536 */ 537 return spi_mem_exec_op(nor->spi, &op); 538 } 539 540 /* 541 * Erase an address range on the nor chip. The address range may extend 542 * one or more erase sectors. Return an error is there is a problem erasing. 543 */ 544 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) 545 { 546 struct spi_nor *nor = mtd_to_spi_nor(mtd); 547 u32 addr, len, rem; 548 int ret; 549 550 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, 551 (long long)instr->len); 552 553 div_u64_rem(instr->len, mtd->erasesize, &rem); 554 if (rem) 555 return -EINVAL; 556 557 addr = instr->addr; 558 len = instr->len; 559 560 while (len) { 561 #ifdef CONFIG_SPI_FLASH_BAR 562 ret = write_bar(nor, addr); 563 if (ret < 0) 564 return ret; 565 #endif 566 write_enable(nor); 567 568 ret = spi_nor_erase_sector(nor, addr); 569 if (ret) 570 goto erase_err; 571 572 addr += mtd->erasesize; 573 len -= mtd->erasesize; 574 575 ret = spi_nor_wait_till_ready(nor); 576 if (ret) 577 goto erase_err; 578 } 579 580 erase_err: 581 #ifdef CONFIG_SPI_FLASH_BAR 582 ret = clean_bar(nor); 583 #endif 584 write_disable(nor); 585 586 return ret; 587 } 588 589 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST) 590 /* Write status register and ensure bits in mask match written values */ 591 static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask) 592 { 593 int ret; 594 595 write_enable(nor); 596 ret = write_sr(nor, status_new); 597 if (ret) 598 return ret; 599 600 ret = spi_nor_wait_till_ready(nor); 601 if (ret) 602 return ret; 603 604 ret = read_sr(nor); 605 if (ret < 0) 606 return ret; 607 608 return ((ret & mask) != (status_new & mask)) ? 
-EIO : 0; 609 } 610 611 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, 612 uint64_t *len) 613 { 614 struct mtd_info *mtd = &nor->mtd; 615 u8 mask = SR_BP2 | SR_BP1 | SR_BP0; 616 int shift = ffs(mask) - 1; 617 int pow; 618 619 if (!(sr & mask)) { 620 /* No protection */ 621 *ofs = 0; 622 *len = 0; 623 } else { 624 pow = ((sr & mask) ^ mask) >> shift; 625 *len = mtd->size >> pow; 626 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB) 627 *ofs = 0; 628 else 629 *ofs = mtd->size - *len; 630 } 631 } 632 633 /* 634 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if 635 * @locked is false); 0 otherwise 636 */ 637 static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len, 638 u8 sr, bool locked) 639 { 640 loff_t lock_offs; 641 uint64_t lock_len; 642 643 if (!len) 644 return 1; 645 646 stm_get_locked_range(nor, sr, &lock_offs, &lock_len); 647 648 if (locked) 649 /* Requested range is a sub-range of locked range */ 650 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); 651 else 652 /* Requested range does not overlap with locked range */ 653 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs); 654 } 655 656 static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, 657 u8 sr) 658 { 659 return stm_check_lock_status_sr(nor, ofs, len, sr, true); 660 } 661 662 static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, 663 u8 sr) 664 { 665 return stm_check_lock_status_sr(nor, ofs, len, sr, false); 666 } 667 668 /* 669 * Lock a region of the flash. Compatible with ST Micro and similar flash. 670 * Supports the block protection bits BP{0,1,2} in the status register 671 * (SR). Does not support these features found in newer SR bitfields: 672 * - SEC: sector/block protect - only handle SEC=0 (block protect) 673 * - CMP: complement protect - only support CMP=0 (range is not complemented) 674 * 675 * Support for the following is provided conditionally for some flash: 676 * - TB: top/bottom protect 677 * 678 * Sample table portion for 8MB flash (Winbond w25q64fw): 679 * 680 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion 681 * -------------------------------------------------------------------------- 682 * X | X | 0 | 0 | 0 | NONE | NONE 683 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64 684 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32 685 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16 686 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8 687 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 688 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 689 * X | X | 1 | 1 | 1 | 8 MB | ALL 690 * ------|-------|-------|-------|-------|---------------|------------------- 691 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64 692 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32 693 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16 694 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8 695 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4 696 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2 697 * 698 * Returns negative on errors, 0 on success. 
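 *
 * Worked example (illustrative, using the 8 MB table above): locking the
 * upper 2 MB gives lock_len = 2 MB, so
 *
 *	pow = ilog2(8 MB) - ilog2(2 MB) = 23 - 21 = 2
 *
 * and the BP field becomes 0b111 - pow = 0b101 (BP2 = 1, BP0 = 1), which is
 * the "2 MB / Upper 1/4" row.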
699 */ 700 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) 701 { 702 struct mtd_info *mtd = &nor->mtd; 703 int status_old, status_new; 704 u8 mask = SR_BP2 | SR_BP1 | SR_BP0; 705 u8 shift = ffs(mask) - 1, pow, val; 706 loff_t lock_len; 707 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; 708 bool use_top; 709 710 status_old = read_sr(nor); 711 if (status_old < 0) 712 return status_old; 713 714 /* If nothing in our range is unlocked, we don't need to do anything */ 715 if (stm_is_locked_sr(nor, ofs, len, status_old)) 716 return 0; 717 718 /* If anything below us is unlocked, we can't use 'bottom' protection */ 719 if (!stm_is_locked_sr(nor, 0, ofs, status_old)) 720 can_be_bottom = false; 721 722 /* If anything above us is unlocked, we can't use 'top' protection */ 723 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len), 724 status_old)) 725 can_be_top = false; 726 727 if (!can_be_bottom && !can_be_top) 728 return -EINVAL; 729 730 /* Prefer top, if both are valid */ 731 use_top = can_be_top; 732 733 /* lock_len: length of region that should end up locked */ 734 if (use_top) 735 lock_len = mtd->size - ofs; 736 else 737 lock_len = ofs + len; 738 739 /* 740 * Need smallest pow such that: 741 * 742 * 1 / (2^pow) <= (len / size) 743 * 744 * so (assuming power-of-2 size) we do: 745 * 746 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) 747 */ 748 pow = ilog2(mtd->size) - ilog2(lock_len); 749 val = mask - (pow << shift); 750 if (val & ~mask) 751 return -EINVAL; 752 /* Don't "lock" with no region! */ 753 if (!(val & mask)) 754 return -EINVAL; 755 756 status_new = (status_old & ~mask & ~SR_TB) | val; 757 758 /* Disallow further writes if WP pin is asserted */ 759 status_new |= SR_SRWD; 760 761 if (!use_top) 762 status_new |= SR_TB; 763 764 /* Don't bother if they're the same */ 765 if (status_new == status_old) 766 return 0; 767 768 /* Only modify protection if it will not unlock other areas */ 769 if ((status_new & mask) < (status_old & mask)) 770 return -EINVAL; 771 772 return write_sr_and_check(nor, status_new, mask); 773 } 774 775 /* 776 * Unlock a region of the flash. See stm_lock() for more info 777 * 778 * Returns negative on errors, 0 on success. 
779 */ 780 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) 781 { 782 struct mtd_info *mtd = &nor->mtd; 783 int status_old, status_new; 784 u8 mask = SR_BP2 | SR_BP1 | SR_BP0; 785 u8 shift = ffs(mask) - 1, pow, val; 786 loff_t lock_len; 787 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB; 788 bool use_top; 789 790 status_old = read_sr(nor); 791 if (status_old < 0) 792 return status_old; 793 794 /* If nothing in our range is locked, we don't need to do anything */ 795 if (stm_is_unlocked_sr(nor, ofs, len, status_old)) 796 return 0; 797 798 /* If anything below us is locked, we can't use 'top' protection */ 799 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old)) 800 can_be_top = false; 801 802 /* If anything above us is locked, we can't use 'bottom' protection */ 803 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len), 804 status_old)) 805 can_be_bottom = false; 806 807 if (!can_be_bottom && !can_be_top) 808 return -EINVAL; 809 810 /* Prefer top, if both are valid */ 811 use_top = can_be_top; 812 813 /* lock_len: length of region that should remain locked */ 814 if (use_top) 815 lock_len = mtd->size - (ofs + len); 816 else 817 lock_len = ofs; 818 819 /* 820 * Need largest pow such that: 821 * 822 * 1 / (2^pow) >= (len / size) 823 * 824 * so (assuming power-of-2 size) we do: 825 * 826 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len)) 827 */ 828 pow = ilog2(mtd->size) - order_base_2(lock_len); 829 if (lock_len == 0) { 830 val = 0; /* fully unlocked */ 831 } else { 832 val = mask - (pow << shift); 833 /* Some power-of-two sizes are not supported */ 834 if (val & ~mask) 835 return -EINVAL; 836 } 837 838 status_new = (status_old & ~mask & ~SR_TB) | val; 839 840 /* Don't protect status register if we're fully unlocked */ 841 if (lock_len == 0) 842 status_new &= ~SR_SRWD; 843 844 if (!use_top) 845 status_new |= SR_TB; 846 847 /* Don't bother if they're the same */ 848 if (status_new == status_old) 849 return 0; 850 851 /* Only modify protection if it will not lock other areas */ 852 if ((status_new & mask) > (status_old & mask)) 853 return -EINVAL; 854 855 return write_sr_and_check(nor, status_new, mask); 856 } 857 858 /* 859 * Check if a region of the flash is (completely) locked. See stm_lock() for 860 * more info. 861 * 862 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and 863 * negative on errors. 
864 */ 865 static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) 866 { 867 int status; 868 869 status = read_sr(nor); 870 if (status < 0) 871 return status; 872 873 return stm_is_locked_sr(nor, ofs, len, status); 874 } 875 #endif /* CONFIG_SPI_FLASH_STMICRO */ 876 877 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) 878 { 879 int tmp; 880 u8 id[SPI_NOR_MAX_ID_LEN]; 881 const struct flash_info *info; 882 883 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); 884 if (tmp < 0) { 885 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp); 886 return ERR_PTR(tmp); 887 } 888 889 info = spi_nor_ids; 890 for (; info->name; info++) { 891 if (info->id_len) { 892 if (!memcmp(info->id, id, info->id_len)) 893 return info; 894 } 895 } 896 897 dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n", 898 id[0], id[1], id[2]); 899 return ERR_PTR(-ENODEV); 900 } 901 902 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, 903 size_t *retlen, u_char *buf) 904 { 905 struct spi_nor *nor = mtd_to_spi_nor(mtd); 906 int ret; 907 908 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); 909 910 while (len) { 911 loff_t addr = from; 912 size_t read_len = len; 913 914 #ifdef CONFIG_SPI_FLASH_BAR 915 u32 remain_len; 916 917 ret = write_bar(nor, addr); 918 if (ret < 0) 919 return log_ret(ret); 920 remain_len = (SZ_16M * (nor->bank_curr + 1)) - addr; 921 922 if (len < remain_len) 923 read_len = len; 924 else 925 read_len = remain_len; 926 #endif 927 928 ret = nor->read(nor, addr, read_len, buf); 929 if (ret == 0) { 930 /* We shouldn't see 0-length reads */ 931 ret = -EIO; 932 goto read_err; 933 } 934 if (ret < 0) 935 goto read_err; 936 937 *retlen += ret; 938 buf += ret; 939 from += ret; 940 len -= ret; 941 } 942 ret = 0; 943 944 read_err: 945 #ifdef CONFIG_SPI_FLASH_BAR 946 ret = clean_bar(nor); 947 #endif 948 return ret; 949 } 950 951 #ifdef CONFIG_SPI_FLASH_SST 952 /* 953 * sst26 flash series has its own block protection implementation: 954 * 4x - 8 KByte blocks - read & write protection bits - upper addresses 955 * 1x - 32 KByte blocks - write protection bits 956 * rest - 64 KByte blocks - write protection bits 957 * 1x - 32 KByte blocks - write protection bits 958 * 4x - 8 KByte blocks - read & write protection bits - lower addresses 959 * 960 * We'll support only per 64k lock/unlock so lower and upper 64 KByte region 961 * will be treated as single block. 
962 */ 963 #define SST26_BPR_8K_NUM 4 964 #define SST26_MAX_BPR_REG_LEN (18 + 1) 965 #define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K) 966 967 enum lock_ctl { 968 SST26_CTL_LOCK, 969 SST26_CTL_UNLOCK, 970 SST26_CTL_CHECK 971 }; 972 973 static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl) 974 { 975 switch (ctl) { 976 case SST26_CTL_LOCK: 977 cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8); 978 break; 979 case SST26_CTL_UNLOCK: 980 cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8); 981 break; 982 case SST26_CTL_CHECK: 983 return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8)); 984 } 985 986 return false; 987 } 988 989 /* 990 * Lock, unlock or check lock status of the flash region of the flash (depending 991 * on the lock_ctl value) 992 */ 993 static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl) 994 { 995 struct mtd_info *mtd = &nor->mtd; 996 u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size; 997 bool lower_64k = false, upper_64k = false; 998 u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {}; 999 int ret; 1000 1001 /* Check length and offset for 64k alignment */ 1002 if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) { 1003 dev_err(nor->dev, "length or offset is not 64KiB allighned\n"); 1004 return -EINVAL; 1005 } 1006 1007 if (ofs + len > mtd->size) { 1008 dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n", 1009 ofs, len, mtd->size); 1010 return -EINVAL; 1011 } 1012 1013 /* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit IC */ 1014 if (mtd->size != SZ_2M && 1015 mtd->size != SZ_4M && 1016 mtd->size != SZ_8M) 1017 return -EINVAL; 1018 1019 bpr_size = 2 + (mtd->size / SZ_64K / 8); 1020 1021 ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size); 1022 if (ret < 0) { 1023 dev_err(nor->dev, "fail to read block-protection register\n"); 1024 return ret; 1025 } 1026 1027 rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE); 1028 lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE); 1029 1030 upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE)); 1031 lower_64k = (ofs < SST26_BOUND_REG_SIZE); 1032 1033 /* Lower bits in block-protection register are about 64k region */ 1034 bpr_ptr = lptr_64k / SZ_64K - 1; 1035 1036 /* Process 64K blocks region */ 1037 while (lptr_64k < rptr_64k) { 1038 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl)) 1039 return EACCES; 1040 1041 bpr_ptr++; 1042 lptr_64k += SZ_64K; 1043 } 1044 1045 /* 32K and 8K region bits in BPR are after 64k region bits */ 1046 bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K; 1047 1048 /* Process lower 32K block region */ 1049 if (lower_64k) 1050 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl)) 1051 return EACCES; 1052 1053 bpr_ptr++; 1054 1055 /* Process upper 32K block region */ 1056 if (upper_64k) 1057 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl)) 1058 return EACCES; 1059 1060 bpr_ptr++; 1061 1062 /* Process lower 8K block regions */ 1063 for (i = 0; i < SST26_BPR_8K_NUM; i++) { 1064 if (lower_64k) 1065 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl)) 1066 return EACCES; 1067 1068 /* In 8K area BPR has both read and write protection bits */ 1069 bpr_ptr += 2; 1070 } 1071 1072 /* Process upper 8K block regions */ 1073 for (i = 0; i < SST26_BPR_8K_NUM; i++) { 1074 if (upper_64k) 1075 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl)) 1076 return EACCES; 1077 1078 /* In 8K area BPR has both read and write protection bits */ 1079 bpr_ptr += 2; 1080 } 1081 1082 /* If we check 
region status we don't need to write BPR back */ 1083 if (ctl == SST26_CTL_CHECK) 1084 return 0; 1085 1086 ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size); 1087 if (ret < 0) { 1088 dev_err(nor->dev, "fail to write block-protection register\n"); 1089 return ret; 1090 } 1091 1092 return 0; 1093 } 1094 1095 static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) 1096 { 1097 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK); 1098 } 1099 1100 static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) 1101 { 1102 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK); 1103 } 1104 1105 /* 1106 * Returns EACCES (positive value) if region is locked, 0 if region is unlocked, 1107 * and negative on errors. 1108 */ 1109 static int sst26_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) 1110 { 1111 /* 1112 * is_locked function is used for check before reading or erasing flash 1113 * region, so offset and length might be not 64k allighned, so adjust 1114 * them to be 64k allighned as sst26_lock_ctl works only with 64k 1115 * allighned regions. 1116 */ 1117 ofs -= ofs & (SZ_64K - 1); 1118 len = len & (SZ_64K - 1) ? (len & ~(SZ_64K - 1)) + SZ_64K : len; 1119 1120 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK); 1121 } 1122 1123 static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len, 1124 size_t *retlen, const u_char *buf) 1125 { 1126 size_t actual; 1127 int ret = 0; 1128 1129 for (actual = 0; actual < len; actual++) { 1130 nor->program_opcode = SPINOR_OP_BP; 1131 1132 write_enable(nor); 1133 /* write one byte. */ 1134 ret = nor->write(nor, to, 1, buf + actual); 1135 if (ret < 0) 1136 goto sst_write_err; 1137 ret = spi_nor_wait_till_ready(nor); 1138 if (ret) 1139 goto sst_write_err; 1140 to++; 1141 } 1142 1143 sst_write_err: 1144 write_disable(nor); 1145 return ret; 1146 } 1147 1148 static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, 1149 size_t *retlen, const u_char *buf) 1150 { 1151 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1152 struct spi_slave *spi = nor->spi; 1153 size_t actual; 1154 int ret; 1155 1156 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); 1157 if (spi->mode & SPI_TX_BYTE) 1158 return sst_write_byteprogram(nor, to, len, retlen, buf); 1159 1160 write_enable(nor); 1161 1162 nor->sst_write_second = false; 1163 1164 actual = to % 2; 1165 /* Start write from odd address. */ 1166 if (actual) { 1167 nor->program_opcode = SPINOR_OP_BP; 1168 1169 /* write one byte. */ 1170 ret = nor->write(nor, to, 1, buf); 1171 if (ret < 0) 1172 goto sst_write_err; 1173 ret = spi_nor_wait_till_ready(nor); 1174 if (ret) 1175 goto sst_write_err; 1176 } 1177 to += actual; 1178 1179 /* Write out most of the data here. */ 1180 for (; actual < len - 1; actual += 2) { 1181 nor->program_opcode = SPINOR_OP_AAI_WP; 1182 1183 /* write two bytes. */ 1184 ret = nor->write(nor, to, 2, buf + actual); 1185 if (ret < 0) 1186 goto sst_write_err; 1187 ret = spi_nor_wait_till_ready(nor); 1188 if (ret) 1189 goto sst_write_err; 1190 to += 2; 1191 nor->sst_write_second = true; 1192 } 1193 nor->sst_write_second = false; 1194 1195 write_disable(nor); 1196 ret = spi_nor_wait_till_ready(nor); 1197 if (ret) 1198 goto sst_write_err; 1199 1200 /* Write out trailing byte if it exists. 
*/ 1201 if (actual != len) { 1202 write_enable(nor); 1203 1204 nor->program_opcode = SPINOR_OP_BP; 1205 ret = nor->write(nor, to, 1, buf + actual); 1206 if (ret < 0) 1207 goto sst_write_err; 1208 ret = spi_nor_wait_till_ready(nor); 1209 if (ret) 1210 goto sst_write_err; 1211 write_disable(nor); 1212 actual += 1; 1213 } 1214 sst_write_err: 1215 *retlen += actual; 1216 return ret; 1217 } 1218 #endif 1219 /* 1220 * Write an address range to the nor chip. Data must be written in 1221 * FLASH_PAGESIZE chunks. The address range may be any size provided 1222 * it is within the physical boundaries. 1223 */ 1224 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, 1225 size_t *retlen, const u_char *buf) 1226 { 1227 struct spi_nor *nor = mtd_to_spi_nor(mtd); 1228 size_t page_offset, page_remain, i; 1229 ssize_t ret; 1230 1231 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); 1232 1233 for (i = 0; i < len; ) { 1234 ssize_t written; 1235 loff_t addr = to + i; 1236 1237 /* 1238 * If page_size is a power of two, the offset can be quickly 1239 * calculated with an AND operation. On the other cases we 1240 * need to do a modulus operation (more expensive). 1241 * Power of two numbers have only one bit set and we can use 1242 * the instruction hweight32 to detect if we need to do a 1243 * modulus (do_div()) or not. 1244 */ 1245 if (hweight32(nor->page_size) == 1) { 1246 page_offset = addr & (nor->page_size - 1); 1247 } else { 1248 u64 aux = addr; 1249 1250 page_offset = do_div(aux, nor->page_size); 1251 } 1252 /* the size of data remaining on the first page */ 1253 page_remain = min_t(size_t, 1254 nor->page_size - page_offset, len - i); 1255 1256 #ifdef CONFIG_SPI_FLASH_BAR 1257 ret = write_bar(nor, addr); 1258 if (ret < 0) 1259 return ret; 1260 #endif 1261 write_enable(nor); 1262 ret = nor->write(nor, addr, page_remain, buf + i); 1263 if (ret < 0) 1264 goto write_err; 1265 written = ret; 1266 1267 ret = spi_nor_wait_till_ready(nor); 1268 if (ret) 1269 goto write_err; 1270 *retlen += written; 1271 i += written; 1272 } 1273 1274 write_err: 1275 #ifdef CONFIG_SPI_FLASH_BAR 1276 ret = clean_bar(nor); 1277 #endif 1278 return ret; 1279 } 1280 1281 #ifdef CONFIG_SPI_FLASH_MACRONIX 1282 /** 1283 * macronix_quad_enable() - set QE bit in Status Register. 1284 * @nor: pointer to a 'struct spi_nor' 1285 * 1286 * Set the Quad Enable (QE) bit in the Status Register. 1287 * 1288 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories. 1289 * 1290 * Return: 0 on success, -errno otherwise. 1291 */ 1292 static int macronix_quad_enable(struct spi_nor *nor) 1293 { 1294 int ret, val; 1295 1296 val = read_sr(nor); 1297 if (val < 0) 1298 return val; 1299 if (val & SR_QUAD_EN_MX) 1300 return 0; 1301 1302 write_enable(nor); 1303 1304 write_sr(nor, val | SR_QUAD_EN_MX); 1305 1306 ret = spi_nor_wait_till_ready(nor); 1307 if (ret) 1308 return ret; 1309 1310 ret = read_sr(nor); 1311 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { 1312 dev_err(nor->dev, "Macronix Quad bit not set\n"); 1313 return -EINVAL; 1314 } 1315 1316 return 0; 1317 } 1318 #endif 1319 1320 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) 1321 /* 1322 * Write status Register and configuration register with 2 bytes 1323 * The first byte will be written to the status register, while the 1324 * second byte will be written to the configuration register. 1325 * Return negative if error occurred. 
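 *
 * A minimal usage sketch (variable names here are illustrative only; see the
 * Quad Enable helpers below for the real callers):
 *
 *	u8 sr_cr[2] = { current_sr, current_cr | CR_QUAD_EN_SPAN };
 *
 *	ret = write_sr_cr(nor, sr_cr);
 *
 * i.e. a single WRSR (01h) transfer updates SR and CR together without
 * clobbering the other status bits.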
1326 */ 1327 static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr) 1328 { 1329 int ret; 1330 1331 write_enable(nor); 1332 1333 ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2); 1334 if (ret < 0) { 1335 dev_dbg(nor->dev, 1336 "error while writing configuration register\n"); 1337 return -EINVAL; 1338 } 1339 1340 ret = spi_nor_wait_till_ready(nor); 1341 if (ret) { 1342 dev_dbg(nor->dev, 1343 "timeout while writing configuration register\n"); 1344 return ret; 1345 } 1346 1347 return 0; 1348 } 1349 1350 /** 1351 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register. 1352 * @nor: pointer to a 'struct spi_nor' 1353 * 1354 * Set the Quad Enable (QE) bit in the Configuration Register. 1355 * This function should be used with QSPI memories supporting the Read 1356 * Configuration Register (35h) instruction. 1357 * 1358 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI 1359 * memories. 1360 * 1361 * Return: 0 on success, -errno otherwise. 1362 */ 1363 static int spansion_read_cr_quad_enable(struct spi_nor *nor) 1364 { 1365 u8 sr_cr[2]; 1366 int ret; 1367 1368 /* Check current Quad Enable bit value. */ 1369 ret = read_cr(nor); 1370 if (ret < 0) { 1371 dev_dbg(dev, "error while reading configuration register\n"); 1372 return -EINVAL; 1373 } 1374 1375 if (ret & CR_QUAD_EN_SPAN) 1376 return 0; 1377 1378 sr_cr[1] = ret | CR_QUAD_EN_SPAN; 1379 1380 /* Keep the current value of the Status Register. */ 1381 ret = read_sr(nor); 1382 if (ret < 0) { 1383 dev_dbg(dev, "error while reading status register\n"); 1384 return -EINVAL; 1385 } 1386 sr_cr[0] = ret; 1387 1388 ret = write_sr_cr(nor, sr_cr); 1389 if (ret) 1390 return ret; 1391 1392 /* Read back and check it. */ 1393 ret = read_cr(nor); 1394 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { 1395 dev_dbg(nor->dev, "Spansion Quad bit not set\n"); 1396 return -EINVAL; 1397 } 1398 1399 return 0; 1400 } 1401 1402 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT) 1403 /** 1404 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register. 1405 * @nor: pointer to a 'struct spi_nor' 1406 * 1407 * Set the Quad Enable (QE) bit in the Configuration Register. 1408 * This function should be used with QSPI memories not supporting the Read 1409 * Configuration Register (35h) instruction. 1410 * 1411 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI 1412 * memories. 1413 * 1414 * Return: 0 on success, -errno otherwise. 1415 */ 1416 static int spansion_no_read_cr_quad_enable(struct spi_nor *nor) 1417 { 1418 u8 sr_cr[2]; 1419 int ret; 1420 1421 /* Keep the current value of the Status Register. 
*/ 1422 ret = read_sr(nor); 1423 if (ret < 0) { 1424 dev_dbg(nor->dev, "error while reading status register\n"); 1425 return -EINVAL; 1426 } 1427 sr_cr[0] = ret; 1428 sr_cr[1] = CR_QUAD_EN_SPAN; 1429 1430 return write_sr_cr(nor, sr_cr); 1431 } 1432 1433 #endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */ 1434 #endif /* CONFIG_SPI_FLASH_SPANSION */ 1435 1436 struct spi_nor_read_command { 1437 u8 num_mode_clocks; 1438 u8 num_wait_states; 1439 u8 opcode; 1440 enum spi_nor_protocol proto; 1441 }; 1442 1443 struct spi_nor_pp_command { 1444 u8 opcode; 1445 enum spi_nor_protocol proto; 1446 }; 1447 1448 enum spi_nor_read_command_index { 1449 SNOR_CMD_READ, 1450 SNOR_CMD_READ_FAST, 1451 SNOR_CMD_READ_1_1_1_DTR, 1452 1453 /* Dual SPI */ 1454 SNOR_CMD_READ_1_1_2, 1455 SNOR_CMD_READ_1_2_2, 1456 SNOR_CMD_READ_2_2_2, 1457 SNOR_CMD_READ_1_2_2_DTR, 1458 1459 /* Quad SPI */ 1460 SNOR_CMD_READ_1_1_4, 1461 SNOR_CMD_READ_1_4_4, 1462 SNOR_CMD_READ_4_4_4, 1463 SNOR_CMD_READ_1_4_4_DTR, 1464 1465 /* Octo SPI */ 1466 SNOR_CMD_READ_1_1_8, 1467 SNOR_CMD_READ_1_8_8, 1468 SNOR_CMD_READ_8_8_8, 1469 SNOR_CMD_READ_1_8_8_DTR, 1470 1471 SNOR_CMD_READ_MAX 1472 }; 1473 1474 enum spi_nor_pp_command_index { 1475 SNOR_CMD_PP, 1476 1477 /* Quad SPI */ 1478 SNOR_CMD_PP_1_1_4, 1479 SNOR_CMD_PP_1_4_4, 1480 SNOR_CMD_PP_4_4_4, 1481 1482 /* Octo SPI */ 1483 SNOR_CMD_PP_1_1_8, 1484 SNOR_CMD_PP_1_8_8, 1485 SNOR_CMD_PP_8_8_8, 1486 1487 SNOR_CMD_PP_MAX 1488 }; 1489 1490 struct spi_nor_flash_parameter { 1491 u64 size; 1492 u32 page_size; 1493 1494 struct spi_nor_hwcaps hwcaps; 1495 struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; 1496 struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; 1497 1498 int (*quad_enable)(struct spi_nor *nor); 1499 }; 1500 1501 static void 1502 spi_nor_set_read_settings(struct spi_nor_read_command *read, 1503 u8 num_mode_clocks, 1504 u8 num_wait_states, 1505 u8 opcode, 1506 enum spi_nor_protocol proto) 1507 { 1508 read->num_mode_clocks = num_mode_clocks; 1509 read->num_wait_states = num_wait_states; 1510 read->opcode = opcode; 1511 read->proto = proto; 1512 } 1513 1514 static void 1515 spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, 1516 u8 opcode, 1517 enum spi_nor_protocol proto) 1518 { 1519 pp->opcode = opcode; 1520 pp->proto = proto; 1521 } 1522 1523 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT) 1524 /* 1525 * Serial Flash Discoverable Parameters (SFDP) parsing. 1526 */ 1527 1528 /** 1529 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters. 1530 * @nor: pointer to a 'struct spi_nor' 1531 * @addr: offset in the SFDP area to start reading data from 1532 * @len: number of bytes to read 1533 * @buf: buffer where the SFDP data are copied into (dma-safe memory) 1534 * 1535 * Whatever the actual numbers of bytes for address and dummy cycles are 1536 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always 1537 * followed by a 3-byte address and 8 dummy clock cycles. 1538 * 1539 * Return: 0 on success, -errno otherwise. 
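 *
 * In other words, each transfer issued below has the fixed layout
 * (illustrative):
 *
 *	5Ah | addr[23:16] | addr[15:8] | addr[7:0] | 8 dummy clocks | data...
 *
 * regardless of the opcode, address width and dummy count configured for
 * regular reads, which is why those fields are saved and restored around the
 * read loop.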
1540 */ 1541 static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr, 1542 size_t len, void *buf) 1543 { 1544 u8 addr_width, read_opcode, read_dummy; 1545 int ret; 1546 1547 read_opcode = nor->read_opcode; 1548 addr_width = nor->addr_width; 1549 read_dummy = nor->read_dummy; 1550 1551 nor->read_opcode = SPINOR_OP_RDSFDP; 1552 nor->addr_width = 3; 1553 nor->read_dummy = 8; 1554 1555 while (len) { 1556 ret = nor->read(nor, addr, len, (u8 *)buf); 1557 if (!ret || ret > len) { 1558 ret = -EIO; 1559 goto read_err; 1560 } 1561 if (ret < 0) 1562 goto read_err; 1563 1564 buf += ret; 1565 addr += ret; 1566 len -= ret; 1567 } 1568 ret = 0; 1569 1570 read_err: 1571 nor->read_opcode = read_opcode; 1572 nor->addr_width = addr_width; 1573 nor->read_dummy = read_dummy; 1574 1575 return ret; 1576 } 1577 1578 struct sfdp_parameter_header { 1579 u8 id_lsb; 1580 u8 minor; 1581 u8 major; 1582 u8 length; /* in double words */ 1583 u8 parameter_table_pointer[3]; /* byte address */ 1584 u8 id_msb; 1585 }; 1586 1587 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb) 1588 #define SFDP_PARAM_HEADER_PTP(p) \ 1589 (((p)->parameter_table_pointer[2] << 16) | \ 1590 ((p)->parameter_table_pointer[1] << 8) | \ 1591 ((p)->parameter_table_pointer[0] << 0)) 1592 1593 #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */ 1594 #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */ 1595 1596 #define SFDP_SIGNATURE 0x50444653U 1597 #define SFDP_JESD216_MAJOR 1 1598 #define SFDP_JESD216_MINOR 0 1599 #define SFDP_JESD216A_MINOR 5 1600 #define SFDP_JESD216B_MINOR 6 1601 1602 struct sfdp_header { 1603 u32 signature; /* Ox50444653U <=> "SFDP" */ 1604 u8 minor; 1605 u8 major; 1606 u8 nph; /* 0-base number of parameter headers */ 1607 u8 unused; 1608 1609 /* Basic Flash Parameter Table. */ 1610 struct sfdp_parameter_header bfpt_header; 1611 }; 1612 1613 /* Basic Flash Parameter Table */ 1614 1615 /* 1616 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs. 1617 * They are indexed from 1 but C arrays are indexed from 0. 1618 */ 1619 #define BFPT_DWORD(i) ((i) - 1) 1620 #define BFPT_DWORD_MAX 16 1621 1622 /* The first version of JESB216 defined only 9 DWORDs. */ 1623 #define BFPT_DWORD_MAX_JESD216 9 1624 1625 /* 1st DWORD. */ 1626 #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16) 1627 #define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17) 1628 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17) 1629 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17) 1630 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17) 1631 #define BFPT_DWORD1_DTR BIT(19) 1632 #define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20) 1633 #define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21) 1634 #define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22) 1635 1636 /* 5th DWORD. */ 1637 #define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0) 1638 #define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4) 1639 1640 /* 11th DWORD. */ 1641 #define BFPT_DWORD11_PAGE_SIZE_SHIFT 4 1642 #define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4) 1643 1644 /* 15th DWORD. */ 1645 1646 /* 1647 * (from JESD216 rev B) 1648 * Quad Enable Requirements (QER): 1649 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4 1650 * reads based on instruction. DQ3/HOLD# functions are hold during 1651 * instruction phase. 1652 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with 1653 * two data bytes where bit 1 of the second byte is one. 1654 * [...] 1655 * Writing only one byte to the status register has the side-effect of 1656 * clearing status register 2, including the QE bit. 
The 100b code is 1657 * used if writing one byte to the status register does not modify 1658 * status register 2. 1659 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with 1660 * one data byte where bit 6 is one. 1661 * [...] 1662 * - 011b: QE is bit 7 of status register 2. It is set via Write status 1663 * register 2 instruction 3Eh with one data byte where bit 7 is one. 1664 * [...] 1665 * The status register 2 is read using instruction 3Fh. 1666 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with 1667 * two data bytes where bit 1 of the second byte is one. 1668 * [...] 1669 * In contrast to the 001b code, writing one byte to the status 1670 * register does not modify status register 2. 1671 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using 1672 * Read Status instruction 05h. Status register2 is read using 1673 * instruction 35h. QE is set via Writ Status instruction 01h with 1674 * two data bytes where bit 1 of the second byte is one. 1675 * [...] 1676 */ 1677 #define BFPT_DWORD15_QER_MASK GENMASK(22, 20) 1678 #define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */ 1679 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20) 1680 #define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */ 1681 #define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20) 1682 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20) 1683 #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */ 1684 1685 struct sfdp_bfpt { 1686 u32 dwords[BFPT_DWORD_MAX]; 1687 }; 1688 1689 /* Fast Read settings. */ 1690 1691 static void 1692 spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read, 1693 u16 half, 1694 enum spi_nor_protocol proto) 1695 { 1696 read->num_mode_clocks = (half >> 5) & 0x07; 1697 read->num_wait_states = (half >> 0) & 0x1f; 1698 read->opcode = (half >> 8) & 0xff; 1699 read->proto = proto; 1700 } 1701 1702 struct sfdp_bfpt_read { 1703 /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */ 1704 u32 hwcaps; 1705 1706 /* 1707 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us 1708 * whether the Fast Read x-y-z command is supported. 1709 */ 1710 u32 supported_dword; 1711 u32 supported_bit; 1712 1713 /* 1714 * The half-word at offset <setting_shift> in <setting_dword> BFPT DWORD 1715 * encodes the op code, the number of mode clocks and the number of wait 1716 * states to be used by Fast Read x-y-z command. 1717 */ 1718 u32 settings_dword; 1719 u32 settings_shift; 1720 1721 /* The SPI protocol for this Fast Read x-y-z command. 
*/ 1722 enum spi_nor_protocol proto; 1723 }; 1724 1725 static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = { 1726 /* Fast Read 1-1-2 */ 1727 { 1728 SNOR_HWCAPS_READ_1_1_2, 1729 BFPT_DWORD(1), BIT(16), /* Supported bit */ 1730 BFPT_DWORD(4), 0, /* Settings */ 1731 SNOR_PROTO_1_1_2, 1732 }, 1733 1734 /* Fast Read 1-2-2 */ 1735 { 1736 SNOR_HWCAPS_READ_1_2_2, 1737 BFPT_DWORD(1), BIT(20), /* Supported bit */ 1738 BFPT_DWORD(4), 16, /* Settings */ 1739 SNOR_PROTO_1_2_2, 1740 }, 1741 1742 /* Fast Read 2-2-2 */ 1743 { 1744 SNOR_HWCAPS_READ_2_2_2, 1745 BFPT_DWORD(5), BIT(0), /* Supported bit */ 1746 BFPT_DWORD(6), 16, /* Settings */ 1747 SNOR_PROTO_2_2_2, 1748 }, 1749 1750 /* Fast Read 1-1-4 */ 1751 { 1752 SNOR_HWCAPS_READ_1_1_4, 1753 BFPT_DWORD(1), BIT(22), /* Supported bit */ 1754 BFPT_DWORD(3), 16, /* Settings */ 1755 SNOR_PROTO_1_1_4, 1756 }, 1757 1758 /* Fast Read 1-4-4 */ 1759 { 1760 SNOR_HWCAPS_READ_1_4_4, 1761 BFPT_DWORD(1), BIT(21), /* Supported bit */ 1762 BFPT_DWORD(3), 0, /* Settings */ 1763 SNOR_PROTO_1_4_4, 1764 }, 1765 1766 /* Fast Read 4-4-4 */ 1767 { 1768 SNOR_HWCAPS_READ_4_4_4, 1769 BFPT_DWORD(5), BIT(4), /* Supported bit */ 1770 BFPT_DWORD(7), 16, /* Settings */ 1771 SNOR_PROTO_4_4_4, 1772 }, 1773 }; 1774 1775 struct sfdp_bfpt_erase { 1776 /* 1777 * The half-word at offset <shift> in DWORD <dwoard> encodes the 1778 * op code and erase sector size to be used by Sector Erase commands. 1779 */ 1780 u32 dword; 1781 u32 shift; 1782 }; 1783 1784 static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = { 1785 /* Erase Type 1 in DWORD8 bits[15:0] */ 1786 {BFPT_DWORD(8), 0}, 1787 1788 /* Erase Type 2 in DWORD8 bits[31:16] */ 1789 {BFPT_DWORD(8), 16}, 1790 1791 /* Erase Type 3 in DWORD9 bits[15:0] */ 1792 {BFPT_DWORD(9), 0}, 1793 1794 /* Erase Type 4 in DWORD9 bits[31:16] */ 1795 {BFPT_DWORD(9), 16}, 1796 }; 1797 1798 static int spi_nor_hwcaps_read2cmd(u32 hwcaps); 1799 1800 /** 1801 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table. 1802 * @nor: pointer to a 'struct spi_nor' 1803 * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing 1804 * the Basic Flash Parameter Table length and version 1805 * @params: pointer to the 'struct spi_nor_flash_parameter' to be 1806 * filled 1807 * 1808 * The Basic Flash Parameter Table is the main and only mandatory table as 1809 * defined by the SFDP (JESD216) specification. 1810 * It provides us with the total size (memory density) of the data array and 1811 * the number of address bytes for Fast Read, Page Program and Sector Erase 1812 * commands. 1813 * For Fast READ commands, it also gives the number of mode clock cycles and 1814 * wait states (regrouped in the number of dummy clock cycles) for each 1815 * supported instruction op code. 1816 * For Page Program, the page size is now available since JESD216 rev A, however 1817 * the supported instruction op codes are still not provided. 1818 * For Sector Erase commands, this table stores the supported instruction op 1819 * codes and the associated sector sizes. 1820 * Finally, the Quad Enable Requirements (QER) are also available since JESD216 1821 * rev A. The QER bits encode the manufacturer dependent procedure to be 1822 * executed to set the Quad Enable (QE) bit in some internal register of the 1823 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before 1824 * sending any Quad SPI command to the memory. 
Actually, setting the QE bit
 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
 * and IO3 hence enabling 4 (Quad) I/O lines.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_bfpt(struct spi_nor *nor,
			      const struct sfdp_parameter_header *bfpt_header,
			      struct spi_nor_flash_parameter *params)
{
	struct mtd_info *mtd = &nor->mtd;
	struct sfdp_bfpt bfpt;
	size_t len;
	int i, cmd, err;
	u32 addr;
	u16 half;

	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
		return -EINVAL;

	/* Read the Basic Flash Parameter Table. */
	len = min_t(size_t, sizeof(bfpt),
		    bfpt_header->length * sizeof(u32));
	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
	memset(&bfpt, 0, sizeof(bfpt));
	err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
	if (err < 0)
		return err;

	/* Fix endianness of the BFPT DWORDs. */
	for (i = 0; i < BFPT_DWORD_MAX; i++)
		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);

	/* Number of address bytes. */
	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
		nor->addr_width = 3;
		break;

	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
		nor->addr_width = 4;
		break;

	default:
		break;
	}

	/* Flash Memory Density (in bits). */
	params->size = bfpt.dwords[BFPT_DWORD(2)];
	if (params->size & BIT(31)) {
		params->size &= ~BIT(31);

		/*
		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
		 * bits is unlikely to exist so this error probably means
		 * the BFPT we are reading is corrupted/wrong.
		 */
		if (params->size > 63)
			return -EINVAL;

		params->size = 1ULL << params->size;
	} else {
		params->size++;
	}
	params->size >>= 3; /* Convert to bytes. */

	/* Fast Read settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
		struct spi_nor_read_command *read;

		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
			params->hwcaps.mask &= ~rd->hwcaps;
			continue;
		}

		params->hwcaps.mask |= rd->hwcaps;
		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
		read = &params->reads[cmd];
		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
	}

	/* Sector Erase settings. */
	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
		u32 erasesize;
		u8 opcode;

		half = bfpt.dwords[er->dword] >> er->shift;
		erasesize = half & 0xff;

		/* erasesize == 0 means this Erase Type is not supported. */
		if (!erasesize)
			continue;

		erasesize = 1U << erasesize;
		opcode = (half >> 8) & 0xff;
#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
		if (erasesize == SZ_4K) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
			break;
		}
#endif
		if (!mtd->erasesize || mtd->erasesize < erasesize) {
			nor->erase_opcode = opcode;
			mtd->erasesize = erasesize;
		}
	}

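	/*
	 * Note: a plain JESD216 BFPT is only BFPT_DWORD_MAX_JESD216 (9)
	 * DWORDs long, while rev A and later define BFPT_DWORD_MAX (16)
	 * DWORDs; the length check below is what tells the two apart. The
	 * page size and Quad Enable Requirements parsed afterwards only
	 * exist from rev A onwards.
	 */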
	/* Stop here if not JESD216 rev A or later. */
	if (bfpt_header->length < BFPT_DWORD_MAX)
		return 0;

	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
	params->page_size = 1U << params->page_size;

	/* Quad Enable Requirements. */
	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
	case BFPT_DWORD15_QER_NONE:
		params->quad_enable = NULL;
		break;
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
		params->quad_enable = spansion_no_read_cr_quad_enable;
		break;
#endif
#ifdef CONFIG_SPI_FLASH_MACRONIX
	case BFPT_DWORD15_QER_SR1_BIT6:
		params->quad_enable = macronix_quad_enable;
		break;
#endif
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
	case BFPT_DWORD15_QER_SR2_BIT1:
		params->quad_enable = spansion_read_cr_quad_enable;
		break;
#endif
	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
 * @nor:	pointer to a 'struct spi_nor'
 * @params:	pointer to the 'struct spi_nor_flash_parameter' to be
 *		filled
 *
 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
 * specification. This is a standard which tends to be supported by almost all
 * (Q)SPI memory manufacturers. These tables allow us to learn at runtime the
 * main parameters needed to perform basic SPI flash operations such as Fast
 * Read, Page Program or Sector Erase commands.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	const struct sfdp_parameter_header *param_header, *bfpt_header;
	struct sfdp_parameter_header *param_headers = NULL;
	struct sfdp_header header;
	size_t psize;
	int i, err;

	/* Get the SFDP header. */
	err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
	if (err < 0)
		return err;

	/* Check the SFDP header version. */
	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
	    header.major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Verify that the first and only mandatory parameter header is a
	 * Basic Flash Parameter Table header as specified in JESD216.
	 */
	bfpt_header = &header.bfpt_header;
	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
	    bfpt_header->major != SFDP_JESD216_MAJOR)
		return -EINVAL;

	/*
	 * Allocate memory then read all parameter headers with a single
	 * Read SFDP command. These parameter headers will actually be parsed
	 * twice: a first time to get the latest revision of the basic flash
	 * parameter table, then a second time to handle the supported optional
	 * tables.
	 * Hence we read the parameter headers once for all to reduce the
	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
	 * because we don't need to keep these parameter headers: the allocated
	 * memory is always released with kfree() before exiting this function.
	 */
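	/*
	 * Note: nph is 0-based, i.e. a value of N means N + 1 parameter
	 * headers in total. The first (BFPT) header was already read as part
	 * of 'header' above, so only the N remaining optional headers are
	 * fetched here.
	 */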
	if (header.nph) {
		psize = header.nph * sizeof(*param_headers);

		param_headers = kmalloc(psize, GFP_KERNEL);
		if (!param_headers)
			return -ENOMEM;

		err = spi_nor_read_sfdp(nor, sizeof(header),
					psize, param_headers);
		if (err < 0) {
			dev_err(nor->dev,
				"failed to read SFDP parameter headers\n");
			goto exit;
		}
	}

	/*
	 * Check other parameter headers to get the latest revision of
	 * the basic flash parameter table.
	 */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
		    param_header->major == SFDP_JESD216_MAJOR &&
		    (param_header->minor > bfpt_header->minor ||
		     (param_header->minor == bfpt_header->minor &&
		      param_header->length > bfpt_header->length)))
			bfpt_header = param_header;
	}

	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
	if (err)
		goto exit;

	/* Parse other parameter headers. */
	for (i = 0; i < header.nph; i++) {
		param_header = &param_headers[i];

		switch (SFDP_PARAM_HEADER_ID(param_header)) {
		case SFDP_SECTOR_MAP_ID:
			dev_info(nor->dev,
				 "non-uniform erase sector maps are not supported yet.\n");
			break;

		default:
			break;
		}

		if (err)
			goto exit;
	}

exit:
	kfree(param_headers);
	return err;
}
#else
static int spi_nor_parse_sfdp(struct spi_nor *nor,
			      struct spi_nor_flash_parameter *params)
{
	return -EINVAL;
}
#endif /* SPI_FLASH_SFDP_SUPPORT */

static int spi_nor_init_params(struct spi_nor *nor,
			       const struct flash_info *info,
			       struct spi_nor_flash_parameter *params)
{
	/* Set legacy flash parameters as default. */
	memset(params, 0, sizeof(*params));

	/* Set SPI NOR sizes. */
	params->size = info->sector_size * info->n_sectors;
	params->page_size = info->page_size;

	/* (Fast) Read settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_READ;
	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
				  0, 0, SPINOR_OP_READ,
				  SNOR_PROTO_1_1_1);

	if (!(info->flags & SPI_NOR_NO_FR)) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
					  0, 8, SPINOR_OP_READ_FAST,
					  SNOR_PROTO_1_1_1);
	}

	if (info->flags & SPI_NOR_DUAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
					  0, 8, SPINOR_OP_READ_1_1_2,
					  SNOR_PROTO_1_1_2);
	}

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
					  0, 8, SPINOR_OP_READ_1_1_4,
					  SNOR_PROTO_1_1_4);
	}

	if (info->flags & SPI_NOR_OCTAL_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
					  0, 8, SPINOR_OP_READ_1_1_8,
					  SNOR_PROTO_1_1_8);
	}
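
	/*
	 * Note that params->hwcaps.mask only records what the flash itself
	 * advertises; spi_nor_setup() later ANDs it with the controller
	 * capabilities passed to spi_nor_scan() before any opcode is
	 * actually selected.
	 */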
	/* Page Program settings. */
	params->hwcaps.mask |= SNOR_HWCAPS_PP;
	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
				SPINOR_OP_PP, SNOR_PROTO_1_1_1);

	if (info->flags & SPI_NOR_QUAD_READ) {
		params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
		spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
					SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
	}

	/* Select the procedure to set the Quad Enable bit. */
	if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
				   SNOR_HWCAPS_PP_QUAD)) {
		switch (JEDEC_MFR(info)) {
#ifdef CONFIG_SPI_FLASH_MACRONIX
		case SNOR_MFR_MACRONIX:
			params->quad_enable = macronix_quad_enable;
			break;
#endif
		case SNOR_MFR_ST:
		case SNOR_MFR_MICRON:
			break;

		default:
#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
			/* Kept only for backward compatibility purposes. */
			params->quad_enable = spansion_read_cr_quad_enable;
#endif
			break;
		}
	}

	/* Override the parameters with data read from SFDP tables. */
	nor->addr_width = 0;
	nor->mtd.erasesize = 0;
	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
	    !(info->flags & SPI_NOR_SKIP_SFDP)) {
		struct spi_nor_flash_parameter sfdp_params;

		memcpy(&sfdp_params, params, sizeof(sfdp_params));
		if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
			nor->addr_width = 0;
			nor->mtd.erasesize = 0;
		} else {
			memcpy(params, &sfdp_params, sizeof(*params));
		}
	}

	return 0;
}

static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	size_t i;

	for (i = 0; i < size; i++)
		if (table[i][0] == (int)hwcaps)
			return table[i][1];

	return -EINVAL;
}

static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}

static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}

static int spi_nor_select_read(struct spi_nor *nor,
			       const struct spi_nor_flash_parameter *params,
			       u32 shared_hwcaps)
{
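	/*
	 * The SNOR_HWCAPS_READ_* bits are assigned so that preferred
	 * (faster/wider) modes sit at higher bit positions, so fls() on the
	 * shared mask picks the best (Fast) Read command supported by both
	 * sides; e.g. if 1-1-4 and 1-1-1 are both available, the 1-1-4
	 * variant wins. The "- 1" turns fls()'s 1-based result into a bit
	 * index and goes negative when no read capability is shared at all.
	 */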
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	const struct spi_nor_read_command *read;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * In the spi-nor framework, we don't need to distinguish between mode
	 * clock cycles and wait state clock cycles.
	 * Indeed, the value of the mode clock cycles is used by a QSPI
	 * flash memory to know whether it should enter or leave its 0-4-4
	 * (Continuous Read / XIP) mode.
	 * eXecution In Place is out of the scope of the mtd sub-system.
	 * Hence we choose to merge both mode and wait state clock cycles
	 * into the so-called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}

static int spi_nor_select_pp(struct spi_nor *nor,
			     const struct spi_nor_flash_parameter *params,
			     u32 shared_hwcaps)
{
	int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	const struct spi_nor_pp_command *pp;

	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}

static int spi_nor_select_erase(struct spi_nor *nor,
				const struct flash_info *info)
{
	struct mtd_info *mtd = &nor->mtd;

	/* Do nothing if already configured from SFDP. */
	if (mtd->erasesize)
		return 0;

#ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}
	return 0;
}

static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
			 const struct spi_nor_flash_parameter *params,
			 const struct spi_nor_hwcaps *hwcaps)
{
	u32 ignored_mask, shared_mask;
	bool enable_quad_io;
	int err;

	/*
	 * Keep only the hardware capabilities supported by both the SPI
	 * controller and the SPI flash memory.
	 */
	shared_mask = hwcaps->mask & params->hwcaps.mask;

	/* SPI n-n-n protocols are not supported yet. */
	ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
			SNOR_HWCAPS_READ_4_4_4 |
			SNOR_HWCAPS_READ_8_8_8 |
			SNOR_HWCAPS_PP_4_4_4 |
			SNOR_HWCAPS_PP_8_8_8);
	if (shared_mask & ignored_mask) {
		dev_dbg(nor->dev,
			"SPI n-n-n protocols are not supported yet.\n");
		shared_mask &= ~ignored_mask;
	}

	/* Select the (Fast) Read command. */
	err = spi_nor_select_read(nor, params, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select read settings supported by both the SPI controller and memory.\n");
		return err;
	}
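
	/*
	 * At this point nor->read_opcode, nor->read_proto and nor->read_dummy
	 * are fixed; e.g. a plain Fast Read selected from the legacy defaults
	 * ends up with 8 dummy cycles (0 mode clocks + 8 wait states as set
	 * by spi_nor_init_params()).
	 */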
	/* Select the Page Program command. */
	err = spi_nor_select_pp(nor, params, shared_mask);
	if (err) {
		dev_dbg(nor->dev,
			"can't select write settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Select the Sector Erase command. */
	err = spi_nor_select_erase(nor, info);
	if (err) {
		dev_dbg(nor->dev,
			"can't select erase settings supported by both the SPI controller and memory.\n");
		return err;
	}

	/* Enable Quad I/O if needed. */
	enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
			  spi_nor_get_protocol_width(nor->write_proto) == 4);
	if (enable_quad_io && params->quad_enable)
		nor->quad_enable = params->quad_enable;
	else
		nor->quad_enable = NULL;

	return 0;
}

static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	/*
	 * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to
	 * power up with the software protection bits set.
	 */
	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
	    nor->info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

	if (nor->quad_enable) {
		err = nor->quad_enable(nor);
		if (err) {
			dev_dbg(nor->dev, "quad mode not supported\n");
			return err;
		}
	}

	if (nor->addr_width == 4 &&
	    (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
	    !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		if (nor->flags & SNOR_F_BROKEN_RESET)
			printf("enabling reset hack; may not recover from unexpected reboots\n");
		set_4byte(nor, nor->info, 1);
	}

	return 0;
}

int spi_nor_scan(struct spi_nor *nor)
{
	struct spi_nor_flash_parameter params;
	const struct flash_info *info = NULL;
	struct mtd_info *mtd = &nor->mtd;
	struct spi_nor_hwcaps hwcaps = {
		.mask = SNOR_HWCAPS_READ |
			SNOR_HWCAPS_READ_FAST |
			SNOR_HWCAPS_PP,
	};
	struct spi_slave *spi = nor->spi;
	int ret;

	/* Reset SPI protocol for all commands. */
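	/*
	 * The x-y-z in SNOR_PROTO_x_y_z is the bus width used for the
	 * instruction, address and data phases respectively, so everything
	 * starts out single-bit (1-1-1) until spi_nor_setup() selects
	 * faster protocols below.
	 */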
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;
	nor->read = spi_nor_read_data;
	nor->write = spi_nor_write_data;
	nor->read_reg = spi_nor_read_reg;
	nor->write_reg = spi_nor_write_reg;

	if (spi->mode & SPI_RX_OCTAL) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;

		if (spi->mode & SPI_TX_OCTAL)
			hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
					SNOR_HWCAPS_PP_1_1_8 |
					SNOR_HWCAPS_PP_1_8_8);
	} else if (spi->mode & SPI_RX_QUAD) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;

		if (spi->mode & SPI_TX_QUAD)
			hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
					SNOR_HWCAPS_PP_1_1_4 |
					SNOR_HWCAPS_PP_1_4_4);
	} else if (spi->mode & SPI_RX_DUAL) {
		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;

		if (spi->mode & SPI_TX_DUAL)
			hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
	}

	info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/* Parse the Serial Flash Discoverable Parameters table. */
	ret = spi_nor_init_params(nor, info, &params);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = info->name;
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = params.size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;

#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
	/* NOR protection support for STmicro/Micron chips and similar */
	if (JEDEC_MFR(info) == SNOR_MFR_ST ||
	    JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}
#endif

#ifdef CONFIG_SPI_FLASH_SST
	/*
	 * sst26 series block protection implementation differs from other
	 * series.
	 */
	if (info->flags & SPI_NOR_HAS_SST26LOCK) {
		nor->flash_lock = sst26_lock;
		nor->flash_unlock = sst26_unlock;
		nor->flash_is_locked = sst26_is_locked;
	}

	/* SST NOR chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
#endif
		mtd->_write = spi_nor_write;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB)
		nor->flags |= SNOR_F_HAS_SR_TB;
	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	nor->page_size = params.page_size;
	mtd->writebufsize = nor->page_size;

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW))
		params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
	 */
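	/*
	 * params.hwcaps.mask is final at this point (e.g. Fast Read was just
	 * dropped for SPI_RX_SLOW controllers), so the intersection done in
	 * spi_nor_setup() reflects both the flash tables and the bus mode.
	 */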
2536 */ 2537 ret = spi_nor_setup(nor, info, ¶ms, &hwcaps); 2538 if (ret) 2539 return ret; 2540 2541 if (nor->addr_width) { 2542 /* already configured from SFDP */ 2543 } else if (info->addr_width) { 2544 nor->addr_width = info->addr_width; 2545 } else if (mtd->size > SZ_16M) { 2546 #ifndef CONFIG_SPI_FLASH_BAR 2547 /* enable 4-byte addressing if the device exceeds 16MiB */ 2548 nor->addr_width = 4; 2549 if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || 2550 info->flags & SPI_NOR_4B_OPCODES) 2551 spi_nor_set_4byte_opcodes(nor, info); 2552 #else 2553 /* Configure the BAR - discover bank cmds and read current bank */ 2554 nor->addr_width = 3; 2555 ret = read_bar(nor, info); 2556 if (ret < 0) 2557 return ret; 2558 #endif 2559 } else { 2560 nor->addr_width = 3; 2561 } 2562 2563 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { 2564 dev_dbg(dev, "address width is too large: %u\n", 2565 nor->addr_width); 2566 return -EINVAL; 2567 } 2568 2569 /* Send all the required SPI flash commands to initialize device */ 2570 nor->info = info; 2571 ret = spi_nor_init(nor); 2572 if (ret) 2573 return ret; 2574 2575 nor->name = mtd->name; 2576 nor->size = mtd->size; 2577 nor->erase_size = mtd->erasesize; 2578 nor->sector_size = mtd->erasesize; 2579 2580 #ifndef CONFIG_SPL_BUILD 2581 printf("SF: Detected %s with page size ", nor->name); 2582 print_size(nor->page_size, ", erase size "); 2583 print_size(nor->erase_size, ", total "); 2584 print_size(nor->size, ""); 2585 puts("\n"); 2586 #endif 2587 2588 return 0; 2589 } 2590 2591 /* U-Boot specific functions, need to extend MTD to support these */ 2592 int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor) 2593 { 2594 int sr = read_sr(nor); 2595 2596 if (sr < 0) 2597 return sr; 2598 2599 return (sr >> 2) & 7; 2600 } 2601