1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 *
9 * Synced from Linux v4.19
10 */
11
12 #include <common.h>
13 #include <linux/err.h>
14 #include <linux/errno.h>
15 #include <linux/log2.h>
16 #include <linux/math64.h>
17 #include <linux/sizes.h>
18
19 #include <linux/mtd/mtd.h>
20 #include <linux/mtd/spi-nor.h>
21 #include <spi-mem.h>
22 #include <spi.h>
23
24 #include "sf_internal.h"
25
26 /* Define max times to check status register before we give up. */
27
28 /*
29 * For everything but full-chip erase; probably could be much smaller, but kept
30 * around for safety for now
31 */
32
33 #define HZ CONFIG_SYS_HZ
34
35 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
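/*
 * Illustrative example (values depend on the board config): with
 * CONFIG_SYS_HZ == 1000 this gives spi_nor_wait_till_ready() a polling
 * budget of 40000 ticks, i.e. roughly 40 seconds, before it reports
 * -ETIMEDOUT.
 */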
36
37 static int spi_nor_read_write_reg(struct spi_nor *nor, struct spi_mem_op
38 *op, void *buf)
39 {
40 if (op->data.dir == SPI_MEM_DATA_IN)
41 op->data.buf.in = buf;
42 else
43 op->data.buf.out = buf;
44 return spi_mem_exec_op(nor->spi, op);
45 }
46
47 static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
48 {
49 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
50 SPI_MEM_OP_NO_ADDR,
51 SPI_MEM_OP_NO_DUMMY,
52 SPI_MEM_OP_DATA_IN(len, NULL, 1));
53 int ret;
54
55 ret = spi_nor_read_write_reg(nor, &op, val);
56 if (ret < 0)
57 dev_dbg(nor->dev, "error %d reading %x\n", ret,
58 code);
59
60 return ret;
61 }
62
63 static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
64 {
65 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
66 SPI_MEM_OP_NO_ADDR,
67 SPI_MEM_OP_NO_DUMMY,
68 SPI_MEM_OP_DATA_OUT(len, NULL, 1));
69
70 return spi_nor_read_write_reg(nor, &op, buf);
71 }
72
73 static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
74 u_char *buf)
75 {
76 struct spi_mem_op op =
77 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
78 SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
79 SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
80 SPI_MEM_OP_DATA_IN(len, buf, 1));
81 size_t remaining = len;
82 int ret;
83
84 /* get transfer protocols. */
85 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
86 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
87 op.dummy.buswidth = op.addr.buswidth;
88 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
89
90 /* convert the dummy cycles to the number of bytes */
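/* e.g. (illustrative) nor->read_dummy = 8 cycles on a x4 bus gives 8 * 4 / 8 = 4 dummy bytes */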
91 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
92
93 while (remaining) {
94 op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
95 ret = spi_mem_adjust_op_size(nor->spi, &op);
96 if (ret)
97 return ret;
98
99 ret = spi_mem_exec_op(nor->spi, &op);
100 if (ret)
101 return ret;
102
103 op.addr.val += op.data.nbytes;
104 remaining -= op.data.nbytes;
105 op.data.buf.in += op.data.nbytes;
106 }
107
108 return len;
109 }
110
111 static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
112 const u_char *buf)
113 {
114 struct spi_mem_op op =
115 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
116 SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
117 SPI_MEM_OP_NO_DUMMY,
118 SPI_MEM_OP_DATA_OUT(len, buf, 1));
119 int ret;
120
121 /* get transfer protocols. */
122 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
123 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
124 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
125
126 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
127 op.addr.nbytes = 0;
128
129 ret = spi_mem_adjust_op_size(nor->spi, &op);
130 if (ret)
131 return ret;
132 op.data.nbytes = len < op.data.nbytes ? len : op.data.nbytes;
133
134 ret = spi_mem_exec_op(nor->spi, &op);
135 if (ret)
136 return ret;
137
138 return op.data.nbytes;
139 }
140
141 /*
142 * Read the status register.
143 * Returns the status register value,
144 * or negative if an error occurred.
145 */
146 static int read_sr(struct spi_nor *nor)
147 {
148 int ret;
149 u8 val;
150
151 ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
152 if (ret < 0) {
153 pr_debug("error %d reading SR\n", (int)ret);
154 return ret;
155 }
156
157 return val;
158 }
159
160 /*
161 * Read the flag status register.
162 * Returns the flag status register value,
163 * or negative if an error occurred.
164 */
165 static int read_fsr(struct spi_nor *nor)
166 {
167 int ret;
168 u8 val;
169
170 ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
171 if (ret < 0) {
172 pr_debug("error %d reading FSR\n", ret);
173 return ret;
174 }
175
176 return val;
177 }
178
179 /*
180 * Read the configuration register.
181 * Returns the configuration register value,
182 * or negative if an error occurred.
183 */
184 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND) || defined(CONFIG_SPI_FLASH_NORMEM)
185 static int read_cr(struct spi_nor *nor)
186 {
187 int ret;
188 u8 val;
189
190 ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
191 if (ret < 0) {
192 dev_dbg(nor->dev, "error %d reading CR\n", ret);
193 return ret;
194 }
195
196 return val;
197 }
198 #endif
199
200 /*
201 * Write status register 1 byte
202 * Returns negative if error occurred.
203 */
204 static int write_sr(struct spi_nor *nor, u8 val)
205 {
206 nor->cmd_buf[0] = val;
207 return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
208 }
209
210 #ifdef CONFIG_SPI_FLASH_NORMEM
211 /*
212 * Write configuration register 1 byte
213 * Returns negative if error occurred.
214 */
215 static int write_cr(struct spi_nor *nor, u8 val)
216 {
217 nor->cmd_buf[0] = val;
218 return nor->write_reg(nor, SPINOR_OP_WRCR, nor->cmd_buf, 1);
219 }
220 #endif
221
222 /*
223 * Set write enable latch with Write Enable command.
224 * Returns negative if error occurred.
225 */
226 static int write_enable(struct spi_nor *nor)
227 {
228 return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
229 }
230
231 /*
232 * Send write disable instruction to the chip.
233 */
234 static int write_disable(struct spi_nor *nor)
235 {
236 return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
237 }
238
239 static struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
240 {
241 return mtd->priv;
242 }
243
244 #ifndef CONFIG_SPI_FLASH_BAR
245 static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
246 {
247 size_t i;
248
249 for (i = 0; i < size; i++)
250 if (table[i][0] == opcode)
251 return table[i][1];
252
253 /* No conversion found, keep input op code. */
254 return opcode;
255 }
256
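/*
 * Illustrative use of the helper above: spi_nor_convert_3to4_read(SPINOR_OP_READ_FAST)
 * returns SPINOR_OP_READ_FAST_4B, while an opcode with no 4-byte entry in the
 * table is returned unchanged.
 */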
257 static u8 spi_nor_convert_3to4_read(u8 opcode)
258 {
259 static const u8 spi_nor_3to4_read[][2] = {
260 { SPINOR_OP_READ, SPINOR_OP_READ_4B },
261 { SPINOR_OP_READ_FAST, SPINOR_OP_READ_FAST_4B },
262 { SPINOR_OP_READ_1_1_2, SPINOR_OP_READ_1_1_2_4B },
263 { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B },
264 { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B },
265 { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B },
266 { SPINOR_OP_READ_1_1_8, SPINOR_OP_READ_1_1_8_4B },
267 { SPINOR_OP_READ_1_8_8, SPINOR_OP_READ_1_8_8_4B },
268
269 { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B },
270 { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B },
271 { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B },
272 };
273
274 return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
275 ARRAY_SIZE(spi_nor_3to4_read));
276 }
277
278 static u8 spi_nor_convert_3to4_program(u8 opcode)
279 {
280 static const u8 spi_nor_3to4_program[][2] = {
281 { SPINOR_OP_PP, SPINOR_OP_PP_4B },
282 { SPINOR_OP_PP_1_1_4, SPINOR_OP_PP_1_1_4_4B },
283 { SPINOR_OP_PP_1_4_4, SPINOR_OP_PP_1_4_4_4B },
284 { SPINOR_OP_PP_1_1_8, SPINOR_OP_PP_1_1_8_4B },
285 { SPINOR_OP_PP_1_8_8, SPINOR_OP_PP_1_8_8_4B },
286 };
287
288 return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
289 ARRAY_SIZE(spi_nor_3to4_program));
290 }
291
292 static u8 spi_nor_convert_3to4_erase(u8 opcode)
293 {
294 static const u8 spi_nor_3to4_erase[][2] = {
295 { SPINOR_OP_BE_4K, SPINOR_OP_BE_4K_4B },
296 { SPINOR_OP_BE_32K, SPINOR_OP_BE_32K_4B },
297 { SPINOR_OP_SE, SPINOR_OP_SE_4B },
298 };
299
300 return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
301 ARRAY_SIZE(spi_nor_3to4_erase));
302 }
303
304 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
305 const struct flash_info *info)
306 {
307 /* Do some manufacturer fixups first */
308 switch (JEDEC_MFR(info)) {
309 case SNOR_MFR_SPANSION:
310 /* No small sector erase for 4-byte command set */
311 nor->erase_opcode = SPINOR_OP_SE;
312 nor->mtd.erasesize = info->sector_size;
313 break;
314
315 default:
316 break;
317 }
318
319 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
320 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
321 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
322 }
323 #endif /* !CONFIG_SPI_FLASH_BAR */
324
325 /* Enable/disable 4-byte addressing mode. */
326 static int set_4byte(struct spi_nor *nor, const struct flash_info *info,
327 int enable)
328 {
329 int status;
330 bool need_wren = false;
331 u8 cmd;
332
333 switch (JEDEC_MFR(info)) {
334 case SNOR_MFR_ST:
335 case SNOR_MFR_MICRON:
336 /* Some Micron need WREN command; all will accept it */
337 need_wren = true;
338 case SNOR_MFR_MACRONIX:
339 case SNOR_MFR_WINBOND:
340 if (need_wren)
341 write_enable(nor);
342
343 cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
344 status = nor->write_reg(nor, cmd, NULL, 0);
345 if (need_wren)
346 write_disable(nor);
347
348 if (!status && !enable &&
349 JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
350 /*
351 * On Winbond W25Q256FV, leaving 4byte mode causes
352 * the Extended Address Register to be set to 1, so all
353 * 3-byte-address reads come from the second 16M.
354 * We must clear the register to enable normal behavior.
355 */
356 write_enable(nor);
357 nor->cmd_buf[0] = 0;
358 nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
359 write_disable(nor);
360 }
361
362 return status;
363 default:
364 /* Spansion style */
365 nor->cmd_buf[0] = enable << 7;
366 return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
367 }
368 }
369
370 static int spi_nor_sr_ready(struct spi_nor *nor)
371 {
372 int sr = read_sr(nor);
373
374 if (sr < 0)
375 return sr;
376
377 if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
378 if (sr & SR_E_ERR)
379 dev_dbg(nor->dev, "Erase Error occurred\n");
380 else
381 dev_dbg(nor->dev, "Programming Error occurred\n");
382
383 nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
384 return -EIO;
385 }
386
387 return !(sr & SR_WIP);
388 }
389
390 static int spi_nor_fsr_ready(struct spi_nor *nor)
391 {
392 int fsr = read_fsr(nor);
393
394 if (fsr < 0)
395 return fsr;
396
397 if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
398 if (fsr & FSR_E_ERR)
399 dev_err(nor->dev, "Erase operation failed.\n");
400 else
401 dev_err(nor->dev, "Program operation failed.\n");
402
403 if (fsr & FSR_PT_ERR)
404 dev_err(nor->dev,
405 "Attempted to modify a protected sector.\n");
406
407 nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
408 return -EIO;
409 }
410
411 return fsr & FSR_READY;
412 }
413
414 static int spi_nor_ready(struct spi_nor *nor)
415 {
416 int sr, fsr;
417
418 sr = spi_nor_sr_ready(nor);
419 if (sr < 0)
420 return sr;
421 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
422 if (fsr < 0)
423 return fsr;
424 return sr && fsr;
425 }
426
427 /*
428 * Service routine to read status register until ready, or timeout occurs.
429 * Returns non-zero if error.
430 */
431 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
432 unsigned long timeout)
433 {
434 unsigned long timebase;
435 int ret;
436
437 timebase = get_timer(0);
438
439 while (get_timer(timebase) < timeout) {
440 ret = spi_nor_ready(nor);
441 if (ret < 0)
442 return ret;
443 if (ret)
444 return 0;
445 }
446
447 dev_err(nor->dev, "flash operation timed out\n");
448
449 return -ETIMEDOUT;
450 }
451
452 static int spi_nor_wait_till_ready(struct spi_nor *nor)
453 {
454 return spi_nor_wait_till_ready_with_timeout(nor,
455 DEFAULT_READY_WAIT_JIFFIES);
456 }
457
458 #ifdef CONFIG_SPI_FLASH_BAR
459 /*
460 * This "clean_bar" is necessary in a situation when one was accessing
461 * spi flash memory > 16 MiB by using Bank Address Register's BA24 bit.
462 *
463 * After it the BA24 bit shall be cleared to allow access to correct
464 * memory region after SW reset (by calling "reset" command).
465 *
466 * Otherwise, the BA24 bit may be left set and then after reset, the
467 * ROM would read/write/erase SPL from 16 MiB * bank_sel address.
468 */
469 static int clean_bar(struct spi_nor *nor)
470 {
471 u8 cmd, bank_sel = 0;
472
473 if (nor->bank_curr == 0)
474 return 0;
475 cmd = nor->bank_write_cmd;
476 nor->bank_curr = 0;
477 write_enable(nor);
478
479 return nor->write_reg(nor, cmd, &bank_sel, 1);
480 }
481
482 static int write_bar(struct spi_nor *nor, u32 offset)
483 {
484 u8 cmd, bank_sel;
485 int ret;
486
487 bank_sel = offset / SZ_16M;
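/* e.g. (illustrative) an offset of 0x1800000 (24 MiB) selects bank_sel = 1 */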
488 if (bank_sel == nor->bank_curr)
489 goto bar_end;
490
491 cmd = nor->bank_write_cmd;
492 write_enable(nor);
493 ret = nor->write_reg(nor, cmd, &bank_sel, 1);
494 if (ret < 0) {
495 debug("SF: fail to write bank register\n");
496 return ret;
497 }
498
499 bar_end:
500 nor->bank_curr = bank_sel;
501 return nor->bank_curr;
502 }
503
504 static int read_bar(struct spi_nor *nor, const struct flash_info *info)
505 {
506 u8 curr_bank = 0;
507 int ret;
508
509 switch (JEDEC_MFR(info)) {
510 case SNOR_MFR_SPANSION:
511 nor->bank_read_cmd = SPINOR_OP_BRRD;
512 nor->bank_write_cmd = SPINOR_OP_BRWR;
513 break;
514 default:
515 nor->bank_read_cmd = SPINOR_OP_RDEAR;
516 nor->bank_write_cmd = SPINOR_OP_WREAR;
517 }
518
519 ret = nor->read_reg(nor, nor->bank_read_cmd,
520 &curr_bank, 1);
521 if (ret) {
522 debug("SF: fail to read bank addr register\n");
523 return ret;
524 }
525 nor->bank_curr = curr_bank;
526
527 return 0;
528 }
529 #endif
530
531 /*
532 * Initiate the erasure of a single sector
533 */
534 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
535 {
536 struct spi_mem_op op =
537 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
538 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
539 SPI_MEM_OP_NO_DUMMY,
540 SPI_MEM_OP_NO_DATA);
541
542 if (nor->erase)
543 return nor->erase(nor, addr);
544
545 /*
546 * Default implementation, if driver doesn't have a specialized HW
547 * control
548 */
549 return spi_mem_exec_op(nor->spi, &op);
550 }
551
552 /*
553 * Erase an address range on the nor chip. The address range may span
554 * one or more erase sectors. Return an error if there is a problem erasing.
555 */
556 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
557 {
558 struct spi_nor *nor = mtd_to_spi_nor(mtd);
559 u32 addr, len, rem;
560 int ret;
561
562 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
563 (long long)instr->len);
564
565 div_u64_rem(instr->len, mtd->erasesize, &rem);
566 if (rem)
567 return -EINVAL;
568
569 addr = instr->addr;
570 len = instr->len;
571
572 while (len) {
573 #ifdef CONFIG_SPI_FLASH_BAR
574 ret = write_bar(nor, addr);
575 if (ret < 0)
576 return ret;
577 #endif
578 write_enable(nor);
579
580 ret = spi_nor_erase_sector(nor, addr);
581 if (ret)
582 goto erase_err;
583
584 addr += mtd->erasesize;
585 len -= mtd->erasesize;
586
587 ret = spi_nor_wait_till_ready(nor);
588 if (ret)
589 goto erase_err;
590 }
591
592 erase_err:
593 #ifdef CONFIG_SPI_FLASH_BAR
594 ret = clean_bar(nor);
595 #endif
596 write_disable(nor);
597
598 return ret;
599 }
600
601 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
602 /* Write status register and ensure bits in mask match written values */
603 static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
604 {
605 int ret;
606
607 write_enable(nor);
608 ret = write_sr(nor, status_new);
609 if (ret)
610 return ret;
611
612 ret = spi_nor_wait_till_ready(nor);
613 if (ret)
614 return ret;
615
616 ret = read_sr(nor);
617 if (ret < 0)
618 return ret;
619
620 return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
621 }
622
623 static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
624 uint64_t *len)
625 {
626 struct mtd_info *mtd = &nor->mtd;
627 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
628 int shift = ffs(mask) - 1;
629 int pow;
630
631 if (!(sr & mask)) {
632 /* No protection */
633 *ofs = 0;
634 *len = 0;
635 } else {
636 pow = ((sr & mask) ^ mask) >> shift;
637 *len = mtd->size >> pow;
638 if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
639 *ofs = 0;
640 else
641 *ofs = mtd->size - *len;
642 }
643 }
644
645 /*
646 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
647 * @locked is false); 0 otherwise
648 */
649 static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, u64 len,
650 u8 sr, bool locked)
651 {
652 loff_t lock_offs;
653 uint64_t lock_len;
654
655 if (!len)
656 return 1;
657
658 stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
659
660 if (locked)
661 /* Requested range is a sub-range of locked range */
662 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
663 else
664 /* Requested range does not overlap with locked range */
665 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
666 }
667
668 static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
669 u8 sr)
670 {
671 return stm_check_lock_status_sr(nor, ofs, len, sr, true);
672 }
673
674 static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
675 u8 sr)
676 {
677 return stm_check_lock_status_sr(nor, ofs, len, sr, false);
678 }
679
680 /*
681 * Lock a region of the flash. Compatible with ST Micro and similar flash.
682 * Supports the block protection bits BP{0,1,2} in the status register
683 * (SR). Does not support these features found in newer SR bitfields:
684 * - SEC: sector/block protect - only handle SEC=0 (block protect)
685 * - CMP: complement protect - only support CMP=0 (range is not complemented)
686 *
687 * Support for the following is provided conditionally for some flash:
688 * - TB: top/bottom protect
689 *
690 * Sample table portion for 8MB flash (Winbond w25q64fw):
691 *
692 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
693 * --------------------------------------------------------------------------
694 * X | X | 0 | 0 | 0 | NONE | NONE
695 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
696 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
697 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
698 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
699 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
700 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
701 * X | X | 1 | 1 | 1 | 8 MB | ALL
702 * ------|-------|-------|-------|-------|---------------|-------------------
703 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
704 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
705 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
706 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
707 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
708 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
709 *
710 * Returns negative on errors, 0 on success.
711 */
712 static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
713 {
714 struct mtd_info *mtd = &nor->mtd;
715 int status_old, status_new;
716 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
717 u8 shift = ffs(mask) - 1, pow, val;
718 loff_t lock_len;
719 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
720 bool use_top;
721
722 status_old = read_sr(nor);
723 if (status_old < 0)
724 return status_old;
725
726 /* If nothing in our range is unlocked, we don't need to do anything */
727 if (stm_is_locked_sr(nor, ofs, len, status_old))
728 return 0;
729
730 /* If anything below us is unlocked, we can't use 'bottom' protection */
731 if (!stm_is_locked_sr(nor, 0, ofs, status_old))
732 can_be_bottom = false;
733
734 /* If anything above us is unlocked, we can't use 'top' protection */
735 if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
736 status_old))
737 can_be_top = false;
738
739 if (!can_be_bottom && !can_be_top)
740 return -EINVAL;
741
742 /* Prefer top, if both are valid */
743 use_top = can_be_top;
744
745 /* lock_len: length of region that should end up locked */
746 if (use_top)
747 lock_len = mtd->size - ofs;
748 else
749 lock_len = ofs + len;
750
751 /*
752 * Need smallest pow such that:
753 *
754 * 1 / (2^pow) <= (len / size)
755 *
756 * so (assuming power-of-2 size) we do:
757 *
758 * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
759 */
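/*
 * Worked example (illustrative, matching the table above): locking the
 * upper 1 MB of an 8 MB part gives pow = ilog2(8 MB) - ilog2(1 MB) = 3,
 * so val = mask - (3 << shift), i.e. BP2=1, BP1=0, BP0=0.
 */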
760 pow = ilog2(mtd->size) - ilog2(lock_len);
761 val = mask - (pow << shift);
762 if (val & ~mask)
763 return -EINVAL;
764 /* Don't "lock" with no region! */
765 if (!(val & mask))
766 return -EINVAL;
767
768 status_new = (status_old & ~mask & ~SR_TB) | val;
769
770 /* Disallow further writes if WP pin is asserted */
771 status_new |= SR_SRWD;
772
773 if (!use_top)
774 status_new |= SR_TB;
775
776 /* Don't bother if they're the same */
777 if (status_new == status_old)
778 return 0;
779
780 /* Only modify protection if it will not unlock other areas */
781 if ((status_new & mask) < (status_old & mask))
782 return -EINVAL;
783
784 return write_sr_and_check(nor, status_new, mask);
785 }
786
787 /*
788 * Unlock a region of the flash. See stm_lock() for more info
789 *
790 * Returns negative on errors, 0 on success.
791 */
792 static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
793 {
794 struct mtd_info *mtd = &nor->mtd;
795 int status_old, status_new;
796 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
797 u8 shift = ffs(mask) - 1, pow, val;
798 loff_t lock_len;
799 bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
800 bool use_top;
801
802 status_old = read_sr(nor);
803 if (status_old < 0)
804 return status_old;
805
806 /* If nothing in our range is locked, we don't need to do anything */
807 if (stm_is_unlocked_sr(nor, ofs, len, status_old))
808 return 0;
809
810 /* If anything below us is locked, we can't use 'top' protection */
811 if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
812 can_be_top = false;
813
814 /* If anything above us is locked, we can't use 'bottom' protection */
815 if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
816 status_old))
817 can_be_bottom = false;
818
819 if (!can_be_bottom && !can_be_top)
820 return -EINVAL;
821
822 /* Prefer top, if both are valid */
823 use_top = can_be_top;
824
825 /* lock_len: length of region that should remain locked */
826 if (use_top)
827 lock_len = mtd->size - (ofs + len);
828 else
829 lock_len = ofs;
830
831 /*
832 * Need largest pow such that:
833 *
834 * 1 / (2^pow) >= (len / size)
835 *
836 * so (assuming power-of-2 size) we do:
837 *
838 * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
839 */
840 pow = ilog2(mtd->size) - order_base_2(lock_len);
841 if (lock_len == 0) {
842 val = 0; /* fully unlocked */
843 } else {
844 val = mask - (pow << shift);
845 /* Some power-of-two sizes are not supported */
846 if (val & ~mask)
847 return -EINVAL;
848 }
849
850 status_new = (status_old & ~mask & ~SR_TB) | val;
851
852 /* Don't protect status register if we're fully unlocked */
853 if (lock_len == 0)
854 status_new &= ~SR_SRWD;
855
856 if (!use_top)
857 status_new |= SR_TB;
858
859 /* Don't bother if they're the same */
860 if (status_new == status_old)
861 return 0;
862
863 /* Only modify protection if it will not lock other areas */
864 if ((status_new & mask) > (status_old & mask))
865 return -EINVAL;
866
867 return write_sr_and_check(nor, status_new, mask);
868 }
869
870 /*
871 * Check if a region of the flash is (completely) locked. See stm_lock() for
872 * more info.
873 *
874 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
875 * negative on errors.
876 */
877 static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
878 {
879 int status;
880
881 status = read_sr(nor);
882 if (status < 0)
883 return status;
884
885 return stm_is_locked_sr(nor, ofs, len, status);
886 }
887 #endif /* CONFIG_SPI_FLASH_STMICRO || CONFIG_SPI_FLASH_SST */
888
889 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
890 {
891 int tmp;
892 u8 id[SPI_NOR_MAX_ID_LEN];
893 const struct flash_info *info;
894
895 tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
896 if (tmp < 0) {
897 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
898 return ERR_PTR(tmp);
899 }
900
901 info = spi_nor_ids;
902 for (; info->name; info++) {
903 if (info->id_len) {
904 if (!memcmp(info->id, id, info->id_len))
905 return info;
906 }
907 }
908
909 dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
910 id[0], id[1], id[2]);
911 return ERR_PTR(-ENODEV);
912 }
913
914 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
915 size_t *retlen, u_char *buf)
916 {
917 struct spi_nor *nor = mtd_to_spi_nor(mtd);
918 int ret;
919
920 dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
921
922 while (len) {
923 loff_t addr = from;
924 size_t read_len = len;
925
926 #ifdef CONFIG_SPI_FLASH_BAR
927 u32 remain_len;
928
929 ret = write_bar(nor, addr);
930 if (ret < 0)
931 return log_ret(ret);
932 remain_len = (SZ_16M * (nor->bank_curr + 1)) - addr;
933
934 if (len < remain_len)
935 read_len = len;
936 else
937 read_len = remain_len;
938 #endif
939
940 ret = nor->read(nor, addr, read_len, buf);
941 if (ret == 0) {
942 /* We shouldn't see 0-length reads */
943 ret = -EIO;
944 goto read_err;
945 }
946 if (ret < 0)
947 goto read_err;
948
949 *retlen += ret;
950 buf += ret;
951 from += ret;
952 len -= ret;
953 }
954 ret = 0;
955
956 read_err:
957 #ifdef CONFIG_SPI_FLASH_BAR
958 ret = clean_bar(nor);
959 #endif
960 return ret;
961 }
962
963 #ifdef CONFIG_SPI_FLASH_SST
964 /*
965 * sst26 flash series has its own block protection implementation:
966 * 4x - 8 KByte blocks - read & write protection bits - upper addresses
967 * 1x - 32 KByte blocks - write protection bits
968 * rest - 64 KByte blocks - write protection bits
969 * 1x - 32 KByte blocks - write protection bits
970 * 4x - 8 KByte blocks - read & write protection bits - lower addresses
971 *
972 * We'll support only per 64k lock/unlock so lower and upper 64 KByte region
973 * will be treated as single block.
974 */
975 #define SST26_BPR_8K_NUM 4
976 #define SST26_MAX_BPR_REG_LEN (18 + 1)
977 #define SST26_BOUND_REG_SIZE ((32 + SST26_BPR_8K_NUM * 8) * SZ_1K)
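/*
 * Sizing example (illustrative): for an 8 MiB (64 Mbit) part the BPR needs
 * 2 + 8 MiB / 64 KiB / 8 = 18 bytes (see sst26_lock_ctl() below), which is
 * why SST26_MAX_BPR_REG_LEN is 18 + 1.
 */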
978
979 enum lock_ctl {
980 SST26_CTL_LOCK,
981 SST26_CTL_UNLOCK,
982 SST26_CTL_CHECK
983 };
984
985 static bool sst26_process_bpr(u32 bpr_size, u8 *cmd, u32 bit, enum lock_ctl ctl)
986 {
987 switch (ctl) {
988 case SST26_CTL_LOCK:
989 cmd[bpr_size - (bit / 8) - 1] |= BIT(bit % 8);
990 break;
991 case SST26_CTL_UNLOCK:
992 cmd[bpr_size - (bit / 8) - 1] &= ~BIT(bit % 8);
993 break;
994 case SST26_CTL_CHECK:
995 return !!(cmd[bpr_size - (bit / 8) - 1] & BIT(bit % 8));
996 }
997
998 return false;
999 }
1000
1001 /*
1002 * Lock, unlock or check the lock status of a region of the flash (depending
1003 * on the lock_ctl value)
1004 */
1005 static int sst26_lock_ctl(struct spi_nor *nor, loff_t ofs, uint64_t len, enum lock_ctl ctl)
1006 {
1007 struct mtd_info *mtd = &nor->mtd;
1008 u32 i, bpr_ptr, rptr_64k, lptr_64k, bpr_size;
1009 bool lower_64k = false, upper_64k = false;
1010 u8 bpr_buff[SST26_MAX_BPR_REG_LEN] = {};
1011 int ret;
1012
1013 /* Check length and offset for 64k alignment */
1014 if ((ofs & (SZ_64K - 1)) || (len & (SZ_64K - 1))) {
1015 dev_err(nor->dev, "length or offset is not 64 KiB aligned\n");
1016 return -EINVAL;
1017 }
1018
1019 if (ofs + len > mtd->size) {
1020 dev_err(nor->dev, "range is more than device size: %#llx + %#llx > %#llx\n",
1021 ofs, len, mtd->size);
1022 return -EINVAL;
1023 }
1024
1025 /* SST26 family has only 16 Mbit, 32 Mbit and 64 Mbit ICs */
1026 if (mtd->size != SZ_2M &&
1027 mtd->size != SZ_4M &&
1028 mtd->size != SZ_8M)
1029 return -EINVAL;
1030
1031 bpr_size = 2 + (mtd->size / SZ_64K / 8);
1032
1033 ret = nor->read_reg(nor, SPINOR_OP_READ_BPR, bpr_buff, bpr_size);
1034 if (ret < 0) {
1035 dev_err(nor->dev, "fail to read block-protection register\n");
1036 return ret;
1037 }
1038
1039 rptr_64k = min_t(u32, ofs + len, mtd->size - SST26_BOUND_REG_SIZE);
1040 lptr_64k = max_t(u32, ofs, SST26_BOUND_REG_SIZE);
1041
1042 upper_64k = ((ofs + len) > (mtd->size - SST26_BOUND_REG_SIZE));
1043 lower_64k = (ofs < SST26_BOUND_REG_SIZE);
1044
1045 /* Lower bits in block-protection register are about 64k region */
1046 bpr_ptr = lptr_64k / SZ_64K - 1;
1047
1048 /* Process 64K blocks region */
1049 while (lptr_64k < rptr_64k) {
1050 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1051 return EACCES;
1052
1053 bpr_ptr++;
1054 lptr_64k += SZ_64K;
1055 }
1056
1057 /* 32K and 8K region bits in BPR are after 64k region bits */
1058 bpr_ptr = (mtd->size - 2 * SST26_BOUND_REG_SIZE) / SZ_64K;
1059
1060 /* Process lower 32K block region */
1061 if (lower_64k)
1062 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1063 return EACCES;
1064
1065 bpr_ptr++;
1066
1067 /* Process upper 32K block region */
1068 if (upper_64k)
1069 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1070 return EACCES;
1071
1072 bpr_ptr++;
1073
1074 /* Process lower 8K block regions */
1075 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1076 if (lower_64k)
1077 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1078 return EACCES;
1079
1080 /* In 8K area BPR has both read and write protection bits */
1081 bpr_ptr += 2;
1082 }
1083
1084 /* Process upper 8K block regions */
1085 for (i = 0; i < SST26_BPR_8K_NUM; i++) {
1086 if (upper_64k)
1087 if (sst26_process_bpr(bpr_size, bpr_buff, bpr_ptr, ctl))
1088 return EACCES;
1089
1090 /* In 8K area BPR has both read and write protection bits */
1091 bpr_ptr += 2;
1092 }
1093
1094 /* If we check region status we don't need to write BPR back */
1095 if (ctl == SST26_CTL_CHECK)
1096 return 0;
1097
1098 ret = nor->write_reg(nor, SPINOR_OP_WRITE_BPR, bpr_buff, bpr_size);
1099 if (ret < 0) {
1100 dev_err(nor->dev, "fail to write block-protection register\n");
1101 return ret;
1102 }
1103
1104 return 0;
1105 }
1106
1107 static int sst26_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1108 {
1109 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_UNLOCK);
1110 }
1111
1112 static int sst26_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
1113 {
1114 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_LOCK);
1115 }
1116
1117 /*
1118 * Returns EACCES (positive value) if region is locked, 0 if region is unlocked,
1119 * and negative on errors.
1120 */
1121 static int sst26_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1122 {
1123 /*
1124 * The is_locked function is used to check a region before reading or
1125 * erasing it, so the offset and length might not be 64 KiB aligned;
1126 * round them out to 64 KiB boundaries, since sst26_lock_ctl() works
1127 * only with 64 KiB aligned regions.
1128 */
1129 ofs -= ofs & (SZ_64K - 1);
1130 len = len & (SZ_64K - 1) ? (len & ~(SZ_64K - 1)) + SZ_64K : len;
1131
1132 return sst26_lock_ctl(nor, ofs, len, SST26_CTL_CHECK);
1133 }
1134
1135 static int sst_write_byteprogram(struct spi_nor *nor, loff_t to, size_t len,
1136 size_t *retlen, const u_char *buf)
1137 {
1138 size_t actual;
1139 int ret = 0;
1140
1141 for (actual = 0; actual < len; actual++) {
1142 nor->program_opcode = SPINOR_OP_BP;
1143
1144 write_enable(nor);
1145 /* write one byte. */
1146 ret = nor->write(nor, to, 1, buf + actual);
1147 if (ret < 0)
1148 goto sst_write_err;
1149 ret = spi_nor_wait_till_ready(nor);
1150 if (ret)
1151 goto sst_write_err;
1152 to++;
1153 }
1154
1155 sst_write_err:
1156 write_disable(nor);
1157 return ret;
1158 }
1159
1160 static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
1161 size_t *retlen, const u_char *buf)
1162 {
1163 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1164 struct spi_slave *spi = nor->spi;
1165 size_t actual;
1166 int ret;
1167
1168 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1169 if (spi->mode & SPI_TX_BYTE)
1170 return sst_write_byteprogram(nor, to, len, retlen, buf);
1171
1172 write_enable(nor);
1173
1174 nor->sst_write_second = false;
1175
1176 actual = to % 2;
1177 /* Start write from odd address. */
1178 if (actual) {
1179 nor->program_opcode = SPINOR_OP_BP;
1180
1181 /* write one byte. */
1182 ret = nor->write(nor, to, 1, buf);
1183 if (ret < 0)
1184 goto sst_write_err;
1185 ret = spi_nor_wait_till_ready(nor);
1186 if (ret)
1187 goto sst_write_err;
1188 }
1189 to += actual;
1190
1191 /* Write out most of the data here. */
1192 for (; actual < len - 1; actual += 2) {
1193 nor->program_opcode = SPINOR_OP_AAI_WP;
1194
1195 /* write two bytes. */
1196 ret = nor->write(nor, to, 2, buf + actual);
1197 if (ret < 0)
1198 goto sst_write_err;
1199 ret = spi_nor_wait_till_ready(nor);
1200 if (ret)
1201 goto sst_write_err;
1202 to += 2;
1203 nor->sst_write_second = true;
1204 }
1205 nor->sst_write_second = false;
1206
1207 write_disable(nor);
1208 ret = spi_nor_wait_till_ready(nor);
1209 if (ret)
1210 goto sst_write_err;
1211
1212 /* Write out trailing byte if it exists. */
1213 if (actual != len) {
1214 write_enable(nor);
1215
1216 nor->program_opcode = SPINOR_OP_BP;
1217 ret = nor->write(nor, to, 1, buf + actual);
1218 if (ret < 0)
1219 goto sst_write_err;
1220 ret = spi_nor_wait_till_ready(nor);
1221 if (ret)
1222 goto sst_write_err;
1223 write_disable(nor);
1224 actual += 1;
1225 }
1226 sst_write_err:
1227 *retlen += actual;
1228 return ret;
1229 }
1230 #endif
1231 /*
1232 * Write an address range to the nor chip. Data must be written in
1233 * FLASH_PAGESIZE chunks. The address range may be any size provided
1234 * it is within the physical boundaries.
1235 */
1236 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
1237 size_t *retlen, const u_char *buf)
1238 {
1239 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1240 size_t page_offset, page_remain, i;
1241 ssize_t ret;
1242
1243 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
1244
1245 for (i = 0; i < len; ) {
1246 ssize_t written;
1247 loff_t addr = to + i;
1248
1249 /*
1250 * If page_size is a power of two, the offset can be quickly
1251 * calculated with an AND operation. On the other cases we
1252 * need to do a modulus operation (more expensive).
1253 * Power of two numbers have only one bit set and we can use
1254 * the instruction hweight32 to detect if we need to do a
1255 * modulus (do_div()) or not.
1256 */
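/*
 * For instance (illustrative): with a typical 256-byte page the offset is
 * simply addr & 0xff, whereas a hypothetical non-power-of-two page size
 * (e.g. 528 bytes) takes the do_div() path below.
 */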
1257 if (hweight32(nor->page_size) == 1) {
1258 page_offset = addr & (nor->page_size - 1);
1259 } else {
1260 u64 aux = addr;
1261
1262 page_offset = do_div(aux, nor->page_size);
1263 }
1264 /* the size of data remaining on the first page */
1265 page_remain = min_t(size_t,
1266 nor->page_size - page_offset, len - i);
1267
1268 #ifdef CONFIG_SPI_FLASH_BAR
1269 ret = write_bar(nor, addr);
1270 if (ret < 0)
1271 return ret;
1272 #endif
1273 write_enable(nor);
1274 ret = nor->write(nor, addr, page_remain, buf + i);
1275 if (ret < 0)
1276 goto write_err;
1277 written = ret;
1278
1279 ret = spi_nor_wait_till_ready(nor);
1280 if (ret)
1281 goto write_err;
1282 *retlen += written;
1283 i += written;
1284 }
1285
1286 write_err:
1287 #ifdef CONFIG_SPI_FLASH_BAR
1288 ret = clean_bar(nor);
1289 #endif
1290 return ret;
1291 }
1292
1293 #ifdef CONFIG_SPI_FLASH_MACRONIX
1294 /**
1295 * macronix_quad_enable() - set QE bit in Status Register.
1296 * @nor: pointer to a 'struct spi_nor'
1297 *
1298 * Set the Quad Enable (QE) bit in the Status Register.
1299 *
1300 * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
1301 *
1302 * Return: 0 on success, -errno otherwise.
1303 */
1304 static int macronix_quad_enable(struct spi_nor *nor)
1305 {
1306 int ret, val;
1307
1308 val = read_sr(nor);
1309 if (val < 0)
1310 return val;
1311 if (val & SR_QUAD_EN_MX)
1312 return 0;
1313
1314 write_enable(nor);
1315
1316 write_sr(nor, val | SR_QUAD_EN_MX);
1317
1318 ret = spi_nor_wait_till_ready(nor);
1319 if (ret)
1320 return ret;
1321
1322 ret = read_sr(nor);
1323 if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
1324 dev_err(nor->dev, "Macronix Quad bit not set\n");
1325 return -EINVAL;
1326 }
1327
1328 return 0;
1329 }
1330 #endif
1331
1332 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
1333 /*
1334 * Write the Status Register and Configuration Register with 2 bytes.
1335 * The first byte will be written to the status register, while the
1336 * second byte will be written to the configuration register.
1337 * Return negative if error occurred.
1338 */
1339 static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
1340 {
1341 int ret;
1342
1343 write_enable(nor);
1344
1345 ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
1346 if (ret < 0) {
1347 dev_dbg(nor->dev,
1348 "error while writing configuration register\n");
1349 return -EINVAL;
1350 }
1351
1352 ret = spi_nor_wait_till_ready(nor);
1353 if (ret) {
1354 dev_dbg(nor->dev,
1355 "timeout while writing configuration register\n");
1356 return ret;
1357 }
1358
1359 return 0;
1360 }
1361
1362 /**
1363 * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
1364 * @nor: pointer to a 'struct spi_nor'
1365 *
1366 * Set the Quad Enable (QE) bit in the Configuration Register.
1367 * This function should be used with QSPI memories supporting the Read
1368 * Configuration Register (35h) instruction.
1369 *
1370 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1371 * memories.
1372 *
1373 * Return: 0 on success, -errno otherwise.
1374 */
1375 static int spansion_read_cr_quad_enable(struct spi_nor *nor)
1376 {
1377 u8 sr_cr[2];
1378 int ret;
1379
1380 /* Check current Quad Enable bit value. */
1381 ret = read_cr(nor);
1382 if (ret < 0) {
1383 dev_dbg(dev, "error while reading configuration register\n");
1384 return -EINVAL;
1385 }
1386
1387 if (ret & CR_QUAD_EN_SPAN)
1388 return 0;
1389
1390 sr_cr[1] = ret | CR_QUAD_EN_SPAN;
1391
1392 /* Keep the current value of the Status Register. */
1393 ret = read_sr(nor);
1394 if (ret < 0) {
1395 dev_dbg(nor->dev, "error while reading status register\n");
1396 return -EINVAL;
1397 }
1398 sr_cr[0] = ret;
1399
1400 ret = write_sr_cr(nor, sr_cr);
1401 if (ret)
1402 return ret;
1403
1404 /* Read back and check it. */
1405 ret = read_cr(nor);
1406 if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
1407 dev_dbg(nor->dev, "Spansion Quad bit not set\n");
1408 return -EINVAL;
1409 }
1410
1411 return 0;
1412 }
1413
1414 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
1415 /**
1416 * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
1417 * @nor: pointer to a 'struct spi_nor'
1418 *
1419 * Set the Quad Enable (QE) bit in the Configuration Register.
1420 * This function should be used with QSPI memories not supporting the Read
1421 * Configuration Register (35h) instruction.
1422 *
1423 * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
1424 * memories.
1425 *
1426 * Return: 0 on success, -errno otherwise.
1427 */
1428 static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
1429 {
1430 u8 sr_cr[2];
1431 int ret;
1432
1433 /* Keep the current value of the Status Register. */
1434 ret = read_sr(nor);
1435 if (ret < 0) {
1436 dev_dbg(nor->dev, "error while reading status register\n");
1437 return -EINVAL;
1438 }
1439 sr_cr[0] = ret;
1440 sr_cr[1] = CR_QUAD_EN_SPAN;
1441
1442 return write_sr_cr(nor, sr_cr);
1443 }
1444
1445 #endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
1446 #endif /* CONFIG_SPI_FLASH_SPANSION || CONFIG_SPI_FLASH_WINBOND */
1447
1448 #ifdef CONFIG_SPI_FLASH_NORMEM
1449 /**
1450 * normem_quad_enable() - set QE bit in the Configuration Register.
1451 * @nor: pointer to a 'struct spi_nor'
1452 *
1453 * Set the Quad Enable (QE) bit in the Configuration Register.
1454 *
1455 * SR_QUAD_EN_NORMEM in the Configuration Register is the QE bit for these QSPI memories.
1456 *
1457 * Return: 0 on success, -errno otherwise.
1458 */
1459 static int normem_quad_enable(struct spi_nor *nor)
1460 {
1461 int ret, val;
1462
1463 val = read_cr(nor);
1464 if (val < 0)
1465 return val;
1466 if (val & SR_QUAD_EN_NORMEM)
1467 return 0;
1468
1469 write_enable(nor);
1470
1471 write_cr(nor, val | SR_QUAD_EN_NORMEM);
1472
1473 ret = spi_nor_wait_till_ready(nor);
1474 if (ret)
1475 return ret;
1476
1477 ret = read_cr(nor);
1478 if (!(ret > 0 && (ret & SR_QUAD_EN_NORMEM))) {
1479 dev_err(nor->dev, "NORMEM Quad bit not set\n");
1480 return -EINVAL;
1481 }
1482
1483 return 0;
1484 }
1485 #endif
1486
1487 struct spi_nor_read_command {
1488 u8 num_mode_clocks;
1489 u8 num_wait_states;
1490 u8 opcode;
1491 enum spi_nor_protocol proto;
1492 };
1493
1494 struct spi_nor_pp_command {
1495 u8 opcode;
1496 enum spi_nor_protocol proto;
1497 };
1498
1499 enum spi_nor_read_command_index {
1500 SNOR_CMD_READ,
1501 SNOR_CMD_READ_FAST,
1502 SNOR_CMD_READ_1_1_1_DTR,
1503
1504 /* Dual SPI */
1505 SNOR_CMD_READ_1_1_2,
1506 SNOR_CMD_READ_1_2_2,
1507 SNOR_CMD_READ_2_2_2,
1508 SNOR_CMD_READ_1_2_2_DTR,
1509
1510 /* Quad SPI */
1511 SNOR_CMD_READ_1_1_4,
1512 SNOR_CMD_READ_1_4_4,
1513 SNOR_CMD_READ_4_4_4,
1514 SNOR_CMD_READ_1_4_4_DTR,
1515
1516 /* Octo SPI */
1517 SNOR_CMD_READ_1_1_8,
1518 SNOR_CMD_READ_1_8_8,
1519 SNOR_CMD_READ_8_8_8,
1520 SNOR_CMD_READ_1_8_8_DTR,
1521
1522 SNOR_CMD_READ_MAX
1523 };
1524
1525 enum spi_nor_pp_command_index {
1526 SNOR_CMD_PP,
1527
1528 /* Quad SPI */
1529 SNOR_CMD_PP_1_1_4,
1530 SNOR_CMD_PP_1_4_4,
1531 SNOR_CMD_PP_4_4_4,
1532
1533 /* Octo SPI */
1534 SNOR_CMD_PP_1_1_8,
1535 SNOR_CMD_PP_1_8_8,
1536 SNOR_CMD_PP_8_8_8,
1537
1538 SNOR_CMD_PP_MAX
1539 };
1540
1541 struct spi_nor_flash_parameter {
1542 u64 size;
1543 u32 page_size;
1544
1545 struct spi_nor_hwcaps hwcaps;
1546 struct spi_nor_read_command reads[SNOR_CMD_READ_MAX];
1547 struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX];
1548
1549 int (*quad_enable)(struct spi_nor *nor);
1550 };
1551
1552 static void
1553 spi_nor_set_read_settings(struct spi_nor_read_command *read,
1554 u8 num_mode_clocks,
1555 u8 num_wait_states,
1556 u8 opcode,
1557 enum spi_nor_protocol proto)
1558 {
1559 read->num_mode_clocks = num_mode_clocks;
1560 read->num_wait_states = num_wait_states;
1561 read->opcode = opcode;
1562 read->proto = proto;
1563 }
1564
1565 static void
1566 spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
1567 u8 opcode,
1568 enum spi_nor_protocol proto)
1569 {
1570 pp->opcode = opcode;
1571 pp->proto = proto;
1572 }
1573
1574 #if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
1575 /*
1576 * Serial Flash Discoverable Parameters (SFDP) parsing.
1577 */
1578
1579 /**
1580 * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
1581 * @nor: pointer to a 'struct spi_nor'
1582 * @addr: offset in the SFDP area to start reading data from
1583 * @len: number of bytes to read
1584 * @buf: buffer where the SFDP data are copied into (dma-safe memory)
1585 *
1586 * Whatever the actual numbers of bytes for address and dummy cycles are
1587 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
1588 * followed by a 3-byte address and 8 dummy clock cycles.
1589 *
1590 * Return: 0 on success, -errno otherwise.
1591 */
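/*
 * Illustrative transfer: reading 4 bytes at SFDP offset 0 is issued as the
 * 5Ah opcode, a 3-byte address of 000000h and 8 dummy clocks, and returns
 * the "SFDP" signature bytes (see SFDP_SIGNATURE below).
 */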
1592 static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
1593 size_t len, void *buf)
1594 {
1595 u8 addr_width, read_opcode, read_dummy;
1596 int ret;
1597
1598 read_opcode = nor->read_opcode;
1599 addr_width = nor->addr_width;
1600 read_dummy = nor->read_dummy;
1601
1602 nor->read_opcode = SPINOR_OP_RDSFDP;
1603 nor->addr_width = 3;
1604 nor->read_dummy = 8;
1605
1606 while (len) {
1607 ret = nor->read(nor, addr, len, (u8 *)buf);
1608 if (!ret || ret > len) {
1609 ret = -EIO;
1610 goto read_err;
1611 }
1612 if (ret < 0)
1613 goto read_err;
1614
1615 buf += ret;
1616 addr += ret;
1617 len -= ret;
1618 }
1619 ret = 0;
1620
1621 read_err:
1622 nor->read_opcode = read_opcode;
1623 nor->addr_width = addr_width;
1624 nor->read_dummy = read_dummy;
1625
1626 return ret;
1627 }
1628
1629 struct sfdp_parameter_header {
1630 u8 id_lsb;
1631 u8 minor;
1632 u8 major;
1633 u8 length; /* in double words */
1634 u8 parameter_table_pointer[3]; /* byte address */
1635 u8 id_msb;
1636 };
1637
1638 #define SFDP_PARAM_HEADER_ID(p) (((p)->id_msb << 8) | (p)->id_lsb)
1639 #define SFDP_PARAM_HEADER_PTP(p) \
1640 (((p)->parameter_table_pointer[2] << 16) | \
1641 ((p)->parameter_table_pointer[1] << 8) | \
1642 ((p)->parameter_table_pointer[0] << 0))
1643
1644 #define SFDP_BFPT_ID 0xff00 /* Basic Flash Parameter Table */
1645 #define SFDP_SECTOR_MAP_ID 0xff81 /* Sector Map Table */
1646
1647 #define SFDP_SIGNATURE 0x50444653U
1648 #define SFDP_JESD216_MAJOR 1
1649 #define SFDP_JESD216_MINOR 0
1650 #define SFDP_JESD216A_MINOR 5
1651 #define SFDP_JESD216B_MINOR 6
1652
1653 struct sfdp_header {
1654 u32 signature; /* 0x50444653U <=> "SFDP" */
1655 u8 minor;
1656 u8 major;
1657 u8 nph; /* 0-based number of parameter headers */
1658 u8 unused;
1659
1660 /* Basic Flash Parameter Table. */
1661 struct sfdp_parameter_header bfpt_header;
1662 };
1663
1664 /* Basic Flash Parameter Table */
1665
1666 /*
1667 * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
1668 * They are indexed from 1 but C arrays are indexed from 0.
1669 */
1670 #define BFPT_DWORD(i) ((i) - 1)
1671 #define BFPT_DWORD_MAX 16
1672
1673 /* The first version of JESD216 defined only 9 DWORDs. */
1674 #define BFPT_DWORD_MAX_JESD216 9
1675
1676 /* 1st DWORD. */
1677 #define BFPT_DWORD1_FAST_READ_1_1_2 BIT(16)
1678 #define BFPT_DWORD1_ADDRESS_BYTES_MASK GENMASK(18, 17)
1679 #define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY (0x0UL << 17)
1680 #define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4 (0x1UL << 17)
1681 #define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY (0x2UL << 17)
1682 #define BFPT_DWORD1_DTR BIT(19)
1683 #define BFPT_DWORD1_FAST_READ_1_2_2 BIT(20)
1684 #define BFPT_DWORD1_FAST_READ_1_4_4 BIT(21)
1685 #define BFPT_DWORD1_FAST_READ_1_1_4 BIT(22)
1686
1687 /* 5th DWORD. */
1688 #define BFPT_DWORD5_FAST_READ_2_2_2 BIT(0)
1689 #define BFPT_DWORD5_FAST_READ_4_4_4 BIT(4)
1690
1691 /* 11th DWORD. */
1692 #define BFPT_DWORD11_PAGE_SIZE_SHIFT 4
1693 #define BFPT_DWORD11_PAGE_SIZE_MASK GENMASK(7, 4)
1694
1695 /* 15th DWORD. */
1696
1697 /*
1698 * (from JESD216 rev B)
1699 * Quad Enable Requirements (QER):
1700 * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
1701 * reads based on instruction. DQ3/HOLD# functions as HOLD# during the
1702 * instruction phase.
1703 * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
1704 * two data bytes where bit 1 of the second byte is one.
1705 * [...]
1706 * Writing only one byte to the status register has the side-effect of
1707 * clearing status register 2, including the QE bit. The 100b code is
1708 * used if writing one byte to the status register does not modify
1709 * status register 2.
1710 * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
1711 * one data byte where bit 6 is one.
1712 * [...]
1713 * - 011b: QE is bit 7 of status register 2. It is set via Write status
1714 * register 2 instruction 3Eh with one data byte where bit 7 is one.
1715 * [...]
1716 * The status register 2 is read using instruction 3Fh.
1717 * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
1718 * two data bytes where bit 1 of the second byte is one.
1719 * [...]
1720 * In contrast to the 001b code, writing one byte to the status
1721 * register does not modify status register 2.
1722 * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
1723 * Read Status instruction 05h. Status register 2 is read using
1724 * instruction 35h. QE is set via Write Status instruction 01h with
1725 * two data bytes where bit 1 of the second byte is one.
1726 * [...]
1727 */
1728 #define BFPT_DWORD15_QER_MASK GENMASK(22, 20)
1729 #define BFPT_DWORD15_QER_NONE (0x0UL << 20) /* Micron */
1730 #define BFPT_DWORD15_QER_SR2_BIT1_BUGGY (0x1UL << 20)
1731 #define BFPT_DWORD15_QER_SR1_BIT6 (0x2UL << 20) /* Macronix */
1732 #define BFPT_DWORD15_QER_SR2_BIT7 (0x3UL << 20)
1733 #define BFPT_DWORD15_QER_SR2_BIT1_NO_RD (0x4UL << 20)
1734 #define BFPT_DWORD15_QER_SR2_BIT1 (0x5UL << 20) /* Spansion */
1735
1736 struct sfdp_bfpt {
1737 u32 dwords[BFPT_DWORD_MAX];
1738 };
1739
1740 /* Fast Read settings. */
1741
1742 static void
1743 spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
1744 u16 half,
1745 enum spi_nor_protocol proto)
1746 {
1747 read->num_mode_clocks = (half >> 5) & 0x07;
1748 read->num_wait_states = (half >> 0) & 0x1f;
1749 read->opcode = (half >> 8) & 0xff;
1750 read->proto = proto;
1751 }
1752
1753 struct sfdp_bfpt_read {
1754 /* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
1755 u32 hwcaps;
1756
1757 /*
1758 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
1759 * whether the Fast Read x-y-z command is supported.
1760 */
1761 u32 supported_dword;
1762 u32 supported_bit;
1763
1764 /*
1765 * The half-word at offset <settings_shift> in <settings_dword> BFPT DWORD
1766 * encodes the op code, the number of mode clocks and the number of wait
1767 * states to be used by Fast Read x-y-z command.
1768 */
1769 u32 settings_dword;
1770 u32 settings_shift;
1771
1772 /* The SPI protocol for this Fast Read x-y-z command. */
1773 enum spi_nor_protocol proto;
1774 };
1775
1776 static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
1777 /* Fast Read 1-1-2 */
1778 {
1779 SNOR_HWCAPS_READ_1_1_2,
1780 BFPT_DWORD(1), BIT(16), /* Supported bit */
1781 BFPT_DWORD(4), 0, /* Settings */
1782 SNOR_PROTO_1_1_2,
1783 },
1784
1785 /* Fast Read 1-2-2 */
1786 {
1787 SNOR_HWCAPS_READ_1_2_2,
1788 BFPT_DWORD(1), BIT(20), /* Supported bit */
1789 BFPT_DWORD(4), 16, /* Settings */
1790 SNOR_PROTO_1_2_2,
1791 },
1792
1793 /* Fast Read 2-2-2 */
1794 {
1795 SNOR_HWCAPS_READ_2_2_2,
1796 BFPT_DWORD(5), BIT(0), /* Supported bit */
1797 BFPT_DWORD(6), 16, /* Settings */
1798 SNOR_PROTO_2_2_2,
1799 },
1800
1801 /* Fast Read 1-1-4 */
1802 {
1803 SNOR_HWCAPS_READ_1_1_4,
1804 BFPT_DWORD(1), BIT(22), /* Supported bit */
1805 BFPT_DWORD(3), 16, /* Settings */
1806 SNOR_PROTO_1_1_4,
1807 },
1808
1809 /* Fast Read 1-4-4 */
1810 {
1811 SNOR_HWCAPS_READ_1_4_4,
1812 BFPT_DWORD(1), BIT(21), /* Supported bit */
1813 BFPT_DWORD(3), 0, /* Settings */
1814 SNOR_PROTO_1_4_4,
1815 },
1816
1817 /* Fast Read 4-4-4 */
1818 {
1819 SNOR_HWCAPS_READ_4_4_4,
1820 BFPT_DWORD(5), BIT(4), /* Supported bit */
1821 BFPT_DWORD(7), 16, /* Settings */
1822 SNOR_PROTO_4_4_4,
1823 },
1824 };
1825
1826 struct sfdp_bfpt_erase {
1827 /*
1828 * The half-word at offset <shift> in DWORD <dword> encodes the
1829 * op code and erase sector size to be used by Sector Erase commands.
1830 */
1831 u32 dword;
1832 u32 shift;
1833 };
1834
1835 static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
1836 /* Erase Type 1 in DWORD8 bits[15:0] */
1837 {BFPT_DWORD(8), 0},
1838
1839 /* Erase Type 2 in DWORD8 bits[31:16] */
1840 {BFPT_DWORD(8), 16},
1841
1842 /* Erase Type 3 in DWORD9 bits[15:0] */
1843 {BFPT_DWORD(9), 0},
1844
1845 /* Erase Type 4 in DWORD9 bits[31:16] */
1846 {BFPT_DWORD(9), 16},
1847 };
1848
1849 static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
1850
1851 /**
1852 * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
1853 * @nor: pointer to a 'struct spi_nor'
1854 * @bfpt_header: pointer to the 'struct sfdp_parameter_header' describing
1855 * the Basic Flash Parameter Table length and version
1856 * @params: pointer to the 'struct spi_nor_flash_parameter' to be
1857 * filled
1858 *
1859 * The Basic Flash Parameter Table is the main and only mandatory table as
1860 * defined by the SFDP (JESD216) specification.
1861 * It provides us with the total size (memory density) of the data array and
1862 * the number of address bytes for Fast Read, Page Program and Sector Erase
1863 * commands.
1864 * For Fast READ commands, it also gives the number of mode clock cycles and
1865 * wait states (regrouped in the number of dummy clock cycles) for each
1866 * supported instruction op code.
1867 * For Page Program, the page size is now available since JESD216 rev A, however
1868 * the supported instruction op codes are still not provided.
1869 * For Sector Erase commands, this table stores the supported instruction op
1870 * codes and the associated sector sizes.
1871 * Finally, the Quad Enable Requirements (QER) are also available since JESD216
1872 * rev A. The QER bits encode the manufacturer dependent procedure to be
1873 * executed to set the Quad Enable (QE) bit in some internal register of the
1874 * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
1875 * sending any Quad SPI command to the memory. Actually, setting the QE bit
1876 * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
1877 * and IO3 hence enabling 4 (Quad) I/O lines.
1878 *
1879 * Return: 0 on success, -errno otherwise.
1880 */
1881 static int spi_nor_parse_bfpt(struct spi_nor *nor,
1882 const struct sfdp_parameter_header *bfpt_header,
1883 struct spi_nor_flash_parameter *params)
1884 {
1885 struct mtd_info *mtd = &nor->mtd;
1886 struct sfdp_bfpt bfpt;
1887 size_t len;
1888 int i, cmd, err;
1889 u32 addr;
1890 u16 half;
1891
1892 /* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
1893 if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
1894 return -EINVAL;
1895
1896 /* Read the Basic Flash Parameter Table. */
1897 len = min_t(size_t, sizeof(bfpt),
1898 bfpt_header->length * sizeof(u32));
1899 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
1900 memset(&bfpt, 0, sizeof(bfpt));
1901 err = spi_nor_read_sfdp(nor, addr, len, &bfpt);
1902 if (err < 0)
1903 return err;
1904
1905 /* Fix endianness of the BFPT DWORDs. */
1906 for (i = 0; i < BFPT_DWORD_MAX; i++)
1907 bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
1908
1909 /* Number of address bytes. */
1910 switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
1911 case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
1912 nor->addr_width = 3;
1913 break;
1914
1915 case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
1916 nor->addr_width = 4;
1917 break;
1918
1919 default:
1920 break;
1921 }
1922
1923 /* Flash Memory Density (in bits). */
1924 params->size = bfpt.dwords[BFPT_DWORD(2)];
1925 if (params->size & BIT(31)) {
1926 params->size &= ~BIT(31);
1927
1928 /*
1929 * Prevent overflows on params->size. Anyway, a NOR of 2^64
1930 * bits is unlikely to exist so this error probably means
1931 * the BFPT we are reading is corrupted/wrong.
1932 */
1933 if (params->size > 63)
1934 return -EINVAL;
1935
1936 params->size = 1ULL << params->size;
1937 } else {
1938 params->size++;
1939 }
1940 params->size >>= 3; /* Convert to bytes. */
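/*
 * Worked examples for the density decode above (illustrative only):
 * - a 128 Mbit part typically reports DWORD2 = 0x07ffffff, so
 *   size = (0x07ffffff + 1) >> 3 = 16 MiB;
 * - densities of 4 Gbit and above set BIT(31) and store log2 of the bit
 *   count instead, e.g. DWORD2 = 0x80000021 gives
 *   size = (1ULL << 0x21) >> 3 = 1 GiB.
 */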
1941
1942 /* Fast Read settings. */
1943 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
1944 const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
1945 struct spi_nor_read_command *read;
1946
1947 if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
1948 params->hwcaps.mask &= ~rd->hwcaps;
1949 continue;
1950 }
1951
1952 params->hwcaps.mask |= rd->hwcaps;
1953 cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
1954 read = &params->reads[cmd];
1955 half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
1956 spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
1957 }
1958
1959 /* Sector Erase settings. */
1960 for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
1961 const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
1962 u32 erasesize;
1963 u8 opcode;
1964
1965 half = bfpt.dwords[er->dword] >> er->shift;
1966 erasesize = half & 0xff;
1967
1968 /* erasesize == 0 means this Erase Type is not supported. */
1969 if (!erasesize)
1970 continue;
1971
1972 erasesize = 1U << erasesize;
1973 opcode = (half >> 8) & 0xff;
1974 #ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
1975 if (erasesize == SZ_4K) {
1976 nor->erase_opcode = opcode;
1977 mtd->erasesize = erasesize;
1978 break;
1979 }
1980 #endif
1981 if (!mtd->erasesize || mtd->erasesize < erasesize) {
1982 nor->erase_opcode = opcode;
1983 mtd->erasesize = erasesize;
1984 }
1985 }
1986
1987 /* Stop here if not JESD216 rev A or later. */
1988 if (bfpt_header->length < BFPT_DWORD_MAX)
1989 return 0;
1990
1991 /* Page size: this field specifies 'N' so the page size = 2^N bytes. */
1992 params->page_size = bfpt.dwords[BFPT_DWORD(11)];
1993 params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
1994 params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
1995 params->page_size = 1U << params->page_size;
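/*
 * Example: the field stores N with page size = 2^N bytes, so a raw
 * value of 8 decodes to the common 256-byte page (illustrative).
 */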
1996
1997 /* Quad Enable Requirements. */
1998 switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
1999 case BFPT_DWORD15_QER_NONE:
2000 params->quad_enable = NULL;
2001 break;
2002 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
2003 case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
2004 case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
2005 params->quad_enable = spansion_no_read_cr_quad_enable;
2006 break;
2007 #endif
2008 #ifdef CONFIG_SPI_FLASH_MACRONIX
2009 case BFPT_DWORD15_QER_SR1_BIT6:
2010 params->quad_enable = macronix_quad_enable;
2011 break;
2012 #endif
2013 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
2014 case BFPT_DWORD15_QER_SR2_BIT1:
2015 params->quad_enable = spansion_read_cr_quad_enable;
2016 break;
2017 #endif
2018 default:
2019 return -EINVAL;
2020 }
2021
2022 return 0;
2023 }
2024
2025 /**
2026 * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
2027 * @nor: pointer to a 'struct spi_nor'
2028 * @params: pointer to the 'struct spi_nor_flash_parameter' to be
2029 * filled
2030 *
2031 * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
2032 * specification. This is a standard which tends to be supported by almost all
2033 * (Q)SPI memory manufacturers. These parameter tables allow us to learn at
2034 * runtime the main parameters needed to perform basic SPI flash operations such
2035 * as Fast Read, Page Program or Sector Erase commands.
2036 *
2037 * Return: 0 on success, -errno otherwise.
2038 */
2039 static int spi_nor_parse_sfdp(struct spi_nor *nor,
2040 struct spi_nor_flash_parameter *params)
2041 {
2042 const struct sfdp_parameter_header *param_header, *bfpt_header;
2043 struct sfdp_parameter_header *param_headers = NULL;
2044 struct sfdp_header header;
2045 size_t psize;
2046 int i, err;
2047
2048 /* Get the SFDP header. */
2049 err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
2050 if (err < 0)
2051 return err;
2052
2053 /* Check the SFDP header version. */
2054 if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
2055 header.major != SFDP_JESD216_MAJOR)
2056 return -EINVAL;
2057
2058 /*
2059 * Verify that the first and only mandatory parameter header is a
2060 * Basic Flash Parameter Table header as specified in JESD216.
2061 */
2062 bfpt_header = &header.bfpt_header;
2063 if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
2064 bfpt_header->major != SFDP_JESD216_MAJOR)
2065 return -EINVAL;
2066
2067 /*
2068 * Allocate memory then read all parameter headers with a single
2069 * Read SFDP command. These parameter headers will actually be parsed
2070 * twice: a first time to get the latest revision of the basic flash
2071 * parameter table, then a second time to handle the supported optional
2072 * tables.
2073 * Hence we read all the parameter headers in a single pass to reduce the
2074 * processing time. Also we use kmalloc() instead of devm_kmalloc()
2075 * because we don't need to keep these parameter headers: the allocated
2076 * memory is always released with kfree() before exiting this function.
2077 */
2078 if (header.nph) {
2079 psize = header.nph * sizeof(*param_headers);
2080
2081 param_headers = kmalloc(psize, GFP_KERNEL);
2082 if (!param_headers)
2083 return -ENOMEM;
2084
2085 err = spi_nor_read_sfdp(nor, sizeof(header),
2086 psize, param_headers);
2087 if (err < 0) {
2088 dev_err(dev, "failed to read SFDP parameter headers\n");
2089 goto exit;
2090 }
2091 }
2092
2093 /*
2094 * Check other parameter headers to get the latest revision of
2095 * the basic flash parameter table.
2096 */
2097 for (i = 0; i < header.nph; i++) {
2098 param_header = &param_headers[i];
2099
2100 if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
2101 param_header->major == SFDP_JESD216_MAJOR &&
2102 (param_header->minor > bfpt_header->minor ||
2103 (param_header->minor == bfpt_header->minor &&
2104 param_header->length > bfpt_header->length)))
2105 bfpt_header = param_header;
2106 }
2107
2108 err = spi_nor_parse_bfpt(nor, bfpt_header, params);
2109 if (err)
2110 goto exit;
2111
2112 /* Parse other parameter headers. */
2113 for (i = 0; i < header.nph; i++) {
2114 param_header = &param_headers[i];
2115
2116 switch (SFDP_PARAM_HEADER_ID(param_header)) {
2117 case SFDP_SECTOR_MAP_ID:
2118 dev_info(dev, "non-uniform erase sector maps are not supported yet.\n");
2119 break;
2120
2121 default:
2122 break;
2123 }
2124
2125 if (err)
2126 goto exit;
2127 }
2128
2129 exit:
2130 kfree(param_headers);
2131 return err;
2132 }
2133 #else
2134 static int spi_nor_parse_sfdp(struct spi_nor *nor,
2135 struct spi_nor_flash_parameter *params)
2136 {
2137 return -EINVAL;
2138 }
2139 #endif /* SPI_FLASH_SFDP_SUPPORT */
2140
2141 static int spi_nor_init_params(struct spi_nor *nor,
2142 const struct flash_info *info,
2143 struct spi_nor_flash_parameter *params)
2144 {
2145 /* Set legacy flash parameters as default. */
2146 memset(params, 0, sizeof(*params));
2147
2148 /* Set SPI NOR sizes. */
2149 params->size = info->sector_size * info->n_sectors;
2150 params->page_size = info->page_size;
2151
2152 /* (Fast) Read settings. */
2153 params->hwcaps.mask |= SNOR_HWCAPS_READ;
2154 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
2155 0, 0, SPINOR_OP_READ,
2156 SNOR_PROTO_1_1_1);
2157
2158 if (!(info->flags & SPI_NOR_NO_FR)) {
2159 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2160 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
2161 0, 8, SPINOR_OP_READ_FAST,
2162 SNOR_PROTO_1_1_1);
2163 }
2164
2165 if (info->flags & SPI_NOR_DUAL_READ) {
2166 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2167 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
2168 0, 8, SPINOR_OP_READ_1_1_2,
2169 SNOR_PROTO_1_1_2);
2170 }
2171
2172 if (info->flags & SPI_NOR_QUAD_READ) {
2173 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2174 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
2175 0, 8, SPINOR_OP_READ_1_1_4,
2176 SNOR_PROTO_1_1_4);
2177 }
2178
2179 if (info->flags & SPI_NOR_OCTAL_READ) {
2180 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2181 spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_8],
2182 0, 8, SPINOR_OP_READ_1_1_8,
2183 SNOR_PROTO_1_1_8);
2184 }
2185
2186 /* Page Program settings. */
2187 params->hwcaps.mask |= SNOR_HWCAPS_PP;
2188 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
2189 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
2190
2191 if (info->flags & SPI_NOR_QUAD_READ) {
2192 params->hwcaps.mask |= SNOR_HWCAPS_PP_1_1_4;
2193 spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP_1_1_4],
2194 SPINOR_OP_PP_1_1_4, SNOR_PROTO_1_1_4);
2195 }
2196
2197 /* Select the procedure to set the Quad Enable bit. */
2198 if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
2199 SNOR_HWCAPS_PP_QUAD)) {
2200 switch (JEDEC_MFR(info)) {
2201 #ifdef CONFIG_SPI_FLASH_MACRONIX
2202 case SNOR_MFR_MACRONIX:
2203 params->quad_enable = macronix_quad_enable;
2204 break;
2205 #endif
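		/*
		 * Note (added for clarity): ST/Micron parts do not use a Quad
		 * Enable bit in the Status Register, so quad_enable is
		 * deliberately left unset for them below.
		 */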
2206 case SNOR_MFR_ST:
2207 case SNOR_MFR_MICRON:
2208 break;
2209 #ifdef CONFIG_SPI_FLASH_NORMEM
2210 case SNOR_MFR_NORMEM:
2211 params->quad_enable = normem_quad_enable;
2212 break;
2213 #endif
2214 default:
2215 #if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
2216 /* Kept only for backward compatibility purpose. */
2217 params->quad_enable = spansion_read_cr_quad_enable;
2218 #endif
2219 break;
2220 }
2221 }
2222
2223 /* Override the parameters with data read from SFDP tables. */
2224 nor->addr_width = 0;
2225 nor->mtd.erasesize = 0;
2226 if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
2227 !(info->flags & SPI_NOR_SKIP_SFDP)) {
2228 struct spi_nor_flash_parameter sfdp_params;
2229
2230 memcpy(&sfdp_params, params, sizeof(sfdp_params));
2231 if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
2232 nor->addr_width = 0;
2233 nor->mtd.erasesize = 0;
2234 } else {
2235 memcpy(params, &sfdp_params, sizeof(*params));
2236 }
2237 }
2238
2239 return 0;
2240 }
2241
2242 static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
2243 {
2244 size_t i;
2245
2246 for (i = 0; i < size; i++)
2247 if (table[i][0] == (int)hwcaps)
2248 return table[i][1];
2249
2250 return -EINVAL;
2251 }
2252
2253 static int spi_nor_hwcaps_read2cmd(u32 hwcaps)
2254 {
2255 static const int hwcaps_read2cmd[][2] = {
2256 { SNOR_HWCAPS_READ, SNOR_CMD_READ },
2257 { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST },
2258 { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR },
2259 { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 },
2260 { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 },
2261 { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 },
2262 { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR },
2263 { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 },
2264 { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 },
2265 { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 },
2266 { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR },
2267 { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 },
2268 { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 },
2269 { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 },
2270 { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR },
2271 };
2272
2273 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
2274 ARRAY_SIZE(hwcaps_read2cmd));
2275 }
2276
2277 static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
2278 {
2279 static const int hwcaps_pp2cmd[][2] = {
2280 { SNOR_HWCAPS_PP, SNOR_CMD_PP },
2281 { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 },
2282 { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 },
2283 { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 },
2284 { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 },
2285 { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 },
2286 { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 },
2287 };
2288
2289 return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
2290 ARRAY_SIZE(hwcaps_pp2cmd));
2291 }
2292
2293 static int spi_nor_select_read(struct spi_nor *nor,
2294 const struct spi_nor_flash_parameter *params,
2295 u32 shared_hwcaps)
2296 {
2297 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
2298 const struct spi_nor_read_command *read;
2299
2300 if (best_match < 0)
2301 return -EINVAL;
2302
2303 cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
2304 if (cmd < 0)
2305 return -EINVAL;
2306
2307 read = &params->reads[cmd];
2308 nor->read_opcode = read->opcode;
2309 nor->read_proto = read->proto;
2310
2311 /*
2312 * In the spi-nor framework, we don't need to distinguish
2313 * between mode clock cycles and wait state clock cycles.
2314 * Indeed, the value of the mode clock cycles is used by a QSPI
2315 * flash memory to know whether it should enter or leave its 0-4-4
2316 * (Continuous Read / XIP) mode.
2317 * eXecution In Place is out of the scope of the mtd sub-system.
2318 * Hence we choose to merge both mode and wait state clock cycles
2319 * into the so-called dummy clock cycles.
2320 */
2321 nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
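	/*
	 * Hypothetical example: a Fast Read 1-1-4 entry described with 0 mode
	 * clocks and 8 wait states yields read_dummy = 8, i.e. eight dummy
	 * clock cycles are sent between the address and the first data byte.
	 */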
2322 return 0;
2323 }
2324
2325 static int spi_nor_select_pp(struct spi_nor *nor,
2326 const struct spi_nor_flash_parameter *params,
2327 u32 shared_hwcaps)
2328 {
2329 int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
2330 const struct spi_nor_pp_command *pp;
2331
2332 if (best_match < 0)
2333 return -EINVAL;
2334
2335 cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
2336 if (cmd < 0)
2337 return -EINVAL;
2338
2339 pp = &params->page_programs[cmd];
2340 nor->program_opcode = pp->opcode;
2341 nor->write_proto = pp->proto;
2342 return 0;
2343 }
2344
2345 static int spi_nor_select_erase(struct spi_nor *nor,
2346 const struct flash_info *info)
2347 {
2348 struct mtd_info *mtd = &nor->mtd;
2349
2350 /* Do nothing if already configured from SFDP. */
2351 if (mtd->erasesize)
2352 return 0;
2353
2354 #ifdef CONFIG_SPI_FLASH_USE_4K_SECTORS
2355 /* prefer "small sector" erase if possible */
2356 if (info->flags & SECT_4K) {
2357 nor->erase_opcode = SPINOR_OP_BE_4K;
2358 mtd->erasesize = 4096;
2359 } else if (info->flags & SECT_4K_PMC) {
2360 nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
2361 mtd->erasesize = 4096;
2362 } else
2363 #endif
2364 {
2365 nor->erase_opcode = SPINOR_OP_SE;
2366 mtd->erasesize = info->sector_size;
2367 }
2368 return 0;
2369 }
2370
2371 static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
2372 const struct spi_nor_flash_parameter *params,
2373 const struct spi_nor_hwcaps *hwcaps)
2374 {
2375 u32 ignored_mask, shared_mask;
2376 bool enable_quad_io;
2377 int err;
2378
2379 /*
2380 * Keep only the hardware capabilities supported by both the SPI
2381 * controller and the SPI flash memory.
2382 */
2383 shared_mask = hwcaps->mask & params->hwcaps.mask;
2384
2385 /* SPI n-n-n protocols are not supported yet. */
2386 ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
2387 SNOR_HWCAPS_READ_4_4_4 |
2388 SNOR_HWCAPS_READ_8_8_8 |
2389 SNOR_HWCAPS_PP_4_4_4 |
2390 SNOR_HWCAPS_PP_8_8_8);
2391 if (shared_mask & ignored_mask) {
2392 dev_dbg(nor->dev,
2393 "SPI n-n-n protocols are not supported yet.\n");
2394 shared_mask &= ~ignored_mask;
2395 }
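	/*
	 * For illustration: a controller that only sets SPI_RX_QUAD offers
	 * READ, READ_FAST and READ_1_1_4, so a flash that only advertises
	 * dual reads ends up with shared_mask = READ | READ_FAST and the
	 * selectors below fall back to Fast Read 1-1-1.
	 */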
2396
2397 /* Select the (Fast) Read command. */
2398 err = spi_nor_select_read(nor, params, shared_mask);
2399 if (err) {
2400 dev_dbg(nor->dev,
2401 "can't select read settings supported by both the SPI controller and memory.\n");
2402 return err;
2403 }
2404
2405 /* Select the Page Program command. */
2406 err = spi_nor_select_pp(nor, params, shared_mask);
2407 if (err) {
2408 dev_dbg(nor->dev,
2409 "can't select write settings supported by both the SPI controller and memory.\n");
2410 return err;
2411 }
2412
2413 /* Select the Sector Erase command. */
2414 err = spi_nor_select_erase(nor, info);
2415 if (err) {
2416 dev_dbg(nor->dev,
2417 "can't select erase settings supported by both the SPI controller and memory.\n");
2418 return err;
2419 }
2420
2421 /* Enable Quad I/O if needed. */
2422 enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
2423 spi_nor_get_protocol_width(nor->write_proto) == 4);
2424 if (enable_quad_io && params->quad_enable)
2425 nor->quad_enable = params->quad_enable;
2426 else
2427 nor->quad_enable = NULL;
2428
2429 return 0;
2430 }
2431
2432 static int spi_nor_init(struct spi_nor *nor)
2433 {
2434 int err;
2435
2436 /*
2437 * Atmel, SST, Intel/Numonyx, and other serial NOR flashes tend to power up
2438 * with the software protection bits set
2439 */
2440 if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
2441 JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
2442 JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
2443 nor->info->flags & SPI_NOR_HAS_LOCK) {
2444 write_enable(nor);
2445 write_sr(nor, 0);
2446 spi_nor_wait_till_ready(nor);
2447 }
2448
2449 if (nor->quad_enable) {
2450 err = nor->quad_enable(nor);
2451 if (err) {
2452 dev_dbg(nor->dev, "quad mode not supported\n");
2453 return err;
2454 }
2455 }
2456
2457 if (nor->addr_width == 4 &&
2458 (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
2459 !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
2460 /*
2461 * If the RESET# pin isn't hooked up properly, or the system
2462 * otherwise doesn't perform a reset command in the boot
2463 * sequence, it's impossible to 100% protect against unexpected
2464 * reboots (e.g., crashes). Warn the user (or hopefully, system
2465 * designer) that this is bad.
2466 */
2467 if (nor->flags & SNOR_F_BROKEN_RESET)
2468 printf("enabling reset hack; may not recover from unexpected reboots\n");
2469 set_4byte(nor, nor->info, 1);
2470 }
2471
2472 return 0;
2473 }
2474
2475 int spi_nor_scan(struct spi_nor *nor)
2476 {
2477 struct spi_nor_flash_parameter params;
2478 const struct flash_info *info = NULL;
2479 struct mtd_info *mtd = &nor->mtd;
2480 struct spi_nor_hwcaps hwcaps = {
2481 .mask = SNOR_HWCAPS_READ |
2482 SNOR_HWCAPS_READ_FAST |
2483 SNOR_HWCAPS_PP,
2484 };
2485 struct spi_slave *spi = nor->spi;
2486 int ret;
2487
2488 /* Reset SPI protocol for all commands. */
2489 nor->reg_proto = SNOR_PROTO_1_1_1;
2490 nor->read_proto = SNOR_PROTO_1_1_1;
2491 nor->write_proto = SNOR_PROTO_1_1_1;
2492 nor->read = spi_nor_read_data;
2493 nor->write = spi_nor_write_data;
2494 nor->read_reg = spi_nor_read_reg;
2495 nor->write_reg = spi_nor_write_reg;
2496
2497 if (spi->mode & SPI_RX_OCTAL) {
2498 hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2499
2500 if (spi->mode & SPI_TX_OCTAL)
2501 hwcaps.mask |= (SNOR_HWCAPS_READ_1_8_8 |
2502 SNOR_HWCAPS_PP_1_1_8 |
2503 SNOR_HWCAPS_PP_1_8_8);
2504 } else if (spi->mode & SPI_RX_QUAD) {
2505 hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2506
2507 if (spi->mode & SPI_TX_QUAD)
2508 hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
2509 SNOR_HWCAPS_PP_1_1_4 |
2510 SNOR_HWCAPS_PP_1_4_4);
2511 } else if (spi->mode & SPI_RX_DUAL) {
2512 hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2513
2514 if (spi->mode & SPI_TX_DUAL)
2515 hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
2516 }
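	/*
	 * Example (assuming a bus that sets both SPI_RX_QUAD and SPI_TX_QUAD):
	 * the capability mask grows to READ, READ_FAST, READ_1_1_4, READ_1_4_4,
	 * PP, PP_1_1_4 and PP_1_4_4; spi_nor_setup() later intersects this with
	 * what the detected flash actually supports.
	 */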
2517
2518 info = spi_nor_read_id(nor);
2519 if (IS_ERR_OR_NULL(info))
2520 return -ENOENT;
2521 /* Parse the Serial Flash Discoverable Parameters table. */
2522 ret = spi_nor_init_params(nor, info, &params);
2523 if (ret)
2524 return ret;
2525
2526 if (!mtd->name)
2527 mtd->name = info->name;
2528 mtd->priv = nor;
2529 mtd->type = MTD_NORFLASH;
2530 mtd->writesize = 1;
2531 mtd->flags = MTD_CAP_NORFLASH;
2532 mtd->size = params.size;
2533 mtd->_erase = spi_nor_erase;
2534 mtd->_read = spi_nor_read;
2535
2536 #if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
2537 /* NOR protection support for STmicro/Micron chips and similar */
2538 if (JEDEC_MFR(info) == SNOR_MFR_ST ||
2539 JEDEC_MFR(info) == SNOR_MFR_MICRON ||
2540 JEDEC_MFR(info) == SNOR_MFR_SST ||
2541 info->flags & SPI_NOR_HAS_LOCK) {
2542 nor->flash_lock = stm_lock;
2543 nor->flash_unlock = stm_unlock;
2544 nor->flash_is_locked = stm_is_locked;
2545 }
2546 #endif
2547
2548 #ifdef CONFIG_SPI_FLASH_SST
2549 /*
2550 * sst26 series block protection implementation differs from other
2551 * series.
2552 */
2553 if (info->flags & SPI_NOR_HAS_SST26LOCK) {
2554 nor->flash_lock = sst26_lock;
2555 nor->flash_unlock = sst26_unlock;
2556 nor->flash_is_locked = sst26_is_locked;
2557 }
2558
2559 /* sst nor chips use AAI word program */
2560 if (info->flags & SST_WRITE)
2561 mtd->_write = sst_write;
2562 else
2563 #endif
2564 mtd->_write = spi_nor_write;
2565
2566 if (info->flags & USE_FSR)
2567 nor->flags |= SNOR_F_USE_FSR;
2568 if (info->flags & SPI_NOR_HAS_TB)
2569 nor->flags |= SNOR_F_HAS_SR_TB;
2570 if (info->flags & NO_CHIP_ERASE)
2571 nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
2572 if (info->flags & USE_CLSR)
2573 nor->flags |= SNOR_F_USE_CLSR;
2574
2575 if (info->flags & SPI_NOR_NO_ERASE)
2576 mtd->flags |= MTD_NO_ERASE;
2577
2578 nor->page_size = params.page_size;
2579 mtd->writebufsize = nor->page_size;
2580
2581 /* Some devices cannot do fast-read, no matter what DT tells us */
2582 if ((info->flags & SPI_NOR_NO_FR) || (spi->mode & SPI_RX_SLOW))
2583 params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
2584
2585 /*
2586 * Configure the SPI memory:
2587 * - select op codes for (Fast) Read, Page Program and Sector Erase.
2588 * - set the number of dummy cycles (mode cycles + wait states).
2589 * - set the SPI protocols for register and memory accesses.
2590 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
2591 */
2592 ret = spi_nor_setup(nor, info, &params, &hwcaps);
2593 if (ret)
2594 return ret;
2595
2596 if (nor->addr_width) {
2597 /* already configured from SFDP */
2598 } else if (info->addr_width) {
2599 nor->addr_width = info->addr_width;
2600 } else if (mtd->size > SZ_16M) {
2601 #ifndef CONFIG_SPI_FLASH_BAR
2602 /* enable 4-byte addressing if the device exceeds 16MiB */
2603 nor->addr_width = 4;
2604 if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
2605 info->flags & SPI_NOR_4B_OPCODES)
2606 spi_nor_set_4byte_opcodes(nor, info);
2607 #else
2608 /* Configure the BAR - discover bank cmds and read current bank */
2609 nor->addr_width = 3;
2610 ret = read_bar(nor, info);
2611 if (ret < 0)
2612 return ret;
2613 #endif
2614 } else {
2615 nor->addr_width = 3;
2616 }
2617
2618 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
2619 dev_dbg(dev, "address width is too large: %u\n",
2620 nor->addr_width);
2621 return -EINVAL;
2622 }
2623
2624 /* Send all the required SPI flash commands to initialize device */
2625 nor->info = info;
2626 ret = spi_nor_init(nor);
2627 if (ret)
2628 return ret;
2629
2630 nor->name = mtd->name;
2631 nor->size = mtd->size;
2632 nor->erase_size = mtd->erasesize;
2633 nor->sector_size = mtd->erasesize;
2634
2635 #ifndef CONFIG_SPL_BUILD
2636 printf("SF: Detected %s with page size ", nor->name);
2637 print_size(nor->page_size, ", erase size ");
2638 print_size(nor->erase_size, ", total ");
2639 print_size(nor->size, "");
2640 puts("\n");
2641 #endif
2642
2643 return 0;
2644 }
2645
2646 /* U-Boot specific functions, need to extend MTD to support these */
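/*
 * spi_flash_cmd_get_sw_write_prot() - report the software write-protection
 * state. Returns the value of Status Register bits [4:2], which on most SPI
 * NOR parts are the Block Protect bits BP2..BP0; non-zero means some region
 * is software write-protected, 0 means none. A negative errno is returned if
 * reading the Status Register fails.
 */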
2647 int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor)
2648 {
2649 int sr = read_sr(nor);
2650
2651 if (sr < 0)
2652 return sr;
2653
2654 return (sr >> 2) & 7;
2655 }
2656