1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
4 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
5 *
6 * Copyright (C) 2005, Intec Automation Inc.
7 * Copyright (C) 2014, Freescale Semiconductor, Inc.
8 */
9
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/sched/task_stack.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>
#include <linux/miscdevice.h>

#include <uapi/linux/spi_nor_misc.h>

#include "core.h"
29
30 struct spi_nor_misc_dev {
31 struct miscdevice dev;
32 struct spi_nor *nor;
33 };
34
35 /* Define max times to check status register before we give up. */
36
37 /*
38 * For everything but full-chip erase; probably could be much smaller, but kept
39 * around for safety for now
40 */
41 #define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)
42
43 /*
44 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
45 * for larger flash
46 */
47 #define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
48
49 #define SPI_NOR_MAX_ADDR_WIDTH 4
50
51 /**
52 * spi_nor_spimem_bounce() - check if a bounce buffer is needed for the data
53 * transfer
54 * @nor: pointer to 'struct spi_nor'
55 * @op: pointer to 'struct spi_mem_op' template for transfer
56 *
57 * If we have to use the bounce buffer, the data field in @op will be updated.
58 *
59 * Return: true if the bounce buffer is needed, false if not
60 */
spi_nor_spimem_bounce(struct spi_nor * nor,struct spi_mem_op * op)61 static bool spi_nor_spimem_bounce(struct spi_nor *nor, struct spi_mem_op *op)
62 {
63 /* op->data.buf.in occupies the same memory as op->data.buf.out */
64 if (object_is_on_stack(op->data.buf.in) ||
65 !virt_addr_valid(op->data.buf.in)) {
66 if (op->data.nbytes > nor->bouncebuf_size)
67 op->data.nbytes = nor->bouncebuf_size;
68 op->data.buf.in = nor->bouncebuf;
69 return true;
70 }
71
72 return false;
73 }
74
75 /**
76 * spi_nor_spimem_exec_op() - execute a memory operation
77 * @nor: pointer to 'struct spi_nor'
78 * @op: pointer to 'struct spi_mem_op' template for transfer
79 *
80 * Return: 0 on success, -error otherwise.
81 */
spi_nor_spimem_exec_op(struct spi_nor * nor,struct spi_mem_op * op)82 static int spi_nor_spimem_exec_op(struct spi_nor *nor, struct spi_mem_op *op)
83 {
84 int error;
85
86 error = spi_mem_adjust_op_size(nor->spimem, op);
87 if (error)
88 return error;
89
90 return spi_mem_exec_op(nor->spimem, op);
91 }
92
93 /**
94 * spi_nor_spimem_read_data() - read data from flash's memory region via
95 * spi-mem
96 * @nor: pointer to 'struct spi_nor'
97 * @from: offset to read from
98 * @len: number of bytes to read
99 * @buf: pointer to dst buffer
100 *
101 * Return: number of bytes read successfully, -errno otherwise
102 */
spi_nor_spimem_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)103 static ssize_t spi_nor_spimem_read_data(struct spi_nor *nor, loff_t from,
104 size_t len, u8 *buf)
105 {
106 struct spi_mem_op op =
107 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
108 SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
109 SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
110 SPI_MEM_OP_DATA_IN(len, buf, 1));
111 bool usebouncebuf;
112 ssize_t nbytes;
113 int error;
114
115 /* get transfer protocols. */
116 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
117 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
118 op.dummy.buswidth = op.addr.buswidth;
119 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
120
121 /* convert the dummy cycles to the number of bytes */
122 op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
123
124 usebouncebuf = spi_nor_spimem_bounce(nor, &op);
125
126 if (nor->dirmap.rdesc) {
127 nbytes = spi_mem_dirmap_read(nor->dirmap.rdesc, op.addr.val,
128 op.data.nbytes, op.data.buf.in);
129 } else {
130 error = spi_nor_spimem_exec_op(nor, &op);
131 if (error)
132 return error;
133 nbytes = op.data.nbytes;
134 }
135
136 if (usebouncebuf && nbytes > 0)
137 memcpy(buf, op.data.buf.in, nbytes);
138
139 return nbytes;
140 }
141
142 /**
143 * spi_nor_read_data() - read data from flash memory
144 * @nor: pointer to 'struct spi_nor'
145 * @from: offset to read from
146 * @len: number of bytes to read
147 * @buf: pointer to dst buffer
148 *
149 * Return: number of bytes read successfully, -errno otherwise
150 */
spi_nor_read_data(struct spi_nor * nor,loff_t from,size_t len,u8 * buf)151 ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
152 {
153 if (nor->spimem)
154 return spi_nor_spimem_read_data(nor, from, len, buf);
155
156 return nor->controller_ops->read(nor, from, len, buf);
157 }
158
159 /**
160 * spi_nor_spimem_write_data() - write data to flash memory via
161 * spi-mem
162 * @nor: pointer to 'struct spi_nor'
163 * @to: offset to write to
164 * @len: number of bytes to write
165 * @buf: pointer to src buffer
166 *
167 * Return: number of bytes written successfully, -errno otherwise
168 */
spi_nor_spimem_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)169 static ssize_t spi_nor_spimem_write_data(struct spi_nor *nor, loff_t to,
170 size_t len, const u8 *buf)
171 {
172 struct spi_mem_op op =
173 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
174 SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
175 SPI_MEM_OP_NO_DUMMY,
176 SPI_MEM_OP_DATA_OUT(len, buf, 1));
177 ssize_t nbytes;
178 int error;
179
180 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
181 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
182 op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
183
184 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
185 op.addr.nbytes = 0;
186
187 if (spi_nor_spimem_bounce(nor, &op))
188 memcpy(nor->bouncebuf, buf, op.data.nbytes);
189
190 if (nor->dirmap.wdesc) {
191 nbytes = spi_mem_dirmap_write(nor->dirmap.wdesc, op.addr.val,
192 op.data.nbytes, op.data.buf.out);
193 } else {
194 error = spi_nor_spimem_exec_op(nor, &op);
195 if (error)
196 return error;
197 nbytes = op.data.nbytes;
198 }
199
200 return nbytes;
201 }
202
203 /**
204 * spi_nor_write_data() - write data to flash memory
205 * @nor: pointer to 'struct spi_nor'
206 * @to: offset to write to
207 * @len: number of bytes to write
208 * @buf: pointer to src buffer
209 *
210 * Return: number of bytes written successfully, -errno otherwise
211 */
spi_nor_write_data(struct spi_nor * nor,loff_t to,size_t len,const u8 * buf)212 ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
213 const u8 *buf)
214 {
215 if (nor->spimem)
216 return spi_nor_spimem_write_data(nor, to, len, buf);
217
218 return nor->controller_ops->write(nor, to, len, buf);
219 }
220
221 /**
222 * spi_nor_write_enable() - Set write enable latch with Write Enable command.
223 * @nor: pointer to 'struct spi_nor'.
224 *
225 * Return: 0 on success, -errno otherwise.
226 */
spi_nor_write_enable(struct spi_nor * nor)227 int spi_nor_write_enable(struct spi_nor *nor)
228 {
229 int ret;
230
231 if (nor->spimem) {
232 struct spi_mem_op op =
233 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREN, 1),
234 SPI_MEM_OP_NO_ADDR,
235 SPI_MEM_OP_NO_DUMMY,
236 SPI_MEM_OP_NO_DATA);
237
238 ret = spi_mem_exec_op(nor->spimem, &op);
239 } else {
240 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREN,
241 NULL, 0);
242 }
243
244 if (ret)
245 dev_dbg(nor->dev, "error %d on Write Enable\n", ret);
246
247 return ret;
248 }
249
250 /**
251 * spi_nor_write_disable() - Send Write Disable instruction to the chip.
252 * @nor: pointer to 'struct spi_nor'.
253 *
254 * Return: 0 on success, -errno otherwise.
255 */
spi_nor_write_disable(struct spi_nor * nor)256 int spi_nor_write_disable(struct spi_nor *nor)
257 {
258 int ret;
259
260 if (nor->spimem) {
261 struct spi_mem_op op =
262 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRDI, 1),
263 SPI_MEM_OP_NO_ADDR,
264 SPI_MEM_OP_NO_DUMMY,
265 SPI_MEM_OP_NO_DATA);
266
267 ret = spi_mem_exec_op(nor->spimem, &op);
268 } else {
269 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRDI,
270 NULL, 0);
271 }
272
273 if (ret)
274 dev_dbg(nor->dev, "error %d on Write Disable\n", ret);
275
276 return ret;
277 }
278
279 /**
280 * spi_nor_read_sr() - Read the Status Register.
281 * @nor: pointer to 'struct spi_nor'.
282 * @sr: pointer to a DMA-able buffer where the value of the
283 * Status Register will be written.
284 *
285 * Return: 0 on success, -errno otherwise.
286 */
spi_nor_read_sr(struct spi_nor * nor,u8 * sr)287 static int spi_nor_read_sr(struct spi_nor *nor, u8 *sr)
288 {
289 int ret;
290
291 if (nor->spimem) {
292 struct spi_mem_op op =
293 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR, 1),
294 SPI_MEM_OP_NO_ADDR,
295 SPI_MEM_OP_NO_DUMMY,
296 SPI_MEM_OP_DATA_IN(1, sr, 1));
297
298 ret = spi_mem_exec_op(nor->spimem, &op);
299 } else {
300 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR,
301 sr, 1);
302 }
303
304 if (ret)
305 dev_dbg(nor->dev, "error %d reading SR\n", ret);
306
307 return ret;
308 }
309
310 /**
311 * spi_nor_read_fsr() - Read the Flag Status Register.
312 * @nor: pointer to 'struct spi_nor'
313 * @fsr: pointer to a DMA-able buffer where the value of the
314 * Flag Status Register will be written.
315 *
316 * Return: 0 on success, -errno otherwise.
317 */
spi_nor_read_fsr(struct spi_nor * nor,u8 * fsr)318 static int spi_nor_read_fsr(struct spi_nor *nor, u8 *fsr)
319 {
320 int ret;
321
322 if (nor->spimem) {
323 struct spi_mem_op op =
324 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDFSR, 1),
325 SPI_MEM_OP_NO_ADDR,
326 SPI_MEM_OP_NO_DUMMY,
327 SPI_MEM_OP_DATA_IN(1, fsr, 1));
328
329 ret = spi_mem_exec_op(nor->spimem, &op);
330 } else {
331 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDFSR,
332 fsr, 1);
333 }
334
335 if (ret)
336 dev_dbg(nor->dev, "error %d reading FSR\n", ret);
337
338 return ret;
339 }
340
341 /**
342 * spi_nor_read_cr() - Read the Configuration Register using the
343 * SPINOR_OP_RDCR (35h) command.
344 * @nor: pointer to 'struct spi_nor'
345 * @cr: pointer to a DMA-able buffer where the value of the
346 * Configuration Register will be written.
347 *
348 * Return: 0 on success, -errno otherwise.
349 */
spi_nor_read_cr(struct spi_nor * nor,u8 * cr)350 static int spi_nor_read_cr(struct spi_nor *nor, u8 *cr)
351 {
352 int ret;
353
354 if (nor->spimem) {
355 struct spi_mem_op op =
356 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDCR, 1),
357 SPI_MEM_OP_NO_ADDR,
358 SPI_MEM_OP_NO_DUMMY,
359 SPI_MEM_OP_DATA_IN(1, cr, 1));
360
361 ret = spi_mem_exec_op(nor->spimem, &op);
362 } else {
363 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDCR, cr, 1);
364 }
365
366 if (ret)
367 dev_dbg(nor->dev, "error %d reading CR\n", ret);
368
369 return ret;
370 }
371
372 /**
373 * spi_nor_set_4byte_addr_mode() - Enter/Exit 4-byte address mode.
374 * @nor: pointer to 'struct spi_nor'.
375 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
376 * address mode.
377 *
378 * Return: 0 on success, -errno otherwise.
379 */
spi_nor_set_4byte_addr_mode(struct spi_nor * nor,bool enable)380 int spi_nor_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
381 {
382 int ret;
383
384 if (nor->spimem) {
385 struct spi_mem_op op =
386 SPI_MEM_OP(SPI_MEM_OP_CMD(enable ?
387 SPINOR_OP_EN4B :
388 SPINOR_OP_EX4B,
389 1),
390 SPI_MEM_OP_NO_ADDR,
391 SPI_MEM_OP_NO_DUMMY,
392 SPI_MEM_OP_NO_DATA);
393
394 ret = spi_mem_exec_op(nor->spimem, &op);
395 } else {
396 ret = nor->controller_ops->write_reg(nor,
397 enable ? SPINOR_OP_EN4B :
398 SPINOR_OP_EX4B,
399 NULL, 0);
400 }
401
402 if (ret)
403 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
404
405 return ret;
406 }
407
408 /**
409 * spansion_set_4byte_addr_mode() - Set 4-byte address mode for Spansion
410 * flashes.
411 * @nor: pointer to 'struct spi_nor'.
412 * @enable: true to enter the 4-byte address mode, false to exit the 4-byte
413 * address mode.
414 *
415 * Return: 0 on success, -errno otherwise.
416 */
spansion_set_4byte_addr_mode(struct spi_nor * nor,bool enable)417 static int spansion_set_4byte_addr_mode(struct spi_nor *nor, bool enable)
418 {
419 int ret;
420
421 nor->bouncebuf[0] = enable << 7;
422
423 if (nor->spimem) {
424 struct spi_mem_op op =
425 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_BRWR, 1),
426 SPI_MEM_OP_NO_ADDR,
427 SPI_MEM_OP_NO_DUMMY,
428 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
429
430 ret = spi_mem_exec_op(nor->spimem, &op);
431 } else {
432 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_BRWR,
433 nor->bouncebuf, 1);
434 }
435
436 if (ret)
437 dev_dbg(nor->dev, "error %d setting 4-byte mode\n", ret);
438
439 return ret;
440 }
441
442 /**
443 * spi_nor_write_ear() - Write Extended Address Register.
444 * @nor: pointer to 'struct spi_nor'.
445 * @ear: value to write to the Extended Address Register.
446 *
447 * Return: 0 on success, -errno otherwise.
448 */
spi_nor_write_ear(struct spi_nor * nor,u8 ear)449 int spi_nor_write_ear(struct spi_nor *nor, u8 ear)
450 {
451 int ret;
452
453 nor->bouncebuf[0] = ear;
454
455 if (nor->spimem) {
456 struct spi_mem_op op =
457 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WREAR, 1),
458 SPI_MEM_OP_NO_ADDR,
459 SPI_MEM_OP_NO_DUMMY,
460 SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
461
462 ret = spi_mem_exec_op(nor->spimem, &op);
463 } else {
464 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WREAR,
465 nor->bouncebuf, 1);
466 }
467
468 if (ret)
469 dev_dbg(nor->dev, "error %d writing EAR\n", ret);
470
471 return ret;
472 }
473
474 /**
475 * spi_nor_xread_sr() - Read the Status Register on S3AN flashes.
476 * @nor: pointer to 'struct spi_nor'.
477 * @sr: pointer to a DMA-able buffer where the value of the
478 * Status Register will be written.
479 *
480 * Return: 0 on success, -errno otherwise.
481 */
spi_nor_xread_sr(struct spi_nor * nor,u8 * sr)482 int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr)
483 {
484 int ret;
485
486 if (nor->spimem) {
487 struct spi_mem_op op =
488 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_XRDSR, 1),
489 SPI_MEM_OP_NO_ADDR,
490 SPI_MEM_OP_NO_DUMMY,
491 SPI_MEM_OP_DATA_IN(1, sr, 1));
492
493 ret = spi_mem_exec_op(nor->spimem, &op);
494 } else {
495 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_XRDSR,
496 sr, 1);
497 }
498
499 if (ret)
500 dev_dbg(nor->dev, "error %d reading XRDSR\n", ret);
501
502 return ret;
503 }
504
505 /**
506 * spi_nor_xsr_ready() - Query the Status Register of the S3AN flash to see if
507 * the flash is ready for new commands.
508 * @nor: pointer to 'struct spi_nor'.
509 *
510 * Return: 1 if ready, 0 if not ready, -errno on errors.
511 */
spi_nor_xsr_ready(struct spi_nor * nor)512 static int spi_nor_xsr_ready(struct spi_nor *nor)
513 {
514 int ret;
515
516 ret = spi_nor_xread_sr(nor, nor->bouncebuf);
517 if (ret)
518 return ret;
519
520 return !!(nor->bouncebuf[0] & XSR_RDY);
521 }
522
523 /**
524 * spi_nor_clear_sr() - Clear the Status Register.
525 * @nor: pointer to 'struct spi_nor'.
526 */
spi_nor_clear_sr(struct spi_nor * nor)527 static void spi_nor_clear_sr(struct spi_nor *nor)
528 {
529 int ret;
530
531 if (nor->spimem) {
532 struct spi_mem_op op =
533 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLSR, 1),
534 SPI_MEM_OP_NO_ADDR,
535 SPI_MEM_OP_NO_DUMMY,
536 SPI_MEM_OP_NO_DATA);
537
538 ret = spi_mem_exec_op(nor->spimem, &op);
539 } else {
540 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLSR,
541 NULL, 0);
542 }
543
544 if (ret)
545 dev_dbg(nor->dev, "error %d clearing SR\n", ret);
546 }
547
548 /**
549 * spi_nor_sr_ready() - Query the Status Register to see if the flash is ready
550 * for new commands.
551 * @nor: pointer to 'struct spi_nor'.
552 *
553 * Return: 1 if ready, 0 if not ready, -errno on errors.
554 */
spi_nor_sr_ready(struct spi_nor * nor)555 static int spi_nor_sr_ready(struct spi_nor *nor)
556 {
557 int ret = spi_nor_read_sr(nor, nor->bouncebuf);
558
559 if (ret)
560 return ret;
561
562 if (nor->flags & SNOR_F_USE_CLSR &&
563 nor->bouncebuf[0] & (SR_E_ERR | SR_P_ERR)) {
564 if (nor->bouncebuf[0] & SR_E_ERR)
565 dev_err(nor->dev, "Erase Error occurred\n");
566 else
567 dev_err(nor->dev, "Programming Error occurred\n");
568
569 spi_nor_clear_sr(nor);
570
571 /*
572 * WEL bit remains set to one when an erase or page program
573 * error occurs. Issue a Write Disable command to protect
574 * against inadvertent writes that can possibly corrupt the
575 * contents of the memory.
576 */
577 ret = spi_nor_write_disable(nor);
578 if (ret)
579 return ret;
580
581 return -EIO;
582 }
583
584 return !(nor->bouncebuf[0] & SR_WIP);
585 }
586
587 /**
588 * spi_nor_clear_fsr() - Clear the Flag Status Register.
589 * @nor: pointer to 'struct spi_nor'.
590 */
spi_nor_clear_fsr(struct spi_nor * nor)591 static void spi_nor_clear_fsr(struct spi_nor *nor)
592 {
593 int ret;
594
595 if (nor->spimem) {
596 struct spi_mem_op op =
597 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CLFSR, 1),
598 SPI_MEM_OP_NO_ADDR,
599 SPI_MEM_OP_NO_DUMMY,
600 SPI_MEM_OP_NO_DATA);
601
602 ret = spi_mem_exec_op(nor->spimem, &op);
603 } else {
604 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CLFSR,
605 NULL, 0);
606 }
607
608 if (ret)
609 dev_dbg(nor->dev, "error %d clearing FSR\n", ret);
610 }
611
612 /**
613 * spi_nor_fsr_ready() - Query the Flag Status Register to see if the flash is
614 * ready for new commands.
615 * @nor: pointer to 'struct spi_nor'.
616 *
617 * Return: 1 if ready, 0 if not ready, -errno on errors.
618 */
spi_nor_fsr_ready(struct spi_nor * nor)619 static int spi_nor_fsr_ready(struct spi_nor *nor)
620 {
621 int ret = spi_nor_read_fsr(nor, nor->bouncebuf);
622
623 if (ret)
624 return ret;
625
626 if (nor->bouncebuf[0] & (FSR_E_ERR | FSR_P_ERR)) {
627 if (nor->bouncebuf[0] & FSR_E_ERR)
628 dev_err(nor->dev, "Erase operation failed.\n");
629 else
630 dev_err(nor->dev, "Program operation failed.\n");
631
632 if (nor->bouncebuf[0] & FSR_PT_ERR)
633 dev_err(nor->dev,
634 "Attempted to modify a protected sector.\n");
635
636 spi_nor_clear_fsr(nor);
637
638 /*
639 * WEL bit remains set to one when an erase or page program
640 * error occurs. Issue a Write Disable command to protect
641 * against inadvertent writes that can possibly corrupt the
642 * contents of the memory.
643 */
644 ret = spi_nor_write_disable(nor);
645 if (ret)
646 return ret;
647
648 return -EIO;
649 }
650
651 return !!(nor->bouncebuf[0] & FSR_READY);
652 }
653
654 /**
655 * spi_nor_ready() - Query the flash to see if it is ready for new commands.
656 * @nor: pointer to 'struct spi_nor'.
657 *
658 * Return: 1 if ready, 0 if not ready, -errno on errors.
659 */
spi_nor_ready(struct spi_nor * nor)660 static int spi_nor_ready(struct spi_nor *nor)
661 {
662 int sr, fsr;
663
664 if (nor->flags & SNOR_F_READY_XSR_RDY)
665 sr = spi_nor_xsr_ready(nor);
666 else
667 sr = spi_nor_sr_ready(nor);
668 if (sr < 0)
669 return sr;
670 fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
671 if (fsr < 0)
672 return fsr;
673 return sr && fsr;
674 }
675
676 /**
677 * spi_nor_wait_till_ready_with_timeout() - Service routine to read the
678 * Status Register until ready, or timeout occurs.
679 * @nor: pointer to "struct spi_nor".
680 * @timeout_jiffies: jiffies to wait until timeout.
681 *
682 * Return: 0 on success, -errno otherwise.
683 */
spi_nor_wait_till_ready_with_timeout(struct spi_nor * nor,unsigned long timeout_jiffies)684 static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
685 unsigned long timeout_jiffies)
686 {
687 unsigned long deadline;
688 int timeout = 0, ret;
689
690 deadline = jiffies + timeout_jiffies;
691
692 while (!timeout) {
693 if (time_after_eq(jiffies, deadline))
694 timeout = 1;
695
696 ret = spi_nor_ready(nor);
697 if (ret < 0)
698 return ret;
699 if (ret)
700 return 0;
701
702 cond_resched();
703 }
704
705 dev_dbg(nor->dev, "flash operation timed out\n");
706
707 return -ETIMEDOUT;
708 }
709
710 /**
711 * spi_nor_wait_till_ready() - Wait for a predefined amount of time for the
712 * flash to be ready, or timeout occurs.
713 * @nor: pointer to "struct spi_nor".
714 *
715 * Return: 0 on success, -errno otherwise.
716 */
spi_nor_wait_till_ready(struct spi_nor * nor)717 int spi_nor_wait_till_ready(struct spi_nor *nor)
718 {
719 return spi_nor_wait_till_ready_with_timeout(nor,
720 DEFAULT_READY_WAIT_JIFFIES);
721 }
722
723 /**
724 * spi_nor_wait_till_ready_with_timeout_and_msleep() - Service routine to read the
725 * Status Register until ready with msleep, or timeout occurs.
726 * @nor: pointer to "struct spi_nor".
727 * @timeout_jiffies: jiffies to wait until timeout.
728 *
729 * Return: 0 on success, -errno otherwise.
730 */
spi_nor_wait_till_ready_with_timeout_and_msleep(struct spi_nor * nor,unsigned long timeout_jiffies)731 static int spi_nor_wait_till_ready_with_timeout_and_msleep(struct spi_nor *nor,
732 unsigned long timeout_jiffies)
733 {
734 unsigned long deadline;
735 int timeout = 0, ret;
736
737 deadline = jiffies + timeout_jiffies;
738
739 while (!timeout) {
740 if (time_after_eq(jiffies, deadline))
741 timeout = 1;
742
743 ret = spi_nor_ready(nor);
744 if (ret < 0)
745 return ret;
746 if (ret)
747 return 0;
748
749 msleep(10);
750 }
751
752 dev_dbg(nor->dev, "flash operation timed out\n");
753
754 return -ETIMEDOUT;
755 }
756
757 /**
758 * spi_nor_wait_till_ready_with_msleep() - Wait for a predefined amount of time for the
759 * flash to be ready with msleep, or timeout occurs.
760 * @nor: pointer to "struct spi_nor".
761 *
762 * Return: 0 on success, -errno otherwise.
763 */
spi_nor_wait_till_ready_with_msleep(struct spi_nor * nor)764 int spi_nor_wait_till_ready_with_msleep(struct spi_nor *nor)
765 {
766 return spi_nor_wait_till_ready_with_timeout_and_msleep(nor,
767 DEFAULT_READY_WAIT_JIFFIES);
768 }
769
770 /**
771 * spi_nor_write_sr() - Write the Status Register.
772 * @nor: pointer to 'struct spi_nor'.
773 * @sr: pointer to DMA-able buffer to write to the Status Register.
774 * @len: number of bytes to write to the Status Register.
775 *
776 * Return: 0 on success, -errno otherwise.
777 */
spi_nor_write_sr(struct spi_nor * nor,const u8 * sr,size_t len)778 static int spi_nor_write_sr(struct spi_nor *nor, const u8 *sr, size_t len)
779 {
780 int ret;
781
782 ret = spi_nor_write_enable(nor);
783 if (ret)
784 return ret;
785
786 if (nor->spimem) {
787 struct spi_mem_op op =
788 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
789 SPI_MEM_OP_NO_ADDR,
790 SPI_MEM_OP_NO_DUMMY,
791 SPI_MEM_OP_DATA_OUT(len, sr, 1));
792
793 ret = spi_mem_exec_op(nor->spimem, &op);
794 } else {
795 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR,
796 sr, len);
797 }
798
799 if (ret) {
800 dev_dbg(nor->dev, "error %d writing SR\n", ret);
801 return ret;
802 }
803
804 return spi_nor_wait_till_ready(nor);
805 }
806
807 /**
808 * spi_nor_write_sr1_and_check() - Write one byte to the Status Register 1 and
809 * ensure that the byte written match the received value.
810 * @nor: pointer to a 'struct spi_nor'.
811 * @sr1: byte value to be written to the Status Register.
812 *
813 * Return: 0 on success, -errno otherwise.
814 */
spi_nor_write_sr1_and_check(struct spi_nor * nor,u8 sr1)815 static int spi_nor_write_sr1_and_check(struct spi_nor *nor, u8 sr1)
816 {
817 int ret;
818
819 nor->bouncebuf[0] = sr1;
820
821 ret = spi_nor_write_sr(nor, nor->bouncebuf, 1);
822 if (ret)
823 return ret;
824
825 ret = spi_nor_read_sr(nor, nor->bouncebuf);
826 if (ret)
827 return ret;
828
829 if (nor->bouncebuf[0] != sr1) {
830 dev_dbg(nor->dev, "SR1: read back test failed\n");
831 return -EIO;
832 }
833
834 return 0;
835 }
836
837 /**
838 * spi_nor_write_16bit_sr_and_check() - Write the Status Register 1 and the
839 * Status Register 2 in one shot. Ensure that the byte written in the Status
840 * Register 1 match the received value, and that the 16-bit Write did not
841 * affect what was already in the Status Register 2.
842 * @nor: pointer to a 'struct spi_nor'.
843 * @sr1: byte value to be written to the Status Register 1.
844 *
845 * Return: 0 on success, -errno otherwise.
846 */
spi_nor_write_16bit_sr_and_check(struct spi_nor * nor,u8 sr1)847 static int spi_nor_write_16bit_sr_and_check(struct spi_nor *nor, u8 sr1)
848 {
849 int ret;
850 u8 *sr_cr = nor->bouncebuf;
851 u8 cr_written;
852
853 /* Make sure we don't overwrite the contents of Status Register 2. */
854 if (!(nor->flags & SNOR_F_NO_READ_CR)) {
855 ret = spi_nor_read_cr(nor, &sr_cr[1]);
856 if (ret)
857 return ret;
858 } else if (nor->params->quad_enable) {
859 /*
860 * If the Status Register 2 Read command (35h) is not
861 * supported, we should at least be sure we don't
862 * change the value of the SR2 Quad Enable bit.
863 *
864 * We can safely assume that when the Quad Enable method is
865 * set, the value of the QE bit is one, as a consequence of the
866 * nor->params->quad_enable() call.
867 *
868 * We can safely assume that the Quad Enable bit is present in
869 * the Status Register 2 at BIT(1). According to the JESD216
870 * revB standard, BFPT DWORDS[15], bits 22:20, the 16-bit
871 * Write Status (01h) command is available just for the cases
872 * in which the QE bit is described in SR2 at BIT(1).
873 */
874 sr_cr[1] = SR2_QUAD_EN_BIT1;
875 } else {
876 sr_cr[1] = 0;
877 }
878
879 sr_cr[0] = sr1;
880
881 ret = spi_nor_write_sr(nor, sr_cr, 2);
882 if (ret)
883 return ret;
884
885 ret = spi_nor_read_sr(nor, sr_cr);
886 if (ret)
887 return ret;
888
889 if (sr1 != sr_cr[0]) {
890 dev_dbg(nor->dev, "SR: Read back test failed\n");
891 return -EIO;
892 }
893
894 if (nor->flags & SNOR_F_NO_READ_CR)
895 return 0;
896
897 cr_written = sr_cr[1];
898
899 ret = spi_nor_read_cr(nor, &sr_cr[1]);
900 if (ret)
901 return ret;
902
903 if (cr_written != sr_cr[1]) {
904 dev_dbg(nor->dev, "CR: read back test failed\n");
905 return -EIO;
906 }
907
908 return 0;
909 }
910
911 /**
912 * spi_nor_write_cr() - Write the Configure Register.
913 * @nor: pointer to 'struct spi_nor'.
914 * @sr: pointer to DMA-able buffer to write to the Status Register.
915 * @len: number of bytes to write to the Status Register.
916 *
917 * Return: 0 on success, -errno otherwise.
918 */
spi_nor_write_8bit_cr(struct spi_nor * nor,u8 cr)919 static int spi_nor_write_8bit_cr(struct spi_nor *nor, u8 cr)
920 {
921 int ret;
922 u8 *sr_cr = nor->bouncebuf;
923
924 ret = spi_nor_write_enable(nor);
925 if (ret)
926 return ret;
927
928 sr_cr[0] = cr;
929
930 if (nor->spimem) {
931 struct spi_mem_op op =
932 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRCR, 1),
933 SPI_MEM_OP_NO_ADDR,
934 SPI_MEM_OP_NO_DUMMY,
935 SPI_MEM_OP_DATA_OUT(1, sr_cr, 1));
936
937 ret = spi_mem_exec_op(nor->spimem, &op);
938 } else {
939 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRCR, sr_cr, 1);
940 }
941
942 if (ret) {
943 dev_dbg(nor->dev, "error %d writing SR\n", ret);
944 return ret;
945 }
946
947 return spi_nor_wait_till_ready(nor);
948 }
949
950 /**
951 * spi_nor_write_16bit_cr_and_check() - Write the Status Register 1 and the
952 * Configuration Register in one shot. Ensure that the byte written in the
953 * Configuration Register match the received value, and that the 16-bit Write
954 * did not affect what was already in the Status Register 1.
955 * @nor: pointer to a 'struct spi_nor'.
956 * @cr: byte value to be written to the Configuration Register.
957 *
958 * Return: 0 on success, -errno otherwise.
959 */
spi_nor_write_16bit_cr_and_check(struct spi_nor * nor,u8 cr)960 static int spi_nor_write_16bit_cr_and_check(struct spi_nor *nor, u8 cr)
961 {
962 int ret;
963 u8 *sr_cr = nor->bouncebuf;
964 u8 sr_written;
965
966 /* Keep the current value of the Status Register 1. */
967 ret = spi_nor_read_sr(nor, sr_cr);
968 if (ret)
969 return ret;
970
971 sr_cr[1] = cr;
972
973 ret = spi_nor_write_sr(nor, sr_cr, 2);
974 if (ret)
975 return ret;
976
977 sr_written = sr_cr[0];
978
979 ret = spi_nor_read_sr(nor, sr_cr);
980 if (ret)
981 return ret;
982
983 if (sr_written != sr_cr[0]) {
984 dev_dbg(nor->dev, "SR: Read back test failed\n");
985 return -EIO;
986 }
987
988 if (nor->flags & SNOR_F_NO_READ_CR)
989 return 0;
990
991 ret = spi_nor_read_cr(nor, &sr_cr[1]);
992 if (ret)
993 return ret;
994
995 if (cr != sr_cr[1]) {
996 dev_dbg(nor->dev, "CR: read back test failed\n");
997 return -EIO;
998 }
999
1000 return 0;
1001 }
1002
1003 /**
1004 * spi_nor_write_sr_and_check() - Write the Status Register 1 and ensure that
1005 * the byte written match the received value without affecting other bits in the
1006 * Status Register 1 and 2.
1007 * @nor: pointer to a 'struct spi_nor'.
1008 * @sr1: byte value to be written to the Status Register.
1009 *
1010 * Return: 0 on success, -errno otherwise.
1011 */
spi_nor_write_sr_and_check(struct spi_nor * nor,u8 sr1)1012 int spi_nor_write_sr_and_check(struct spi_nor *nor, u8 sr1)
1013 {
1014 if (nor->flags & SNOR_F_HAS_16BIT_SR)
1015 return spi_nor_write_16bit_sr_and_check(nor, sr1);
1016
1017 return spi_nor_write_sr1_and_check(nor, sr1);
1018 }
1019
1020 /**
1021 * spi_nor_write_sr2() - Write the Status Register 2 using the
1022 * SPINOR_OP_WRSR2 (3eh) command.
1023 * @nor: pointer to 'struct spi_nor'.
1024 * @sr2: pointer to DMA-able buffer to write to the Status Register 2.
1025 *
1026 * Return: 0 on success, -errno otherwise.
1027 */
spi_nor_write_sr2(struct spi_nor * nor,const u8 * sr2)1028 static int spi_nor_write_sr2(struct spi_nor *nor, const u8 *sr2)
1029 {
1030 int ret;
1031
1032 ret = spi_nor_write_enable(nor);
1033 if (ret)
1034 return ret;
1035
1036 if (nor->spimem) {
1037 struct spi_mem_op op =
1038 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR2, 1),
1039 SPI_MEM_OP_NO_ADDR,
1040 SPI_MEM_OP_NO_DUMMY,
1041 SPI_MEM_OP_DATA_OUT(1, sr2, 1));
1042
1043 ret = spi_mem_exec_op(nor->spimem, &op);
1044 } else {
1045 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_WRSR2,
1046 sr2, 1);
1047 }
1048
1049 if (ret) {
1050 dev_dbg(nor->dev, "error %d writing SR2\n", ret);
1051 return ret;
1052 }
1053
1054 return spi_nor_wait_till_ready(nor);
1055 }
1056
1057 /**
1058 * spi_nor_read_sr2() - Read the Status Register 2 using the
1059 * SPINOR_OP_RDSR2 (3fh) command.
1060 * @nor: pointer to 'struct spi_nor'.
1061 * @sr2: pointer to DMA-able buffer where the value of the
1062 * Status Register 2 will be written.
1063 *
1064 * Return: 0 on success, -errno otherwise.
1065 */
spi_nor_read_sr2(struct spi_nor * nor,u8 * sr2)1066 static int spi_nor_read_sr2(struct spi_nor *nor, u8 *sr2)
1067 {
1068 int ret;
1069
1070 if (nor->spimem) {
1071 struct spi_mem_op op =
1072 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDSR2, 1),
1073 SPI_MEM_OP_NO_ADDR,
1074 SPI_MEM_OP_NO_DUMMY,
1075 SPI_MEM_OP_DATA_IN(1, sr2, 1));
1076
1077 ret = spi_mem_exec_op(nor->spimem, &op);
1078 } else {
1079 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDSR2,
1080 sr2, 1);
1081 }
1082
1083 if (ret)
1084 dev_dbg(nor->dev, "error %d reading SR2\n", ret);
1085
1086 return ret;
1087 }
1088
1089 /**
1090 * spi_nor_erase_chip() - Erase the entire flash memory.
1091 * @nor: pointer to 'struct spi_nor'.
1092 *
1093 * Return: 0 on success, -errno otherwise.
1094 */
spi_nor_erase_chip(struct spi_nor * nor)1095 static int spi_nor_erase_chip(struct spi_nor *nor)
1096 {
1097 int ret;
1098
1099 dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
1100
1101 if (nor->spimem) {
1102 struct spi_mem_op op =
1103 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_CHIP_ERASE, 1),
1104 SPI_MEM_OP_NO_ADDR,
1105 SPI_MEM_OP_NO_DUMMY,
1106 SPI_MEM_OP_NO_DATA);
1107
1108 ret = spi_mem_exec_op(nor->spimem, &op);
1109 } else {
1110 ret = nor->controller_ops->write_reg(nor, SPINOR_OP_CHIP_ERASE,
1111 NULL, 0);
1112 }
1113
1114 if (ret)
1115 dev_dbg(nor->dev, "error %d erasing chip\n", ret);
1116
1117 return ret;
1118 }
1119
/*
 * Look up @opcode in the two-column mapping @table (@size rows). Returns the
 * mapped op code, or @opcode itself when no entry matches.
 */
static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
{
	const u8 (*entry)[2];

	for (entry = table; entry < table + size; entry++) {
		if ((*entry)[0] == opcode)
			return (*entry)[1];
	}

	/* No conversion found, keep input op code. */
	return opcode;
}
1131
/**
 * spi_nor_convert_3to4_read() - convert a 3-byte-address read op code to its
 *				 4-byte-address equivalent.
 * @opcode: the 3-byte-address read op code.
 *
 * Covers single/dual/quad/octal and DTR read variants. Unknown op codes are
 * returned unchanged.
 *
 * Return: the 4-byte-address op code, or @opcode if no mapping exists.
 */
u8 spi_nor_convert_3to4_read(u8 opcode)
{
	static const u8 spi_nor_3to4_read[][2] = {
		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
		{ SPINOR_OP_READ_1_1_8,	SPINOR_OP_READ_1_1_8_4B },
		{ SPINOR_OP_READ_1_8_8,	SPINOR_OP_READ_1_8_8_4B },

		{ SPINOR_OP_READ_1_1_1_DTR,	SPINOR_OP_READ_1_1_1_DTR_4B },
		{ SPINOR_OP_READ_1_2_2_DTR,	SPINOR_OP_READ_1_2_2_DTR_4B },
		{ SPINOR_OP_READ_1_4_4_DTR,	SPINOR_OP_READ_1_4_4_DTR_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
				      ARRAY_SIZE(spi_nor_3to4_read));
}
1152
/*
 * Convert a 3-byte-address page program op code to its 4-byte-address
 * equivalent. Unknown op codes are returned unchanged.
 */
static u8 spi_nor_convert_3to4_program(u8 opcode)
{
	static const u8 spi_nor_3to4_program[][2] = {
		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
		{ SPINOR_OP_PP_1_1_8,	SPINOR_OP_PP_1_1_8_4B },
		{ SPINOR_OP_PP_1_8_8,	SPINOR_OP_PP_1_8_8_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
				      ARRAY_SIZE(spi_nor_3to4_program));
}
1166
/*
 * Convert a 3-byte-address erase op code to its 4-byte-address equivalent.
 * Unknown op codes are returned unchanged.
 */
static u8 spi_nor_convert_3to4_erase(u8 opcode)
{
	static const u8 spi_nor_3to4_erase[][2] = {
		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
	};

	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
				      ARRAY_SIZE(spi_nor_3to4_erase));
}
1178
spi_nor_has_uniform_erase(const struct spi_nor * nor)1179 static bool spi_nor_has_uniform_erase(const struct spi_nor *nor)
1180 {
1181 return !!nor->params->erase_map.uniform_erase_type;
1182 }
1183
spi_nor_set_4byte_opcodes(struct spi_nor * nor)1184 static void spi_nor_set_4byte_opcodes(struct spi_nor *nor)
1185 {
1186 nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
1187 nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
1188 nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
1189
1190 if (!spi_nor_has_uniform_erase(nor)) {
1191 struct spi_nor_erase_map *map = &nor->params->erase_map;
1192 struct spi_nor_erase_type *erase;
1193 int i;
1194
1195 for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
1196 erase = &map->erase_type[i];
1197 erase->opcode =
1198 spi_nor_convert_3to4_erase(erase->opcode);
1199 }
1200 }
1201 }
1202
spi_nor_lock_and_prep(struct spi_nor * nor)1203 int spi_nor_lock_and_prep(struct spi_nor *nor)
1204 {
1205 int ret = 0;
1206
1207 mutex_lock(&nor->lock);
1208
1209 if (nor->controller_ops && nor->controller_ops->prepare) {
1210 ret = nor->controller_ops->prepare(nor);
1211 if (ret) {
1212 mutex_unlock(&nor->lock);
1213 return ret;
1214 }
1215 }
1216 return ret;
1217 }
1218
spi_nor_unlock_and_unprep(struct spi_nor * nor)1219 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
1220 {
1221 if (nor->controller_ops && nor->controller_ops->unprepare)
1222 nor->controller_ops->unprepare(nor);
1223 mutex_unlock(&nor->lock);
1224 }
1225
spi_nor_convert_addr(struct spi_nor * nor,loff_t addr)1226 static u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr)
1227 {
1228 if (!nor->params->convert_addr)
1229 return addr;
1230
1231 return nor->params->convert_addr(nor, addr);
1232 }
1233
1234 /*
1235 * Initiate the erasure of a single sector
1236 */
spi_nor_erase_sector(struct spi_nor * nor,u32 addr)1237 static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
1238 {
1239 int i;
1240
1241 addr = spi_nor_convert_addr(nor, addr);
1242
1243 if (nor->spimem) {
1244 struct spi_mem_op op =
1245 SPI_MEM_OP(SPI_MEM_OP_CMD(nor->erase_opcode, 1),
1246 SPI_MEM_OP_ADDR(nor->addr_width, addr, 1),
1247 SPI_MEM_OP_NO_DUMMY,
1248 SPI_MEM_OP_NO_DATA);
1249
1250 return spi_mem_exec_op(nor->spimem, &op);
1251 } else if (nor->controller_ops->erase) {
1252 return nor->controller_ops->erase(nor, addr);
1253 }
1254
1255 /*
1256 * Default implementation, if driver doesn't have a specialized HW
1257 * control
1258 */
1259 for (i = nor->addr_width - 1; i >= 0; i--) {
1260 nor->bouncebuf[i] = addr & 0xff;
1261 addr >>= 8;
1262 }
1263
1264 return nor->controller_ops->write_reg(nor, nor->erase_opcode,
1265 nor->bouncebuf, nor->addr_width);
1266 }
1267
1268 /**
1269 * spi_nor_div_by_erase_size() - calculate remainder and update new dividend
1270 * @erase: pointer to a structure that describes a SPI NOR erase type
1271 * @dividend: dividend value
1272 * @remainder: pointer to u32 remainder (will be updated)
1273 *
1274 * Return: the result of the division
1275 */
spi_nor_div_by_erase_size(const struct spi_nor_erase_type * erase,u64 dividend,u32 * remainder)1276 static u64 spi_nor_div_by_erase_size(const struct spi_nor_erase_type *erase,
1277 u64 dividend, u32 *remainder)
1278 {
1279 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
1280 *remainder = (u32)dividend & erase->size_mask;
1281 return dividend >> erase->size_shift;
1282 }
1283
1284 /**
1285 * spi_nor_find_best_erase_type() - find the best erase type for the given
1286 * offset in the serial flash memory and the
1287 * number of bytes to erase. The region in
1288 * which the address fits is expected to be
1289 * provided.
1290 * @map: the erase map of the SPI NOR
1291 * @region: pointer to a structure that describes a SPI NOR erase region
1292 * @addr: offset in the serial flash memory
1293 * @len: number of bytes to erase
1294 *
1295 * Return: a pointer to the best fitted erase type, NULL otherwise.
1296 */
1297 static const struct spi_nor_erase_type *
spi_nor_find_best_erase_type(const struct spi_nor_erase_map * map,const struct spi_nor_erase_region * region,u64 addr,u32 len)1298 spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
1299 const struct spi_nor_erase_region *region,
1300 u64 addr, u32 len)
1301 {
1302 const struct spi_nor_erase_type *erase;
1303 u32 rem;
1304 int i;
1305 u8 erase_mask = region->offset & SNOR_ERASE_TYPE_MASK;
1306
1307 /*
1308 * Erase types are ordered by size, with the smallest erase type at
1309 * index 0.
1310 */
1311 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
1312 /* Does the erase region support the tested erase type? */
1313 if (!(erase_mask & BIT(i)))
1314 continue;
1315
1316 erase = &map->erase_type[i];
1317
1318 /* Alignment is not mandatory for overlaid regions */
1319 if (region->offset & SNOR_OVERLAID_REGION &&
1320 region->size <= len)
1321 return erase;
1322
1323 /* Don't erase more than what the user has asked for. */
1324 if (erase->size > len)
1325 continue;
1326
1327 spi_nor_div_by_erase_size(erase, addr, &rem);
1328 if (rem)
1329 continue;
1330 else
1331 return erase;
1332 }
1333
1334 return NULL;
1335 }
1336
spi_nor_region_is_last(const struct spi_nor_erase_region * region)1337 static u64 spi_nor_region_is_last(const struct spi_nor_erase_region *region)
1338 {
1339 return region->offset & SNOR_LAST_REGION;
1340 }
1341
spi_nor_region_end(const struct spi_nor_erase_region * region)1342 static u64 spi_nor_region_end(const struct spi_nor_erase_region *region)
1343 {
1344 return (region->offset & ~SNOR_ERASE_FLAGS_MASK) + region->size;
1345 }
1346
1347 /**
1348 * spi_nor_region_next() - get the next spi nor region
1349 * @region: pointer to a structure that describes a SPI NOR erase region
1350 *
1351 * Return: the next spi nor region or NULL if last region.
1352 */
1353 struct spi_nor_erase_region *
spi_nor_region_next(struct spi_nor_erase_region * region)1354 spi_nor_region_next(struct spi_nor_erase_region *region)
1355 {
1356 if (spi_nor_region_is_last(region))
1357 return NULL;
1358 region++;
1359 return region;
1360 }
1361
1362 /**
1363 * spi_nor_find_erase_region() - find the region of the serial flash memory in
1364 * which the offset fits
1365 * @map: the erase map of the SPI NOR
1366 * @addr: offset in the serial flash memory
1367 *
1368 * Return: a pointer to the spi_nor_erase_region struct, ERR_PTR(-errno)
1369 * otherwise.
1370 */
1371 static struct spi_nor_erase_region *
spi_nor_find_erase_region(const struct spi_nor_erase_map * map,u64 addr)1372 spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr)
1373 {
1374 struct spi_nor_erase_region *region = map->regions;
1375 u64 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1376 u64 region_end = region_start + region->size;
1377
1378 while (addr < region_start || addr >= region_end) {
1379 region = spi_nor_region_next(region);
1380 if (!region)
1381 return ERR_PTR(-EINVAL);
1382
1383 region_start = region->offset & ~SNOR_ERASE_FLAGS_MASK;
1384 region_end = region_start + region->size;
1385 }
1386
1387 return region;
1388 }
1389
1390 /**
1391 * spi_nor_init_erase_cmd() - initialize an erase command
1392 * @region: pointer to a structure that describes a SPI NOR erase region
1393 * @erase: pointer to a structure that describes a SPI NOR erase type
1394 *
1395 * Return: the pointer to the allocated erase command, ERR_PTR(-errno)
1396 * otherwise.
1397 */
1398 static struct spi_nor_erase_command *
spi_nor_init_erase_cmd(const struct spi_nor_erase_region * region,const struct spi_nor_erase_type * erase)1399 spi_nor_init_erase_cmd(const struct spi_nor_erase_region *region,
1400 const struct spi_nor_erase_type *erase)
1401 {
1402 struct spi_nor_erase_command *cmd;
1403
1404 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
1405 if (!cmd)
1406 return ERR_PTR(-ENOMEM);
1407
1408 INIT_LIST_HEAD(&cmd->list);
1409 cmd->opcode = erase->opcode;
1410 cmd->count = 1;
1411
1412 if (region->offset & SNOR_OVERLAID_REGION)
1413 cmd->size = region->size;
1414 else
1415 cmd->size = erase->size;
1416
1417 return cmd;
1418 }
1419
1420 /**
1421 * spi_nor_destroy_erase_cmd_list() - destroy erase command list
1422 * @erase_list: list of erase commands
1423 */
spi_nor_destroy_erase_cmd_list(struct list_head * erase_list)1424 static void spi_nor_destroy_erase_cmd_list(struct list_head *erase_list)
1425 {
1426 struct spi_nor_erase_command *cmd, *next;
1427
1428 list_for_each_entry_safe(cmd, next, erase_list, list) {
1429 list_del(&cmd->list);
1430 kfree(cmd);
1431 }
1432 }
1433
/**
 * spi_nor_init_erase_cmd_list() - initialize erase command list
 * @nor:	pointer to a 'struct spi_nor'
 * @erase_list:	list of erase commands to be executed once we validate that the
 *		erase can be performed
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Builds the list of best fitted erase commands and verifies if the erase can
 * be performed. Nothing is issued to the flash here; on any failure the
 * partially-built list is destroyed before returning.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
				       struct list_head *erase_list,
				       u64 addr, u32 len)
{
	const struct spi_nor_erase_map *map = &nor->params->erase_map;
	const struct spi_nor_erase_type *erase, *prev_erase = NULL;
	struct spi_nor_erase_region *region;
	struct spi_nor_erase_command *cmd = NULL;
	u64 region_end;
	int ret = -EINVAL;

	region = spi_nor_find_erase_region(map, addr);
	if (IS_ERR(region))
		return PTR_ERR(region);

	region_end = spi_nor_region_end(region);

	while (len) {
		/* No erase type fits the remaining range exactly -> -EINVAL. */
		erase = spi_nor_find_best_erase_type(map, region, addr, len);
		if (!erase)
			goto destroy_erase_cmd_list;

		/*
		 * Start a new command when the erase type changes; otherwise
		 * coalesce by bumping the repeat count of the current command.
		 * Overlaid regions always get their own command since their
		 * size is region-specific. Note: cmd is non-NULL whenever the
		 * cmd->size comparison runs, because prev_erase == erase
		 * implies at least one prior iteration set cmd.
		 */
		if (prev_erase != erase ||
		    erase->size != cmd->size ||
		    region->offset & SNOR_OVERLAID_REGION) {
			cmd = spi_nor_init_erase_cmd(region, erase);
			if (IS_ERR(cmd)) {
				ret = PTR_ERR(cmd);
				goto destroy_erase_cmd_list;
			}

			list_add_tail(&cmd->list, erase_list);
		} else {
			cmd->count++;
		}

		addr += cmd->size;
		len -= cmd->size;

		/* Advance to the next region once this one is consumed. */
		if (len && addr >= region_end) {
			region = spi_nor_region_next(region);
			if (!region)
				goto destroy_erase_cmd_list;
			region_end = spi_nor_region_end(region);
		}

		prev_erase = erase;
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(erase_list);
	return ret;
}
1502
/**
 * spi_nor_erase_multi_sectors() - perform a non-uniform erase
 * @nor:	pointer to a 'struct spi_nor'
 * @addr:	offset in the serial flash memory
 * @len:	number of bytes to erase
 *
 * Build a list of best fitted erase commands and execute it once we validate
 * that the erase can be performed. The list is consumed (freed) as commands
 * complete; on error any remaining commands are destroyed.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
{
	LIST_HEAD(erase_list);
	struct spi_nor_erase_command *cmd, *next;
	int ret;

	/* Validate the whole request up front; nothing is issued on failure. */
	ret = spi_nor_init_erase_cmd_list(nor, &erase_list, addr, len);
	if (ret)
		return ret;

	list_for_each_entry_safe(cmd, next, &erase_list, list) {
		nor->erase_opcode = cmd->opcode;
		while (cmd->count) {
			/* Every erase cycle needs its own Write Enable. */
			ret = spi_nor_write_enable(nor);
			if (ret)
				goto destroy_erase_cmd_list;

			ret = spi_nor_erase_sector(nor, addr);
			if (ret)
				goto destroy_erase_cmd_list;

			addr += cmd->size;
			cmd->count--;

			/* Poll until the flash finishes the erase cycle. */
			ret = spi_nor_wait_till_ready_with_msleep(nor);
			if (ret)
				goto destroy_erase_cmd_list;
		}
		/* Command fully executed: release it. */
		list_del(&cmd->list);
		kfree(cmd);
	}

	return 0;

destroy_erase_cmd_list:
	spi_nor_destroy_erase_cmd_list(&erase_list);
	return ret;
}
1552
1553 /*
1554 * Erase an address range on the nor chip. The address range may extend
1555 * one or more erase sectors. Return an error is there is a problem erasing.
1556 */
spi_nor_erase(struct mtd_info * mtd,struct erase_info * instr)1557 static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
1558 {
1559 struct spi_nor *nor = mtd_to_spi_nor(mtd);
1560 u32 addr, len;
1561 uint32_t rem;
1562 int ret;
1563
1564 dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
1565 (long long)instr->len);
1566
1567 if (spi_nor_has_uniform_erase(nor)) {
1568 div_u64_rem(instr->len, mtd->erasesize, &rem);
1569 if (rem)
1570 return -EINVAL;
1571 }
1572
1573 addr = instr->addr;
1574 len = instr->len;
1575
1576 ret = spi_nor_lock_and_prep(nor);
1577 if (ret)
1578 return ret;
1579
1580 /* whole-chip erase? */
1581 if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
1582 unsigned long timeout;
1583
1584 ret = spi_nor_write_enable(nor);
1585 if (ret)
1586 goto erase_err;
1587
1588 ret = spi_nor_erase_chip(nor);
1589 if (ret)
1590 goto erase_err;
1591
1592 /*
1593 * Scale the timeout linearly with the size of the flash, with
1594 * a minimum calibrated to an old 2MB flash. We could try to
1595 * pull these from CFI/SFDP, but these values should be good
1596 * enough for now.
1597 */
1598 timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
1599 CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
1600 (unsigned long)(mtd->size / SZ_2M));
1601 ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
1602 if (ret)
1603 goto erase_err;
1604
1605 /* REVISIT in some cases we could speed up erasing large regions
1606 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
1607 * to use "small sector erase", but that's not always optimal.
1608 */
1609
1610 /* "sector"-at-a-time erase */
1611 } else if (spi_nor_has_uniform_erase(nor)) {
1612 while (len) {
1613 ret = spi_nor_write_enable(nor);
1614 if (ret)
1615 goto erase_err;
1616
1617 ret = spi_nor_erase_sector(nor, addr);
1618 if (ret)
1619 goto erase_err;
1620
1621 addr += mtd->erasesize;
1622 len -= mtd->erasesize;
1623
1624 ret = spi_nor_wait_till_ready_with_msleep(nor);
1625 if (ret)
1626 goto erase_err;
1627 }
1628
1629 /* erase multiple sectors */
1630 } else {
1631 ret = spi_nor_erase_multi_sectors(nor, addr, len);
1632 if (ret)
1633 goto erase_err;
1634 }
1635
1636 ret = spi_nor_write_disable(nor);
1637
1638 erase_err:
1639 spi_nor_unlock_and_unprep(nor);
1640
1641 return ret;
1642 }
1643
spi_nor_get_sr_bp_mask(struct spi_nor * nor)1644 static u8 spi_nor_get_sr_bp_mask(struct spi_nor *nor)
1645 {
1646 u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
1647
1648 if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6)
1649 return mask | SR_BP3_BIT6;
1650
1651 if (nor->flags & SNOR_F_HAS_4BIT_BP)
1652 return mask | SR_BP3;
1653
1654 return mask;
1655 }
1656
spi_nor_get_sr_tb_mask(struct spi_nor * nor)1657 static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
1658 {
1659 if (nor->flags & SNOR_F_HAS_SR_TB_BIT6)
1660 return SR_TB_BIT6;
1661 else
1662 return SR_TB_BIT5;
1663 }
1664
spi_nor_get_min_prot_length_sr(struct spi_nor * nor)1665 static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
1666 {
1667 unsigned int bp_slots, bp_slots_needed;
1668 u8 mask = spi_nor_get_sr_bp_mask(nor);
1669
1670 /* Reserved one for "protect none" and one for "protect all". */
1671 bp_slots = (1 << hweight8(mask)) - 2;
1672 bp_slots_needed = ilog2(nor->info->n_sectors);
1673
1674 if (bp_slots_needed > bp_slots)
1675 return nor->info->sector_size <<
1676 (bp_slots_needed - bp_slots);
1677 else
1678 return nor->info->sector_size;
1679 }
1680
/*
 * Decode the Block Protection (BP) and Top/Bottom (TB) bits of Status
 * Register value @sr into the currently protected range *@ofs / *@len.
 * BP == 0 means no protection; each BP increment doubles the protected
 * length, clamped to the chip size.
 */
static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
					uint64_t *len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 bp, val = sr & mask;

	/* Fold the non-contiguous BP3 (SR bit 6) into a contiguous BP field. */
	if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3_BIT6)
		val = (val & ~SR_BP3_BIT6) | SR_BP3;

	bp = val >> SR_BP_SHIFT;

	if (!bp) {
		/* No protection */
		*ofs = 0;
		*len = 0;
		return;
	}

	min_prot_len = spi_nor_get_min_prot_length_sr(nor);
	*len = min_prot_len << (bp - 1);

	if (*len > mtd->size)
		*len = mtd->size;

	/* TB set: the protected range grows from the bottom (offset 0). */
	if (nor->flags & SNOR_F_HAS_SR_TB && sr & tb_mask)
		*ofs = 0;
	else
		*ofs = mtd->size - *len;
}
1713
1714 /*
1715 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
1716 * @locked is false); 0 otherwise
1717 */
spi_nor_check_lock_status_sr(struct spi_nor * nor,loff_t ofs,uint64_t len,u8 sr,bool locked)1718 static int spi_nor_check_lock_status_sr(struct spi_nor *nor, loff_t ofs,
1719 uint64_t len, u8 sr, bool locked)
1720 {
1721 loff_t lock_offs;
1722 uint64_t lock_len;
1723
1724 if (!len)
1725 return 1;
1726
1727 spi_nor_get_locked_range_sr(nor, sr, &lock_offs, &lock_len);
1728
1729 if (locked)
1730 /* Requested range is a sub-range of locked range */
1731 return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
1732 else
1733 /* Requested range does not overlap with locked range */
1734 return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
1735 }
1736
/* Return 1 when [ofs, ofs + len) is fully inside the SR-protected range. */
static int spi_nor_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, true);
}
1742
/* Return 1 when [ofs, ofs + len) does not overlap the SR-protected range. */
static int spi_nor_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
				  u8 sr)
{
	return spi_nor_check_lock_status_sr(nor, ofs, len, sr, false);
}
1748
1749 /*
1750 * Lock a region of the flash. Compatible with ST Micro and similar flash.
1751 * Supports the block protection bits BP{0,1,2}/BP{0,1,2,3} in the status
1752 * register
1753 * (SR). Does not support these features found in newer SR bitfields:
1754 * - SEC: sector/block protect - only handle SEC=0 (block protect)
1755 * - CMP: complement protect - only support CMP=0 (range is not complemented)
1756 *
1757 * Support for the following is provided conditionally for some flash:
1758 * - TB: top/bottom protect
1759 *
1760 * Sample table portion for 8MB flash (Winbond w25q64fw):
1761 *
1762 * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion
1763 * --------------------------------------------------------------------------
1764 * X | X | 0 | 0 | 0 | NONE | NONE
1765 * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64
1766 * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32
1767 * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16
1768 * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8
1769 * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4
1770 * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2
1771 * X | X | 1 | 1 | 1 | 8 MB | ALL
1772 * ------|-------|-------|-------|-------|---------------|-------------------
1773 * 0 | 1 | 0 | 0 | 1 | 128 KB | Lower 1/64
1774 * 0 | 1 | 0 | 1 | 0 | 256 KB | Lower 1/32
1775 * 0 | 1 | 0 | 1 | 1 | 512 KB | Lower 1/16
1776 * 0 | 1 | 1 | 0 | 0 | 1 MB | Lower 1/8
1777 * 0 | 1 | 1 | 0 | 1 | 2 MB | Lower 1/4
1778 * 0 | 1 | 1 | 1 | 0 | 4 MB | Lower 1/2
1779 *
1780 * Returns negative on errors, 0 on success.
1781 */
static int spi_nor_sr_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is unlocked, we don't need to do anything */
	if (spi_nor_is_locked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is unlocked, we can't use 'bottom' protection */
	if (!spi_nor_is_locked_sr(nor, 0, ofs, status_old))
		can_be_bottom = false;

	/* If anything above us is unlocked, we can't use 'top' protection */
	if (!spi_nor_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
				  status_old))
		can_be_top = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should end up locked */
	if (use_top)
		lock_len = mtd->size - ofs;
	else
		lock_len = ofs + len;

	if (lock_len == mtd->size) {
		/* Whole chip: all BP bits set. */
		val = mask;
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		/* BP counts doublings of min_prot_len covering lock_len. */
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 to its non-contiguous position at SR bit 6. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Requested length is not encodable on this flash. */
		if (val & ~mask)
			return -EINVAL;

		/* Don't "lock" with no region! */
		if (!(val & mask))
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Disallow further writes if WP pin is asserted */
	status_new |= SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not unlock other areas */
	if ((status_new & mask) < (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1861
1862 /*
1863 * Unlock a region of the flash. See spi_nor_sr_lock() for more info
1864 *
1865 * Returns negative on errors, 0 on success.
1866 */
static int spi_nor_sr_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	struct mtd_info *mtd = &nor->mtd;
	u64 min_prot_len;
	int ret, status_old, status_new;
	u8 mask = spi_nor_get_sr_bp_mask(nor);
	u8 tb_mask = spi_nor_get_sr_tb_mask(nor);
	u8 pow, val;
	loff_t lock_len;
	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
	bool use_top;

	ret = spi_nor_read_sr(nor, nor->bouncebuf);
	if (ret)
		return ret;

	status_old = nor->bouncebuf[0];

	/* If nothing in our range is locked, we don't need to do anything */
	if (spi_nor_is_unlocked_sr(nor, ofs, len, status_old))
		return 0;

	/* If anything below us is locked, we can't use 'top' protection */
	if (!spi_nor_is_unlocked_sr(nor, 0, ofs, status_old))
		can_be_top = false;

	/* If anything above us is locked, we can't use 'bottom' protection */
	if (!spi_nor_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
				    status_old))
		can_be_bottom = false;

	if (!can_be_bottom && !can_be_top)
		return -EINVAL;

	/* Prefer top, if both are valid */
	use_top = can_be_top;

	/* lock_len: length of region that should remain locked */
	if (use_top)
		lock_len = mtd->size - (ofs + len);
	else
		lock_len = ofs;

	if (lock_len == 0) {
		val = 0; /* fully unlocked */
	} else {
		min_prot_len = spi_nor_get_min_prot_length_sr(nor);
		/* BP counts doublings of min_prot_len that stay locked. */
		pow = ilog2(lock_len) - ilog2(min_prot_len) + 1;
		val = pow << SR_BP_SHIFT;

		/* Move BP3 to its non-contiguous position at SR bit 6. */
		if (nor->flags & SNOR_F_HAS_SR_BP3_BIT6 && val & SR_BP3)
			val = (val & ~SR_BP3) | SR_BP3_BIT6;

		/* Some power-of-two sizes are not supported */
		if (val & ~mask)
			return -EINVAL;
	}

	status_new = (status_old & ~mask & ~tb_mask) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	if (!use_top)
		status_new |= tb_mask;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	return spi_nor_write_sr_and_check(nor, status_new);
}
1944
1945 /*
1946 * Check if a region of the flash is (completely) locked. See spi_nor_sr_lock()
1947 * for more info.
1948 *
1949 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
1950 * negative on errors.
1951 */
spi_nor_sr_is_locked(struct spi_nor * nor,loff_t ofs,uint64_t len)1952 static int spi_nor_sr_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
1953 {
1954 int ret;
1955
1956 ret = spi_nor_read_sr(nor, nor->bouncebuf);
1957 if (ret)
1958 return ret;
1959
1960 return spi_nor_is_locked_sr(nor, ofs, len, nor->bouncebuf[0]);
1961 }
1962
/* Status-Register-based lock/unlock implementation. */
static const struct spi_nor_locking_ops spi_nor_sr_locking_ops = {
	.lock = spi_nor_sr_lock,
	.unlock = spi_nor_sr_unlock,
	.is_locked = spi_nor_sr_is_locked,
};
1968
/* mtd ->_lock handler: dispatch to the flash's locking_ops under the lock. */
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int err;

	err = spi_nor_lock_and_prep(nor);
	if (err)
		return err;

	err = nor->params->locking_ops->lock(nor, ofs, len);
	spi_nor_unlock_and_unprep(nor);

	return err;
}
1983
/* mtd ->_unlock handler: dispatch to the flash's locking_ops under the lock. */
static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int err;

	err = spi_nor_lock_and_prep(nor);
	if (err)
		return err;

	err = nor->params->locking_ops->unlock(nor, ofs, len);
	spi_nor_unlock_and_unprep(nor);

	return err;
}
1998
/* mtd ->_is_locked handler: query the flash's locking_ops under the lock. */
static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int err;

	err = spi_nor_lock_and_prep(nor);
	if (err)
		return err;

	err = nor->params->locking_ops->is_locked(nor, ofs, len);
	spi_nor_unlock_and_unprep(nor);

	return err;
}
2013
2014 /**
2015 * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status
2016 * Register 1.
2017 * @nor: pointer to a 'struct spi_nor'
2018 *
2019 * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories.
2020 *
2021 * Return: 0 on success, -errno otherwise.
2022 */
spi_nor_sr1_bit6_quad_enable(struct spi_nor * nor)2023 int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor)
2024 {
2025 int ret;
2026
2027 ret = spi_nor_read_sr(nor, nor->bouncebuf);
2028 if (ret)
2029 return ret;
2030
2031 if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)
2032 return 0;
2033
2034 nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6;
2035
2036 return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]);
2037 }
2038
2039 /**
2040 * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status
2041 * Register 2.
2042 * @nor: pointer to a 'struct spi_nor'.
2043 *
2044 * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories.
2045 *
2046 * Return: 0 on success, -errno otherwise.
2047 */
spi_nor_sr2_bit1_quad_enable(struct spi_nor * nor)2048 int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor)
2049 {
2050 int ret;
2051
2052 if (nor->flags & SNOR_F_NO_READ_CR)
2053 return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1);
2054
2055 ret = spi_nor_read_cr(nor, nor->bouncebuf);
2056 if (ret)
2057 return ret;
2058
2059 if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)
2060 return 0;
2061
2062 nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1;
2063
2064 return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]);
2065 }
2066
2067 /**
2068 * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2.
2069 * @nor: pointer to a 'struct spi_nor'
2070 *
2071 * Set the Quad Enable (QE) bit in the Status Register 2.
2072 *
2073 * This is one of the procedures to set the QE bit described in the SFDP
2074 * (JESD216 rev B) specification but no manufacturer using this procedure has
2075 * been identified yet, hence the name of the function.
2076 *
2077 * Return: 0 on success, -errno otherwise.
2078 */
spi_nor_sr2_bit7_quad_enable(struct spi_nor * nor)2079 int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
2080 {
2081 u8 *sr2 = nor->bouncebuf;
2082 int ret;
2083 u8 sr2_written;
2084
2085 /* Check current Quad Enable bit value. */
2086 ret = spi_nor_read_sr2(nor, sr2);
2087 if (ret)
2088 return ret;
2089 if (*sr2 & SR2_QUAD_EN_BIT7)
2090 return 0;
2091
2092 /* Update the Quad Enable bit. */
2093 *sr2 |= SR2_QUAD_EN_BIT7;
2094
2095 ret = spi_nor_write_sr2(nor, sr2);
2096 if (ret)
2097 return ret;
2098
2099 sr2_written = *sr2;
2100
2101 /* Read back and check it. */
2102 ret = spi_nor_read_sr2(nor, sr2);
2103 if (ret)
2104 return ret;
2105
2106 if (*sr2 != sr2_written) {
2107 dev_dbg(nor->dev, "SR2: Read back test failed\n");
2108 return -EIO;
2109 }
2110
2111 return 0;
2112 }
2113
2114 /**
2115 * spi_nor_sr2_bit2_quad_enable() - set QE bit in Status Register 2.
2116 * @nor: pointer to a 'struct spi_nor'
2117 *
2118 * Set the Quad Enable (QE) bit in the Status Register 2.
2119 *
2120 * Return: 0 on success, -errno otherwise.
2121 */
spi_nor_sr2_bit2_quad_enable(struct spi_nor * nor)2122 int spi_nor_sr2_bit2_quad_enable(struct spi_nor *nor)
2123 {
2124 u8 *cr = nor->bouncebuf;
2125 int ret;
2126 u8 cr_written;
2127
2128 /* Check current Quad Enable bit value. */
2129 ret = spi_nor_read_cr(nor, cr);
2130 if (ret)
2131 return ret;
2132 if (*cr & SR2_QUAD_EN_BIT2)
2133 return 0;
2134
2135 /* Update the Quad Enable bit. */
2136 *cr |= SR2_QUAD_EN_BIT2;
2137
2138 ret = spi_nor_write_8bit_cr(nor, *cr);
2139 if (ret)
2140 return ret;
2141
2142 cr_written = *cr;
2143
2144 /* Read back and check it. */
2145 ret = spi_nor_read_cr(nor, cr);
2146 if (ret)
2147 return ret;
2148
2149 if (*cr != cr_written) {
2150 dev_dbg(nor->dev, "CR: Read back test failed\n");
2151 return -EIO;
2152 }
2153
2154 return 0;
2155 }
2156
/*
 * All supported flash manufacturers. spi_nor_read_id() scans these part
 * tables in array order, so when an ID could match several tables, the
 * manufacturer listed first wins.
 */
static const struct spi_nor_manufacturer *manufacturers[] = {
	&spi_nor_atmel,
	&spi_nor_boya,
	&spi_nor_catalyst,
	&spi_nor_dosilicon,
	&spi_nor_eon,
	&spi_nor_esmt,
	&spi_nor_everspin,
	&spi_nor_fmsh,
	&spi_nor_fujitsu,
	&spi_nor_gigadevice,
	&spi_nor_normem,
	&spi_nor_intel,
	&spi_nor_issi,
	&spi_nor_macronix,
	&spi_nor_micron,
	&spi_nor_puya,
	&spi_nor_st,
	&spi_nor_spansion,
	&spi_nor_sst,
	&spi_nor_winbond,
	&spi_nor_xilinx,
	&spi_nor_xmc,
	&spi_nor_xtx,
};
2182
2183 static const struct flash_info *
spi_nor_search_part_by_id(const struct flash_info * parts,unsigned int nparts,const u8 * id)2184 spi_nor_search_part_by_id(const struct flash_info *parts, unsigned int nparts,
2185 const u8 *id)
2186 {
2187 unsigned int i;
2188
2189 for (i = 0; i < nparts; i++) {
2190 if (parts[i].id_len &&
2191 !memcmp(parts[i].id, id, parts[i].id_len))
2192 return &parts[i];
2193 }
2194
2195 return NULL;
2196 }
2197
spi_nor_read_id(struct spi_nor * nor)2198 static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
2199 {
2200 const struct flash_info *info;
2201 u8 *id = nor->bouncebuf;
2202 unsigned int i;
2203 int ret;
2204
2205 if (nor->spimem) {
2206 struct spi_mem_op op =
2207 SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID, 1),
2208 SPI_MEM_OP_NO_ADDR,
2209 SPI_MEM_OP_NO_DUMMY,
2210 SPI_MEM_OP_DATA_IN(SPI_NOR_MAX_ID_LEN, id, 1));
2211
2212 ret = spi_mem_exec_op(nor->spimem, &op);
2213 } else {
2214 ret = nor->controller_ops->read_reg(nor, SPINOR_OP_RDID, id,
2215 SPI_NOR_MAX_ID_LEN);
2216 }
2217 if (ret) {
2218 dev_dbg(nor->dev, "error %d reading JEDEC ID\n", ret);
2219 return ERR_PTR(ret);
2220 }
2221
2222 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
2223 info = spi_nor_search_part_by_id(manufacturers[i]->parts,
2224 manufacturers[i]->nparts,
2225 id);
2226 if (info) {
2227 nor->manufacturer = manufacturers[i];
2228 return info;
2229 }
2230 }
2231
2232 dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
2233 SPI_NOR_MAX_ID_LEN, id);
2234 return ERR_PTR(-ENODEV);
2235 }
2236
/* mtd ->_read() handler: read @len bytes starting at @from into @buf. */
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	ssize_t nread;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	nread = spi_nor_lock_and_prep(nor);
	if (nread)
		return nread;

	/* The low-level read may return short counts; loop until done. */
	while (len) {
		loff_t addr = spi_nor_convert_addr(nor, from);

		nread = spi_nor_read_data(nor, addr, len, buf);
		if (nread == 0) {
			/* We shouldn't see 0-length reads */
			nread = -EIO;
			goto out;
		}
		if (nread < 0)
			goto out;

		WARN_ON(nread > len);
		*retlen += nread;
		buf += nread;
		from += nread;
		len -= nread;
	}
	nread = 0;

out:
	spi_nor_unlock_and_unprep(nor);
	return nread;
}
2275
2276 /*
2277 * Write an address range to the nor chip. Data must be written in
2278 * FLASH_PAGESIZE chunks. The address range may be any size provided
2279 * it is within the physical boundaries.
2280 */
spi_nor_write(struct mtd_info * mtd,loff_t to,size_t len,size_t * retlen,const u_char * buf)2281 static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
2282 size_t *retlen, const u_char *buf)
2283 {
2284 struct spi_nor *nor = mtd_to_spi_nor(mtd);
2285 size_t page_offset, page_remain, i;
2286 ssize_t ret;
2287
2288 dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
2289
2290 ret = spi_nor_lock_and_prep(nor);
2291 if (ret)
2292 return ret;
2293
2294 for (i = 0; i < len; ) {
2295 ssize_t written;
2296 loff_t addr = to + i;
2297
2298 /*
2299 * If page_size is a power of two, the offset can be quickly
2300 * calculated with an AND operation. On the other cases we
2301 * need to do a modulus operation (more expensive).
2302 * Power of two numbers have only one bit set and we can use
2303 * the instruction hweight32 to detect if we need to do a
2304 * modulus (do_div()) or not.
2305 */
2306 if (hweight32(nor->page_size) == 1) {
2307 page_offset = addr & (nor->page_size - 1);
2308 } else {
2309 uint64_t aux = addr;
2310
2311 page_offset = do_div(aux, nor->page_size);
2312 }
2313 /* the size of data remaining on the first page */
2314 page_remain = min_t(size_t,
2315 nor->page_size - page_offset, len - i);
2316
2317 addr = spi_nor_convert_addr(nor, addr);
2318
2319 ret = spi_nor_write_enable(nor);
2320 if (ret)
2321 goto write_err;
2322
2323 ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
2324 if (ret < 0)
2325 goto write_err;
2326 written = ret;
2327
2328 ret = spi_nor_wait_till_ready(nor);
2329 if (ret)
2330 goto write_err;
2331 *retlen += written;
2332 i += written;
2333 }
2334
2335 write_err:
2336 spi_nor_unlock_and_unprep(nor);
2337 return ret;
2338 }
2339
spi_nor_check(struct spi_nor * nor)2340 static int spi_nor_check(struct spi_nor *nor)
2341 {
2342 if (!nor->dev ||
2343 (!nor->spimem && !nor->controller_ops) ||
2344 (!nor->spimem && nor->controller_ops &&
2345 (!nor->controller_ops->read ||
2346 !nor->controller_ops->write ||
2347 !nor->controller_ops->read_reg ||
2348 !nor->controller_ops->write_reg))) {
2349 pr_err("spi-nor: please fill all the necessary fields!\n");
2350 return -EINVAL;
2351 }
2352
2353 if (nor->spimem && nor->controller_ops) {
2354 dev_err(nor->dev, "nor->spimem and nor->controller_ops are mutually exclusive, please set just one of them.\n");
2355 return -EINVAL;
2356 }
2357
2358 return 0;
2359 }
2360
2361 static void
spi_nor_set_read_settings(struct spi_nor_read_command * read,u8 num_mode_clocks,u8 num_wait_states,u8 opcode,enum spi_nor_protocol proto)2362 spi_nor_set_read_settings(struct spi_nor_read_command *read,
2363 u8 num_mode_clocks,
2364 u8 num_wait_states,
2365 u8 opcode,
2366 enum spi_nor_protocol proto)
2367 {
2368 read->num_mode_clocks = num_mode_clocks;
2369 read->num_wait_states = num_wait_states;
2370 read->opcode = opcode;
2371 read->proto = proto;
2372 }
2373
/* Fill in one Page Program command descriptor. */
void spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, u8 opcode,
			     enum spi_nor_protocol proto)
{
	pp->proto = proto;
	pp->opcode = opcode;
}
2380
/*
 * Translate a single SNOR_HWCAPS_* flag to its SNOR_CMD_* index using a
 * two-column lookup table. Returns -EINVAL when the flag is not in @table.
 */
static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size)
{
	const int (*row)[2] = table;
	const int (*end)[2] = table + size;

	/* Linear scan: the tables are tiny. */
	for (; row < end; row++)
		if ((*row)[0] == (int)hwcaps)
			return (*row)[1];

	return -EINVAL;
}
2391
/* Map a SNOR_HWCAPS_READ* flag to its SNOR_CMD_READ* table index. */
int spi_nor_hwcaps_read2cmd(u32 hwcaps)
{
	static const int hwcaps_read2cmd[][2] = {
		{ SNOR_HWCAPS_READ,		SNOR_CMD_READ },
		{ SNOR_HWCAPS_READ_FAST,	SNOR_CMD_READ_FAST },
		{ SNOR_HWCAPS_READ_1_1_1_DTR,	SNOR_CMD_READ_1_1_1_DTR },
		{ SNOR_HWCAPS_READ_1_1_2,	SNOR_CMD_READ_1_1_2 },
		{ SNOR_HWCAPS_READ_1_2_2,	SNOR_CMD_READ_1_2_2 },
		{ SNOR_HWCAPS_READ_2_2_2,	SNOR_CMD_READ_2_2_2 },
		{ SNOR_HWCAPS_READ_1_2_2_DTR,	SNOR_CMD_READ_1_2_2_DTR },
		{ SNOR_HWCAPS_READ_1_1_4,	SNOR_CMD_READ_1_1_4 },
		{ SNOR_HWCAPS_READ_1_4_4,	SNOR_CMD_READ_1_4_4 },
		{ SNOR_HWCAPS_READ_4_4_4,	SNOR_CMD_READ_4_4_4 },
		{ SNOR_HWCAPS_READ_1_4_4_DTR,	SNOR_CMD_READ_1_4_4_DTR },
		{ SNOR_HWCAPS_READ_1_1_8,	SNOR_CMD_READ_1_1_8 },
		{ SNOR_HWCAPS_READ_1_8_8,	SNOR_CMD_READ_1_8_8 },
		{ SNOR_HWCAPS_READ_8_8_8,	SNOR_CMD_READ_8_8_8 },
		{ SNOR_HWCAPS_READ_1_8_8_DTR,	SNOR_CMD_READ_1_8_8_DTR },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd,
				  ARRAY_SIZE(hwcaps_read2cmd));
}
2415
/* Map a SNOR_HWCAPS_PP* flag to its SNOR_CMD_PP* table index. */
static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
{
	static const int hwcaps_pp2cmd[][2] = {
		{ SNOR_HWCAPS_PP,		SNOR_CMD_PP },
		{ SNOR_HWCAPS_PP_1_1_4,		SNOR_CMD_PP_1_1_4 },
		{ SNOR_HWCAPS_PP_1_4_4,		SNOR_CMD_PP_1_4_4 },
		{ SNOR_HWCAPS_PP_4_4_4,		SNOR_CMD_PP_4_4_4 },
		{ SNOR_HWCAPS_PP_1_1_8,		SNOR_CMD_PP_1_1_8 },
		{ SNOR_HWCAPS_PP_1_8_8,		SNOR_CMD_PP_1_8_8 },
		{ SNOR_HWCAPS_PP_8_8_8,		SNOR_CMD_PP_8_8_8 },
	};

	return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd,
				  ARRAY_SIZE(hwcaps_pp2cmd));
}
2431
2432 /**
2433 * spi_nor_spimem_check_op - check if the operation is supported
2434 * by controller
2435 *@nor: pointer to a 'struct spi_nor'
2436 *@op: pointer to op template to be checked
2437 *
2438 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2439 */
spi_nor_spimem_check_op(struct spi_nor * nor,struct spi_mem_op * op)2440 static int spi_nor_spimem_check_op(struct spi_nor *nor,
2441 struct spi_mem_op *op)
2442 {
2443 /*
2444 * First test with 4 address bytes. The opcode itself might
2445 * be a 3B addressing opcode but we don't care, because
2446 * SPI controller implementation should not check the opcode,
2447 * but just the sequence.
2448 */
2449 op->addr.nbytes = 4;
2450 if (!spi_mem_supports_op(nor->spimem, op)) {
2451 if (nor->mtd.size > SZ_16M)
2452 return -ENOTSUPP;
2453
2454 /* If flash size <= 16MB, 3 address bytes are sufficient */
2455 op->addr.nbytes = 3;
2456 if (!spi_mem_supports_op(nor->spimem, op))
2457 return -ENOTSUPP;
2458 }
2459
2460 return 0;
2461 }
2462
2463 /**
2464 * spi_nor_spimem_check_readop - check if the read op is supported
2465 * by controller
2466 *@nor: pointer to a 'struct spi_nor'
2467 *@read: pointer to op template to be checked
2468 *
2469 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2470 */
spi_nor_spimem_check_readop(struct spi_nor * nor,const struct spi_nor_read_command * read)2471 static int spi_nor_spimem_check_readop(struct spi_nor *nor,
2472 const struct spi_nor_read_command *read)
2473 {
2474 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(read->opcode, 1),
2475 SPI_MEM_OP_ADDR(3, 0, 1),
2476 SPI_MEM_OP_DUMMY(0, 1),
2477 SPI_MEM_OP_DATA_IN(0, NULL, 1));
2478
2479 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(read->proto);
2480 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(read->proto);
2481 op.data.buswidth = spi_nor_get_protocol_data_nbits(read->proto);
2482 op.dummy.buswidth = op.addr.buswidth;
2483 op.dummy.nbytes = (read->num_mode_clocks + read->num_wait_states) *
2484 op.dummy.buswidth / 8;
2485
2486 return spi_nor_spimem_check_op(nor, &op);
2487 }
2488
2489 /**
2490 * spi_nor_spimem_check_pp - check if the page program op is supported
2491 * by controller
2492 *@nor: pointer to a 'struct spi_nor'
2493 *@pp: pointer to op template to be checked
2494 *
2495 * Returns 0 if operation is supported, -ENOTSUPP otherwise.
2496 */
spi_nor_spimem_check_pp(struct spi_nor * nor,const struct spi_nor_pp_command * pp)2497 static int spi_nor_spimem_check_pp(struct spi_nor *nor,
2498 const struct spi_nor_pp_command *pp)
2499 {
2500 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1),
2501 SPI_MEM_OP_ADDR(3, 0, 1),
2502 SPI_MEM_OP_NO_DUMMY,
2503 SPI_MEM_OP_DATA_OUT(0, NULL, 1));
2504
2505 op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto);
2506 op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto);
2507 op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto);
2508
2509 return spi_nor_spimem_check_op(nor, &op);
2510 }
2511
2512 /**
2513 * spi_nor_spimem_adjust_hwcaps - Find optimal Read/Write protocol
2514 * based on SPI controller capabilities
2515 * @nor: pointer to a 'struct spi_nor'
2516 * @hwcaps: pointer to resulting capabilities after adjusting
2517 * according to controller and flash's capability
2518 */
2519 static void
spi_nor_spimem_adjust_hwcaps(struct spi_nor * nor,u32 * hwcaps)2520 spi_nor_spimem_adjust_hwcaps(struct spi_nor *nor, u32 *hwcaps)
2521 {
2522 struct spi_nor_flash_parameter *params = nor->params;
2523 unsigned int cap;
2524
2525 /* DTR modes are not supported yet, mask them all. */
2526 *hwcaps &= ~SNOR_HWCAPS_DTR;
2527
2528 /* X-X-X modes are not supported yet, mask them all. */
2529 *hwcaps &= ~SNOR_HWCAPS_X_X_X;
2530
2531 for (cap = 0; cap < sizeof(*hwcaps) * BITS_PER_BYTE; cap++) {
2532 int rdidx, ppidx;
2533
2534 if (!(*hwcaps & BIT(cap)))
2535 continue;
2536
2537 rdidx = spi_nor_hwcaps_read2cmd(BIT(cap));
2538 if (rdidx >= 0 &&
2539 spi_nor_spimem_check_readop(nor, ¶ms->reads[rdidx]))
2540 *hwcaps &= ~BIT(cap);
2541
2542 ppidx = spi_nor_hwcaps_pp2cmd(BIT(cap));
2543 if (ppidx < 0)
2544 continue;
2545
2546 if (spi_nor_spimem_check_pp(nor,
2547 ¶ms->page_programs[ppidx]))
2548 *hwcaps &= ~BIT(cap);
2549 }
2550 }
2551
2552 /**
2553 * spi_nor_set_erase_type() - set a SPI NOR erase type
2554 * @erase: pointer to a structure that describes a SPI NOR erase type
2555 * @size: the size of the sector/block erased by the erase type
2556 * @opcode: the SPI command op code to erase the sector/block
2557 */
spi_nor_set_erase_type(struct spi_nor_erase_type * erase,u32 size,u8 opcode)2558 void spi_nor_set_erase_type(struct spi_nor_erase_type *erase, u32 size,
2559 u8 opcode)
2560 {
2561 erase->size = size;
2562 erase->opcode = opcode;
2563 /* JEDEC JESD216B Standard imposes erase sizes to be power of 2. */
2564 erase->size_shift = ffs(erase->size) - 1;
2565 erase->size_mask = (1 << erase->size_shift) - 1;
2566 }
2567
2568 /**
2569 * spi_nor_init_uniform_erase_map() - Initialize uniform erase map
2570 * @map: the erase map of the SPI NOR
2571 * @erase_mask: bitmask encoding erase types that can erase the entire
2572 * flash memory
2573 * @flash_size: the spi nor flash memory size
2574 */
spi_nor_init_uniform_erase_map(struct spi_nor_erase_map * map,u8 erase_mask,u64 flash_size)2575 void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map,
2576 u8 erase_mask, u64 flash_size)
2577 {
2578 /* Offset 0 with erase_mask and SNOR_LAST_REGION bit set */
2579 map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) |
2580 SNOR_LAST_REGION;
2581 map->uniform_region.size = flash_size;
2582 map->regions = &map->uniform_region;
2583 map->uniform_erase_type = erase_mask;
2584 }
2585
spi_nor_post_bfpt_fixups(struct spi_nor * nor,const struct sfdp_parameter_header * bfpt_header,const struct sfdp_bfpt * bfpt,struct spi_nor_flash_parameter * params)2586 int spi_nor_post_bfpt_fixups(struct spi_nor *nor,
2587 const struct sfdp_parameter_header *bfpt_header,
2588 const struct sfdp_bfpt *bfpt,
2589 struct spi_nor_flash_parameter *params)
2590 {
2591 int ret;
2592
2593 if (nor->manufacturer && nor->manufacturer->fixups &&
2594 nor->manufacturer->fixups->post_bfpt) {
2595 ret = nor->manufacturer->fixups->post_bfpt(nor, bfpt_header,
2596 bfpt, params);
2597 if (ret)
2598 return ret;
2599 }
2600
2601 if (nor->info->fixups && nor->info->fixups->post_bfpt)
2602 return nor->info->fixups->post_bfpt(nor, bfpt_header, bfpt,
2603 params);
2604
2605 return 0;
2606 }
2607
/* Pick the fastest (Fast) Read command shared by controller and flash. */
static int spi_nor_select_read(struct spi_nor *nor, u32 shared_hwcaps)
{
	const struct spi_nor_read_command *read;
	int best_match, cmd;

	/* The highest set bit in the read mask is the fastest shared mode. */
	best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1;
	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_read2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	read = &nor->params->reads[cmd];
	nor->read_opcode = read->opcode;
	nor->read_proto = read->proto;

	/*
	 * The SPI NOR framework does not need to distinguish mode clock
	 * cycles from wait-state clock cycles. The mode clock cycle count is
	 * what a QSPI flash uses to decide whether to enter or leave its
	 * 0-4-4 (Continuous Read / XIP) mode, and eXecution In Place is out
	 * of the scope of the mtd sub-system. Hence both are merged into the
	 * so called dummy clock cycles.
	 */
	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
	return 0;
}
2638
/* Pick the fastest Page Program command shared by controller and flash. */
static int spi_nor_select_pp(struct spi_nor *nor, u32 shared_hwcaps)
{
	const struct spi_nor_pp_command *pp;
	int best_match, cmd;

	/* The highest set bit in the pp mask is the fastest shared mode. */
	best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1;
	if (best_match < 0)
		return -EINVAL;

	cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match));
	if (cmd < 0)
		return -EINVAL;

	pp = &nor->params->page_programs[cmd];
	nor->program_opcode = pp->opcode;
	nor->write_proto = pp->proto;
	return 0;
}
2657
2658 /**
2659 * spi_nor_select_uniform_erase() - select optimum uniform erase type
2660 * @map: the erase map of the SPI NOR
2661 * @wanted_size: the erase type size to search for. Contains the value of
2662 * info->sector_size or of the "small sector" size in case
2663 * CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined.
2664 *
2665 * Once the optimum uniform sector erase command is found, disable all the
2666 * other.
2667 *
2668 * Return: pointer to erase type on success, NULL otherwise.
2669 */
2670 static const struct spi_nor_erase_type *
spi_nor_select_uniform_erase(struct spi_nor_erase_map * map,const u32 wanted_size)2671 spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
2672 const u32 wanted_size)
2673 {
2674 const struct spi_nor_erase_type *tested_erase, *erase = NULL;
2675 int i;
2676 u8 uniform_erase_type = map->uniform_erase_type;
2677
2678 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2679 if (!(uniform_erase_type & BIT(i)))
2680 continue;
2681
2682 tested_erase = &map->erase_type[i];
2683
2684 /*
2685 * If the current erase size is the one, stop here:
2686 * we have found the right uniform Sector Erase command.
2687 */
2688 if (tested_erase->size == wanted_size) {
2689 erase = tested_erase;
2690 break;
2691 }
2692
2693 /*
2694 * Otherwise, the current erase size is still a valid canditate.
2695 * Select the biggest valid candidate.
2696 */
2697 if (!erase && tested_erase->size)
2698 erase = tested_erase;
2699 /* keep iterating to find the wanted_size */
2700 }
2701
2702 if (!erase)
2703 return NULL;
2704
2705 /* Disable all other Sector Erase commands. */
2706 map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK;
2707 map->uniform_erase_type |= BIT(erase - map->erase_type);
2708 return erase;
2709 }
2710
spi_nor_select_erase(struct spi_nor * nor)2711 static int spi_nor_select_erase(struct spi_nor *nor)
2712 {
2713 struct spi_nor_erase_map *map = &nor->params->erase_map;
2714 const struct spi_nor_erase_type *erase = NULL;
2715 struct mtd_info *mtd = &nor->mtd;
2716 u32 wanted_size = nor->info->sector_size;
2717 int i;
2718
2719 /*
2720 * The previous implementation handling Sector Erase commands assumed
2721 * that the SPI flash memory has an uniform layout then used only one
2722 * of the supported erase sizes for all Sector Erase commands.
2723 * So to be backward compatible, the new implementation also tries to
2724 * manage the SPI flash memory as uniform with a single erase sector
2725 * size, when possible.
2726 */
2727 #ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
2728 /* prefer "small sector" erase if possible */
2729 wanted_size = 4096u;
2730 #endif
2731
2732 if (spi_nor_has_uniform_erase(nor)) {
2733 erase = spi_nor_select_uniform_erase(map, wanted_size);
2734 if (!erase)
2735 return -EINVAL;
2736 nor->erase_opcode = erase->opcode;
2737 mtd->erasesize = erase->size;
2738 return 0;
2739 }
2740
2741 /*
2742 * For non-uniform SPI flash memory, set mtd->erasesize to the
2743 * maximum erase sector size. No need to set nor->erase_opcode.
2744 */
2745 for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
2746 if (map->erase_type[i].size) {
2747 erase = &map->erase_type[i];
2748 break;
2749 }
2750 }
2751
2752 if (!erase)
2753 return -EINVAL;
2754
2755 mtd->erasesize = erase->size;
2756 return 0;
2757 }
2758
spi_nor_default_setup(struct spi_nor * nor,const struct spi_nor_hwcaps * hwcaps)2759 static int spi_nor_default_setup(struct spi_nor *nor,
2760 const struct spi_nor_hwcaps *hwcaps)
2761 {
2762 struct spi_nor_flash_parameter *params = nor->params;
2763 u32 ignored_mask, shared_mask;
2764 int err;
2765
2766 /*
2767 * Keep only the hardware capabilities supported by both the SPI
2768 * controller and the SPI flash memory.
2769 */
2770 shared_mask = hwcaps->mask & params->hwcaps.mask;
2771
2772 if (nor->spimem) {
2773 /*
2774 * When called from spi_nor_probe(), all caps are set and we
2775 * need to discard some of them based on what the SPI
2776 * controller actually supports (using spi_mem_supports_op()).
2777 */
2778 spi_nor_spimem_adjust_hwcaps(nor, &shared_mask);
2779 } else {
2780 /*
2781 * SPI n-n-n protocols are not supported when the SPI
2782 * controller directly implements the spi_nor interface.
2783 * Yet another reason to switch to spi-mem.
2784 */
2785 ignored_mask = SNOR_HWCAPS_X_X_X;
2786 if (shared_mask & ignored_mask) {
2787 dev_dbg(nor->dev,
2788 "SPI n-n-n protocols are not supported.\n");
2789 shared_mask &= ~ignored_mask;
2790 }
2791 }
2792
2793 /* Select the (Fast) Read command. */
2794 err = spi_nor_select_read(nor, shared_mask);
2795 if (err) {
2796 dev_dbg(nor->dev,
2797 "can't select read settings supported by both the SPI controller and memory.\n");
2798 return err;
2799 }
2800
2801 /* Select the Page Program command. */
2802 err = spi_nor_select_pp(nor, shared_mask);
2803 if (err) {
2804 dev_dbg(nor->dev,
2805 "can't select write settings supported by both the SPI controller and memory.\n");
2806 return err;
2807 }
2808
2809 /* Select the Sector Erase command. */
2810 err = spi_nor_select_erase(nor);
2811 if (err) {
2812 dev_dbg(nor->dev,
2813 "can't select erase settings supported by both the SPI controller and memory.\n");
2814 return err;
2815 }
2816
2817 return 0;
2818 }
2819
spi_nor_setup(struct spi_nor * nor,const struct spi_nor_hwcaps * hwcaps)2820 static int spi_nor_setup(struct spi_nor *nor,
2821 const struct spi_nor_hwcaps *hwcaps)
2822 {
2823 if (!nor->params->setup)
2824 return 0;
2825
2826 return nor->params->setup(nor, hwcaps);
2827 }
2828
2829 /**
2830 * spi_nor_manufacturer_init_params() - Initialize the flash's parameters and
2831 * settings based on MFR register and ->default_init() hook.
2832 * @nor: pointer to a 'struct spi_nor'.
2833 */
spi_nor_manufacturer_init_params(struct spi_nor * nor)2834 static void spi_nor_manufacturer_init_params(struct spi_nor *nor)
2835 {
2836 if (nor->manufacturer && nor->manufacturer->fixups &&
2837 nor->manufacturer->fixups->default_init)
2838 nor->manufacturer->fixups->default_init(nor);
2839
2840 if (nor->info->fixups && nor->info->fixups->default_init)
2841 nor->info->fixups->default_init(nor);
2842 }
2843
2844 /**
2845 * spi_nor_sfdp_init_params() - Initialize the flash's parameters and settings
2846 * based on JESD216 SFDP standard.
2847 * @nor: pointer to a 'struct spi_nor'.
2848 *
2849 * The method has a roll-back mechanism: in case the SFDP parsing fails, the
2850 * legacy flash parameters and settings will be restored.
2851 */
spi_nor_sfdp_init_params(struct spi_nor * nor)2852 static void spi_nor_sfdp_init_params(struct spi_nor *nor)
2853 {
2854 struct spi_nor_flash_parameter sfdp_params;
2855
2856 memcpy(&sfdp_params, nor->params, sizeof(sfdp_params));
2857
2858 if (spi_nor_parse_sfdp(nor, nor->params)) {
2859 memcpy(nor->params, &sfdp_params, sizeof(*nor->params));
2860 nor->addr_width = 0;
2861 nor->flags &= ~SNOR_F_4B_OPCODES;
2862 }
2863 }
2864
2865 /**
2866 * spi_nor_info_init_params() - Initialize the flash's parameters and settings
2867 * based on nor->info data.
2868 * @nor: pointer to a 'struct spi_nor'.
2869 */
spi_nor_info_init_params(struct spi_nor * nor)2870 static void spi_nor_info_init_params(struct spi_nor *nor)
2871 {
2872 struct spi_nor_flash_parameter *params = nor->params;
2873 struct spi_nor_erase_map *map = ¶ms->erase_map;
2874 const struct flash_info *info = nor->info;
2875 struct device_node *np = spi_nor_get_flash_node(nor);
2876 u8 i, erase_mask;
2877
2878 /* Initialize legacy flash parameters and settings. */
2879 params->quad_enable = spi_nor_sr2_bit1_quad_enable;
2880 params->set_4byte_addr_mode = spansion_set_4byte_addr_mode;
2881 params->setup = spi_nor_default_setup;
2882 /* Default to 16-bit Write Status (01h) Command */
2883 nor->flags |= SNOR_F_HAS_16BIT_SR;
2884
2885 /* Set SPI NOR sizes. */
2886 params->size = (u64)info->sector_size * info->n_sectors;
2887 params->page_size = info->page_size;
2888
2889 if (!(info->flags & SPI_NOR_NO_FR)) {
2890 /* Default to Fast Read for DT and non-DT platform devices. */
2891 params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
2892
2893 /* Mask out Fast Read if not requested at DT instantiation. */
2894 if (np && !of_property_read_bool(np, "m25p,fast-read"))
2895 params->hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
2896 }
2897
2898 /* (Fast) Read settings. */
2899 params->hwcaps.mask |= SNOR_HWCAPS_READ;
2900 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ],
2901 0, 0, SPINOR_OP_READ,
2902 SNOR_PROTO_1_1_1);
2903
2904 if (params->hwcaps.mask & SNOR_HWCAPS_READ_FAST)
2905 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST],
2906 0, 8, SPINOR_OP_READ_FAST,
2907 SNOR_PROTO_1_1_1);
2908
2909 if (info->flags & SPI_NOR_DUAL_READ) {
2910 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
2911 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2],
2912 0, 8, SPINOR_OP_READ_1_1_2,
2913 SNOR_PROTO_1_1_2);
2914 }
2915
2916 if (info->flags & SPI_NOR_QUAD_READ) {
2917 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
2918 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4],
2919 0, 8, SPINOR_OP_READ_1_1_4,
2920 SNOR_PROTO_1_1_4);
2921 }
2922
2923 if (info->flags & SPI_NOR_OCTAL_READ) {
2924 params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_8;
2925 spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_8],
2926 0, 8, SPINOR_OP_READ_1_1_8,
2927 SNOR_PROTO_1_1_8);
2928 }
2929
2930 /* Page Program settings. */
2931 params->hwcaps.mask |= SNOR_HWCAPS_PP;
2932 spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP],
2933 SPINOR_OP_PP, SNOR_PROTO_1_1_1);
2934
2935 /*
2936 * Sector Erase settings. Sort Erase Types in ascending order, with the
2937 * smallest erase size starting at BIT(0).
2938 */
2939 erase_mask = 0;
2940 i = 0;
2941 if (info->flags & SECT_4K_PMC) {
2942 erase_mask |= BIT(i);
2943 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2944 SPINOR_OP_BE_4K_PMC);
2945 i++;
2946 } else if (info->flags & SECT_4K) {
2947 erase_mask |= BIT(i);
2948 spi_nor_set_erase_type(&map->erase_type[i], 4096u,
2949 SPINOR_OP_BE_4K);
2950 i++;
2951 }
2952 erase_mask |= BIT(i);
2953 spi_nor_set_erase_type(&map->erase_type[i], info->sector_size,
2954 SPINOR_OP_SE);
2955 spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
2956 }
2957
2958 /**
2959 * spi_nor_post_sfdp_fixups() - Updates the flash's parameters and settings
2960 * after SFDP has been parsed (is also called for SPI NORs that do not
2961 * support RDSFDP).
2962 * @nor: pointer to a 'struct spi_nor'
2963 *
2964 * Typically used to tweak various parameters that could not be extracted by
2965 * other means (i.e. when information provided by the SFDP/flash_info tables
2966 * are incomplete or wrong).
2967 */
spi_nor_post_sfdp_fixups(struct spi_nor * nor)2968 static void spi_nor_post_sfdp_fixups(struct spi_nor *nor)
2969 {
2970 if (nor->manufacturer && nor->manufacturer->fixups &&
2971 nor->manufacturer->fixups->post_sfdp)
2972 nor->manufacturer->fixups->post_sfdp(nor);
2973
2974 if (nor->info->fixups && nor->info->fixups->post_sfdp)
2975 nor->info->fixups->post_sfdp(nor);
2976 }
2977
2978 /**
2979 * spi_nor_late_init_params() - Late initialization of default flash parameters.
2980 * @nor: pointer to a 'struct spi_nor'
2981 *
2982 * Used to set default flash parameters and settings when the ->default_init()
2983 * hook or the SFDP parser let voids.
2984 */
spi_nor_late_init_params(struct spi_nor * nor)2985 static void spi_nor_late_init_params(struct spi_nor *nor)
2986 {
2987 /*
2988 * NOR protection support. When locking_ops are not provided, we pick
2989 * the default ones.
2990 */
2991 if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
2992 nor->params->locking_ops = &spi_nor_sr_locking_ops;
2993 }
2994
2995 /**
2996 * spi_nor_init_params() - Initialize the flash's parameters and settings.
2997 * @nor: pointer to a 'struct spi_nor'.
2998 *
2999 * The flash parameters and settings are initialized based on a sequence of
3000 * calls that are ordered by priority:
3001 *
3002 * 1/ Default flash parameters initialization. The initializations are done
3003 * based on nor->info data:
3004 * spi_nor_info_init_params()
3005 *
3006 * which can be overwritten by:
3007 * 2/ Manufacturer flash parameters initialization. The initializations are
3008 * done based on MFR register, or when the decisions can not be done solely
3009 * based on MFR, by using specific flash_info tweeks, ->default_init():
3010 * spi_nor_manufacturer_init_params()
3011 *
3012 * which can be overwritten by:
3013 * 3/ SFDP flash parameters initialization. JESD216 SFDP is a standard and
3014 * should be more accurate that the above.
3015 * spi_nor_sfdp_init_params()
3016 *
3017 * Please note that there is a ->post_bfpt() fixup hook that can overwrite
3018 * the flash parameters and settings immediately after parsing the Basic
3019 * Flash Parameter Table.
3020 *
3021 * which can be overwritten by:
3022 * 4/ Post SFDP flash parameters initialization. Used to tweak various
3023 * parameters that could not be extracted by other means (i.e. when
3024 * information provided by the SFDP/flash_info tables are incomplete or
3025 * wrong).
3026 * spi_nor_post_sfdp_fixups()
3027 *
3028 * 5/ Late default flash parameters initialization, used when the
3029 * ->default_init() hook or the SFDP parser do not set specific params.
3030 * spi_nor_late_init_params()
3031 */
spi_nor_init_params(struct spi_nor * nor)3032 static int spi_nor_init_params(struct spi_nor *nor)
3033 {
3034 nor->params = devm_kzalloc(nor->dev, sizeof(*nor->params), GFP_KERNEL);
3035 if (!nor->params)
3036 return -ENOMEM;
3037
3038 spi_nor_info_init_params(nor);
3039
3040 spi_nor_manufacturer_init_params(nor);
3041
3042 if ((nor->info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
3043 !(nor->info->flags & SPI_NOR_SKIP_SFDP))
3044 spi_nor_sfdp_init_params(nor);
3045
3046 spi_nor_post_sfdp_fixups(nor);
3047
3048 spi_nor_late_init_params(nor);
3049
3050 return 0;
3051 }
3052
3053 /**
3054 * spi_nor_quad_enable() - enable Quad I/O if needed.
3055 * @nor: pointer to a 'struct spi_nor'
3056 *
3057 * Return: 0 on success, -errno otherwise.
3058 */
spi_nor_quad_enable(struct spi_nor * nor)3059 static int spi_nor_quad_enable(struct spi_nor *nor)
3060 {
3061 if (!nor->params->quad_enable)
3062 return 0;
3063
3064 if (!(spi_nor_get_protocol_width(nor->read_proto) == 4 ||
3065 spi_nor_get_protocol_width(nor->write_proto) == 4))
3066 return 0;
3067
3068 return nor->params->quad_enable(nor);
3069 }
3070
3071 /**
3072 * spi_nor_try_unlock_all() - Tries to unlock the entire flash memory array.
3073 * @nor: pointer to a 'struct spi_nor'.
3074 *
3075 * Some SPI NOR flashes are write protected by default after a power-on reset
3076 * cycle, in order to avoid inadvertent writes during power-up. Backward
3077 * compatibility imposes to unlock the entire flash memory array at power-up
3078 * by default.
3079 *
3080 * Unprotecting the entire flash array will fail for boards which are hardware
3081 * write-protected. Thus any errors are ignored.
3082 */
spi_nor_try_unlock_all(struct spi_nor * nor)3083 static void spi_nor_try_unlock_all(struct spi_nor *nor)
3084 {
3085 int ret;
3086
3087 if (!(nor->flags & SNOR_F_HAS_LOCK))
3088 return;
3089
3090 ret = spi_nor_unlock(&nor->mtd, 0, nor->params->size);
3091 if (ret)
3092 dev_dbg(nor->dev, "Failed to unlock the entire flash memory array\n");
3093 }
3094
/**
 * spi_nor_init() - Send the required SPI flash commands to initialize the
 *		    device.
 * @nor:	pointer to a 'struct spi_nor'.
 *
 * Enables Quad I/O when needed, tries to unlock a power-on write-protected
 * array, and finally enters 4-byte address mode for large flashes that lack
 * dedicated 4-byte opcodes.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int spi_nor_init(struct spi_nor *nor)
{
	int err;

	err = spi_nor_quad_enable(nor);
	if (err) {
		dev_dbg(nor->dev, "quad mode not supported\n");
		return err;
	}

	/* Best effort: errors are ignored for hardware-protected boards. */
	spi_nor_try_unlock_all(nor);

	if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES)) {
		/*
		 * If the RESET# pin isn't hooked up properly, or the system
		 * otherwise doesn't perform a reset command in the boot
		 * sequence, it's impossible to 100% protect against unexpected
		 * reboots (e.g., crashes). Warn the user (or hopefully, system
		 * designer) that this is bad.
		 */
		WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET,
			  "enabling reset hack; may not recover from unexpected reboots\n");
		nor->params->set_4byte_addr_mode(nor, true);
	}

	return 0;
}
3122
3123 /* mtd resume handler */
spi_nor_resume(struct mtd_info * mtd)3124 static void spi_nor_resume(struct mtd_info *mtd)
3125 {
3126 struct spi_nor *nor = mtd_to_spi_nor(mtd);
3127 struct device *dev = nor->dev;
3128 int ret;
3129
3130 /* re-initialize the nor chip */
3131 ret = spi_nor_init(nor);
3132 if (ret)
3133 dev_err(dev, "resume() failed\n");
3134 }
3135
spi_nor_get_device(struct mtd_info * mtd)3136 static int spi_nor_get_device(struct mtd_info *mtd)
3137 {
3138 struct mtd_info *master = mtd_get_master(mtd);
3139 struct spi_nor *nor = mtd_to_spi_nor(master);
3140 struct device *dev;
3141
3142 if (nor->spimem)
3143 dev = nor->spimem->spi->controller->dev.parent;
3144 else
3145 dev = nor->dev;
3146
3147 if (!try_module_get(dev->driver->owner))
3148 return -ENODEV;
3149
3150 return 0;
3151 }
3152
spi_nor_put_device(struct mtd_info * mtd)3153 static void spi_nor_put_device(struct mtd_info *mtd)
3154 {
3155 struct mtd_info *master = mtd_get_master(mtd);
3156 struct spi_nor *nor = mtd_to_spi_nor(master);
3157 struct device *dev;
3158
3159 if (nor->spimem)
3160 dev = nor->spimem->spi->controller->dev.parent;
3161 else
3162 dev = nor->dev;
3163
3164 module_put(dev->driver->owner);
3165 }
3166
spi_nor_restore(struct spi_nor * nor)3167 void spi_nor_restore(struct spi_nor *nor)
3168 {
3169 /* restore the addressing mode */
3170 if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) &&
3171 nor->flags & SNOR_F_BROKEN_RESET)
3172 nor->params->set_4byte_addr_mode(nor, false);
3173 }
3174 EXPORT_SYMBOL_GPL(spi_nor_restore);
3175
spi_nor_match_id(struct spi_nor * nor,const char * name)3176 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
3177 const char *name)
3178 {
3179 unsigned int i, j;
3180
3181 for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
3182 for (j = 0; j < manufacturers[i]->nparts; j++) {
3183 if (!strcmp(name, manufacturers[i]->parts[j].name)) {
3184 nor->manufacturer = manufacturers[i];
3185 return &manufacturers[i]->parts[j];
3186 }
3187 }
3188 }
3189
3190 return NULL;
3191 }
3192
spi_nor_set_addr_width(struct spi_nor * nor)3193 static int spi_nor_set_addr_width(struct spi_nor *nor)
3194 {
3195 if (nor->addr_width) {
3196 /* already configured from SFDP */
3197 } else if (nor->info->addr_width) {
3198 nor->addr_width = nor->info->addr_width;
3199 } else {
3200 nor->addr_width = 3;
3201 }
3202
3203 if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) {
3204 /* enable 4-byte addressing if the device exceeds 16MiB */
3205 nor->addr_width = 4;
3206 }
3207
3208 if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
3209 dev_dbg(nor->dev, "address width is too large: %u\n",
3210 nor->addr_width);
3211 return -EINVAL;
3212 }
3213
3214 /* Set 4byte opcodes when possible. */
3215 if (nor->addr_width == 4 && nor->flags & SNOR_F_4B_OPCODES &&
3216 !(nor->flags & SNOR_F_HAS_4BAIT))
3217 spi_nor_set_4byte_opcodes(nor);
3218
3219 return 0;
3220 }
3221
spi_nor_debugfs_init(struct spi_nor * nor,const struct flash_info * info)3222 static void spi_nor_debugfs_init(struct spi_nor *nor,
3223 const struct flash_info *info)
3224 {
3225 struct mtd_info *mtd = &nor->mtd;
3226
3227 mtd->dbg.partname = info->name;
3228 mtd->dbg.partid = devm_kasprintf(nor->dev, GFP_KERNEL, "spi-nor:%*phN",
3229 info->id_len, info->id);
3230 }
3231
/**
 * spi_nor_get_flash_info() - Resolve the 'struct flash_info' for this flash.
 * @nor:	pointer to a 'struct spi_nor'.
 * @name:	optional chip name from platform data/modalias, may be NULL.
 *
 * Looks @name up in the manufacturer tables, falling back to JEDEC READ ID
 * auto-detection when the name is absent or unknown. When a named part also
 * carries a JEDEC ID, the detected ID is cross-checked and wins over the
 * platform-provided name.
 *
 * Return: the matching flash_info on success, ERR_PTR(-ENOENT) when nothing
 * matches, or the error returned by spi_nor_read_id() on ID verification
 * failure.
 */
static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
						       const char *name)
{
	const struct flash_info *info = NULL;

	if (name)
		info = spi_nor_match_id(nor, name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return ERR_PTR(-ENOENT);

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return jinfo;
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(nor->dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	return info;
}
3271
/**
 * spi_nor_scan() - Detect and initialize a SPI NOR flash.
 * @nor:	pointer to a 'struct spi_nor'.
 * @name:	optional chip name, NULL for pure JEDEC auto-detection.
 * @hwcaps:	controller hardware capabilities used to select protocols.
 *
 * Detects the flash, initializes its parameters (flash_info + SFDP),
 * populates the embedded mtd_info operations and finally sends the init
 * commands to the device. Must be called before registering the MTD.
 *
 * Return: 0 on success, -errno otherwise.
 */
int spi_nor_scan(struct spi_nor *nor, const char *name,
		 const struct spi_nor_hwcaps *hwcaps)
{
	const struct flash_info *info;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	/* Reset SPI protocol for all commands. */
	nor->reg_proto = SNOR_PROTO_1_1_1;
	nor->read_proto = SNOR_PROTO_1_1_1;
	nor->write_proto = SNOR_PROTO_1_1_1;

	/*
	 * We need the bounce buffer early to read/write registers when going
	 * through the spi-mem layer (buffers have to be DMA-able).
	 * For spi-mem drivers, we'll reallocate a new buffer if
	 * nor->page_size turns out to be greater than PAGE_SIZE (which
	 * shouldn't happen before long since NOR pages are usually less
	 * than 1KB) after spi_nor_scan() returns.
	 */
	nor->bouncebuf_size = PAGE_SIZE;
	nor->bouncebuf = devm_kmalloc(dev, nor->bouncebuf_size,
				      GFP_KERNEL);
	if (!nor->bouncebuf)
		return -ENOMEM;

	info = spi_nor_get_flash_info(nor, name);
	if (IS_ERR(info))
		return PTR_ERR(info);

	nor->info = info;

	spi_nor_debugfs_init(nor, info);

	mutex_init(&nor->lock);

	/*
	 * Make sure the XSR_RDY flag is set before calling
	 * spi_nor_wait_till_ready(). Xilinx S3AN share MFR
	 * with Atmel SPI NOR.
	 */
	if (info->flags & SPI_NOR_XSR_RDY)
		nor->flags |= SNOR_F_READY_XSR_RDY;

	if (info->flags & SPI_NOR_HAS_LOCK)
		nor->flags |= SNOR_F_HAS_LOCK;

	mtd->_write = spi_nor_write;

	/* Init flash parameters based on flash_info struct and SFDP */
	ret = spi_nor_init_params(nor);
	if (ret)
		return ret;

	if (!mtd->name)
		mtd->name = dev_name(dev);

	/* Vendor quirk: fixed MTD name when the Rockchip SFC is in use. */
	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC))
		mtd->name = "sfc_nor";

	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = nor->params->size;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;
	mtd->_resume = spi_nor_resume;
	mtd->_get_device = spi_nor_get_device;
	mtd->_put_device = spi_nor_put_device;

	/* Only hook the locking ops when the flash actually provides them. */
	if (nor->params->locking_ops) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;
	if (info->flags & SPI_NOR_HAS_TB) {
		nor->flags |= SNOR_F_HAS_SR_TB;
		if (info->flags & SPI_NOR_TB_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_TB_BIT6;
	}

	if (info->flags & NO_CHIP_ERASE)
		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
	if (info->flags & USE_CLSR)
		nor->flags |= SNOR_F_USE_CLSR;

	if (info->flags & SPI_NOR_4BIT_BP) {
		nor->flags |= SNOR_F_HAS_4BIT_BP;
		if (info->flags & SPI_NOR_BP3_SR_BIT6)
			nor->flags |= SNOR_F_HAS_SR_BP3_BIT6;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = nor->params->page_size;
	mtd->writebufsize = nor->page_size;

	if (of_property_read_bool(np, "broken-flash-reset"))
		nor->flags |= SNOR_F_BROKEN_RESET;

	/*
	 * Configure the SPI memory:
	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
	 * - set the number of dummy cycles (mode cycles + wait states).
	 * - set the SPI protocols for register and memory accesses.
	 */
	ret = spi_nor_setup(nor, hwcaps);
	if (ret)
		return ret;

	if (info->flags & SPI_NOR_4B_OPCODES)
		nor->flags |= SNOR_F_4B_OPCODES;

	ret = spi_nor_set_addr_width(nor);
	if (ret)
		return ret;

	/* Send all the required SPI flash commands to initialize device */
	ret = spi_nor_init(nor);
	if (ret)
		return ret;

	dev_info(dev, "%s (%lld Kbytes) read_data x%d\n", info->name,
		 (long long)mtd->size >> 10, spi_nor_get_protocol_data_nbits(nor->read_proto));

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);
3429
spi_nor_create_read_dirmap(struct spi_nor * nor)3430 static int spi_nor_create_read_dirmap(struct spi_nor *nor)
3431 {
3432 struct spi_mem_dirmap_info info = {
3433 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
3434 SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3435 SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
3436 SPI_MEM_OP_DATA_IN(0, NULL, 1)),
3437 .offset = 0,
3438 .length = nor->mtd.size,
3439 };
3440 struct spi_mem_op *op = &info.op_tmpl;
3441
3442 /* get transfer protocols. */
3443 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
3444 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
3445 op->dummy.buswidth = op->addr.buswidth;
3446 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
3447
3448 /* convert the dummy cycles to the number of bytes */
3449 op->dummy.nbytes = (nor->read_dummy * op->dummy.buswidth) / 8;
3450
3451 nor->dirmap.rdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3452 &info);
3453 return PTR_ERR_OR_ZERO(nor->dirmap.rdesc);
3454 }
3455
spi_nor_create_write_dirmap(struct spi_nor * nor)3456 static int spi_nor_create_write_dirmap(struct spi_nor *nor)
3457 {
3458 struct spi_mem_dirmap_info info = {
3459 .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
3460 SPI_MEM_OP_ADDR(nor->addr_width, 0, 1),
3461 SPI_MEM_OP_NO_DUMMY,
3462 SPI_MEM_OP_DATA_OUT(0, NULL, 1)),
3463 .offset = 0,
3464 .length = nor->mtd.size,
3465 };
3466 struct spi_mem_op *op = &info.op_tmpl;
3467
3468 /* get transfer protocols. */
3469 op->cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
3470 op->addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
3471 op->dummy.buswidth = op->addr.buswidth;
3472 op->data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
3473
3474 if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
3475 op->addr.nbytes = 0;
3476
3477 nor->dirmap.wdesc = devm_spi_mem_dirmap_create(nor->dev, nor->spimem,
3478 &info);
3479 return PTR_ERR_OR_ZERO(nor->dirmap.wdesc);
3480 }
3481
spi_nor_misc_open(struct inode * inode,struct file * file)3482 static int spi_nor_misc_open(struct inode *inode, struct file *file)
3483 {
3484 struct miscdevice *miscdev = file->private_data;
3485 struct spi_nor_misc_dev *nor_dev;
3486
3487 nor_dev = container_of(miscdev, struct spi_nor_misc_dev, dev);
3488 file->private_data = nor_dev->nor;
3489
3490 return 0;
3491 }
3492
spi_nor_misc_ioctl(struct file * file,unsigned int cmd,unsigned long arg)3493 static long spi_nor_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3494 {
3495 struct spi_nor *nor = (struct spi_nor *)file->private_data;
3496 struct nor_flash_user_info info;
3497 void __user *uarg = (void __user *)arg;
3498 int i, ret;
3499
3500 switch (cmd) {
3501 case NOR_GET_FLASH_INFO:
3502 for (i = 0; i < SPI_NOR_MAX_ID_LEN; i++)
3503 info.id[i] = nor->info->id[i];
3504
3505 ret = copy_to_user(uarg, &info, sizeof(info));
3506 if (ret) {
3507 dev_err(nor->dev, "failed to get elbi data\n");
3508 return -EFAULT;
3509 }
3510 break;
3511 default:
3512 break;
3513 }
3514 return 0;
3515 }
3516
/* File operations for the per-flash misc character device. */
static const struct file_operations spi_nor_misc_ops = {
	.owner = THIS_MODULE,
	.open = spi_nor_misc_open,
	.unlocked_ioctl = spi_nor_misc_ioctl,
};
3522
spi_nor_add_misc(struct spi_nor * nor)3523 static int spi_nor_add_misc(struct spi_nor *nor)
3524 {
3525 int ret;
3526 struct spi_nor_misc_dev *nor_dev;
3527 char name[24];
3528
3529 nor_dev = devm_kzalloc(nor->dev, sizeof(struct spi_nor_misc_dev),
3530 GFP_KERNEL);
3531 if (!nor_dev)
3532 return -ENOMEM;
3533
3534 nor_dev->dev.minor = MISC_DYNAMIC_MINOR;
3535 snprintf(name, sizeof(name), "%s%s", "nor_misc_", dev_name(nor->dev));
3536 nor_dev->dev.name = devm_kstrdup(nor->dev, name, GFP_KERNEL);
3537 nor_dev->dev.fops = &spi_nor_misc_ops;
3538 nor_dev->dev.parent = nor->dev;
3539
3540 ret = misc_register(&nor_dev->dev);
3541 if (ret) {
3542 dev_err(nor->dev, "failed to register misc device.\n");
3543 return ret;
3544 }
3545
3546 nor_dev->nor = nor;
3547 nor->misc_dev = &nor_dev->dev;
3548
3549 dev_info(nor->dev, "register misc device\n");
3550
3551 return 0;
3552 }
3553
spi_nor_probe(struct spi_mem * spimem)3554 static int spi_nor_probe(struct spi_mem *spimem)
3555 {
3556 struct spi_device *spi = spimem->spi;
3557 struct flash_platform_data *data = dev_get_platdata(&spi->dev);
3558 struct spi_nor *nor;
3559 /*
3560 * Enable all caps by default. The core will mask them after
3561 * checking what's really supported using spi_mem_supports_op().
3562 */
3563 const struct spi_nor_hwcaps hwcaps = { .mask = SNOR_HWCAPS_ALL };
3564 char *flash_name;
3565 int ret;
3566
3567 nor = devm_kzalloc(&spi->dev, sizeof(*nor), GFP_KERNEL);
3568 if (!nor)
3569 return -ENOMEM;
3570
3571 nor->spimem = spimem;
3572 nor->dev = &spi->dev;
3573 spi_nor_set_flash_node(nor, spi->dev.of_node);
3574
3575 spi_mem_set_drvdata(spimem, nor);
3576
3577 if (data && data->name)
3578 nor->mtd.name = data->name;
3579
3580 if (!nor->mtd.name)
3581 nor->mtd.name = spi_mem_get_name(spimem);
3582
3583 /*
3584 * For some (historical?) reason many platforms provide two different
3585 * names in flash_platform_data: "name" and "type". Quite often name is
3586 * set to "m25p80" and then "type" provides a real chip name.
3587 * If that's the case, respect "type" and ignore a "name".
3588 */
3589 if (data && data->type)
3590 flash_name = data->type;
3591 else if (!strcmp(spi->modalias, "spi-nor"))
3592 flash_name = NULL; /* auto-detect */
3593 else
3594 flash_name = spi->modalias;
3595
3596 ret = spi_nor_scan(nor, flash_name, &hwcaps);
3597 if (ret)
3598 return ret;
3599
3600 /*
3601 * None of the existing parts have > 512B pages, but let's play safe
3602 * and add this logic so that if anyone ever adds support for such
3603 * a NOR we don't end up with buffer overflows.
3604 */
3605 if (nor->page_size > PAGE_SIZE) {
3606 nor->bouncebuf_size = nor->page_size;
3607 devm_kfree(nor->dev, nor->bouncebuf);
3608 nor->bouncebuf = devm_kmalloc(nor->dev,
3609 nor->bouncebuf_size,
3610 GFP_KERNEL);
3611 if (!nor->bouncebuf)
3612 return -ENOMEM;
3613 }
3614
3615 ret = spi_nor_create_read_dirmap(nor);
3616 if (ret)
3617 return ret;
3618
3619 ret = spi_nor_create_write_dirmap(nor);
3620 if (ret)
3621 return ret;
3622
3623 if (IS_ENABLED(CONFIG_MTD_SPI_NOR_MISC))
3624 spi_nor_add_misc(nor);
3625
3626 return mtd_device_register(&nor->mtd, data ? data->parts : NULL,
3627 data ? data->nr_parts : 0);
3628 }
3629
/* spi-mem remove handler: restore addressing mode and tear everything down. */
static int spi_nor_remove(struct spi_mem *spimem)
{
	struct spi_nor *nor = spi_mem_get_drvdata(spimem);

	/* Put the flash back into 3-byte addressing if we changed it. */
	spi_nor_restore(nor);

	if (IS_ENABLED(CONFIG_MTD_SPI_NOR_MISC) && nor->misc_dev)
		misc_deregister(nor->misc_dev);

	/* Clean up MTD stuff. */
	return mtd_device_unregister(&nor->mtd);
}
3642
static void spi_nor_shutdown(struct spi_mem *spimem)
{
	/* Leave the flash in a state the boot ROM can address after reboot. */
	spi_nor_restore(spi_mem_get_drvdata(spimem));
}
3649
3650 /*
3651 * Do NOT add to this array without reading the following:
3652 *
3653 * Historically, many flash devices are bound to this driver by their name. But
3654 * since most of these flash are compatible to some extent, and their
3655 * differences can often be differentiated by the JEDEC read-ID command, we
3656 * encourage new users to add support to the spi-nor library, and simply bind
3657 * against a generic string here (e.g., "jedec,spi-nor").
3658 *
3659 * Many flash names are kept here in this list (as well as in spi-nor.c) to
3660 * keep them available as module aliases for existing platforms.
3661 */
/* Legacy SPI device ID table; see the warning comment above before adding. */
static const struct spi_device_id spi_nor_dev_ids[] = {
	/*
	 * Allow non-DT platform devices to bind to the "spi-nor" modalias, and
	 * hack around the fact that the SPI core does not provide uevent
	 * matching for .of_match_table
	 */
	{"spi-nor"},

	/*
	 * Entries not used in DTs that should be safe to drop after replacing
	 * them with "spi-nor" in platform data.
	 */
	{"s25sl064a"},	{"w25x16"},	{"m25p10"},	{"m25px64"},

	/*
	 * Entries that were used in DTs without "jedec,spi-nor" fallback and
	 * should be kept for backward compatibility.
	 */
	{"at25df321a"},	{"at25df641"},	{"at26df081a"},
	{"mx25l4005a"},	{"mx25l1606e"},	{"mx25l6405d"},	{"mx25l12805d"},
	{"mx25l25635e"},{"mx66l51235l"},
	{"n25q064"},	{"n25q128a11"},	{"n25q128a13"},	{"n25q512a"},
	{"s25fl256s1"},	{"s25fl512s"},	{"s25sl12801"},	{"s25fl008k"},
	{"s25fl064k"},
	{"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"},
	{"m25p40"},	{"m25p80"},	{"m25p16"},	{"m25p32"},
	{"m25p64"},	{"m25p128"},
	{"w25x80"},	{"w25x32"},	{"w25q32"},	{"w25q32dw"},
	{"w25q80bl"},	{"w25q128"},	{"w25q256"},

	/* Flashes that can't be detected using JEDEC */
	{"m25p05-nonjedec"},	{"m25p10-nonjedec"},	{"m25p20-nonjedec"},
	{"m25p40-nonjedec"},	{"m25p80-nonjedec"},	{"m25p16-nonjedec"},
	{"m25p32-nonjedec"},	{"m25p64-nonjedec"},	{"m25p128-nonjedec"},

	/* Everspin MRAMs (non-JEDEC) */
	{ "mr25h128" }, /* 128 Kib, 40 MHz */
	{ "mr25h256" }, /* 256 Kib, 40 MHz */
	{ "mr25h10" },  /*   1 Mib, 40 MHz */
	{ "mr25h40" },  /*   4 Mib, 40 MHz */

	{ },
};
MODULE_DEVICE_TABLE(spi, spi_nor_dev_ids);
3706
/* Device-tree match table; new DTs should use the generic compatible. */
static const struct of_device_id spi_nor_of_table[] = {
	/*
	 * Generic compatibility for SPI NOR that can be identified by the
	 * JEDEC READ ID opcode (0x9F). Use this, if possible.
	 */
	{ .compatible = "jedec,spi-nor" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, spi_nor_of_table);
3716
3717 /*
3718 * REVISIT: many of these chips have deep power-down modes, which
3719 * should clearly be entered on suspend() to minimize power use.
3720 * And also when they're otherwise idle...
3721 */
/* spi-mem driver glue: probe/remove/shutdown plus legacy and OF matching. */
static struct spi_mem_driver spi_nor_driver = {
	.spidrv = {
		.driver = {
			.name = "spi-nor",
			.of_match_table = spi_nor_of_table,
		},
		.id_table = spi_nor_dev_ids,
	},
	.probe = spi_nor_probe,
	.remove = spi_nor_remove,
	.shutdown = spi_nor_shutdown,
};
module_spi_mem_driver(spi_nor_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");
3740