// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016-2017 Micron Technology, Inc.
 *
 * Authors:
 *	Peter Pan <peterpandong@micron.com>
 *	Boris Brezillon <boris.brezillon@bootlin.com>
 */

#define pr_fmt(fmt)	"spi-nand: " fmt

#ifndef __UBOOT__
#include <linux/device.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/spinand.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#else
#include <common.h>
#include <errno.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/mtd/spinand.h>
#endif

/* SPI NAND index visible in MTD names */
static int spi_nand_idx;

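/*
 * Multi-plane devices encode the plane index in the bits directly above the
 * column address. For example, on a device with 2 KiB pages, fls(2048) = 12,
 * so plane 1 is encoded as bit 12 of the column sent with the cache
 * operation.
 */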
static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int shift;

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	shift = fls(nand->memorg.pagesize);
	*column |= req->pos.plane << shift;
}

static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	*val = *spinand->scratchbuf;
	return 0;
}

static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	*spinand->scratchbuf = val;
	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}

static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}

static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (ret)
		return ret;

	spinand->cfg_cache[spinand->cur_target] = cfg;
	return 0;
}

/**
 * spinand_upd_cfg() - Update the configuration register
 * @spinand: the spinand device
 * @mask: the mask encoding the bits to update in the config reg
 * @val: the new value to apply
 *
 * Update the configuration register.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
{
	int ret;
	u8 cfg;

	ret = spinand_get_cfg(spinand, &cfg);
	if (ret)
		return ret;

	cfg &= ~mask;
	cfg |= val;

	return spinand_set_cfg(spinand, cfg);
}

/**
 * spinand_select_target() - Select a specific NAND target/die
 * @spinand: the spinand device
 * @target: the target/die to select
 *
 * Select a new target/die. If the chip only has one die, this function is a
 * NOOP.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_select_target(struct spinand_device *spinand, unsigned int target)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(target >= nand->memorg.ntargets))
		return -EINVAL;

	if (spinand->cur_target == target)
		return 0;

	if (nand->memorg.ntargets == 1) {
		spinand->cur_target = target;
		return 0;
	}

	ret = spinand->select_target(spinand, target);
	if (ret)
		return ret;

	spinand->cur_target = target;
	return 0;
}

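/*
 * Read the CFG register of every target/die once at init time and cache the
 * values, so that later configuration updates can skip the SPI transfer when
 * nothing changed (see spinand_set_cfg()).
 */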
static int spinand_init_cfg_cache(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	struct udevice *dev = spinand->slave->dev;
	unsigned int target;
	int ret;

	spinand->cfg_cache = devm_kzalloc(dev,
					  sizeof(*spinand->cfg_cache) *
					  nand->memorg.ntargets,
					  GFP_KERNEL);
	if (!spinand->cfg_cache)
		return -ENOMEM;

	for (target = 0; target < nand->memorg.ntargets; target++) {
		ret = spinand_select_target(spinand, target);
		if (ret)
			return ret;

		/*
		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
		 * here to bypass the config cache.
		 */
		ret = spinand_read_reg_op(spinand, REG_CFG,
					  &spinand->cfg_cache[target]);
		if (ret)
			return ret;
	}

	return 0;
}

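/*
 * Set the QE (quad enable) bit only when at least one of the selected cache
 * op templates actually uses a x4 data phase; otherwise leave it cleared.
 */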
static int spinand_init_quad_enable(struct spinand_device *spinand)
{
	bool enable = false;

	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
	    spinand->op_templates.write_cache->data.buswidth == 4 ||
	    spinand->op_templates.update_cache->data.buswidth == 4)
		enable = true;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
			       enable ? CFG_QUAD_ENABLE : 0);
}

static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}

static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

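/*
 * Read data and/or OOB bytes out of the device cache into the bounce
 * buffers. The request is widened to cover the full page and/or OOB area so
 * that the on-flash layout never leaks to the caller; the requested slice is
 * copied out at the end.
 */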
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	if (req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
		       req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}

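/*
 * Fill the device cache with the data/OOB bytes to be programmed. The bounce
 * buffer is first set to 0xff so that bytes the caller did not provide are
 * programmed as 0xff, which leaves the corresponding cells unchanged; the
 * whole page + OOB is then loaded into the cache.
 */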
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_column(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in terms of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}

static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}

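/*
 * Poll the status register until the device leaves the busy state, giving up
 * after roughly 400 ms. On success, the final status value is stored in @s
 * (when non-NULL) so callers can inspect the ECC/program/erase result bits.
 */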
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400;
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_BUSY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}

static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
			      u8 ndummy, u8 *buf)
{
	struct spi_mem_op op = SPINAND_READID_OP(
		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (!ret)
		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);

	return ret;
}

static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}

static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}

static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case STATUS_ECC_HAS_BITFLIPS:
		/*
		 * We have no way to know exactly how many bitflips have been
		 * fixed, so let's return the maximum possible value so that
		 * wear-leveling layers move the data immediately.
		 */
		return nand->eccreq.strength;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	return -EINVAL;
}

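/*
 * Full page read sequence: PAGE READ (array -> cache), wait for the device
 * to become ready, then READ FROM CACHE. When on-die ECC is enabled, the
 * status captured by spinand_wait() is decoded into a bitflip count or
 * -EBADMSG.
 */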
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}

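/*
 * Full page program sequence: WRITE ENABLE, load the cache, then PROGRAM
 * EXECUTE and wait for completion, reporting -EIO if the device flags a
 * program failure in its status register.
 */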
static int spinand_write_page(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	u8 status;
	int ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_write_to_cache_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_program_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_PROG_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
			ret = 0;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	if (ecc_failed && !ret)
		ret = -EBADMSG;

	return ret ? ret : max_bitflips;
}

static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

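/*
 * A block is flagged bad when the two bad block marker bytes in the OOB area
 * do not both read back as 0xff. The check is done in raw mode (ECC off) so
 * the marker is not remapped by the OOB layout.
 */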
static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooblen = sizeof(marker),
		.ooboffs = 0,
		.oobbuf.in = marker,
		.mode = MTD_OPS_RAW,
	};

	spinand_select_target(spinand, pos->target);
	spinand_read_page(spinand, &req, false);
	if (marker[0] != 0xff || marker[1] != 0xff)
		return true;

	return false;
}

static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

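/*
 * Mark a block bad by zeroing the two bad block marker bytes in the OOB
 * area, again in raw mode so the marker lands at the fixed BBM offset.
 */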
static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 marker[2] = { 0, 0 };
	struct nand_page_io_req req = {
		.pos = *pos,
		.ooboffs = 0,
		.ooblen = sizeof(marker),
		.oobbuf.out = marker,
		.mode = MTD_OPS_RAW,
	};
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	return spinand_write_page(spinand, &req);
}

static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}

static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
{
	struct spinand_device *spinand = nand_to_spinand(nand);
	u8 status;
	int ret;

	ret = spinand_select_target(spinand, pos->target);
	if (ret)
		return ret;

	ret = spinand_write_enable_op(spinand);
	if (ret)
		return ret;

	ret = spinand_erase_op(spinand, pos);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	if (!ret && (status & STATUS_ERASE_FAILED))
		ret = -EIO;

	return ret;
}

static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}

const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device *spinand,
			  const struct spi_mem_op *ops,
			  unsigned int nops)
{
	unsigned int i;

	for (i = 0; i < nops; i++) {
		if (spi_mem_supports_op(spinand->slave, &ops[i]))
			return &ops[i];
	}

	return NULL;
}

static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};

static const struct spinand_manufacturer *spinand_manufacturers[] = {
#ifdef CONFIG_SPI_NAND_GIGADEVICE
	&gigadevice_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_MACRONIX
	&macronix_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_MICRON
	&micron_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_TOSHIBA
	&toshiba_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_WINBOND
	&winbond_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_DOSILICON
	&dosilicon_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_ESMT
	&esmt_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_XTX
	&xtx_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_HYF
	&hyf_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_FMSH
	&fmsh_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_FORESEE
	&foresee_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_BIWIN
	&biwin_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_ETRON
	&etron_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_JSC
	&jsc_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_SILICONGO
	&silicongo_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_UNIM
	&unim_spinand_manufacturer,
#endif
};

static int spinand_manufacturer_match(struct spinand_device *spinand,
				      enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
		const struct spinand_manufacturer *manufacturer =
			spinand_manufacturers[i];

		if (id[0] != manufacturer->id)
			continue;

		ret = spinand_match_and_init(spinand,
					     manufacturer->chips,
					     manufacturer->nchips,
					     rdid_method);
		if (ret < 0)
			continue;

		spinand->manufacturer = manufacturer;
		return 0;
	}
	return -ENOTSUPP;
}

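/*
 * Devices differ in how READ_ID must be issued: opcode only, opcode followed
 * by an address byte, or opcode followed by a dummy byte. Try all three
 * layouts in turn until a manufacturer table matches the returned ID.
 */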
static int spinand_id_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	ret = spinand_read_id_op(spinand, 0, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 1, 0, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_ADDR);
	if (!ret)
		return 0;

	ret = spinand_read_id_op(spinand, 0, 1, id);
	if (ret)
		return ret;
	ret = spinand_manufacturer_match(spinand,
					 SPINAND_READID_METHOD_OPCODE_DUMMY);

	return ret;
}

static int spinand_manufacturer_init(struct spinand_device *spinand)
{
	if (spinand->manufacturer->ops->init)
		return spinand->manufacturer->ops->init(spinand);

	return 0;
}

static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
{
	/* Release manufacturer private data */
	if (spinand->manufacturer->ops->cleanup)
		spinand->manufacturer->ops->cleanup(spinand);
}

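/*
 * Pick the first op variant the controller can execute for a full
 * page + OOB transfer, even if the transfer has to be split into several
 * chunks by spi_mem_adjust_op_size().
 */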
static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device *spinand,
			  const struct spinand_op_variants *variants)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < variants->nops; i++) {
		struct spi_mem_op op = variants->ops[i];
		unsigned int nbytes;
		int ret;

		nbytes = nanddev_per_page_oobsize(nand) +
			 nanddev_page_size(nand);

		while (nbytes) {
			op.data.nbytes = nbytes;
			ret = spi_mem_adjust_op_size(spinand->slave, &op);
			if (ret)
				break;

			if (!spi_mem_supports_op(spinand->slave, &op))
				break;

			nbytes -= op.data.nbytes;
		}

		if (!nbytes)
			return &variants->ops[i];
	}

	return NULL;
}

/**
 * spinand_match_and_init() - Try to find a match between a device ID and an
 *			      entry in a spinand_info table
 * @spinand: SPI NAND object
 * @table: SPI NAND device description table
 * @table_size: size of the device description table
 * @rdid_method: read id method to match
 *
 * Match between a device ID retrieved through the READ_ID command and an
 * entry in the SPI NAND description table. If a match is found, the spinand
 * object will be initialized with information provided by the matching
 * spinand_info entry.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int spinand_match_and_init(struct spinand_device *spinand,
			   const struct spinand_info *table,
			   unsigned int table_size,
			   enum spinand_readid_method rdid_method)
{
	u8 *id = spinand->id.data;
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int i;

	for (i = 0; i < table_size; i++) {
		const struct spinand_info *info = &table[i];
		const struct spi_mem_op *op;

		if (rdid_method != info->devid.method)
			continue;

		if (memcmp(id + 1, info->devid.id, info->devid.len))
			continue;

		nand->memorg = table[i].memorg;
		nand->eccreq = table[i].eccreq;
		spinand->eccinfo = table[i].eccinfo;
		spinand->flags = table[i].flags;
		spinand->id.len = 1 + table[i].devid.len;
		spinand->select_target = table[i].select_target;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.read_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.read_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.write_cache);
		if (!op)
			return -ENOTSUPP;

		spinand->op_templates.write_cache = op;

		op = spinand_select_op_variant(spinand,
					       info->op_variants.update_cache);
		spinand->op_templates.update_cache = op;

		return 0;
	}

	return -ENOTSUPP;
}

static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(spinand->slave->dev, "unknown raw ID %*phN\n",
			SPINAND_MAX_ID_LEN, spinand->id.data);
		return ret;
	}

	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(spinand->slave->dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}

static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}

static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
					struct mtd_oob_region *region)
{
	if (section)
		return -ERANGE;

	/* Reserve 2 bytes for the BBM. */
	region->offset = 2;
	region->length = 62;

	return 0;
}

static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};

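/*
 * Bring the chip to a usable state: detect it, allocate the DMA-safe bounce
 * buffers, seed the per-die CFG cache, configure the quad/OTP bits, unlock
 * all blocks, and finally register the generic NAND layer and MTD hooks.
 */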
static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
			       nanddev_per_page_oobsize(nand),
			       GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(spinand->slave->dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	nand->bbt.option = NANDDEV_BBT_USE_FLASH;
	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so the whole OOB area is
	 * available to the user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nand->eccreq.strength;
	mtd->ecc_step_size = nand->eccreq.step_size;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}

static void spinand_cleanup(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	nanddev_cleanup(nand);
	spinand_manufacturer_cleanup(spinand);
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
}

static int spinand_bind(struct udevice *udev)
{
	int ret = 0;

#ifdef CONFIG_MTD_BLK
	struct udevice *bdev;

	ret = blk_create_devicef(udev, "mtd_blk", "blk", IF_TYPE_MTD,
				 BLK_MTD_SPI_NAND, 512, 0, &bdev);
	if (ret)
		printf("Cannot create block device\n");
#endif
	return ret;
}

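/*
 * U-Boot driver-model probe: wire the MTD/NAND structures together, name the
 * device "spi-nand<N>", then run the common init sequence and register the
 * MTD device. The #ifndef __UBOOT__ branch is the original Linux probe path
 * and is compiled out here.
 */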
static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}

#ifndef __UBOOT__
static int spinand_remove(struct udevice *slave)
{
	struct spinand_device *spinand;
	struct mtd_info *mtd;
	int ret;

	spinand = spi_mem_get_drvdata(slave);
	mtd = spinand_to_mtd(spinand);
	free(mtd->name);

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	spinand_cleanup(spinand);

	return 0;
}

static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
#endif /* __UBOOT__ */

static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};

U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.bind	= spinand_bind,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};