xref: /rk3399_rockchip-uboot/drivers/mtd/nand/spi/core.c (revision 2f6c020d95ebda22b28d3a31f574ec547a9281fb)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #ifndef __UBOOT__
13 #include <linux/device.h>
14 #include <linux/jiffies.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mtd/spinand.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include <linux/spi/spi.h>
21 #include <linux/spi/spi-mem.h>
22 #else
23 #include <common.h>
24 #include <errno.h>
25 #include <spi.h>
26 #include <spi-mem.h>
27 #include <linux/mtd/spinand.h>
28 #endif
29 
30 /* SPI NAND index visible in MTD names */
31 static int spi_nand_idx;
32 
33 static void spinand_cache_op_adjust_column(struct spinand_device *spinand,
34 					  const struct nand_page_io_req *req,
35 					  u16 *column)
36 {
37 	struct nand_device *nand = spinand_to_nand(spinand);
38 	unsigned int shift;
39 
40 	if (nand->memorg.planes_per_lun < 2)
41 		return;
42 
43 	/* The plane number is passed in the MSBs just above the column address */
44 	shift = fls(nand->memorg.pagesize);
45 	*column |= req->pos.plane << shift;
46 }
47 
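/*
 * Read a single feature/status register with the GET FEATURE command. The
 * value is transferred through the DMA-safe scratch buffer before being
 * copied to @val.
 */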
48 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
49 {
50 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
51 						      spinand->scratchbuf);
52 	int ret;
53 
54 	ret = spi_mem_exec_op(spinand->slave, &op);
55 	if (ret)
56 		return ret;
57 
58 	*val = *spinand->scratchbuf;
59 	return 0;
60 }
61 
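/*
 * Write a single feature register with the SET FEATURE command, going
 * through the DMA-safe scratch buffer.
 */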
62 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
63 {
64 	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
65 						      spinand->scratchbuf);
66 
67 	*spinand->scratchbuf = val;
68 	return spi_mem_exec_op(spinand->slave, &op);
69 }
70 
71 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
72 {
73 	return spinand_read_reg_op(spinand, REG_STATUS, status);
74 }
75 
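/*
 * Configuration register accessors: reads are served from the per-target
 * cfg_cache, and writes only touch the chip when the cached value changes.
 */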
76 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
77 {
78 	struct nand_device *nand = spinand_to_nand(spinand);
79 
80 	if (WARN_ON(spinand->cur_target < 0 ||
81 		    spinand->cur_target >= nand->memorg.ntargets))
82 		return -EINVAL;
83 
84 	*cfg = spinand->cfg_cache[spinand->cur_target];
85 	return 0;
86 }
87 
88 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
89 {
90 	struct nand_device *nand = spinand_to_nand(spinand);
91 	int ret;
92 
93 	if (WARN_ON(spinand->cur_target < 0 ||
94 		    spinand->cur_target >= nand->memorg.ntargets))
95 		return -EINVAL;
96 
97 	if (spinand->cfg_cache[spinand->cur_target] == cfg)
98 		return 0;
99 
100 	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
101 	if (ret)
102 		return ret;
103 
104 	spinand->cfg_cache[spinand->cur_target] = cfg;
105 	return 0;
106 }
107 
108 /**
109  * spinand_upd_cfg() - Update the configuration register
110  * @spinand: the spinand device
111  * @mask: the mask encoding the bits to update in the config reg
112  * @val: the new value to apply
113  *
114  * Update the configuration register.
115  *
116  * Return: 0 on success, a negative error code otherwise.
117  */
118 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
119 {
120 	int ret;
121 	u8 cfg;
122 
123 	ret = spinand_get_cfg(spinand, &cfg);
124 	if (ret)
125 		return ret;
126 
127 	cfg &= ~mask;
128 	cfg |= val;
129 
130 	return spinand_set_cfg(spinand, cfg);
131 }
132 
133 /**
134  * spinand_select_target() - Select a specific NAND target/die
135  * @spinand: the spinand device
136  * @target: the target/die to select
137  *
138  * Select a new target/die. If the chip only has one die, this function is a no-op.
139  *
140  * Return: 0 on success, a negative error code otherwise.
141  */
142 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
143 {
144 	struct nand_device *nand = spinand_to_nand(spinand);
145 	int ret;
146 
147 	if (WARN_ON(target >= nand->memorg.ntargets))
148 		return -EINVAL;
149 
150 	if (spinand->cur_target == target)
151 		return 0;
152 
153 	if (nand->memorg.ntargets == 1) {
154 		spinand->cur_target = target;
155 		return 0;
156 	}
157 
158 	ret = spinand->select_target(spinand, target);
159 	if (ret)
160 		return ret;
161 
162 	spinand->cur_target = target;
163 	return 0;
164 }
165 
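/*
 * Allocate the per-target configuration cache and populate it by reading
 * the CFG register of each target/die once at probe time.
 */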
166 static int spinand_init_cfg_cache(struct spinand_device *spinand)
167 {
168 	struct nand_device *nand = spinand_to_nand(spinand);
169 	struct udevice *dev = spinand->slave->dev;
170 	unsigned int target;
171 	int ret;
172 
173 	spinand->cfg_cache = devm_kzalloc(dev,
174 					  sizeof(*spinand->cfg_cache) *
175 					  nand->memorg.ntargets,
176 					  GFP_KERNEL);
177 	if (!spinand->cfg_cache)
178 		return -ENOMEM;
179 
180 	for (target = 0; target < nand->memorg.ntargets; target++) {
181 		ret = spinand_select_target(spinand, target);
182 		if (ret)
183 			return ret;
184 
185 		/*
186 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
187 		 * here to bypass the config cache.
188 		 */
189 		ret = spinand_read_reg_op(spinand, REG_CFG,
190 					  &spinand->cfg_cache[target]);
191 		if (ret)
192 			return ret;
193 	}
194 
195 	return 0;
196 }
197 
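/*
 * Set the QE bit when the chip has one and any of the selected cache op
 * templates uses a x4 data bus, otherwise clear it.
 */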
198 static int spinand_init_quad_enable(struct spinand_device *spinand)
199 {
200 	bool enable = false;
201 
202 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
203 		return 0;
204 
205 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
206 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
207 	    spinand->op_templates.update_cache->data.buswidth == 4)
208 		enable = true;
209 
210 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
211 			       enable ? CFG_QUAD_ENABLE : 0);
212 }
213 
214 static int spinand_ecc_enable(struct spinand_device *spinand,
215 			      bool enable)
216 {
217 	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
218 			       enable ? CFG_ECC_ENABLE : 0);
219 }
220 
221 static int spinand_write_enable_op(struct spinand_device *spinand)
222 {
223 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
224 
225 	return spi_mem_exec_op(spinand->slave, &op);
226 }
227 
228 static int spinand_load_page_op(struct spinand_device *spinand,
229 				const struct nand_page_io_req *req)
230 {
231 	struct nand_device *nand = spinand_to_nand(spinand);
232 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
233 	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
234 
235 	return spi_mem_exec_op(spinand->slave, &op);
236 }
237 
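/*
 * Read data and/or OOB bytes out of the device cache. Transfers normally go
 * through the driver bounce buffers and are split whenever the controller
 * cannot handle the requested length in a single operation; when continuous
 * read is supported, page data goes directly into the caller's buffer.
 */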
238 static int spinand_read_from_cache_op(struct spinand_device *spinand,
239 				      const struct nand_page_io_req *req)
240 {
241 	struct spi_mem_op op = *spinand->op_templates.read_cache;
242 	struct nand_device *nand = spinand_to_nand(spinand);
243 	struct mtd_info *mtd = nanddev_to_mtd(nand);
244 	struct nand_page_io_req adjreq = *req;
245 	unsigned int nbytes = 0;
246 	void *buf = NULL;
247 	u16 column = 0;
248 	int ret;
249 
250 	if (req->datalen) {
251 		adjreq.datalen = nanddev_page_size(nand);
252 		adjreq.dataoffs = 0;
253 		adjreq.databuf.in = spinand->databuf;
254 		buf = spinand->databuf;
255 		nbytes = adjreq.datalen;
256 	}
257 
258 	if (spinand->support_cont_read && req->datalen) {
259 		adjreq.datalen = req->datalen;
260 		adjreq.dataoffs = 0;
261 		adjreq.databuf.in = req->databuf.in;
262 		buf = req->databuf.in;
263 		nbytes = adjreq.datalen;
264 	}
265 
266 	if (req->ooblen) {
267 		adjreq.ooblen = nanddev_per_page_oobsize(nand);
268 		adjreq.ooboffs = 0;
269 		adjreq.oobbuf.in = spinand->oobbuf;
270 		nbytes += nanddev_per_page_oobsize(nand);
271 		if (!buf) {
272 			buf = spinand->oobbuf;
273 			column = nanddev_page_size(nand);
274 		}
275 	}
276 
277 	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
278 	op.addr.val = column;
279 
280 	/*
281 	 * Some controllers are limited in terms of max RX data size. In this
282 	 * case, just repeat the READ_CACHE operation after updating the
283 	 * column.
284 	 */
285 	while (nbytes) {
286 		op.data.buf.in = buf;
287 		op.data.nbytes = nbytes;
288 		ret = spi_mem_adjust_op_size(spinand->slave, &op);
289 		if (ret)
290 			return ret;
291 
292 		if (spinand->support_cont_read)
293 			op.addr.nbytes = 3;
294 		ret = spi_mem_exec_op(spinand->slave, &op);
295 		if (ret)
296 			return ret;
297 
298 		buf += op.data.nbytes;
299 		nbytes -= op.data.nbytes;
300 		op.addr.val += op.data.nbytes;
301 	}
302 
303 	if (!spinand->support_cont_read && req->datalen)
304 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs, req->datalen);
305 
306 	if (req->ooblen) {
307 		if (req->mode == MTD_OPS_AUTO_OOB)
308 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
309 						    spinand->oobbuf,
310 						    req->ooboffs,
311 						    req->ooblen);
312 		else
313 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
314 			       req->ooblen);
315 	}
316 
317 	return 0;
318 }
319 
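/*
 * Fill the device cache with the data and/or OOB bytes to be programmed,
 * splitting the transfer into one LOAD CACHE and, if the controller limits
 * the TX size, one or more RANDOM LOAD CACHE operations.
 */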
320 static int spinand_write_to_cache_op(struct spinand_device *spinand,
321 				     const struct nand_page_io_req *req)
322 {
323 	struct spi_mem_op op = *spinand->op_templates.write_cache;
324 	struct nand_device *nand = spinand_to_nand(spinand);
325 	struct mtd_info *mtd = nanddev_to_mtd(nand);
326 	struct nand_page_io_req adjreq = *req;
327 	unsigned int nbytes = 0;
328 	void *buf = NULL;
329 	u16 column = 0;
330 	int ret;
331 
332 	/*
333 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
334 	 * the cache content to 0xFF (depends on vendor implementation), so we
335 	 * must fill the page cache entirely even if we only want to program
336 	 * the data portion of the page, otherwise we might corrupt the BBM or
337 	 * user data previously programmed in the OOB area.
338 	 */
339 	memset(spinand->databuf, 0xff,
340 	       nanddev_page_size(nand) +
341 	       nanddev_per_page_oobsize(nand));
342 
343 	if (req->datalen) {
344 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
345 		       req->datalen);
346 		adjreq.dataoffs = 0;
347 		adjreq.datalen = nanddev_page_size(nand);
348 		adjreq.databuf.out = spinand->databuf;
349 		nbytes = adjreq.datalen;
350 		buf = spinand->databuf;
351 	}
352 
353 	if (req->ooblen) {
354 		if (req->mode == MTD_OPS_AUTO_OOB)
355 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
356 						    spinand->oobbuf,
357 						    req->ooboffs,
358 						    req->ooblen);
359 		else
360 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
361 			       req->ooblen);
362 
363 		adjreq.ooblen = nanddev_per_page_oobsize(nand);
364 		adjreq.ooboffs = 0;
365 		nbytes += nanddev_per_page_oobsize(nand);
366 		if (!buf) {
367 			buf = spinand->oobbuf;
368 			column = nanddev_page_size(nand);
369 		}
370 	}
371 
372 	spinand_cache_op_adjust_column(spinand, &adjreq, &column);
373 
374 	op = *spinand->op_templates.write_cache;
375 	op.addr.val = column;
376 
377 	/*
378 	 * Some controllers are limited in terms of max TX data size. In this
379 	 * case, split the operation into one LOAD CACHE and one or more
380 	 * LOAD RANDOM CACHE.
381 	 */
382 	while (nbytes) {
383 		op.data.buf.out = buf;
384 		op.data.nbytes = nbytes;
385 
386 		ret = spi_mem_adjust_op_size(spinand->slave, &op);
387 		if (ret)
388 			return ret;
389 
390 		ret = spi_mem_exec_op(spinand->slave, &op);
391 		if (ret)
392 			return ret;
393 
394 		buf += op.data.nbytes;
395 		nbytes -= op.data.nbytes;
396 		op.addr.val += op.data.nbytes;
397 
398 		/*
399 		 * We need to use the RANDOM LOAD CACHE operation if there's
400 		 * more than one iteration, because the LOAD operation might
401 		 * reset the cache content.
402 		 */
403 		if (nbytes) {
404 			column = op.addr.val;
405 			op = *spinand->op_templates.update_cache;
406 			op.addr.val = column;
407 		}
408 	}
409 
410 	return 0;
411 }
412 
413 static int spinand_program_op(struct spinand_device *spinand,
414 			      const struct nand_page_io_req *req)
415 {
416 	struct nand_device *nand = spinand_to_nand(spinand);
417 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
418 	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
419 
420 	return spi_mem_exec_op(spinand->slave, &op);
421 }
422 
423 static int spinand_erase_op(struct spinand_device *spinand,
424 			    const struct nand_pos *pos)
425 {
426 	struct nand_device *nand = &spinand->base;
427 	unsigned int row = nanddev_pos_to_row(nand, pos);
428 	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
429 
430 	return spi_mem_exec_op(spinand->slave, &op);
431 }
432 
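/*
 * Poll the status register until the busy (OIP) bit clears or a ~400 ms
 * timeout expires. The last status value read is returned through @s when
 * the caller asks for it.
 */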
433 static int spinand_wait(struct spinand_device *spinand, u8 *s)
434 {
435 	unsigned long start, stop;
436 	u8 status;
437 	int ret;
438 
439 	start = get_timer(0);
440 	stop = 400;
441 	do {
442 		ret = spinand_read_status(spinand, &status);
443 		if (ret)
444 			return ret;
445 
446 		if (!(status & STATUS_BUSY))
447 			goto out;
448 	} while (get_timer(start) < stop);
449 
450 	/*
451 	 * Extra read, just in case the STATUS_BUSY bit has cleared
452 	 * since our last check
453 	 */
454 	ret = spinand_read_status(spinand, &status);
455 	if (ret)
456 		return ret;
457 
458 out:
459 	if (s)
460 		*s = status;
461 
462 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
463 }
464 
465 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
466 			      u8 ndummy, u8 *buf)
467 {
468 	struct spi_mem_op op = SPINAND_READID_OP(
469 		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
470 	int ret;
471 
472 	ret = spi_mem_exec_op(spinand->slave, &op);
473 	if (!ret)
474 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
475 
476 	return ret;
477 }
478 
479 #if !CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
480 static int spinand_reset_op(struct spinand_device *spinand)
481 {
482 	struct spi_mem_op op = SPINAND_RESET_OP;
483 	int ret;
484 
485 	ret = spi_mem_exec_op(spinand->slave, &op);
486 	if (ret)
487 		return ret;
488 
489 	return spinand_wait(spinand, NULL);
490 }
491 #endif
492 
493 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
494 {
495 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
496 }
497 
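/*
 * Translate the ECC bits of the status register into a bitflip count,
 * delegating to the vendor specific ->get_status() hook when one is
 * provided.
 */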
498 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
499 {
500 	struct nand_device *nand = spinand_to_nand(spinand);
501 
502 	if (spinand->eccinfo.get_status)
503 		return spinand->eccinfo.get_status(spinand, status);
504 
505 	switch (status & STATUS_ECC_MASK) {
506 	case STATUS_ECC_NO_BITFLIPS:
507 		return 0;
508 
509 	case STATUS_ECC_HAS_BITFLIPS:
510 		/*
511 		 * We have no way to know exactly how many bitflips have been
512 		 * fixed, so let's return the maximum possible value so that
513 		 * wear-leveling layers move the data immediately.
514 		 */
515 		return nand->eccreq.strength;
516 
517 	case STATUS_ECC_UNCOR_ERROR:
518 		return -EBADMSG;
519 
520 	default:
521 		break;
522 	}
523 
524 	return -EINVAL;
525 }
526 
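/*
 * Read one page: load it into the device cache, wait for the chip to become
 * ready, read the cache out and, if ECC was enabled, report the ECC status.
 */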
527 static int spinand_read_page(struct spinand_device *spinand,
528 			     const struct nand_page_io_req *req,
529 			     bool ecc_enabled)
530 {
531 	u8 status = 0;
532 	int ret;
533 
534 	ret = spinand_load_page_op(spinand, req);
535 	if (ret)
536 		return ret;
537 
538 	ret = spinand_wait(spinand, &status);
539 	/*
540 	 * When bits other than OIP are set in the status register, the status
541 	 * value may be inaccurate and needs to be read again to confirm it
542 	 */
543 	if (spinand->id.data[0] == 0x01 && status && !ret)
544 		ret = spinand_wait(spinand, &status);
545 	if (ret < 0)
546 		return ret;
547 
548 	ret = spinand_read_from_cache_op(spinand, req);
549 	if (ret)
550 		return ret;
551 
552 	if (spinand->support_cont_read && !(spinand->slave->mode & SPI_DMA_PREPARE))
553 		spinand_wait(spinand, &status);
554 
555 	if (!ecc_enabled)
556 		return 0;
557 
558 	return spinand_check_ecc_status(spinand, status);
559 }
560 
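/*
 * Program one page: enable writes, fill the device cache, issue PROGRAM
 * EXECUTE and wait for completion, returning -EIO on a program failure.
 */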
561 static int spinand_write_page(struct spinand_device *spinand,
562 			      const struct nand_page_io_req *req)
563 {
564 	u8 status;
565 	int ret;
566 
567 	ret = spinand_write_enable_op(spinand);
568 	if (ret)
569 		return ret;
570 
571 	ret = spinand_write_to_cache_op(spinand, req);
572 	if (ret)
573 		return ret;
574 
575 	ret = spinand_program_op(spinand, req);
576 	if (ret)
577 		return ret;
578 
579 	ret = spinand_wait(spinand, &status);
580 	if (!ret && (status & STATUS_PROG_FAILED))
581 		ret = -EIO;
582 
583 	return ret;
584 }
585 
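/*
 * mtd->_read_oob handler: iterate over the requested pages, selecting the
 * right target and ECC mode for each one, and accumulate the maximum bitflip
 * count. An uncorrectable error is only reported once the whole request has
 * been processed.
 */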
586 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
587 			    struct mtd_oob_ops *ops)
588 {
589 	struct spinand_device *spinand = mtd_to_spinand(mtd);
590 	struct nand_device *nand = mtd_to_nanddev(mtd);
591 	unsigned int max_bitflips = 0;
592 	struct nand_io_iter iter;
593 	bool enable_ecc = false;
594 	bool ecc_failed = false;
595 	int ret = 0;
596 
597 	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
598 		enable_ecc = true;
599 
600 #ifndef __UBOOT__
601 	mutex_lock(&spinand->lock);
602 #endif
603 
604 	nanddev_io_for_each_page(nand, from, ops, &iter) {
605 		ret = spinand_select_target(spinand, iter.req.pos.target);
606 		if (ret)
607 			break;
608 
609 		ret = spinand_ecc_enable(spinand, enable_ecc);
610 		if (ret)
611 			break;
612 
613 		if (spinand->support_cont_read) {
614 			iter.req.datalen = ops->len;
615 			iter.req.ooblen = 0;
616 		}
617 		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
618 		if (ret < 0 && ret != -EBADMSG)
619 			break;
620 
621 		if (ret == -EBADMSG) {
622 			ecc_failed = true;
623 			mtd->ecc_stats.failed++;
624 		} else {
625 			mtd->ecc_stats.corrected += ret;
626 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
627 		}
628 
629 		ret = 0;
630 		if (spinand->support_cont_read) {
631 			ops->retlen = ops->len;
632 			ops->oobretlen = ops->ooblen;
633 			break;
634 		}
635 
636 		ops->retlen += iter.req.datalen;
637 		ops->oobretlen += iter.req.ooblen;
638 	}
639 
640 #ifndef __UBOOT__
641 	mutex_unlock(&spinand->lock);
642 #endif
643 	if (ecc_failed && !ret)
644 		ret = -EBADMSG;
645 
646 	return ret ? ret : max_bitflips;
647 }
648 
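/*
 * mtd->_write_oob handler: iterate over the requested pages and program them
 * one by one, stopping at the first error.
 */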
649 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
650 			     struct mtd_oob_ops *ops)
651 {
652 	struct spinand_device *spinand = mtd_to_spinand(mtd);
653 	struct nand_device *nand = mtd_to_nanddev(mtd);
654 	struct nand_io_iter iter;
655 	bool enable_ecc = false;
656 	int ret = 0;
657 
658 	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
659 		enable_ecc = true;
660 
661 #ifndef __UBOOT__
662 	mutex_lock(&spinand->lock);
663 #endif
664 
665 	nanddev_io_for_each_page(nand, to, ops, &iter) {
666 		ret = spinand_select_target(spinand, iter.req.pos.target);
667 		if (ret)
668 			break;
669 
670 		ret = spinand_ecc_enable(spinand, enable_ecc);
671 		if (ret)
672 			break;
673 
674 		ret = spinand_write_page(spinand, &iter.req);
675 		if (ret)
676 			break;
677 
678 		ops->retlen += iter.req.datalen;
679 		ops->oobretlen += iter.req.ooblen;
680 	}
681 
682 #ifndef __UBOOT__
683 	mutex_unlock(&spinand->lock);
684 #endif
685 
686 	return ret;
687 }
688 
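/*
 * A block is reported bad when the two bad block marker bytes at the start
 * of the OOB area are not both 0xff. The marker is read in raw mode, with
 * ECC disabled.
 */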
689 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
690 {
691 	struct spinand_device *spinand = nand_to_spinand(nand);
692 	u8 marker[2] = { };
693 	struct nand_page_io_req req = {
694 		.pos = *pos,
695 		.ooblen = sizeof(marker),
696 		.ooboffs = 0,
697 		.oobbuf.in = marker,
698 		.mode = MTD_OPS_RAW,
699 	};
700 
701 	spinand_select_target(spinand, pos->target);
702 	spinand_read_page(spinand, &req, false);
703 	if (marker[0] != 0xff || marker[1] != 0xff)
704 		return true;
705 
706 	return false;
707 }
708 
709 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
710 {
711 	struct nand_device *nand = mtd_to_nanddev(mtd);
712 #ifndef __UBOOT__
713 	struct spinand_device *spinand = nand_to_spinand(nand);
714 #endif
715 	struct nand_pos pos;
716 	int ret;
717 
718 	nanddev_offs_to_pos(nand, offs, &pos);
719 #ifndef __UBOOT__
720 	mutex_lock(&spinand->lock);
721 #endif
722 	ret = nanddev_isbad(nand, &pos);
723 #ifndef __UBOOT__
724 	mutex_unlock(&spinand->lock);
725 #endif
726 	return ret;
727 }
728 
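/*
 * Mark a block bad by programming 0x00 into the two bad block marker bytes
 * of the OOB area, in raw mode.
 */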
729 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
730 {
731 	struct spinand_device *spinand = nand_to_spinand(nand);
732 	u8 marker[2] = { 0, 0 };
733 	struct nand_page_io_req req = {
734 		.pos = *pos,
735 		.ooboffs = 0,
736 		.ooblen = sizeof(marker),
737 		.oobbuf.out = marker,
738 		.mode = MTD_OPS_RAW,
739 	};
740 	int ret;
741 
742 	ret = spinand_select_target(spinand, pos->target);
743 	if (ret)
744 		return ret;
745 
746 	ret = spinand_write_enable_op(spinand);
747 	if (ret)
748 		return ret;
749 
750 	return spinand_write_page(spinand, &req);
751 }
752 
753 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
754 {
755 	struct nand_device *nand = mtd_to_nanddev(mtd);
756 #ifndef __UBOOT__
757 	struct spinand_device *spinand = nand_to_spinand(nand);
758 #endif
759 	struct nand_pos pos;
760 	int ret;
761 
762 	nanddev_offs_to_pos(nand, offs, &pos);
763 #ifndef __UBOOT__
764 	mutex_lock(&spinand->lock);
765 #endif
766 	ret = nanddev_markbad(nand, &pos);
767 #ifndef __UBOOT__
768 	mutex_unlock(&spinand->lock);
769 #endif
770 	return ret;
771 }
772 
773 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
774 {
775 	struct spinand_device *spinand = nand_to_spinand(nand);
776 	u8 status;
777 	int ret;
778 
779 	ret = spinand_select_target(spinand, pos->target);
780 	if (ret)
781 		return ret;
782 
783 	ret = spinand_write_enable_op(spinand);
784 	if (ret)
785 		return ret;
786 
787 	ret = spinand_erase_op(spinand, pos);
788 	if (ret)
789 		return ret;
790 
791 	ret = spinand_wait(spinand, &status);
792 	if (!ret && (status & STATUS_ERASE_FAILED))
793 		ret = -EIO;
794 
795 	return ret;
796 }
797 
798 static int spinand_mtd_erase(struct mtd_info *mtd,
799 			     struct erase_info *einfo)
800 {
801 #ifndef __UBOOT__
802 	struct spinand_device *spinand = mtd_to_spinand(mtd);
803 #endif
804 	int ret;
805 
806 #ifndef __UBOOT__
807 	mutex_lock(&spinand->lock);
808 #endif
809 	ret = nanddev_mtd_erase(mtd, einfo);
810 #ifndef __UBOOT__
811 	mutex_unlock(&spinand->lock);
812 #endif
813 
814 	return ret;
815 }
816 
817 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
818 {
819 #ifndef __UBOOT__
820 	struct spinand_device *spinand = mtd_to_spinand(mtd);
821 #endif
822 	struct nand_device *nand = mtd_to_nanddev(mtd);
823 	struct nand_pos pos;
824 	int ret;
825 
826 	nanddev_offs_to_pos(nand, offs, &pos);
827 #ifndef __UBOOT__
828 	mutex_lock(&spinand->lock);
829 #endif
830 	ret = nanddev_isreserved(nand, &pos);
831 #ifndef __UBOOT__
832 	mutex_unlock(&spinand->lock);
833 #endif
834 
835 	return ret;
836 }
837 
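/*
 * Return the first entry of @ops that the underlying SPI controller supports,
 * or NULL when none of them can be used.
 */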
838 const struct spi_mem_op *
839 spinand_find_supported_op(struct spinand_device *spinand,
840 			  const struct spi_mem_op *ops,
841 			  unsigned int nops)
842 {
843 	unsigned int i;
844 
845 	for (i = 0; i < nops; i++) {
846 		if (spi_mem_supports_op(spinand->slave, &ops[i]))
847 			return &ops[i];
848 	}
849 
850 	return NULL;
851 }
852 
853 static const struct nand_ops spinand_ops = {
854 	.erase = spinand_erase,
855 	.markbad = spinand_markbad,
856 	.isbad = spinand_isbad,
857 };
858 
859 static const struct spinand_manufacturer *spinand_manufacturers[] = {
860 #ifdef CONFIG_SPI_NAND_GIGADEVICE
861 	&gigadevice_spinand_manufacturer,
862 #endif
863 #ifdef CONFIG_SPI_NAND_MACRONIX
864 	&macronix_spinand_manufacturer,
865 #endif
866 #ifdef CONFIG_SPI_NAND_MICRON
867 	&micron_spinand_manufacturer,
868 #endif
869 #ifdef CONFIG_SPI_NAND_TOSHIBA
870 	&toshiba_spinand_manufacturer,
871 #endif
872 #ifdef CONFIG_SPI_NAND_WINBOND
873 	&winbond_spinand_manufacturer,
874 #endif
875 #ifdef CONFIG_SPI_NAND_DOSILICON
876 	&dosilicon_spinand_manufacturer,
877 #endif
878 #ifdef CONFIG_SPI_NAND_ESMT
879 	&esmt_spinand_manufacturer,
880 #endif
881 #ifdef CONFIG_SPI_NAND_XINCUN
882 	&xincun_spinand_manufacturer,
883 #endif
884 #ifdef CONFIG_SPI_NAND_XTX
885 	&xtx_spinand_manufacturer,
886 #endif
887 #ifdef CONFIG_SPI_NAND_HYF
888 	&hyf_spinand_manufacturer,
889 #endif
890 #ifdef CONFIG_SPI_NAND_FMSH
891 	&fmsh_spinand_manufacturer,
892 #endif
893 #ifdef CONFIG_SPI_NAND_FORESEE
894 	&foresee_spinand_manufacturer,
895 #endif
896 #ifdef CONFIG_SPI_NAND_BIWIN
897 	&biwin_spinand_manufacturer,
898 #endif
899 #ifdef CONFIG_SPI_NAND_ETRON
900 	&etron_spinand_manufacturer,
901 #endif
902 #ifdef CONFIG_SPI_NAND_JSC
903 	&jsc_spinand_manufacturer,
904 #endif
905 #ifdef CONFIG_SPI_NAND_SILICONGO
906 	&silicongo_spinand_manufacturer,
907 #endif
908 #ifdef CONFIG_SPI_NAND_UNIM
909 	&unim_spinand_manufacturer,
910 	&unim_zl_spinand_manufacturer,
911 #endif
912 #ifdef CONFIG_SPI_NAND_SKYHIGH
913 	&skyhigh_spinand_manufacturer,
914 #endif
915 #ifdef CONFIG_SPI_NAND_GSTO
916 	&gsto_spinand_manufacturer,
917 #endif
918 #ifdef CONFIG_SPI_NAND_ZBIT
919 	&zbit_spinand_manufacturer,
920 #endif
921 };
922 
923 static int spinand_manufacturer_match(struct spinand_device *spinand,
924 				      enum spinand_readid_method rdid_method)
925 {
926 	u8 *id = spinand->id.data;
927 	unsigned int i;
928 	int ret;
929 
930 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
931 		const struct spinand_manufacturer *manufacturer =
932 			spinand_manufacturers[i];
933 
934 		if (id[0] != manufacturer->id)
935 			continue;
936 
937 		ret = spinand_match_and_init(spinand,
938 					     manufacturer->chips,
939 					     manufacturer->nchips,
940 					     rdid_method);
941 		if (ret < 0)
942 			continue;
943 
944 		spinand->manufacturer = manufacturer;
945 		return 0;
946 	}
947 	return -ENOTSUPP;
948 }
949 
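/*
 * Try the three standard READ_ID variants (plain opcode, opcode + address
 * byte, opcode + dummy byte) until one of them returns an ID that matches a
 * known manufacturer/chip.
 */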
950 static int spinand_id_detect(struct spinand_device *spinand)
951 {
952 	u8 *id = spinand->id.data;
953 	int ret;
954 
955 	ret = spinand_read_id_op(spinand, 0, 0, id);
956 	if (ret)
957 		return ret;
958 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
959 	if (!ret)
960 		return 0;
961 
962 	ret = spinand_read_id_op(spinand, 1, 0, id);
963 	if (ret)
964 		return ret;
965 	ret = spinand_manufacturer_match(spinand,
966 					 SPINAND_READID_METHOD_OPCODE_ADDR);
967 	if (!ret)
968 		return 0;
969 
970 	ret = spinand_read_id_op(spinand, 0, 1, id);
971 	if (ret)
972 		return ret;
973 	ret = spinand_manufacturer_match(spinand,
974 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
975 
976 	return ret;
977 }
978 
979 static int spinand_manufacturer_init(struct spinand_device *spinand)
980 {
981 	if (spinand->manufacturer->ops->init)
982 		return spinand->manufacturer->ops->init(spinand);
983 
984 	return 0;
985 }
986 
987 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
988 {
989 	/* Release manufacturer private data */
990 	if (spinand->manufacturer->ops->cleanup)
991 		return spinand->manufacturer->ops->cleanup(spinand);
992 }
993 
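/*
 * Pick the first op variant that the controller supports and that can cover
 * a full page + OOB transfer, possibly split into several operations.
 */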
994 static const struct spi_mem_op *
995 spinand_select_op_variant(struct spinand_device *spinand,
996 			  const struct spinand_op_variants *variants)
997 {
998 	struct nand_device *nand = spinand_to_nand(spinand);
999 	unsigned int i;
1000 
1001 	for (i = 0; i < variants->nops; i++) {
1002 		struct spi_mem_op op = variants->ops[i];
1003 		unsigned int nbytes;
1004 		int ret;
1005 
1006 		nbytes = nanddev_per_page_oobsize(nand) +
1007 			 nanddev_page_size(nand);
1008 
1009 		while (nbytes) {
1010 			op.data.nbytes = nbytes;
1011 			ret = spi_mem_adjust_op_size(spinand->slave, &op);
1012 			if (ret)
1013 				break;
1014 
1015 			if (!spi_mem_supports_op(spinand->slave, &op))
1016 				break;
1017 
1018 			nbytes -= op.data.nbytes;
1019 		}
1020 
1021 		if (!nbytes)
1022 			return &variants->ops[i];
1023 	}
1024 
1025 	return NULL;
1026 }
1027 
1028 /**
1029  * spinand_match_and_init() - Try to find a match between a device ID and an
1030  *			      entry in a spinand_info table
1031  * @spinand: SPI NAND object
1032  * @table: SPI NAND device description table
1033  * @table_size: size of the device description table
1034  * @rdid_method: read id method to match
1035  *
1036  * Match a device ID retrieved through the READ_ID command against an
1037  * entry in the SPI NAND description table. If a match is found, the spinand
1038  * object will be initialized with information provided by the matching
1039  * spinand_info entry.
1040  *
1041  * Return: 0 on success, a negative error code otherwise.
1042  */
1043 int spinand_match_and_init(struct spinand_device *spinand,
1044 			   const struct spinand_info *table,
1045 			   unsigned int table_size,
1046 			   enum spinand_readid_method rdid_method)
1047 {
1048 	u8 *id = spinand->id.data;
1049 	struct nand_device *nand = spinand_to_nand(spinand);
1050 	unsigned int i;
1051 
1052 	for (i = 0; i < table_size; i++) {
1053 		const struct spinand_info *info = &table[i];
1054 		const struct spi_mem_op *op;
1055 
1056 		if (rdid_method != info->devid.method)
1057 			continue;
1058 
1059 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1060 			continue;
1061 
1062 		nand->memorg = table[i].memorg;
1063 		nand->eccreq = table[i].eccreq;
1064 		spinand->eccinfo = table[i].eccinfo;
1065 		spinand->flags = table[i].flags;
1066 		spinand->id.len = 1 + table[i].devid.len;
1067 		spinand->select_target = table[i].select_target;
1068 
1069 		op = spinand_select_op_variant(spinand,
1070 					       info->op_variants.read_cache);
1071 		if (!op)
1072 			return -ENOTSUPP;
1073 
1074 		spinand->op_templates.read_cache = op;
1075 
1076 		op = spinand_select_op_variant(spinand,
1077 					       info->op_variants.write_cache);
1078 		if (!op)
1079 			return -ENOTSUPP;
1080 
1081 		spinand->op_templates.write_cache = op;
1082 
1083 		op = spinand_select_op_variant(spinand,
1084 					       info->op_variants.update_cache);
1085 		spinand->op_templates.update_cache = op;
1086 
1087 		return 0;
1088 	}
1089 
1090 	return -ENOTSUPP;
1091 }
1092 
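/*
 * Reset the chip (compiled out when SUPPORT_USBPLUG is enabled), identify it
 * through READ_ID and make sure multi-die chips provide a ->select_target()
 * hook before printing the detected geometry.
 */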
1093 static int spinand_detect(struct spinand_device *spinand)
1094 {
1095 	struct nand_device *nand = spinand_to_nand(spinand);
1096 	int ret;
1097 
1098 #if !CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
1099 	ret = spinand_reset_op(spinand);
1100 	if (ret)
1101 		return ret;
1102 #endif
1103 
1104 	ret = spinand_id_detect(spinand);
1105 	if (ret) {
1106 		dev_err(dev, "unknown raw ID %x %x %x\n",
1107 			spinand->id.data[0], spinand->id.data[1], spinand->id.data[2]);
1108 		return ret;
1109 	}
1110 	dev_err(dev, "SPI Nand ID %x %x %x\n",
1111 		spinand->id.data[0], spinand->id.data[1], spinand->id.data[2]);
1112 
1113 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1114 		dev_err(dev,
1115 			"SPI NANDs with more than one die must implement ->select_target()\n");
1116 		return -EINVAL;
1117 	}
1118 
1119 	dev_info(spinand->slave->dev,
1120 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1121 	dev_info(spinand->slave->dev,
1122 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1123 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1124 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1125 
1126 	return 0;
1127 }
1128 
1129 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
1130 				       struct mtd_oob_region *region)
1131 {
1132 	return -ERANGE;
1133 }
1134 
1135 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
1136 					struct mtd_oob_region *region)
1137 {
1138 	if (section)
1139 		return -ERANGE;
1140 
1141 	/* Reserve 2 bytes for the BBM. */
1142 	region->offset = 2;
1143 	region->length = 62;
1144 
1145 	return 0;
1146 }
1147 
1148 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
1149 	.ecc = spinand_noecc_ooblayout_ecc,
1150 	.rfree = spinand_noecc_ooblayout_free,
1151 };
1152 
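/*
 * Core initialization: allocate the DMA-safe buffers, detect the chip, set up
 * the config cache, quad enable and block unlocking, then hook up the MTD
 * callbacks and OOB layout.
 */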
1153 static int spinand_init(struct spinand_device *spinand)
1154 {
1155 	struct mtd_info *mtd = spinand_to_mtd(spinand);
1156 	struct nand_device *nand = mtd_to_nanddev(mtd);
1157 	int ret, i;
1158 
1159 	/*
1160 	 * We need a scratch buffer because the spi_mem interface requires that
1161 	 * the buf passed in spi_mem_op->data.buf be DMA-able.
1162 	 */
1163 	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1164 	if (!spinand->scratchbuf)
1165 		return -ENOMEM;
1166 
1167 	ret = spinand_detect(spinand);
1168 	if (ret)
1169 		goto err_free_bufs;
1170 
1171 	/*
1172 	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1173 	 * may use this buffer for DMA access.
1174 	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1175 	 */
1176 	spinand->databuf = kzalloc(nanddev_page_size(nand) +
1177 			       nanddev_per_page_oobsize(nand),
1178 			       GFP_KERNEL);
1179 	if (!spinand->databuf) {
1180 		ret = -ENOMEM;
1181 		goto err_free_bufs;
1182 	}
1183 
1184 	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1185 
1186 	ret = spinand_init_cfg_cache(spinand);
1187 	if (ret)
1188 		goto err_free_bufs;
1189 
1190 	ret = spinand_init_quad_enable(spinand);
1191 	if (ret)
1192 		goto err_free_bufs;
1193 
1194 	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1195 	if (ret)
1196 		goto err_free_bufs;
1197 
1198 	ret = spinand_manufacturer_init(spinand);
1199 	if (ret) {
1200 		dev_err(dev,
1201 			"Failed to initialize the SPI NAND chip (err = %d)\n",
1202 			ret);
1203 		goto err_free_bufs;
1204 	}
1205 
1206 	/* After power up, all blocks are locked, so unlock them here. */
1207 	for (i = 0; i < nand->memorg.ntargets; i++) {
1208 		ret = spinand_select_target(spinand, i);
1209 		if (ret)
1210 			goto err_free_bufs;
1211 
1212 		/* HWP_EN must be set before the block unlock region is configured */
1213 		if (spinand->id.data[0] == 0x01) {
1214 			ret = spinand_lock_block(spinand, HWP_EN);
1215 			if (ret)
1216 				goto err_free_bufs;
1217 		}
1218 
1219 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1220 		if (ret)
1221 			goto err_free_bufs;
1222 	}
1223 
1224 	nand->bbt.option = NANDDEV_BBT_USE_FLASH;
1225 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1226 	if (ret)
1227 		goto err_manuf_cleanup;
1228 
1229 	/*
1230 	 * Right now, we don't support ECC, so make the whole OOB
1231 	 * area available to the user.
1232 	 */
1233 	mtd->_read_oob = spinand_mtd_read;
1234 	mtd->_write_oob = spinand_mtd_write;
1235 	mtd->_block_isbad = spinand_mtd_block_isbad;
1236 	mtd->_block_markbad = spinand_mtd_block_markbad;
1237 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
1238 	mtd->_erase = spinand_mtd_erase;
1239 
1240 	if (spinand->eccinfo.ooblayout)
1241 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1242 	else
1243 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1244 
1245 	ret = mtd_ooblayout_count_freebytes(mtd);
1246 	if (ret < 0)
1247 		goto err_cleanup_nanddev;
1248 
1249 	mtd->oobavail = ret;
1250 
1251 	/* Propagate ECC information to mtd_info */
1252 	mtd->ecc_strength = nand->eccreq.strength;
1253 	mtd->ecc_step_size = nand->eccreq.step_size;
1254 
1255 	return 0;
1256 
1257 err_cleanup_nanddev:
1258 	nanddev_cleanup(nand);
1259 
1260 err_manuf_cleanup:
1261 	spinand_manufacturer_cleanup(spinand);
1262 
1263 err_free_bufs:
1264 	kfree(spinand->databuf);
1265 	kfree(spinand->scratchbuf);
1266 	return ret;
1267 }
1268 
1269 static void spinand_cleanup(struct spinand_device *spinand)
1270 {
1271 	struct nand_device *nand = spinand_to_nand(spinand);
1272 
1273 	nanddev_cleanup(nand);
1274 	spinand_manufacturer_cleanup(spinand);
1275 	kfree(spinand->databuf);
1276 	kfree(spinand->scratchbuf);
1277 }
1278 
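/* Create the companion MTD block device when CONFIG_MTD_BLK is enabled. */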
1279 static int spinand_bind(struct udevice *udev)
1280 {
1281 	int ret = 0;
1282 
1283 #ifdef CONFIG_MTD_BLK
1284 	struct udevice *bdev;
1285 
1286 	ret = blk_create_devicef(udev, "mtd_blk", "blk", IF_TYPE_MTD,
1287 				 BLK_MTD_SPI_NAND, 512, 0, &bdev);
1288 	if (ret)
1289 		printf("Cannot create block device\n");
1290 #endif
1291 	return ret;
1292 }
1293 
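/*
 * U-Boot probe path: tie the spi_slave, nand_device and mtd_info structures
 * together, name the device "spi-nandN", run spinand_init() and register the
 * resulting MTD device.
 */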
1294 static int spinand_probe(struct udevice *dev)
1295 {
1296 	struct spinand_device *spinand = dev_get_priv(dev);
1297 	struct spi_slave *slave = dev_get_parent_priv(dev);
1298 	struct mtd_info *mtd = dev_get_uclass_priv(dev);
1299 	struct nand_device *nand = spinand_to_nand(spinand);
1300 	int ret;
1301 
1302 #ifndef __UBOOT__
1303 	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1304 			       GFP_KERNEL);
1305 	if (!spinand)
1306 		return -ENOMEM;
1307 
1308 	spinand->spimem = mem;
1309 	spi_mem_set_drvdata(mem, spinand);
1310 	spinand_set_of_node(spinand, mem->spi->dev.of_node);
1311 	mutex_init(&spinand->lock);
1312 
1313 	mtd = spinand_to_mtd(spinand);
1314 	mtd->dev.parent = &mem->spi->dev;
1315 #else
1316 	nand->mtd = mtd;
1317 	mtd->priv = nand;
1318 	mtd->dev = dev;
1319 	mtd->name = malloc(20);
1320 	if (!mtd->name)
1321 		return -ENOMEM;
1322 	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
1323 	spinand->slave = slave;
1324 	spinand_set_of_node(spinand, dev->node.np);
1325 #endif
1326 
1327 	ret = spinand_init(spinand);
1328 	if (ret)
1329 		return ret;
1330 
1331 #ifndef __UBOOT__
1332 	ret = mtd_device_register(mtd, NULL, 0);
1333 #else
1334 	ret = add_mtd_device(mtd);
1335 #endif
1336 	if (ret)
1337 		goto err_spinand_cleanup;
1338 
1339 	return 0;
1340 
1341 err_spinand_cleanup:
1342 	spinand_cleanup(spinand);
1343 
1344 	return ret;
1345 }
1346 
1347 #ifndef __UBOOT__
1348 static int spinand_remove(struct udevice *slave)
1349 {
1350 	struct spinand_device *spinand;
1351 	struct mtd_info *mtd;
1352 	int ret;
1353 
1354 	spinand = spi_mem_get_drvdata(slave);
1355 	mtd = spinand_to_mtd(spinand);
1356 	free(mtd->name);
1357 
1358 	ret = mtd_device_unregister(mtd);
1359 	if (ret)
1360 		return ret;
1361 
1362 	spinand_cleanup(spinand);
1363 
1364 	return 0;
1365 }
1366 
1367 static const struct spi_device_id spinand_ids[] = {
1368 	{ .name = "spi-nand" },
1369 	{ /* sentinel */ },
1370 };
1371 
1372 #ifdef CONFIG_OF
1373 static const struct of_device_id spinand_of_ids[] = {
1374 	{ .compatible = "spi-nand" },
1375 	{ /* sentinel */ },
1376 };
1377 #endif
1378 
1379 static struct spi_mem_driver spinand_drv = {
1380 	.spidrv = {
1381 		.id_table = spinand_ids,
1382 		.driver = {
1383 			.name = "spi-nand",
1384 			.of_match_table = of_match_ptr(spinand_of_ids),
1385 		},
1386 	},
1387 	.probe = spinand_probe,
1388 	.remove = spinand_remove,
1389 };
1390 module_spi_mem_driver(spinand_drv);
1391 
1392 MODULE_DESCRIPTION("SPI NAND framework");
1393 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
1394 MODULE_LICENSE("GPL v2");
1395 #endif /* __UBOOT__ */
1396 
1397 static const struct udevice_id spinand_ids[] = {
1398 	{ .compatible = "spi-nand" },
1399 	{ /* sentinel */ },
1400 };
1401 
1402 U_BOOT_DRIVER(spinand) = {
1403 	.name = "spi_nand",
1404 	.id = UCLASS_MTD,
1405 	.of_match = spinand_ids,
1406 	.bind	= spinand_bind,
1407 	.priv_auto_alloc_size = sizeof(struct spinand_device),
1408 	.probe = spinand_probe,
1409 };
1410