xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/spi/core.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2016-2017 Micron Technology, Inc.
4  *
5  * Authors:
6  *	Peter Pan <peterpandong@micron.com>
7  *	Boris Brezillon <boris.brezillon@bootlin.com>
8  */
9 
10 #define pr_fmt(fmt)	"spi-nand: " fmt
11 
12 #include <linux/device.h>
13 #include <linux/jiffies.h>
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/mtd/bbt_store.h>
17 #include <linux/mtd/spinand.h>
18 #include <linux/of.h>
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spi/spi.h>
22 #include <linux/spi/spi-mem.h>
23 
24 static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
25 {
26 	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
27 						      spinand->scratchbuf);
28 	int ret;
29 
30 	ret = spi_mem_exec_op(spinand->spimem, &op);
31 	if (ret)
32 		return ret;
33 
34 	*val = *spinand->scratchbuf;
35 	return 0;
36 }
37 
38 static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
39 {
40 	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
41 						      spinand->scratchbuf);
42 
43 	*spinand->scratchbuf = val;
44 	return spi_mem_exec_op(spinand->spimem, &op);
45 }
46 
47 static int spinand_read_status(struct spinand_device *spinand, u8 *status)
48 {
49 	return spinand_read_reg_op(spinand, REG_STATUS, status);
50 }
51 
52 static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
53 {
54 	struct nand_device *nand = spinand_to_nand(spinand);
55 
56 	if (WARN_ON(spinand->cur_target < 0 ||
57 		    spinand->cur_target >= nand->memorg.ntargets))
58 		return -EINVAL;
59 
60 	*cfg = spinand->cfg_cache[spinand->cur_target];
61 	return 0;
62 }
63 
64 static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
65 {
66 	struct nand_device *nand = spinand_to_nand(spinand);
67 	int ret;
68 
69 	if (WARN_ON(spinand->cur_target < 0 ||
70 		    spinand->cur_target >= nand->memorg.ntargets))
71 		return -EINVAL;
72 
73 	if (spinand->cfg_cache[spinand->cur_target] == cfg)
74 		return 0;
75 
76 	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
77 	if (ret)
78 		return ret;
79 
80 	spinand->cfg_cache[spinand->cur_target] = cfg;
81 	return 0;
82 }
83 
84 /**
85  * spinand_upd_cfg() - Update the configuration register
86  * @spinand: the spinand device
87  * @mask: the mask encoding the bits to update in the config reg
88  * @val: the new value to apply
89  *
90  * Update the configuration register.
91  *
92  * Return: 0 on success, a negative error code otherwise.
93  */
94 int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
95 {
96 	int ret;
97 	u8 cfg;
98 
99 	ret = spinand_get_cfg(spinand, &cfg);
100 	if (ret)
101 		return ret;
102 
103 	cfg &= ~mask;
104 	cfg |= val;
105 
106 	return spinand_set_cfg(spinand, cfg);
107 }
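/*
 * Illustrative usage (annotation, not part of the original source): callers
 * update a single field while preserving the rest of the CFG register, e.g.
 * spinand_upd_cfg(spinand, CFG_ECC_ENABLE, CFG_ECC_ENABLE) to set the bit and
 * spinand_upd_cfg(spinand, CFG_ECC_ENABLE, 0) to clear it.
 */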
108 
109 /**
110  * spinand_select_target() - Select a specific NAND target/die
111  * @spinand: the spinand device
112  * @target: the target/die to select
113  *
114  * Select a new target/die. If the chip only has one die, this function is a NOOP.
115  *
116  * Return: 0 on success, a negative error code otherwise.
117  */
118 int spinand_select_target(struct spinand_device *spinand, unsigned int target)
119 {
120 	struct nand_device *nand = spinand_to_nand(spinand);
121 	int ret;
122 
123 	if (WARN_ON(target >= nand->memorg.ntargets))
124 		return -EINVAL;
125 
126 	if (spinand->cur_target == target)
127 		return 0;
128 
129 	if (nand->memorg.ntargets == 1) {
130 		spinand->cur_target = target;
131 		return 0;
132 	}
133 
134 	ret = spinand->select_target(spinand, target);
135 	if (ret)
136 		return ret;
137 
138 	spinand->cur_target = target;
139 	return 0;
140 }
141 
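/*
 * Annotation: seed the per-target CFG cache at probe time. Each die is
 * selected in turn and its CFG register is read directly (bypassing the
 * cache) so that later spinand_get_cfg()/spinand_set_cfg() calls can avoid
 * redundant register accesses.
 */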
142 static int spinand_init_cfg_cache(struct spinand_device *spinand)
143 {
144 	struct nand_device *nand = spinand_to_nand(spinand);
145 	struct device *dev = &spinand->spimem->spi->dev;
146 	unsigned int target;
147 	int ret;
148 
149 	spinand->cfg_cache = devm_kcalloc(dev,
150 					  nand->memorg.ntargets,
151 					  sizeof(*spinand->cfg_cache),
152 					  GFP_KERNEL);
153 	if (!spinand->cfg_cache)
154 		return -ENOMEM;
155 
156 	for (target = 0; target < nand->memorg.ntargets; target++) {
157 		ret = spinand_select_target(spinand, target);
158 		if (ret)
159 			return ret;
160 
161 		/*
162 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
163 		 * here to bypass the config cache.
164 		 */
165 		ret = spinand_read_reg_op(spinand, REG_CFG,
166 					  &spinand->cfg_cache[target]);
167 		if (ret)
168 			return ret;
169 	}
170 
171 	return 0;
172 }
173 
174 static int spinand_init_quad_enable(struct spinand_device *spinand)
175 {
176 	bool enable = false;
177 
178 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
179 		return 0;
180 
181 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
182 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
183 	    spinand->op_templates.update_cache->data.buswidth == 4)
184 		enable = true;
185 
186 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
187 			       enable ? CFG_QUAD_ENABLE : 0);
188 }
189 
190 static int spinand_ecc_enable(struct spinand_device *spinand,
191 			      bool enable)
192 {
193 	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
194 			       enable ? CFG_ECC_ENABLE : 0);
195 }
196 
197 static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
198 {
199 	struct nand_device *nand = spinand_to_nand(spinand);
200 
201 	if (spinand->eccinfo.get_status)
202 		return spinand->eccinfo.get_status(spinand, status);
203 
204 	switch (status & STATUS_ECC_MASK) {
205 	case STATUS_ECC_NO_BITFLIPS:
206 		return 0;
207 
208 	case STATUS_ECC_HAS_BITFLIPS:
209 		/*
210 		 * We have no way to know exactly how many bitflips have been
211 		 * fixed, so let's return the maximum possible value so that
212 		 * wear-leveling layers move the data immediately.
213 		 */
214 		return nanddev_get_ecc_requirements(nand)->strength;
215 
216 	case STATUS_ECC_UNCOR_ERROR:
217 		return -EBADMSG;
218 
219 	default:
220 		break;
221 	}
222 
223 	return -EINVAL;
224 }
225 
226 static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
227 				       struct mtd_oob_region *region)
228 {
229 	return -ERANGE;
230 }
231 
232 static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
233 					struct mtd_oob_region *region)
234 {
235 	if (section)
236 		return -ERANGE;
237 
238 	/* Reserve 2 bytes for the BBM. */
239 	region->offset = 2;
240 	region->length = 62;
241 
242 	return 0;
243 }
244 
245 static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
246 	.ecc = spinand_noecc_ooblayout_ecc,
247 	.free = spinand_noecc_ooblayout_free,
248 };
249 
250 static int spinand_ondie_ecc_init_ctx(struct nand_device *nand)
251 {
252 	struct spinand_device *spinand = nand_to_spinand(nand);
253 	struct mtd_info *mtd = nanddev_to_mtd(nand);
254 	struct spinand_ondie_ecc_conf *engine_conf;
255 
256 	nand->ecc.ctx.conf.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
257 	nand->ecc.ctx.conf.step_size = nand->ecc.requirements.step_size;
258 	nand->ecc.ctx.conf.strength = nand->ecc.requirements.strength;
259 
260 	engine_conf = kzalloc(sizeof(*engine_conf), GFP_KERNEL);
261 	if (!engine_conf)
262 		return -ENOMEM;
263 
264 	nand->ecc.ctx.priv = engine_conf;
265 
266 	if (spinand->eccinfo.ooblayout)
267 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
268 	else
269 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
270 
271 	return 0;
272 }
273 
274 static void spinand_ondie_ecc_cleanup_ctx(struct nand_device *nand)
275 {
276 	kfree(nand->ecc.ctx.priv);
277 }
278 
279 static int spinand_ondie_ecc_prepare_io_req(struct nand_device *nand,
280 					    struct nand_page_io_req *req)
281 {
282 	struct spinand_device *spinand = nand_to_spinand(nand);
283 	bool enable = (req->mode != MTD_OPS_RAW);
284 
285 	/* Only enable or disable the engine */
286 	return spinand_ecc_enable(spinand, enable);
287 }
288 
289 static int spinand_ondie_ecc_finish_io_req(struct nand_device *nand,
290 					   struct nand_page_io_req *req)
291 {
292 	struct spinand_ondie_ecc_conf *engine_conf = nand->ecc.ctx.priv;
293 	struct spinand_device *spinand = nand_to_spinand(nand);
294 
295 	if (req->mode == MTD_OPS_RAW)
296 		return 0;
297 
298 	/* Nothing to do when finishing a page write */
299 	if (req->type == NAND_PAGE_WRITE)
300 		return 0;
301 
302 	/* Finish a page read: check the status, report errors/bitflips */
303 	return spinand_check_ecc_status(spinand, engine_conf->status);
304 }
305 
306 static struct nand_ecc_engine_ops spinand_ondie_ecc_engine_ops = {
307 	.init_ctx = spinand_ondie_ecc_init_ctx,
308 	.cleanup_ctx = spinand_ondie_ecc_cleanup_ctx,
309 	.prepare_io_req = spinand_ondie_ecc_prepare_io_req,
310 	.finish_io_req = spinand_ondie_ecc_finish_io_req,
311 };
312 
313 static struct nand_ecc_engine spinand_ondie_ecc_engine = {
314 	.ops = &spinand_ondie_ecc_engine_ops,
315 };
316 
317 static int spinand_write_enable_op(struct spinand_device *spinand)
318 {
319 	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);
320 
321 	return spi_mem_exec_op(spinand->spimem, &op);
322 }
323 
324 static int spinand_load_page_op(struct spinand_device *spinand,
325 				const struct nand_page_io_req *req)
326 {
327 	struct nand_device *nand = spinand_to_nand(spinand);
328 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
329 	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);
330 
331 	return spi_mem_exec_op(spinand->spimem, &op);
332 }
333 
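/*
 * Annotation: transfer the requested data/OOB bytes from the device cache
 * through the per-plane read dirmap. The loop keeps issuing dirmap reads
 * until all bytes have been transferred (a dirmap read may return less than
 * requested), then the data and OOB portions are copied into the caller's
 * buffers, honouring MTD_OPS_AUTO_OOB for the OOB part.
 */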
334 static int spinand_read_from_cache_op(struct spinand_device *spinand,
335 				      const struct nand_page_io_req *req)
336 {
337 	struct nand_device *nand = spinand_to_nand(spinand);
338 	struct mtd_info *mtd = nanddev_to_mtd(nand);
339 	struct spi_mem_dirmap_desc *rdesc;
340 	unsigned int nbytes = 0;
341 	void *buf = NULL;
342 	u16 column = 0;
343 	ssize_t ret;
344 
345 	if (req->datalen) {
346 		buf = spinand->databuf;
347 		nbytes = nanddev_page_size(nand);
348 		column = 0;
349 	}
350 
351 	if (req->ooblen) {
352 		nbytes += nanddev_per_page_oobsize(nand);
353 		if (!buf) {
354 			buf = spinand->oobbuf;
355 			column = nanddev_page_size(nand);
356 		}
357 	}
358 
359 	rdesc = spinand->dirmaps[req->pos.plane].rdesc;
360 
361 	while (nbytes) {
362 		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
363 		if (ret < 0)
364 			return ret;
365 
366 		if (!ret || ret > nbytes)
367 			return -EIO;
368 
369 		nbytes -= ret;
370 		column += ret;
371 		buf += ret;
372 	}
373 
374 	if (req->datalen)
375 		memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
376 		       req->datalen);
377 
378 	if (req->ooblen) {
379 		if (req->mode == MTD_OPS_AUTO_OOB)
380 			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
381 						    spinand->oobbuf,
382 						    req->ooboffs,
383 						    req->ooblen);
384 		else
385 			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
386 			       req->ooblen);
387 	}
388 
389 	return 0;
390 }
391 
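/*
 * Annotation: mirror of spinand_read_from_cache_op() for writes. The whole
 * page + OOB buffer is pre-filled with 0xff (see the comment below), patched
 * with the caller's data/OOB, then pushed to the device cache through the
 * per-plane write dirmap.
 */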
392 static int spinand_write_to_cache_op(struct spinand_device *spinand,
393 				     const struct nand_page_io_req *req)
394 {
395 	struct nand_device *nand = spinand_to_nand(spinand);
396 	struct mtd_info *mtd = nanddev_to_mtd(nand);
397 	struct spi_mem_dirmap_desc *wdesc;
398 	unsigned int nbytes, column = 0;
399 	void *buf = spinand->databuf;
400 	ssize_t ret;
401 
402 	/*
403 	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
404 	 * the cache content to 0xFF (depends on vendor implementation), so we
405 	 * must fill the page cache entirely even if we only want to program
406 	 * the data portion of the page, otherwise we might corrupt the BBM or
407 	 * user data previously programmed in OOB area.
408 	 */
409 	nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
410 	memset(spinand->databuf, 0xff, nbytes);
411 
412 	if (req->datalen)
413 		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
414 		       req->datalen);
415 
416 	if (req->ooblen) {
417 		if (req->mode == MTD_OPS_AUTO_OOB)
418 			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
419 						    spinand->oobbuf,
420 						    req->ooboffs,
421 						    req->ooblen);
422 		else
423 			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
424 			       req->ooblen);
425 	}
426 
427 	wdesc = spinand->dirmaps[req->pos.plane].wdesc;
428 
429 	while (nbytes) {
430 		ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
431 		if (ret < 0)
432 			return ret;
433 
434 		if (!ret || ret > nbytes)
435 			return -EIO;
436 
437 		nbytes -= ret;
438 		column += ret;
439 		buf += ret;
440 	}
441 
442 	return 0;
443 }
444 
445 static int spinand_program_op(struct spinand_device *spinand,
446 			      const struct nand_page_io_req *req)
447 {
448 	struct nand_device *nand = spinand_to_nand(spinand);
449 	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
450 	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);
451 
452 	return spi_mem_exec_op(spinand->spimem, &op);
453 }
454 
455 static int spinand_erase_op(struct spinand_device *spinand,
456 			    const struct nand_pos *pos)
457 {
458 	struct nand_device *nand = spinand_to_nand(spinand);
459 	unsigned int row = nanddev_pos_to_row(nand, pos);
460 	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);
461 
462 	return spi_mem_exec_op(spinand->spimem, &op);
463 }
464 
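/*
 * Annotation: poll the status register until the OIP (busy) bit clears,
 * within a ~400 ms budget. One extra read is performed after the timeout in
 * case the device became ready between the last poll and the deadline.
 */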
465 static int spinand_wait(struct spinand_device *spinand, u8 *s)
466 {
467 	unsigned long timeo = jiffies + msecs_to_jiffies(400);
468 	u8 status;
469 	int ret;
470 
471 	do {
472 		ret = spinand_read_status(spinand, &status);
473 		if (ret)
474 			return ret;
475 
476 		if (!(status & STATUS_BUSY))
477 			goto out;
478 	} while (time_before(jiffies, timeo));
479 
480 	/*
481 	 * Extra read, just in case the STATUS_BUSY bit has changed
482 	 * since our last check
483 	 */
484 	ret = spinand_read_status(spinand, &status);
485 	if (ret)
486 		return ret;
487 
488 out:
489 	if (s)
490 		*s = status;
491 
492 	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
493 }
494 
495 static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
496 			      u8 ndummy, u8 *buf)
497 {
498 	struct spi_mem_op op = SPINAND_READID_OP(
499 		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
500 	int ret;
501 
502 	ret = spi_mem_exec_op(spinand->spimem, &op);
503 	if (!ret)
504 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
505 
506 	return ret;
507 }
508 
509 static int spinand_reset_op(struct spinand_device *spinand)
510 {
511 	struct spi_mem_op op = SPINAND_RESET_OP;
512 	int ret;
513 
514 	ret = spi_mem_exec_op(spinand->spimem, &op);
515 	if (ret)
516 		return ret;
517 
518 	return spinand_wait(spinand, NULL);
519 }
520 
521 static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
522 {
523 	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
524 }
525 
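/*
 * Annotation: full page read sequence: PAGE READ to cache, wait for the
 * array read to finish, transfer the cache content, then (when ECC was
 * enabled) translate the status into a bitflip count or -EBADMSG. Parts with
 * manufacturer ID 0x01 re-read the status once when the first value has any
 * bits set besides OIP, as it may be unreliable (see the inline comment).
 */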
526 static int spinand_read_page(struct spinand_device *spinand,
527 			     const struct nand_page_io_req *req,
528 			     bool ecc_enabled)
529 {
530 	u8 status = 0;
531 	int ret;
532 
533 	ret = spinand_load_page_op(spinand, req);
534 	if (ret)
535 		return ret;
536 
537 	ret = spinand_wait(spinand, &status);
538 	/*
539 	 * If bits other than OIP are set in the status register, the value may be
540 	 * stale and must be read again to confirm it
541 	 */
542 	if (spinand->id.data[0] == 0x01 && status && !ret)
543 		ret = spinand_wait(spinand, &status);
544 
545 	if (ret < 0)
546 		return ret;
547 
548 	ret = spinand_read_from_cache_op(spinand, req);
549 	if (ret)
550 		return ret;
551 
552 	if (!ecc_enabled)
553 		return 0;
554 
555 	return spinand_check_ecc_status(spinand, status);
556 }
557 
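/*
 * Annotation: full page program sequence: WRITE ENABLE, load the device
 * cache, PROGRAM EXECUTE, then wait and report -EIO if the chip flags a
 * program failure in the status register.
 */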
558 static int spinand_write_page(struct spinand_device *spinand,
559 			      const struct nand_page_io_req *req)
560 {
561 	u8 status;
562 	int ret;
563 
564 	ret = spinand_write_enable_op(spinand);
565 	if (ret)
566 		return ret;
567 
568 	ret = spinand_write_to_cache_op(spinand, req);
569 	if (ret)
570 		return ret;
571 
572 	ret = spinand_program_op(spinand, req);
573 	if (ret)
574 		return ret;
575 
576 	ret = spinand_wait(spinand, &status);
577 	if (!ret && (status & STATUS_PROG_FAILED))
578 		ret = -EIO;
579 
580 	return ret;
581 }
582 
583 static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
584 			    struct mtd_oob_ops *ops)
585 {
586 	struct spinand_device *spinand = mtd_to_spinand(mtd);
587 	struct nand_device *nand = mtd_to_nanddev(mtd);
588 	unsigned int max_bitflips = 0;
589 	struct nand_io_iter iter;
590 	bool enable_ecc = false;
591 	bool ecc_failed = false;
592 	int ret = 0;
593 
594 	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
595 		enable_ecc = true;
596 
597 	mutex_lock(&spinand->lock);
598 
599 	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
600 		ret = spinand_select_target(spinand, iter.req.pos.target);
601 		if (ret)
602 			break;
603 
604 		ret = spinand_ecc_enable(spinand, enable_ecc);
605 		if (ret)
606 			break;
607 
608 		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
609 		if (ret < 0 && ret != -EBADMSG)
610 			break;
611 
612 		if (ret == -EBADMSG) {
613 			ecc_failed = true;
614 			mtd->ecc_stats.failed++;
615 		} else {
616 			mtd->ecc_stats.corrected += ret;
617 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
618 		}
619 
620 		ret = 0;
621 		ops->retlen += iter.req.datalen;
622 		ops->oobretlen += iter.req.ooblen;
623 	}
624 
625 	mutex_unlock(&spinand->lock);
626 
627 	if (ecc_failed && !ret)
628 		ret = -EBADMSG;
629 
630 	return ret ? ret : max_bitflips;
631 }
632 
633 static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
634 			     struct mtd_oob_ops *ops)
635 {
636 	struct spinand_device *spinand = mtd_to_spinand(mtd);
637 	struct nand_device *nand = mtd_to_nanddev(mtd);
638 	struct nand_io_iter iter;
639 	bool enable_ecc = false;
640 	int ret = 0;
641 
642 	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
643 		enable_ecc = true;
644 
645 	mutex_lock(&spinand->lock);
646 
647 	nanddev_io_for_each_page(nand, NAND_PAGE_WRITE, to, ops, &iter) {
648 		ret = spinand_select_target(spinand, iter.req.pos.target);
649 		if (ret)
650 			break;
651 
652 		ret = spinand_ecc_enable(spinand, enable_ecc);
653 		if (ret)
654 			break;
655 
656 		ret = spinand_write_page(spinand, &iter.req);
657 		if (ret)
658 			break;
659 
660 		ops->retlen += iter.req.datalen;
661 		ops->oobretlen += iter.req.ooblen;
662 	}
663 
664 	mutex_unlock(&spinand->lock);
665 
666 	return ret;
667 }
668 
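/*
 * Annotation: a block is considered bad when the two bad-block-marker bytes
 * at the start of the OOB area are not both 0xff. The read is done in raw
 * mode so the marker is not "corrected" by the ECC engine.
 */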
669 static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
670 {
671 	struct spinand_device *spinand = nand_to_spinand(nand);
672 	u8 marker[2] = { };
673 	struct nand_page_io_req req = {
674 		.pos = *pos,
675 		.ooblen = sizeof(marker),
676 		.ooboffs = 0,
677 		.oobbuf.in = marker,
678 		.mode = MTD_OPS_RAW,
679 	};
680 
681 	spinand_select_target(spinand, pos->target);
682 	spinand_read_page(spinand, &req, false);
683 	if (marker[0] != 0xff || marker[1] != 0xff)
684 		return true;
685 
686 	return false;
687 }
688 
689 static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
690 {
691 	struct nand_device *nand = mtd_to_nanddev(mtd);
692 	struct spinand_device *spinand = nand_to_spinand(nand);
693 	struct nand_pos pos;
694 	int ret;
695 
696 	nanddev_offs_to_pos(nand, offs, &pos);
697 	mutex_lock(&spinand->lock);
698 	ret = nanddev_isbad(nand, &pos);
699 	mutex_unlock(&spinand->lock);
700 
701 	return ret;
702 }
703 
704 static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
705 {
706 	struct spinand_device *spinand = nand_to_spinand(nand);
707 	u8 marker[2] = { };
708 	struct nand_page_io_req req = {
709 		.pos = *pos,
710 		.ooboffs = 0,
711 		.ooblen = sizeof(marker),
712 		.oobbuf.out = marker,
713 		.mode = MTD_OPS_RAW,
714 	};
715 	int ret;
716 
717 	ret = spinand_select_target(spinand, pos->target);
718 	if (ret)
719 		return ret;
720 
721 	ret = spinand_write_enable_op(spinand);
722 	if (ret)
723 		return ret;
724 
725 	return spinand_write_page(spinand, &req);
726 }
727 
728 static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
729 {
730 	struct nand_device *nand = mtd_to_nanddev(mtd);
731 	struct spinand_device *spinand = nand_to_spinand(nand);
732 	struct nand_pos pos;
733 	int ret;
734 
735 	nanddev_offs_to_pos(nand, offs, &pos);
736 	mutex_lock(&spinand->lock);
737 	ret = nanddev_markbad(nand, &pos);
738 	mutex_unlock(&spinand->lock);
739 
740 	if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
741 		nanddev_bbt_in_flash_update(nand);
742 
743 	return ret;
744 }
745 
746 static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
747 {
748 	struct spinand_device *spinand = nand_to_spinand(nand);
749 	u8 status;
750 	int ret;
751 
752 	ret = spinand_select_target(spinand, pos->target);
753 	if (ret)
754 		return ret;
755 
756 	ret = spinand_write_enable_op(spinand);
757 	if (ret)
758 		return ret;
759 
760 	ret = spinand_erase_op(spinand, pos);
761 	if (ret)
762 		return ret;
763 
764 	ret = spinand_wait(spinand, &status);
765 	if (!ret && (status & STATUS_ERASE_FAILED))
766 		ret = -EIO;
767 
768 	return ret;
769 }
770 
771 static int spinand_mtd_erase(struct mtd_info *mtd,
772 			     struct erase_info *einfo)
773 {
774 	struct spinand_device *spinand = mtd_to_spinand(mtd);
775 	int ret;
776 
777 	mutex_lock(&spinand->lock);
778 	ret = nanddev_mtd_erase(mtd, einfo);
779 	mutex_unlock(&spinand->lock);
780 
781 	return ret;
782 }
783 
784 static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
785 {
786 	struct spinand_device *spinand = mtd_to_spinand(mtd);
787 	struct nand_device *nand = mtd_to_nanddev(mtd);
788 	struct nand_pos pos;
789 	int ret;
790 
791 	nanddev_offs_to_pos(nand, offs, &pos);
792 	mutex_lock(&spinand->lock);
793 	ret = nanddev_isreserved(nand, &pos);
794 	mutex_unlock(&spinand->lock);
795 
796 	return ret;
797 }
798 
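/*
 * Annotation: create the read and write direct-mapping descriptors for one
 * plane. Each mapping covers a full page plus its OOB area, and the plane
 * number is encoded in the address bits just above the column address.
 */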
799 static int spinand_create_dirmap(struct spinand_device *spinand,
800 				 unsigned int plane)
801 {
802 	struct nand_device *nand = spinand_to_nand(spinand);
803 	struct spi_mem_dirmap_info info = {
804 		.length = nanddev_page_size(nand) +
805 			  nanddev_per_page_oobsize(nand),
806 	};
807 	struct spi_mem_dirmap_desc *desc;
808 
809 	/* The plane number is passed in MSB just above the column address */
810 	info.offset = plane << fls(nand->memorg.pagesize);
811 
812 	info.op_tmpl = *spinand->op_templates.update_cache;
813 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
814 					  spinand->spimem, &info);
815 	if (IS_ERR(desc))
816 		return PTR_ERR(desc);
817 
818 	spinand->dirmaps[plane].wdesc = desc;
819 
820 	info.op_tmpl = *spinand->op_templates.read_cache;
821 	desc = devm_spi_mem_dirmap_create(&spinand->spimem->spi->dev,
822 					  spinand->spimem, &info);
823 	if (IS_ERR(desc))
824 		return PTR_ERR(desc);
825 
826 	spinand->dirmaps[plane].rdesc = desc;
827 
828 	return 0;
829 }
830 
831 static int spinand_create_dirmaps(struct spinand_device *spinand)
832 {
833 	struct nand_device *nand = spinand_to_nand(spinand);
834 	int i, ret;
835 
836 	spinand->dirmaps = devm_kzalloc(&spinand->spimem->spi->dev,
837 					sizeof(*spinand->dirmaps) *
838 					nand->memorg.planes_per_lun,
839 					GFP_KERNEL);
840 	if (!spinand->dirmaps)
841 		return -ENOMEM;
842 
843 	for (i = 0; i < nand->memorg.planes_per_lun; i++) {
844 		ret = spinand_create_dirmap(spinand, i);
845 		if (ret)
846 			return ret;
847 	}
848 
849 	return 0;
850 }
851 
852 static const struct nand_ops spinand_ops = {
853 	.erase = spinand_erase,
854 	.markbad = spinand_markbad,
855 	.isbad = spinand_isbad,
856 };
857 
858 static const struct spinand_manufacturer *spinand_manufacturers[] = {
859 	&biwin_spinand_manufacturer,
860 	&dosilicon_spinand_manufacturer,
861 	&esmt_spinand_manufacturer,
862 	&etron_spinand_manufacturer,
863 	&fmsh_spinand_manufacturer,
864 	&foresee_spinand_manufacturer,
865 	&gigadevice_spinand_manufacturer,
866 	&gsto_spinand_manufacturer,
867 	&hyf_spinand_manufacturer,
868 	&jsc_spinand_manufacturer,
869 	&macronix_spinand_manufacturer,
870 	&micron_spinand_manufacturer,
871 	&paragon_spinand_manufacturer,
872 	&silicongo_spinand_manufacturer,
873 	&skyhigh_spinand_manufacturer,
874 	&toshiba_spinand_manufacturer,
875 	&unim_spinand_manufacturer,
876 	&winbond_spinand_manufacturer,
877 	&xincun_spinand_manufacturer,
878 	&xtx_spinand_manufacturer,
879 };
880 
881 static int spinand_manufacturer_match(struct spinand_device *spinand,
882 				      enum spinand_readid_method rdid_method)
883 {
884 	u8 *id = spinand->id.data;
885 	unsigned int i;
886 	int ret;
887 
888 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
889 		const struct spinand_manufacturer *manufacturer =
890 			spinand_manufacturers[i];
891 
892 		if (id[0] != manufacturer->id)
893 			continue;
894 
895 		ret = spinand_match_and_init(spinand,
896 					     manufacturer->chips,
897 					     manufacturer->nchips,
898 					     rdid_method);
899 		if (ret < 0)
900 			continue;
901 
902 		spinand->manufacturer = manufacturer;
903 		return 0;
904 	}
905 	return -ENOTSUPP;
906 }
907 
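/*
 * Annotation: identification tries the three READ_ID variants in turn
 * (opcode only, opcode + address byte, opcode + dummy byte) until one of
 * them yields an ID that matches a known manufacturer/chip.
 */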
908 static int spinand_id_detect(struct spinand_device *spinand)
909 {
910 	u8 *id = spinand->id.data;
911 	int ret;
912 
913 	ret = spinand_read_id_op(spinand, 0, 0, id);
914 	if (ret)
915 		return ret;
916 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
917 	if (!ret)
918 		return 0;
919 
920 	ret = spinand_read_id_op(spinand, 1, 0, id);
921 	if (ret)
922 		return ret;
923 	ret = spinand_manufacturer_match(spinand,
924 					 SPINAND_READID_METHOD_OPCODE_ADDR);
925 	if (!ret)
926 		return 0;
927 
928 	ret = spinand_read_id_op(spinand, 0, 1, id);
929 	if (ret)
930 		return ret;
931 	ret = spinand_manufacturer_match(spinand,
932 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
933 
934 	return ret;
935 }
936 
937 static int spinand_manufacturer_init(struct spinand_device *spinand)
938 {
939 	if (spinand->manufacturer->ops->init)
940 		return spinand->manufacturer->ops->init(spinand);
941 
942 	return 0;
943 }
944 
945 static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
946 {
947 	/* Release manufacturer private data */
948 	if (spinand->manufacturer->ops->cleanup)
949 		return spinand->manufacturer->ops->cleanup(spinand);
950 }
951 
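/*
 * Annotation: walk the variant list in the order provided by the chip driver
 * and return the first operation the controller can execute for a full
 * page + OOB transfer, possibly split into several chunks.
 */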
952 static const struct spi_mem_op *
953 spinand_select_op_variant(struct spinand_device *spinand,
954 			  const struct spinand_op_variants *variants)
955 {
956 	struct nand_device *nand = spinand_to_nand(spinand);
957 	unsigned int i;
958 
959 	for (i = 0; i < variants->nops; i++) {
960 		struct spi_mem_op op = variants->ops[i];
961 		unsigned int nbytes;
962 		int ret;
963 
964 		nbytes = nanddev_per_page_oobsize(nand) +
965 			 nanddev_page_size(nand);
966 
967 		while (nbytes) {
968 			op.data.nbytes = nbytes;
969 			ret = spi_mem_adjust_op_size(spinand->spimem, &op);
970 			if (ret)
971 				break;
972 
973 			if (!spi_mem_supports_op(spinand->spimem, &op))
974 				break;
975 
976 			nbytes -= op.data.nbytes;
977 		}
978 
979 		if (!nbytes)
980 			return &variants->ops[i];
981 	}
982 
983 	return NULL;
984 }
985 
986 /**
987  * spinand_match_and_init() - Try to find a match between a device ID and an
988  *			      entry in a spinand_info table
989  * @spinand: SPI NAND object
990  * @table: SPI NAND device description table
991  * @table_size: size of the device description table
992  * @rdid_method: read id method to match
993  *
994  * Match between a device ID retrieved through the READ_ID command and an
995  * entry in the SPI NAND description table. If a match is found, the spinand
996  * object will be initialized with information provided by the matching
997  * spinand_info entry.
998  *
999  * Return: 0 on success, a negative error code otherwise.
1000  */
1001 int spinand_match_and_init(struct spinand_device *spinand,
1002 			   const struct spinand_info *table,
1003 			   unsigned int table_size,
1004 			   enum spinand_readid_method rdid_method)
1005 {
1006 	u8 *id = spinand->id.data;
1007 	struct nand_device *nand = spinand_to_nand(spinand);
1008 	unsigned int i;
1009 
1010 	for (i = 0; i < table_size; i++) {
1011 		const struct spinand_info *info = &table[i];
1012 		const struct spi_mem_op *op;
1013 
1014 		if (rdid_method != info->devid.method)
1015 			continue;
1016 
1017 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1018 			continue;
1019 
1020 		nand->memorg = table[i].memorg;
1021 		nanddev_set_ecc_requirements(nand, &table[i].eccreq);
1022 		spinand->eccinfo = table[i].eccinfo;
1023 		spinand->flags = table[i].flags;
1024 		spinand->id.len = 1 + table[i].devid.len;
1025 		spinand->select_target = table[i].select_target;
1026 
1027 		op = spinand_select_op_variant(spinand,
1028 					       info->op_variants.read_cache);
1029 		if (!op)
1030 			return -ENOTSUPP;
1031 
1032 		spinand->op_templates.read_cache = op;
1033 
1034 		op = spinand_select_op_variant(spinand,
1035 					       info->op_variants.write_cache);
1036 		if (!op)
1037 			return -ENOTSUPP;
1038 
1039 		spinand->op_templates.write_cache = op;
1040 
1041 		op = spinand_select_op_variant(spinand,
1042 					       info->op_variants.update_cache);
1043 		spinand->op_templates.update_cache = op;
1044 
1045 		return 0;
1046 	}
1047 
1048 	return -ENOTSUPP;
1049 }
1050 
1051 static int spinand_detect(struct spinand_device *spinand)
1052 {
1053 	struct device *dev = &spinand->spimem->spi->dev;
1054 	struct nand_device *nand = spinand_to_nand(spinand);
1055 	int ret;
1056 
1057 	ret = spinand_reset_op(spinand);
1058 	if (ret)
1059 		return ret;
1060 
1061 	ret = spinand_id_detect(spinand);
1062 	if (ret) {
1063 		dev_err(dev, "unknown raw ID %*phN\n", SPINAND_MAX_ID_LEN,
1064 			spinand->id.data);
1065 		return ret;
1066 	}
1067 
1068 	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
1069 		dev_err(dev,
1070 			"SPI NANDs with more than one die must implement ->select_target()\n");
1071 		return -EINVAL;
1072 	}
1073 
1074 	dev_info(&spinand->spimem->spi->dev,
1075 		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
1076 	dev_info(&spinand->spimem->spi->dev,
1077 		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
1078 		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
1079 		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));
1080 
1081 	return 0;
1082 }
1083 
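/*
 * Annotation: re-run the volatile part of the initialization (quad enable,
 * OTP disable, manufacturer init, dirmaps, block unlock) after the chip has
 * been reset or power-cycled; used by spinand_mtd_resume().
 */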
1084 static int spinand_reinit(struct mtd_info *mtd)
1085 {
1086 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1087 	struct nand_device *nand = mtd_to_nanddev(mtd);
1088 	struct device *dev = &spinand->spimem->spi->dev;
1089 	int ret, i;
1090 
1091 	ret = spinand_init_quad_enable(spinand);
1092 	if (ret)
1093 		return ret;
1094 
1095 	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1096 	if (ret)
1097 		return ret;
1098 
1099 	ret = spinand_manufacturer_init(spinand);
1100 	if (ret) {
1101 		dev_err(dev,
1102 			"Failed to initialize the SPI NAND chip (err = %d)\n",
1103 			ret);
1104 		return ret;
1105 	}
1106 
1107 	ret = spinand_create_dirmaps(spinand);
1108 	if (ret) {
1109 		dev_err(dev,
1110 			"Failed to create direct mappings for read/write operations (err = %d)\n",
1111 			ret);
1112 		return ret;
1113 	}
1114 
1115 	/* After power up, all blocks are locked, so unlock them here. */
1116 	for (i = 0; i < nand->memorg.ntargets; i++) {
1117 		ret = spinand_select_target(spinand, i);
1118 		if (ret)
1119 			return ret;
1120 
1121 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1122 		if (ret)
1123 			return ret;
1124 	}
1125 
1126 	return ret;
1127 }
1128 
1129 /**
1130  * spinand_mtd_suspend - [MTD Interface] Suspend the spinand flash
1131  * @mtd: MTD device structure
1132  *
1133  * Returns 0 for success or negative error code otherwise.
1134  */
1135 static int spinand_mtd_suspend(struct mtd_info *mtd)
1136 {
1137 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1138 	int ret = 0;
1139 
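	/*
	 * Annotation: the lock is taken here and intentionally left held; it is
	 * released in spinand_mtd_resume() after the chip has been reinitialized.
	 */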
1140 	mutex_lock(&spinand->lock);
1141 
1142 	return ret;
1143 }
1144 
1145 /**
1146  * spinand_mtd_resume - [MTD Interface] Resume the spinand flash
1147  * @mtd: MTD device structure
1148  */
1149 static void spinand_mtd_resume(struct mtd_info *mtd)
1150 {
1151 	struct spinand_device *spinand = mtd_to_spinand(mtd);
1152 	struct device *dev = &spinand->spimem->spi->dev;
1153 	int ret;
1154 
1155 	ret = spinand_reinit(mtd);
1156 	if (ret)
1157 		dev_err(dev, "Failed to resume, ret = %d\n", ret);
1158 	mutex_unlock(&spinand->lock);
1159 }
1160 
1161 /**
1162  * spinand_mtd_shutdown - [MTD Interface] Finish the current spinand operation and
1163  *                 prevent further operations
1164  * @mtd: MTD device structure
1165  */
1166 static void spinand_mtd_shutdown(struct mtd_info *mtd)
1167 {
1168 	spinand_mtd_suspend(mtd);
1169 }
1170 
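/*
 * Annotation: probe-time bring-up. Order matters: allocate a DMA-able
 * scratch buffer, detect the chip, allocate the page/OOB bounce buffer, seed
 * the CFG cache, enable quad mode and clear the OTP enable bit, run the
 * manufacturer init hook, create the dirmaps, unlock all blocks on every
 * die, then initialize the generic NAND core and wire up the MTD callbacks.
 */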
1171 static int spinand_init(struct spinand_device *spinand)
1172 {
1173 	struct device *dev = &spinand->spimem->spi->dev;
1174 	struct mtd_info *mtd = spinand_to_mtd(spinand);
1175 	struct nand_device *nand = mtd_to_nanddev(mtd);
1176 	int ret, i;
1177 
1178 	/*
1179 	 * We need a scratch buffer because the spi_mem interface requires that
1180 	 * the buf passed in spi_mem_op->data.buf be DMA-able.
1181 	 */
1182 	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
1183 	if (!spinand->scratchbuf)
1184 		return -ENOMEM;
1185 
1186 	ret = spinand_detect(spinand);
1187 	if (ret)
1188 		goto err_free_bufs;
1189 
1190 	/*
1191 	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
1192 	 * may use this buffer for DMA access.
1193 	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
1194 	 */
1195 	spinand->databuf = kzalloc(nanddev_page_size(nand) +
1196 			       nanddev_per_page_oobsize(nand),
1197 			       GFP_KERNEL);
1198 	if (!spinand->databuf) {
1199 		ret = -ENOMEM;
1200 		goto err_free_bufs;
1201 	}
1202 
1203 	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);
1204 
1205 	ret = spinand_init_cfg_cache(spinand);
1206 	if (ret)
1207 		goto err_free_bufs;
1208 
1209 	ret = spinand_init_quad_enable(spinand);
1210 	if (ret)
1211 		goto err_free_bufs;
1212 
1213 	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
1214 	if (ret)
1215 		goto err_free_bufs;
1216 
1217 	ret = spinand_manufacturer_init(spinand);
1218 	if (ret) {
1219 		dev_err(dev,
1220 			"Failed to initialize the SPI NAND chip (err = %d)\n",
1221 			ret);
1222 		goto err_free_bufs;
1223 	}
1224 
1225 	ret = spinand_create_dirmaps(spinand);
1226 	if (ret) {
1227 		dev_err(dev,
1228 			"Failed to create direct mappings for read/write operations (err = %d)\n",
1229 			ret);
1230 		goto err_manuf_cleanup;
1231 	}
1232 
1233 	/* After power up, all blocks are locked, so unlock them here. */
1234 	for (i = 0; i < nand->memorg.ntargets; i++) {
1235 		ret = spinand_select_target(spinand, i);
1236 		if (ret)
1237 			goto err_manuf_cleanup;
1238 
1239 		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
1240 		if (ret)
1241 			goto err_manuf_cleanup;
1242 	}
1243 
1244 	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
1245 	if (ret)
1246 		goto err_manuf_cleanup;
1247 
1248 	/* SPI-NAND default ECC engine is on-die */
1249 	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_DIE;
1250 	nand->ecc.ondie_engine = &spinand_ondie_ecc_engine;
1251 
1252 	/*
1253 	 * Right now we don't support ECC, so the whole OOB area is left
1254 	 * available to the user.
1255 	 */
1256 	mtd->_read_oob = spinand_mtd_read;
1257 	mtd->_write_oob = spinand_mtd_write;
1258 	mtd->_block_isbad = spinand_mtd_block_isbad;
1259 	mtd->_block_markbad = spinand_mtd_block_markbad;
1260 	mtd->_block_isreserved = spinand_mtd_block_isreserved;
1261 	mtd->_erase = spinand_mtd_erase;
1262 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
1263 	mtd->_suspend = spinand_mtd_suspend;
1264 	mtd->_resume = spinand_mtd_resume;
1265 	mtd->_reboot = spinand_mtd_shutdown;
1266 
1267 	if (spinand->eccinfo.ooblayout)
1268 		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
1269 	else
1270 		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);
1271 
1272 	ret = mtd_ooblayout_count_freebytes(mtd);
1273 	if (ret < 0)
1274 		goto err_cleanup_nanddev;
1275 
1276 	mtd->oobavail = ret;
1277 
1278 	/* Propagate ECC information to mtd_info */
1279 	mtd->ecc_strength = nanddev_get_ecc_requirements(nand)->strength;
1280 	mtd->ecc_step_size = nanddev_get_ecc_requirements(nand)->step_size;
1281 	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_SFC))
1282 		mtd->name = "spi-nand0";
1283 
1284 	if (IS_ENABLED(CONFIG_MTD_NAND_BBT_USING_FLASH))
1285 		nanddev_scan_bbt_in_flash(nand);
1286 
1287 	return 0;
1288 
1289 err_cleanup_nanddev:
1290 	nanddev_cleanup(nand);
1291 
1292 err_manuf_cleanup:
1293 	spinand_manufacturer_cleanup(spinand);
1294 
1295 err_free_bufs:
1296 	kfree(spinand->databuf);
1297 	kfree(spinand->scratchbuf);
1298 	return ret;
1299 }
1300 
1301 static void spinand_cleanup(struct spinand_device *spinand)
1302 {
1303 	struct nand_device *nand = spinand_to_nand(spinand);
1304 
1305 	nanddev_cleanup(nand);
1306 	spinand_manufacturer_cleanup(spinand);
1307 	kfree(spinand->databuf);
1308 	kfree(spinand->scratchbuf);
1309 }
1310 
1311 static int spinand_probe(struct spi_mem *mem)
1312 {
1313 	struct spinand_device *spinand;
1314 	struct mtd_info *mtd;
1315 	int ret;
1316 
1317 	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
1318 			       GFP_KERNEL);
1319 	if (!spinand)
1320 		return -ENOMEM;
1321 
1322 	spinand->spimem = mem;
1323 	spi_mem_set_drvdata(mem, spinand);
1324 	spinand_set_of_node(spinand, mem->spi->dev.of_node);
1325 	mutex_init(&spinand->lock);
1326 	mtd = spinand_to_mtd(spinand);
1327 	mtd->dev.parent = &mem->spi->dev;
1328 
1329 	ret = spinand_init(spinand);
1330 	if (ret)
1331 		return ret;
1332 
1333 	ret = mtd_device_register(mtd, NULL, 0);
1334 	if (ret)
1335 		goto err_spinand_cleanup;
1336 
1337 	return 0;
1338 
1339 err_spinand_cleanup:
1340 	spinand_cleanup(spinand);
1341 
1342 	return ret;
1343 }
1344 
1345 static int spinand_remove(struct spi_mem *mem)
1346 {
1347 	struct spinand_device *spinand;
1348 	struct mtd_info *mtd;
1349 	int ret;
1350 
1351 	spinand = spi_mem_get_drvdata(mem);
1352 	mtd = spinand_to_mtd(spinand);
1353 
1354 	ret = mtd_device_unregister(mtd);
1355 	if (ret)
1356 		return ret;
1357 
1358 	spinand_cleanup(spinand);
1359 
1360 	return 0;
1361 }
1362 
1363 static const struct spi_device_id spinand_ids[] = {
1364 	{ .name = "spi-nand" },
1365 	{ /* sentinel */ },
1366 };
1367 MODULE_DEVICE_TABLE(spi, spinand_ids);
1368 
1369 #ifdef CONFIG_OF
1370 static const struct of_device_id spinand_of_ids[] = {
1371 	{ .compatible = "spi-nand" },
1372 	{ /* sentinel */ },
1373 };
1374 MODULE_DEVICE_TABLE(of, spinand_of_ids);
1375 #endif
1376 
1377 static struct spi_mem_driver spinand_drv = {
1378 	.spidrv = {
1379 		.id_table = spinand_ids,
1380 		.driver = {
1381 			.name = "spi-nand",
1382 			.of_match_table = of_match_ptr(spinand_of_ids),
1383 		},
1384 	},
1385 	.probe = spinand_probe,
1386 	.remove = spinand_remove,
1387 };
1388 module_spi_mem_driver(spinand_drv);
1389 
1390 MODULE_DESCRIPTION("SPI NAND framework");
1391 MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
1392 MODULE_LICENSE("GPL v2");
1393