xref: /rk3399_rockchip-uboot/drivers/mtd/nand/spi/core.c (revision 534e5d47c0a0fbcbd4683ec669edb0108801e0fc)
1749af7cdSPeter Pan // SPDX-License-Identifier: GPL-2.0
2749af7cdSPeter Pan /*
3749af7cdSPeter Pan  * Copyright (C) 2016-2017 Micron Technology, Inc.
4749af7cdSPeter Pan  *
5749af7cdSPeter Pan  * Authors:
6749af7cdSPeter Pan  *	Peter Pan <peterpandong@micron.com>
7749af7cdSPeter Pan  *	Boris Brezillon <boris.brezillon@bootlin.com>
8749af7cdSPeter Pan  */
9749af7cdSPeter Pan 
10749af7cdSPeter Pan #define pr_fmt(fmt)	"spi-nand: " fmt
11749af7cdSPeter Pan 
12749af7cdSPeter Pan #ifndef __UBOOT__
13749af7cdSPeter Pan #include <linux/device.h>
14749af7cdSPeter Pan #include <linux/jiffies.h>
15749af7cdSPeter Pan #include <linux/kernel.h>
16749af7cdSPeter Pan #include <linux/module.h>
17749af7cdSPeter Pan #include <linux/mtd/spinand.h>
18749af7cdSPeter Pan #include <linux/of.h>
19749af7cdSPeter Pan #include <linux/slab.h>
20749af7cdSPeter Pan #include <linux/spi/spi.h>
21749af7cdSPeter Pan #include <linux/spi/spi-mem.h>
22749af7cdSPeter Pan #else
23749af7cdSPeter Pan #include <common.h>
24749af7cdSPeter Pan #include <errno.h>
25749af7cdSPeter Pan #include <spi.h>
26749af7cdSPeter Pan #include <spi-mem.h>
27749af7cdSPeter Pan #include <linux/mtd/spinand.h>
28749af7cdSPeter Pan #endif
29749af7cdSPeter Pan 
30749af7cdSPeter Pan /* SPI NAND index visible in MTD names */
31749af7cdSPeter Pan static int spi_nand_idx;
32749af7cdSPeter Pan 
/*
 * Fold the plane number into the column address of a cache read/write op.
 * Devices with a single plane per LUN are left untouched.
 */
static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
					  const struct nand_page_io_req *req,
					  u16 *column)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (nand->memorg.planes_per_lun < 2)
		return;

	/* The plane number is passed in MSB just above the column address */
	*column |= req->pos.plane << fls(nand->memorg.pagesize);
}
47749af7cdSPeter Pan 
/*
 * Read feature register @reg into @val. The transfer goes through the
 * DMA-safe scratch buffer.
 */
static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
						      spinand->scratchbuf);
	int ret = spi_mem_exec_op(spinand->slave, &op);

	if (!ret)
		*val = *spinand->scratchbuf;

	return ret;
}
61749af7cdSPeter Pan 
/*
 * Write @val to feature register @reg, bouncing the byte through the
 * DMA-safe scratch buffer.
 */
static int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val)
{
	struct spi_mem_op op = SPINAND_SET_FEATURE_OP(reg,
						      spinand->scratchbuf);

	spinand->scratchbuf[0] = val;

	return spi_mem_exec_op(spinand->slave, &op);
}
70749af7cdSPeter Pan 
/* Read the STATUS register (busy/OIP, program/erase-fail and ECC bits). */
static int spinand_read_status(struct spinand_device *spinand, u8 *status)
{
	return spinand_read_reg_op(spinand, REG_STATUS, status);
}
75749af7cdSPeter Pan 
/*
 * Return the cached configuration register value for the currently
 * selected target/die. No bus transaction is performed; the cache is
 * filled at probe time by spinand_init_cfg_cache().
 */
static int spinand_get_cfg(struct spinand_device *spinand, u8 *cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	*cfg = spinand->cfg_cache[spinand->cur_target];
	return 0;
}
87749af7cdSPeter Pan 
/*
 * Write @cfg to the configuration register of the current target and keep
 * the per-target cache coherent. The write is skipped entirely when the
 * cached value already matches.
 */
static int spinand_set_cfg(struct spinand_device *spinand, u8 cfg)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

	if (WARN_ON(spinand->cur_target < 0 ||
		    spinand->cur_target >= nand->memorg.ntargets))
		return -EINVAL;

	/* Avoid a bus transaction if the register already holds @cfg */
	if (spinand->cfg_cache[spinand->cur_target] == cfg)
		return 0;

	ret = spinand_write_reg_op(spinand, REG_CFG, cfg);
	if (!ret)
		spinand->cfg_cache[spinand->cur_target] = cfg;

	return ret;
}
107749af7cdSPeter Pan 
108749af7cdSPeter Pan /**
109749af7cdSPeter Pan  * spinand_upd_cfg() - Update the configuration register
110749af7cdSPeter Pan  * @spinand: the spinand device
111749af7cdSPeter Pan  * @mask: the mask encoding the bits to update in the config reg
112749af7cdSPeter Pan  * @val: the new value to apply
113749af7cdSPeter Pan  *
114749af7cdSPeter Pan  * Update the configuration register.
115749af7cdSPeter Pan  *
116749af7cdSPeter Pan  * Return: 0 on success, a negative error code otherwise.
117749af7cdSPeter Pan  */
spinand_upd_cfg(struct spinand_device * spinand,u8 mask,u8 val)118749af7cdSPeter Pan int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val)
119749af7cdSPeter Pan {
120749af7cdSPeter Pan 	int ret;
121749af7cdSPeter Pan 	u8 cfg;
122749af7cdSPeter Pan 
123749af7cdSPeter Pan 	ret = spinand_get_cfg(spinand, &cfg);
124749af7cdSPeter Pan 	if (ret)
125749af7cdSPeter Pan 		return ret;
126749af7cdSPeter Pan 
127749af7cdSPeter Pan 	cfg &= ~mask;
128749af7cdSPeter Pan 	cfg |= val;
129749af7cdSPeter Pan 
130749af7cdSPeter Pan 	return spinand_set_cfg(spinand, cfg);
131749af7cdSPeter Pan }
132749af7cdSPeter Pan 
133749af7cdSPeter Pan /**
134749af7cdSPeter Pan  * spinand_select_target() - Select a specific NAND target/die
135749af7cdSPeter Pan  * @spinand: the spinand device
136749af7cdSPeter Pan  * @target: the target/die to select
137749af7cdSPeter Pan  *
138749af7cdSPeter Pan  * Select a new target/die. If chip only has one die, this function is a NOOP.
139749af7cdSPeter Pan  *
140749af7cdSPeter Pan  * Return: 0 on success, a negative error code otherwise.
141749af7cdSPeter Pan  */
spinand_select_target(struct spinand_device * spinand,unsigned int target)142749af7cdSPeter Pan int spinand_select_target(struct spinand_device *spinand, unsigned int target)
143749af7cdSPeter Pan {
144749af7cdSPeter Pan 	struct nand_device *nand = spinand_to_nand(spinand);
145749af7cdSPeter Pan 	int ret;
146749af7cdSPeter Pan 
147749af7cdSPeter Pan 	if (WARN_ON(target >= nand->memorg.ntargets))
148749af7cdSPeter Pan 		return -EINVAL;
149749af7cdSPeter Pan 
150749af7cdSPeter Pan 	if (spinand->cur_target == target)
151749af7cdSPeter Pan 		return 0;
152749af7cdSPeter Pan 
153749af7cdSPeter Pan 	if (nand->memorg.ntargets == 1) {
154749af7cdSPeter Pan 		spinand->cur_target = target;
155749af7cdSPeter Pan 		return 0;
156749af7cdSPeter Pan 	}
157749af7cdSPeter Pan 
158749af7cdSPeter Pan 	ret = spinand->select_target(spinand, target);
159749af7cdSPeter Pan 	if (ret)
160749af7cdSPeter Pan 		return ret;
161749af7cdSPeter Pan 
162749af7cdSPeter Pan 	spinand->cur_target = target;
163749af7cdSPeter Pan 	return 0;
164749af7cdSPeter Pan }
165749af7cdSPeter Pan 
spinand_init_cfg_cache(struct spinand_device * spinand)166749af7cdSPeter Pan static int spinand_init_cfg_cache(struct spinand_device *spinand)
167749af7cdSPeter Pan {
168749af7cdSPeter Pan 	struct nand_device *nand = spinand_to_nand(spinand);
169749af7cdSPeter Pan 	struct udevice *dev = spinand->slave->dev;
170749af7cdSPeter Pan 	unsigned int target;
171749af7cdSPeter Pan 	int ret;
172749af7cdSPeter Pan 
173749af7cdSPeter Pan 	spinand->cfg_cache = devm_kzalloc(dev,
174749af7cdSPeter Pan 					  sizeof(*spinand->cfg_cache) *
175749af7cdSPeter Pan 					  nand->memorg.ntargets,
176749af7cdSPeter Pan 					  GFP_KERNEL);
177749af7cdSPeter Pan 	if (!spinand->cfg_cache)
178749af7cdSPeter Pan 		return -ENOMEM;
179749af7cdSPeter Pan 
180749af7cdSPeter Pan 	for (target = 0; target < nand->memorg.ntargets; target++) {
181749af7cdSPeter Pan 		ret = spinand_select_target(spinand, target);
182749af7cdSPeter Pan 		if (ret)
183749af7cdSPeter Pan 			return ret;
184749af7cdSPeter Pan 
185749af7cdSPeter Pan 		/*
186749af7cdSPeter Pan 		 * We use spinand_read_reg_op() instead of spinand_get_cfg()
187749af7cdSPeter Pan 		 * here to bypass the config cache.
188749af7cdSPeter Pan 		 */
189749af7cdSPeter Pan 		ret = spinand_read_reg_op(spinand, REG_CFG,
190749af7cdSPeter Pan 					  &spinand->cfg_cache[target]);
191749af7cdSPeter Pan 		if (ret)
192749af7cdSPeter Pan 			return ret;
193749af7cdSPeter Pan 	}
194749af7cdSPeter Pan 
195749af7cdSPeter Pan 	return 0;
196749af7cdSPeter Pan }
197749af7cdSPeter Pan 
spinand_init_quad_enable(struct spinand_device * spinand)198749af7cdSPeter Pan static int spinand_init_quad_enable(struct spinand_device *spinand)
199749af7cdSPeter Pan {
200749af7cdSPeter Pan 	bool enable = false;
201749af7cdSPeter Pan 
202749af7cdSPeter Pan 	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
203749af7cdSPeter Pan 		return 0;
204749af7cdSPeter Pan 
205749af7cdSPeter Pan 	if (spinand->op_templates.read_cache->data.buswidth == 4 ||
206749af7cdSPeter Pan 	    spinand->op_templates.write_cache->data.buswidth == 4 ||
207749af7cdSPeter Pan 	    spinand->op_templates.update_cache->data.buswidth == 4)
208749af7cdSPeter Pan 		enable = true;
209749af7cdSPeter Pan 
210749af7cdSPeter Pan 	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE,
211749af7cdSPeter Pan 			       enable ? CFG_QUAD_ENABLE : 0);
212749af7cdSPeter Pan }
213749af7cdSPeter Pan 
/* Turn the on-die ECC engine on or off via the CFG register. */
static int spinand_ecc_enable(struct spinand_device *spinand,
			      bool enable)
{
	return spinand_upd_cfg(spinand, CFG_ECC_ENABLE,
			       enable ? CFG_ECC_ENABLE : 0);
}
220749af7cdSPeter Pan 
/* Issue WRITE ENABLE so the next program/erase operation is accepted. */
static int spinand_write_enable_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_WR_EN_DIS_OP(true);

	return spi_mem_exec_op(spinand->slave, &op);
}
227749af7cdSPeter Pan 
/*
 * Issue PAGE READ: load the page at @req->pos from the array into the
 * on-die cache. The caller must poll for completion with spinand_wait().
 */
static int spinand_load_page_op(struct spinand_device *spinand,
				const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PAGE_READ_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
237749af7cdSPeter Pan 
/*
 * Transfer data/OOB from the on-die cache into the request buffers.
 *
 * Regular path: the full page (and/or full OOB) is bounced through
 * spinand->databuf / spinand->oobbuf, and only the slice the caller asked
 * for is copied out afterwards.
 *
 * Rockchip continuous-read path (spinand->support_cont_read): data is
 * transferred straight into req->databuf.in with no bounce copy.
 * NOTE(review): this path assumes req->dataoffs == 0 — the caller
 * (spinand_mtd_read) disables cont read for misaligned requests; confirm
 * no other caller relies on it.
 */
static int spinand_read_from_cache_op(struct spinand_device *spinand,
				      const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.read_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	if (req->datalen) {
		/* Regular path: read the whole page into the bounce buffer */
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.dataoffs = 0;
		adjreq.databuf.in = spinand->databuf;
		buf = spinand->databuf;
		nbytes = adjreq.datalen;
	}

	if (spinand->support_cont_read && req->datalen) {
		/* Continuous read: overrides the bounce-buffer setup above */
		adjreq.datalen = req->datalen;
		adjreq.dataoffs = 0;
		adjreq.databuf.in = req->databuf.in;
		buf = req->databuf.in;
		nbytes = adjreq.datalen;
	}

	if (req->ooblen) {
		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		adjreq.oobbuf.in = spinand->oobbuf;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			/* OOB-only read: start right after the data area */
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
	op.addr.val = column;

	/*
	 * Some controllers are limited in term of max RX data size. In this
	 * case, just repeat the READ_CACHE operation after updating the
	 * column.
	 */
	while (nbytes) {
		op.data.buf.in = buf;
		op.data.nbytes = nbytes;
		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;
	}

	/* Bounce-buffer path: copy out just the slice the caller requested */
	if (!spinand->support_cont_read && req->datalen)
		memcpy(req->databuf.in, spinand->databuf + req->dataoffs, req->datalen);

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_get_databytes(mtd, req->oobbuf.in,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
			       req->ooblen);
	}

	return 0;
}
317749af7cdSPeter Pan 
/*
 * Fill the on-die cache with the data/OOB of @req. The whole page+OOB
 * bounce buffer is always programmed (pre-filled with 0xff) to avoid
 * corrupting areas the caller did not touch. Large transfers are split
 * into one LOAD CACHE followed by RANDOM LOAD CACHE ops.
 */
static int spinand_write_to_cache_op(struct spinand_device *spinand,
				     const struct nand_page_io_req *req)
{
	struct spi_mem_op op = *spinand->op_templates.write_cache;
	struct nand_device *nand = spinand_to_nand(spinand);
	struct mtd_info *mtd = nanddev_to_mtd(nand);
	struct nand_page_io_req adjreq = *req;
	unsigned int nbytes = 0;
	void *buf = NULL;
	u16 column = 0;
	int ret;

	/*
	 * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
	 * the cache content to 0xFF (depends on vendor implementation), so we
	 * must fill the page cache entirely even if we only want to program
	 * the data portion of the page, otherwise we might corrupt the BBM or
	 * user data previously programmed in OOB area.
	 */
	memset(spinand->databuf, 0xff,
	       nanddev_page_size(nand) +
	       nanddev_per_page_oobsize(nand));

	if (req->datalen) {
		/* Merge the caller's data into the full-page bounce buffer */
		memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
		       req->datalen);
		adjreq.dataoffs = 0;
		adjreq.datalen = nanddev_page_size(nand);
		adjreq.databuf.out = spinand->databuf;
		nbytes = adjreq.datalen;
		buf = spinand->databuf;
	}

	if (req->ooblen) {
		if (req->mode == MTD_OPS_AUTO_OOB)
			mtd_ooblayout_set_databytes(mtd, req->oobbuf.out,
						    spinand->oobbuf,
						    req->ooboffs,
						    req->ooblen);
		else
			memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
			       req->ooblen);

		adjreq.ooblen = nanddev_per_page_oobsize(nand);
		adjreq.ooboffs = 0;
		nbytes += nanddev_per_page_oobsize(nand);
		if (!buf) {
			/* OOB-only write: start right after the data area */
			buf = spinand->oobbuf;
			column = nanddev_page_size(nand);
		}
	}

	spinand_cache_op_adjust_colum(spinand, &adjreq, &column);

	op = *spinand->op_templates.write_cache;
	op.addr.val = column;

	/*
	 * Some controllers are limited in term of max TX data size. In this
	 * case, split the operation into one LOAD CACHE and one or more
	 * LOAD RANDOM CACHE.
	 */
	while (nbytes) {
		op.data.buf.out = buf;
		op.data.nbytes = nbytes;

		ret = spi_mem_adjust_op_size(spinand->slave, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		buf += op.data.nbytes;
		nbytes -= op.data.nbytes;
		op.addr.val += op.data.nbytes;

		/*
		 * We need to use the RANDOM LOAD CACHE operation if there's
		 * more than one iteration, because the LOAD operation resets
		 * the cache to 0xff.
		 */
		if (nbytes) {
			column = op.addr.val;
			op = *spinand->op_templates.update_cache;
			op.addr.val = column;
		}
	}

	return 0;
}
410749af7cdSPeter Pan 
/*
 * Issue PROGRAM EXECUTE: commit the on-die cache contents to the page at
 * @req->pos. The caller must have issued WRITE ENABLE beforehand and must
 * poll for completion with spinand_wait().
 */
static int spinand_program_op(struct spinand_device *spinand,
			      const struct nand_page_io_req *req)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	unsigned int row = nanddev_pos_to_row(nand, &req->pos);
	struct spi_mem_op op = SPINAND_PROG_EXEC_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
420749af7cdSPeter Pan 
/*
 * Issue BLOCK ERASE for the block containing @pos. Does not send WRITE
 * ENABLE itself and does not wait for completion — both are the caller's
 * responsibility.
 */
static int spinand_erase_op(struct spinand_device *spinand,
			    const struct nand_pos *pos)
{
	struct nand_device *nand = &spinand->base;
	unsigned int row = nanddev_pos_to_row(nand, pos);
	struct spi_mem_op op = SPINAND_BLK_ERASE_OP(row);

	return spi_mem_exec_op(spinand->slave, &op);
}
430749af7cdSPeter Pan 
/*
 * Poll the STATUS register until the busy (OIP) bit clears or the timeout
 * expires. If @s is non-NULL, the last status value read is stored there
 * so the caller can inspect the fail/ECC bits.
 *
 * Return: 0 when the device is ready, -ETIMEDOUT if still busy, or a
 * negative error code if reading the status register failed.
 */
static int spinand_wait(struct spinand_device *spinand, u8 *s)
{
	unsigned long start, stop;
	u8 status;
	int ret;

	start = get_timer(0);
	stop = 400; /* timeout in get_timer() ticks (ms) */
	do {
		ret = spinand_read_status(spinand, &status);
		if (ret)
			return ret;

		if (!(status & STATUS_BUSY))
			goto out;
	} while (get_timer(start) < stop);

	/*
	 * Extra read, just in case the STATUS_READY bit has changed
	 * since our last check
	 */
	ret = spinand_read_status(spinand, &status);
	if (ret)
		return ret;

out:
	if (s)
		*s = status;

	return status & STATUS_BUSY ? -ETIMEDOUT : 0;
}
462749af7cdSPeter Pan 
spinand_read_id_op(struct spinand_device * spinand,u8 naddr,u8 ndummy,u8 * buf)46381afcfe1SJon Lin static int spinand_read_id_op(struct spinand_device *spinand, u8 naddr,
46481afcfe1SJon Lin 			      u8 ndummy, u8 *buf)
465749af7cdSPeter Pan {
46681afcfe1SJon Lin 	struct spi_mem_op op = SPINAND_READID_OP(
46781afcfe1SJon Lin 		naddr, ndummy, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
468749af7cdSPeter Pan 	int ret;
469749af7cdSPeter Pan 
470749af7cdSPeter Pan 	ret = spi_mem_exec_op(spinand->slave, &op);
471749af7cdSPeter Pan 	if (!ret)
472749af7cdSPeter Pan 		memcpy(buf, spinand->scratchbuf, SPINAND_MAX_ID_LEN);
473749af7cdSPeter Pan 
474749af7cdSPeter Pan 	return ret;
475749af7cdSPeter Pan }
476749af7cdSPeter Pan 
#if !CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
/*
 * Software-reset the device and wait until it reports ready again.
 * Compiled out for SUPPORT_USBPLUG configurations.
 */
static int spinand_reset_op(struct spinand_device *spinand)
{
	struct spi_mem_op op = SPINAND_RESET_OP;
	int ret;

	ret = spi_mem_exec_op(spinand->slave, &op);
	if (ret)
		return ret;

	return spinand_wait(spinand, NULL);
}
#endif
490749af7cdSPeter Pan 
/* Write the block-lock register to (un)protect regions of the array. */
static int spinand_lock_block(struct spinand_device *spinand, u8 lock)
{
	return spinand_write_reg_op(spinand, REG_BLOCK_LOCK, lock);
}
495749af7cdSPeter Pan 
/*
 * Decode the ECC bits of @status into the number of corrected bitflips.
 * Chips with a vendor-specific decoder delegate to eccinfo.get_status().
 *
 * Return: 0 for no bitflips, the ECC strength when bitflips were corrected
 * (exact count is unknown, so report the worst case to make wear-leveling
 * layers migrate the data), -EBADMSG for uncorrectable errors, -EINVAL for
 * an unrecognized status encoding.
 */
static int spinand_check_ecc_status(struct spinand_device *spinand, u8 status)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	u8 ecc;

	if (spinand->eccinfo.get_status)
		return spinand->eccinfo.get_status(spinand, status);

	ecc = status & STATUS_ECC_MASK;
	if (ecc == STATUS_ECC_NO_BITFLIPS)
		return 0;
	if (ecc == STATUS_ECC_HAS_BITFLIPS)
		return nand->eccreq.strength;
	if (ecc == STATUS_ECC_UNCOR_ERROR)
		return -EBADMSG;

	return -EINVAL;
}
524749af7cdSPeter Pan 
/*
 * Read one page: load it into the on-die cache, wait for the array read to
 * finish, then transfer the cache over the bus.
 *
 * Return: when @ecc_enabled, the decoded ECC result (max bitflips or
 * -EBADMSG); otherwise 0, or a negative error code on bus failures.
 */
static int spinand_read_page(struct spinand_device *spinand,
			     const struct nand_page_io_req *req,
			     bool ecc_enabled)
{
	u8 status = 0;
	int ret;

	ret = spinand_load_page_op(spinand, req);
	if (ret)
		return ret;

	ret = spinand_wait(spinand, &status);
	/*
	 * When there is data outside of OIP in the status, the status data is
	 * inaccurate and needs to be reconfirmed
	 */
	/*
	 * NOTE(review): quirk keyed on manufacturer ID 0x01 — presumably one
	 * specific vendor's parts; confirm which before relying on it.
	 */
	if (spinand->id.data[0] == 0x01 && status && !ret)
		ret = spinand_wait(spinand, &status);
	if (ret < 0)
		return ret;

	ret = spinand_read_from_cache_op(spinand, req);
	if (ret)
		return ret;

	/*
	 * Continuous-read mode: re-read the status after the cache transfer
	 * so the ECC bits decoded below reflect the whole transfer. Skipped
	 * when the controller runs in SPI_DMA_PREPARE mode.
	 */
	if (spinand->support_cont_read && !(spinand->slave->mode & SPI_DMA_PREPARE))
		spinand_wait(spinand, &status);

	if (!ecc_enabled)
		return 0;

	return spinand_check_ecc_status(spinand, status);
}
558749af7cdSPeter Pan 
spinand_write_page(struct spinand_device * spinand,const struct nand_page_io_req * req)559749af7cdSPeter Pan static int spinand_write_page(struct spinand_device *spinand,
560749af7cdSPeter Pan 			      const struct nand_page_io_req *req)
561749af7cdSPeter Pan {
562749af7cdSPeter Pan 	u8 status;
563749af7cdSPeter Pan 	int ret;
564749af7cdSPeter Pan 
565749af7cdSPeter Pan 	ret = spinand_write_enable_op(spinand);
566749af7cdSPeter Pan 	if (ret)
567749af7cdSPeter Pan 		return ret;
568749af7cdSPeter Pan 
569749af7cdSPeter Pan 	ret = spinand_write_to_cache_op(spinand, req);
570749af7cdSPeter Pan 	if (ret)
571749af7cdSPeter Pan 		return ret;
572749af7cdSPeter Pan 
573749af7cdSPeter Pan 	ret = spinand_program_op(spinand, req);
574749af7cdSPeter Pan 	if (ret)
575749af7cdSPeter Pan 		return ret;
576749af7cdSPeter Pan 
577749af7cdSPeter Pan 	ret = spinand_wait(spinand, &status);
578749af7cdSPeter Pan 	if (!ret && (status & STATUS_PROG_FAILED))
579749af7cdSPeter Pan 		ret = -EIO;
580749af7cdSPeter Pan 
581749af7cdSPeter Pan 	return ret;
582749af7cdSPeter Pan }
583749af7cdSPeter Pan 
/*
 * MTD ->_read_oob() hook: iterate over the pages covered by @ops, reading
 * each with ECC when the op mode and chip support it.
 *
 * Continuous-read handling (Rockchip extension): when enabled and the
 * request is page-aligned, the whole ops->len is fetched in a single
 * spinand_read_page() call and the loop exits after one iteration. The
 * original support_cont_read value is saved and restored around the loop
 * because it is toggled per-iteration for misaligned requests.
 *
 * Return: max corrected bitflips across all pages, or a negative error
 * code (-EBADMSG if any page had an uncorrectable error).
 */
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	unsigned int max_bitflips = 0;
	struct nand_io_iter iter;
	bool enable_ecc = false;
	bool ecc_failed = false;
	int ret = 0;
	bool cont_real = spinand->support_cont_read;

	if (ops->mode != MTD_OPS_RAW && spinand->eccinfo.ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		/* For misaligned situations, temporarily disable the cont read capability */
		if (iter.req.dataoffs)
			spinand->support_cont_read = false;
		else
			spinand->support_cont_read = cont_real;

		/* Continuous read: grab the whole request in one go, no OOB */
		if (spinand->support_cont_read) {
			iter.req.datalen = ops->len;
			iter.req.ooblen = 0;
		}
		ret = spinand_read_page(spinand, &iter.req, enable_ecc);
		if (ret < 0 && ret != -EBADMSG)
			break;

		if (ret == -EBADMSG) {
			ecc_failed = true;
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		}

		ret = 0;
		/* Continuous read covered everything: stop after one pass */
		if (spinand->support_cont_read) {
			ops->retlen = ops->len;
			ops->oobretlen = ops->ooblen;
			break;
		}

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	if (ecc_failed && !ret)
		ret = -EBADMSG;

	/* Restore the chip-wide cont-read capability flag */
	spinand->support_cont_read = cont_real;

	return ret ? ret : max_bitflips;
}
655749af7cdSPeter Pan 
/*
 * MTD ->_write_oob() hook: iterate over the pages covered by @ops and
 * program each one, with ECC when the op mode and layout allow it.
 *
 * NOTE(review): the ECC decision here tests mtd->ooblayout while the read
 * path tests spinand->eccinfo.ooblayout — presumably equivalent after
 * probe, but worth confirming they cannot diverge.
 */
static int spinand_mtd_write(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	bool enable_ecc = false;
	int ret = 0;

	if (ops->mode != MTD_OPS_RAW && mtd->ooblayout)
		enable_ecc = true;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif

	nanddev_io_for_each_page(nand, to, ops, &iter) {
		ret = spinand_select_target(spinand, iter.req.pos.target);
		if (ret)
			break;

		ret = spinand_ecc_enable(spinand, enable_ecc);
		if (ret)
			break;

		ret = spinand_write_page(spinand, &iter.req);
		if (ret)
			break;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
695749af7cdSPeter Pan 
spinand_isbad(struct nand_device * nand,const struct nand_pos * pos)696749af7cdSPeter Pan static bool spinand_isbad(struct nand_device *nand, const struct nand_pos *pos)
697749af7cdSPeter Pan {
698749af7cdSPeter Pan 	struct spinand_device *spinand = nand_to_spinand(nand);
699d537a52dSJon Lin 	u8 marker[2] = { };
700749af7cdSPeter Pan 	struct nand_page_io_req req = {
701749af7cdSPeter Pan 		.pos = *pos,
702d537a52dSJon Lin 		.ooblen = sizeof(marker),
703749af7cdSPeter Pan 		.ooboffs = 0,
704d537a52dSJon Lin 		.oobbuf.in = marker,
705749af7cdSPeter Pan 		.mode = MTD_OPS_RAW,
706749af7cdSPeter Pan 	};
707749af7cdSPeter Pan 
708d537a52dSJon Lin 	spinand_select_target(spinand, pos->target);
709d537a52dSJon Lin 	spinand_read_page(spinand, &req, false);
710d537a52dSJon Lin 	if (marker[0] != 0xff || marker[1] != 0xff)
711749af7cdSPeter Pan 		return true;
712749af7cdSPeter Pan 
713749af7cdSPeter Pan 	return false;
714749af7cdSPeter Pan }
715749af7cdSPeter Pan 
/*
 * mtd hook: report whether the block containing @offs is marked bad.
 * Translates the byte offset into a NAND position and delegates to the
 * generic NAND layer, holding the device lock in the Linux build.
 */
static int spinand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
735749af7cdSPeter Pan 
spinand_markbad(struct nand_device * nand,const struct nand_pos * pos)736749af7cdSPeter Pan static int spinand_markbad(struct nand_device *nand, const struct nand_pos *pos)
737749af7cdSPeter Pan {
738749af7cdSPeter Pan 	struct spinand_device *spinand = nand_to_spinand(nand);
73961939387SJon Lin 	u8 marker[2] = { 0, 0 };
740749af7cdSPeter Pan 	struct nand_page_io_req req = {
741749af7cdSPeter Pan 		.pos = *pos,
742749af7cdSPeter Pan 		.ooboffs = 0,
743d537a52dSJon Lin 		.ooblen = sizeof(marker),
744d537a52dSJon Lin 		.oobbuf.out = marker,
74535a88e77SJon Lin 		.mode = MTD_OPS_RAW,
746749af7cdSPeter Pan 	};
747749af7cdSPeter Pan 	int ret;
748749af7cdSPeter Pan 
749749af7cdSPeter Pan 	ret = spinand_select_target(spinand, pos->target);
750749af7cdSPeter Pan 	if (ret)
751749af7cdSPeter Pan 		return ret;
752749af7cdSPeter Pan 
7530a34ee1dSMikhail Kshevetskiy 	ret = spinand_write_enable_op(spinand);
7540a34ee1dSMikhail Kshevetskiy 	if (ret)
7550a34ee1dSMikhail Kshevetskiy 		return ret;
7560a34ee1dSMikhail Kshevetskiy 
757749af7cdSPeter Pan 	return spinand_write_page(spinand, &req);
758749af7cdSPeter Pan }
759749af7cdSPeter Pan 
/*
 * mtd hook: mark the block containing @offs as bad.
 * Converts the offset to a NAND position and delegates to the generic NAND
 * layer (which updates the BBT and/or writes the marker), holding the
 * device lock in the Linux build.
 */
static int spinand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
#ifndef __UBOOT__
	struct spinand_device *spinand = nand_to_spinand(nand);
#endif
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_markbad(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif
	return ret;
}
779749af7cdSPeter Pan 
spinand_erase(struct nand_device * nand,const struct nand_pos * pos)780749af7cdSPeter Pan static int spinand_erase(struct nand_device *nand, const struct nand_pos *pos)
781749af7cdSPeter Pan {
782749af7cdSPeter Pan 	struct spinand_device *spinand = nand_to_spinand(nand);
783749af7cdSPeter Pan 	u8 status;
784749af7cdSPeter Pan 	int ret;
785749af7cdSPeter Pan 
786749af7cdSPeter Pan 	ret = spinand_select_target(spinand, pos->target);
787749af7cdSPeter Pan 	if (ret)
788749af7cdSPeter Pan 		return ret;
789749af7cdSPeter Pan 
790749af7cdSPeter Pan 	ret = spinand_write_enable_op(spinand);
791749af7cdSPeter Pan 	if (ret)
792749af7cdSPeter Pan 		return ret;
793749af7cdSPeter Pan 
794749af7cdSPeter Pan 	ret = spinand_erase_op(spinand, pos);
795749af7cdSPeter Pan 	if (ret)
796749af7cdSPeter Pan 		return ret;
797749af7cdSPeter Pan 
798749af7cdSPeter Pan 	ret = spinand_wait(spinand, &status);
799749af7cdSPeter Pan 	if (!ret && (status & STATUS_ERASE_FAILED))
800749af7cdSPeter Pan 		ret = -EIO;
801749af7cdSPeter Pan 
802749af7cdSPeter Pan 	return ret;
803749af7cdSPeter Pan }
804749af7cdSPeter Pan 
/*
 * mtd hook: erase the range described by @einfo.
 * Thin wrapper around the generic NAND erase helper; holds the device lock
 * in the Linux build.
 */
static int spinand_mtd_erase(struct mtd_info *mtd,
			     struct erase_info *einfo)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	int ret;

#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_mtd_erase(mtd, einfo);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
823749af7cdSPeter Pan 
/*
 * mtd hook: report whether the block containing @offs is reserved (e.g.
 * used by the bad-block table). Delegates to the generic NAND layer,
 * holding the device lock in the Linux build.
 */
static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
{
#ifndef __UBOOT__
	struct spinand_device *spinand = mtd_to_spinand(mtd);
#endif
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_pos pos;
	int ret;

	nanddev_offs_to_pos(nand, offs, &pos);
#ifndef __UBOOT__
	mutex_lock(&spinand->lock);
#endif
	ret = nanddev_isreserved(nand, &pos);
#ifndef __UBOOT__
	mutex_unlock(&spinand->lock);
#endif

	return ret;
}
844749af7cdSPeter Pan 
845749af7cdSPeter Pan const struct spi_mem_op *
spinand_find_supported_op(struct spinand_device * spinand,const struct spi_mem_op * ops,unsigned int nops)846749af7cdSPeter Pan spinand_find_supported_op(struct spinand_device *spinand,
847749af7cdSPeter Pan 			  const struct spi_mem_op *ops,
848749af7cdSPeter Pan 			  unsigned int nops)
849749af7cdSPeter Pan {
850749af7cdSPeter Pan 	unsigned int i;
851749af7cdSPeter Pan 
852749af7cdSPeter Pan 	for (i = 0; i < nops; i++) {
853749af7cdSPeter Pan 		if (spi_mem_supports_op(spinand->slave, &ops[i]))
854749af7cdSPeter Pan 			return &ops[i];
855749af7cdSPeter Pan 	}
856749af7cdSPeter Pan 
857749af7cdSPeter Pan 	return NULL;
858749af7cdSPeter Pan }
859749af7cdSPeter Pan 
/* Hooks the generic NAND core uses to drive this SPI NAND device. */
static const struct nand_ops spinand_ops = {
	.erase = spinand_erase,
	.markbad = spinand_markbad,
	.isbad = spinand_isbad,
};
865749af7cdSPeter Pan 
/*
 * Registry of supported SPI NAND vendors. Each entry is compiled in only
 * when its Kconfig option is enabled; spinand_manufacturer_match() walks
 * this table in order when identifying a chip.
 */
static const struct spinand_manufacturer *spinand_manufacturers[] = {
#ifdef CONFIG_SPI_NAND_GIGADEVICE
	&gigadevice_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_MACRONIX
	&macronix_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_MICRON
	&micron_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_TOSHIBA
	&toshiba_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_WINBOND
	&winbond_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_DOSILICON
	&dosilicon_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_ESMT
	&esmt_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_XINCUN
	&xincun_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_XTX
	&xtx_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_HYF
	&hyf_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_FMSH
	&fmsh_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_FORESEE
	&foresee_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_BIWIN
	&biwin_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_ETRON
	&etron_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_JSC
	&jsc_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_SILICONGO
	&silicongo_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_UNIM
	/* UNIM ships parts under two distinct manufacturer tables. */
	&unim_spinand_manufacturer,
	&unim_zl_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_SKYHIGH
	&skyhigh_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_GSTO
	&gsto_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_ZBIT
	&zbit_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_HIKSEMI
	&hiksemi_spinand_manufacturer,
#endif
#ifdef CONFIG_SPI_NAND_KINGSTON
	&kingston_spinand_manufacturer,
#endif
};
935ed13557fSPeter Pan 
spinand_manufacturer_match(struct spinand_device * spinand,enum spinand_readid_method rdid_method)93681afcfe1SJon Lin static int spinand_manufacturer_match(struct spinand_device *spinand,
93781afcfe1SJon Lin 				      enum spinand_readid_method rdid_method)
938749af7cdSPeter Pan {
93981afcfe1SJon Lin 	u8 *id = spinand->id.data;
940ed13557fSPeter Pan 	unsigned int i;
941ed13557fSPeter Pan 	int ret;
942ed13557fSPeter Pan 
943ed13557fSPeter Pan 	for (i = 0; i < ARRAY_SIZE(spinand_manufacturers); i++) {
94481afcfe1SJon Lin 		const struct spinand_manufacturer *manufacturer =
94581afcfe1SJon Lin 			spinand_manufacturers[i];
94681afcfe1SJon Lin 
94781afcfe1SJon Lin 		if (id[0] != manufacturer->id)
94881afcfe1SJon Lin 			continue;
94981afcfe1SJon Lin 
95081afcfe1SJon Lin 		ret = spinand_match_and_init(spinand,
95181afcfe1SJon Lin 					     manufacturer->chips,
95281afcfe1SJon Lin 					     manufacturer->nchips,
95381afcfe1SJon Lin 					     rdid_method);
95481afcfe1SJon Lin 		if (ret < 0)
95581afcfe1SJon Lin 			continue;
95681afcfe1SJon Lin 
95781afcfe1SJon Lin 		spinand->manufacturer = manufacturer;
958ed13557fSPeter Pan 		return 0;
959ed13557fSPeter Pan 	}
96081afcfe1SJon Lin 	return -ENOTSUPP;
961ed13557fSPeter Pan }
962ed13557fSPeter Pan 
spinand_id_detect(struct spinand_device * spinand)96381afcfe1SJon Lin static int spinand_id_detect(struct spinand_device *spinand)
96481afcfe1SJon Lin {
96581afcfe1SJon Lin 	u8 *id = spinand->id.data;
96681afcfe1SJon Lin 	int ret;
96781afcfe1SJon Lin 
96881afcfe1SJon Lin 	ret = spinand_read_id_op(spinand, 0, 0, id);
96981afcfe1SJon Lin 	if (ret)
97081afcfe1SJon Lin 		return ret;
97181afcfe1SJon Lin 	ret = spinand_manufacturer_match(spinand, SPINAND_READID_METHOD_OPCODE);
97281afcfe1SJon Lin 	if (!ret)
97381afcfe1SJon Lin 		return 0;
97481afcfe1SJon Lin 
97581afcfe1SJon Lin 	ret = spinand_read_id_op(spinand, 1, 0, id);
97681afcfe1SJon Lin 	if (ret)
97781afcfe1SJon Lin 		return ret;
97881afcfe1SJon Lin 	ret = spinand_manufacturer_match(spinand,
97981afcfe1SJon Lin 					 SPINAND_READID_METHOD_OPCODE_ADDR);
98081afcfe1SJon Lin 	if (!ret)
98181afcfe1SJon Lin 		return 0;
98281afcfe1SJon Lin 
98381afcfe1SJon Lin 	ret = spinand_read_id_op(spinand, 0, 1, id);
98481afcfe1SJon Lin 	if (ret)
98581afcfe1SJon Lin 		return ret;
98681afcfe1SJon Lin 	ret = spinand_manufacturer_match(spinand,
98781afcfe1SJon Lin 					 SPINAND_READID_METHOD_OPCODE_DUMMY);
98881afcfe1SJon Lin 
98981afcfe1SJon Lin 	return ret;
990749af7cdSPeter Pan }
991749af7cdSPeter Pan 
spinand_manufacturer_init(struct spinand_device * spinand)992749af7cdSPeter Pan static int spinand_manufacturer_init(struct spinand_device *spinand)
993749af7cdSPeter Pan {
994749af7cdSPeter Pan 	if (spinand->manufacturer->ops->init)
995749af7cdSPeter Pan 		return spinand->manufacturer->ops->init(spinand);
996749af7cdSPeter Pan 
997749af7cdSPeter Pan 	return 0;
998749af7cdSPeter Pan }
999749af7cdSPeter Pan 
spinand_manufacturer_cleanup(struct spinand_device * spinand)1000749af7cdSPeter Pan static void spinand_manufacturer_cleanup(struct spinand_device *spinand)
1001749af7cdSPeter Pan {
1002749af7cdSPeter Pan 	/* Release manufacturer private data */
1003749af7cdSPeter Pan 	if (spinand->manufacturer->ops->cleanup)
1004749af7cdSPeter Pan 		return spinand->manufacturer->ops->cleanup(spinand);
1005749af7cdSPeter Pan }
1006749af7cdSPeter Pan 
1007749af7cdSPeter Pan static const struct spi_mem_op *
spinand_select_op_variant(struct spinand_device * spinand,const struct spinand_op_variants * variants)1008749af7cdSPeter Pan spinand_select_op_variant(struct spinand_device *spinand,
1009749af7cdSPeter Pan 			  const struct spinand_op_variants *variants)
1010749af7cdSPeter Pan {
1011749af7cdSPeter Pan 	struct nand_device *nand = spinand_to_nand(spinand);
1012749af7cdSPeter Pan 	unsigned int i;
1013749af7cdSPeter Pan 
1014749af7cdSPeter Pan 	for (i = 0; i < variants->nops; i++) {
1015749af7cdSPeter Pan 		struct spi_mem_op op = variants->ops[i];
1016749af7cdSPeter Pan 		unsigned int nbytes;
1017749af7cdSPeter Pan 		int ret;
1018749af7cdSPeter Pan 
1019749af7cdSPeter Pan 		nbytes = nanddev_per_page_oobsize(nand) +
1020749af7cdSPeter Pan 			 nanddev_page_size(nand);
1021749af7cdSPeter Pan 
1022749af7cdSPeter Pan 		while (nbytes) {
1023749af7cdSPeter Pan 			op.data.nbytes = nbytes;
1024749af7cdSPeter Pan 			ret = spi_mem_adjust_op_size(spinand->slave, &op);
1025749af7cdSPeter Pan 			if (ret)
1026749af7cdSPeter Pan 				break;
1027749af7cdSPeter Pan 
1028749af7cdSPeter Pan 			if (!spi_mem_supports_op(spinand->slave, &op))
1029749af7cdSPeter Pan 				break;
1030749af7cdSPeter Pan 
1031749af7cdSPeter Pan 			nbytes -= op.data.nbytes;
1032749af7cdSPeter Pan 		}
1033749af7cdSPeter Pan 
1034749af7cdSPeter Pan 		if (!nbytes)
1035749af7cdSPeter Pan 			return &variants->ops[i];
1036749af7cdSPeter Pan 	}
1037749af7cdSPeter Pan 
1038749af7cdSPeter Pan 	return NULL;
1039749af7cdSPeter Pan }
1040749af7cdSPeter Pan 
1041749af7cdSPeter Pan /**
1042749af7cdSPeter Pan  * spinand_match_and_init() - Try to find a match between a device ID and an
1043749af7cdSPeter Pan  *			      entry in a spinand_info table
1044749af7cdSPeter Pan  * @spinand: SPI NAND object
1045749af7cdSPeter Pan  * @table: SPI NAND device description table
1046749af7cdSPeter Pan  * @table_size: size of the device description table
104781afcfe1SJon Lin  * @rdid_method: read id method to match
1048749af7cdSPeter Pan  *
104981afcfe1SJon Lin  * Match between a device ID retrieved through the READ_ID command and an
1050749af7cdSPeter Pan  * entry in the SPI NAND description table. If a match is found, the spinand
1051749af7cdSPeter Pan  * object will be initialized with information provided by the matching
1052749af7cdSPeter Pan  * spinand_info entry.
1053749af7cdSPeter Pan  *
1054749af7cdSPeter Pan  * Return: 0 on success, a negative error code otherwise.
1055749af7cdSPeter Pan  */
spinand_match_and_init(struct spinand_device * spinand,const struct spinand_info * table,unsigned int table_size,enum spinand_readid_method rdid_method)1056749af7cdSPeter Pan int spinand_match_and_init(struct spinand_device *spinand,
1057749af7cdSPeter Pan 			   const struct spinand_info *table,
105881afcfe1SJon Lin 			   unsigned int table_size,
105981afcfe1SJon Lin 			   enum spinand_readid_method rdid_method)
1060749af7cdSPeter Pan {
106181afcfe1SJon Lin 	u8 *id = spinand->id.data;
1062749af7cdSPeter Pan 	struct nand_device *nand = spinand_to_nand(spinand);
1063749af7cdSPeter Pan 	unsigned int i;
1064749af7cdSPeter Pan 
1065749af7cdSPeter Pan 	for (i = 0; i < table_size; i++) {
1066749af7cdSPeter Pan 		const struct spinand_info *info = &table[i];
1067749af7cdSPeter Pan 		const struct spi_mem_op *op;
1068749af7cdSPeter Pan 
106981afcfe1SJon Lin 		if (rdid_method != info->devid.method)
107081afcfe1SJon Lin 			continue;
107181afcfe1SJon Lin 
107281afcfe1SJon Lin 		if (memcmp(id + 1, info->devid.id, info->devid.len))
1073749af7cdSPeter Pan 			continue;
1074749af7cdSPeter Pan 
1075749af7cdSPeter Pan 		nand->memorg = table[i].memorg;
1076749af7cdSPeter Pan 		nand->eccreq = table[i].eccreq;
1077749af7cdSPeter Pan 		spinand->eccinfo = table[i].eccinfo;
1078749af7cdSPeter Pan 		spinand->flags = table[i].flags;
107981afcfe1SJon Lin 		spinand->id.len = 1 + table[i].devid.len;
1080749af7cdSPeter Pan 		spinand->select_target = table[i].select_target;
1081749af7cdSPeter Pan 
1082749af7cdSPeter Pan 		op = spinand_select_op_variant(spinand,
1083749af7cdSPeter Pan 					       info->op_variants.read_cache);
1084749af7cdSPeter Pan 		if (!op)
1085749af7cdSPeter Pan 			return -ENOTSUPP;
1086749af7cdSPeter Pan 
1087749af7cdSPeter Pan 		spinand->op_templates.read_cache = op;
1088749af7cdSPeter Pan 
1089749af7cdSPeter Pan 		op = spinand_select_op_variant(spinand,
1090749af7cdSPeter Pan 					       info->op_variants.write_cache);
1091749af7cdSPeter Pan 		if (!op)
1092749af7cdSPeter Pan 			return -ENOTSUPP;
1093749af7cdSPeter Pan 
1094749af7cdSPeter Pan 		spinand->op_templates.write_cache = op;
1095749af7cdSPeter Pan 
1096749af7cdSPeter Pan 		op = spinand_select_op_variant(spinand,
1097749af7cdSPeter Pan 					       info->op_variants.update_cache);
1098749af7cdSPeter Pan 		spinand->op_templates.update_cache = op;
1099749af7cdSPeter Pan 
1100749af7cdSPeter Pan 		return 0;
1101749af7cdSPeter Pan 	}
1102749af7cdSPeter Pan 
1103749af7cdSPeter Pan 	return -ENOTSUPP;
1104749af7cdSPeter Pan }
1105749af7cdSPeter Pan 
/*
 * Identify the attached chip: reset it (skipped in the usbplug build),
 * read and match its ID against the manufacturer tables, then verify that
 * multi-die parts provide a ->select_target() hook.
 * Returns 0 on success or a negative error code.
 */
static int spinand_detect(struct spinand_device *spinand)
{
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#if !CONFIG_IS_ENABLED(SUPPORT_USBPLUG)
	/* Bring the chip to a known state before issuing READ_ID. */
	ret = spinand_reset_op(spinand);
	if (ret)
		return ret;
#endif

	ret = spinand_id_detect(spinand);
	if (ret) {
		dev_err(dev, "unknown raw ID %x %x %x\n",
			spinand->id.data[0], spinand->id.data[1], spinand->id.data[2]);
		return ret;
	}
	/*
	 * NOTE(review): informational print issued via dev_err() —
	 * presumably to keep the ID visible at the default loglevel; confirm
	 * before demoting to dev_info().
	 */
	dev_err(dev, "SPI Nand ID %x %x %x\n",
		spinand->id.data[0], spinand->id.data[1], spinand->id.data[2]);

	/* A multi-die chip is unusable without a die-selection hook. */
	if (nand->memorg.ntargets > 1 && !spinand->select_target) {
		dev_err(dev,
			"SPI NANDs with more than one die must implement ->select_target()\n");
		return -EINVAL;
	}

	dev_info(spinand->slave->dev,
		 "%s SPI NAND was found.\n", spinand->manufacturer->name);
	dev_info(spinand->slave->dev,
		 "%llu MiB, block size: %zu KiB, page size: %zu, OOB size: %u\n",
		 nanddev_size(nand) >> 20, nanddev_eraseblock_size(nand) >> 10,
		 nanddev_page_size(nand), nanddev_per_page_oobsize(nand));

	return 0;
}
1141749af7cdSPeter Pan 
/*
 * ooblayout .ecc hook for the no-ECC layout: there are no ECC bytes in the
 * OOB area, so every section index is out of range.
 */
static int spinand_noecc_ooblayout_ecc(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *region)
{
	return -ERANGE;
}
1147749af7cdSPeter Pan 
spinand_noecc_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * region)1148749af7cdSPeter Pan static int spinand_noecc_ooblayout_free(struct mtd_info *mtd, int section,
1149749af7cdSPeter Pan 					struct mtd_oob_region *region)
1150749af7cdSPeter Pan {
1151749af7cdSPeter Pan 	if (section)
1152749af7cdSPeter Pan 		return -ERANGE;
1153749af7cdSPeter Pan 
1154749af7cdSPeter Pan 	/* Reserve 2 bytes for the BBM. */
1155749af7cdSPeter Pan 	region->offset = 2;
1156749af7cdSPeter Pan 	region->length = 62;
1157749af7cdSPeter Pan 
1158749af7cdSPeter Pan 	return 0;
1159749af7cdSPeter Pan }
1160749af7cdSPeter Pan 
/* Fallback OOB layout used when the chip supplies no ECC-aware layout. */
static const struct mtd_ooblayout_ops spinand_noecc_ooblayout = {
	.ecc = spinand_noecc_ooblayout_ecc,
	.rfree = spinand_noecc_ooblayout_free,
};
1165749af7cdSPeter Pan 
/*
 * Full one-time initialization of a detected SPI NAND device: allocate DMA
 * buffers, detect the chip, configure it (quad enable, OTP off, vendor
 * init), unlock all blocks, register with the generic NAND core and wire
 * up the mtd_info hooks. On failure every step is unwound via the goto
 * chain at the bottom. Returns 0 on success or a negative error code.
 */
static int spinand_init(struct spinand_device *spinand)
{
	struct mtd_info *mtd = spinand_to_mtd(spinand);
	struct nand_device *nand = mtd_to_nanddev(mtd);
	int ret, i;

	/*
	 * We need a scratch buffer because the spi_mem interface requires that
	 * buf passed in spi_mem_op->data.buf be DMA-able.
	 */
	spinand->scratchbuf = kzalloc(SPINAND_MAX_ID_LEN, GFP_KERNEL);
	if (!spinand->scratchbuf)
		return -ENOMEM;

	ret = spinand_detect(spinand);
	if (ret)
		goto err_free_bufs;

	/*
	 * Use kzalloc() instead of devm_kzalloc() here, because some drivers
	 * may use this buffer for DMA access.
	 * Memory allocated by devm_ does not guarantee DMA-safe alignment.
	 */
	spinand->databuf = kzalloc(nanddev_page_size(nand) +
			       nanddev_per_page_oobsize(nand),
			       GFP_KERNEL);
	if (!spinand->databuf) {
		ret = -ENOMEM;
		goto err_free_bufs;
	}

	/* OOB bytes live directly after the page data in the same buffer. */
	spinand->oobbuf = spinand->databuf + nanddev_page_size(nand);

	ret = spinand_init_cfg_cache(spinand);
	if (ret)
		goto err_free_bufs;

	ret = spinand_init_quad_enable(spinand);
	if (ret)
		goto err_free_bufs;

	/* Leave OTP mode so normal array access works. */
	ret = spinand_upd_cfg(spinand, CFG_OTP_ENABLE, 0);
	if (ret)
		goto err_free_bufs;

	ret = spinand_manufacturer_init(spinand);
	if (ret) {
		dev_err(dev,
			"Failed to initialize the SPI NAND chip (err = %d)\n",
			ret);
		goto err_free_bufs;
	}

	/* After power up, all blocks are locked, so unlock them here. */
	for (i = 0; i < nand->memorg.ntargets; i++) {
		ret = spinand_select_target(spinand, i);
		if (ret)
			goto err_free_bufs;

		/* HWP_EN must be enabled first before block unlock region is set */
		/* NOTE(review): 0x01 is a specific manufacturer ID byte —
		 * confirm which vendor requires this HWP_EN sequence.
		 */
		if (spinand->id.data[0] == 0x01) {
			ret = spinand_lock_block(spinand, HWP_EN);
			if (ret)
				goto err_free_bufs;
		}

		ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
		if (ret)
			goto err_free_bufs;
	}

	/* Keep the bad-block table on flash rather than rescanning. */
	nand->bbt.option = NANDDEV_BBT_USE_FLASH;
	ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
	if (ret)
		goto err_manuf_cleanup;

	/*
	 * Right now, we don't support ECC, so let the whole oob
	 * area is available for user.
	 */
	mtd->_read_oob = spinand_mtd_read;
	mtd->_write_oob = spinand_mtd_write;
	mtd->_block_isbad = spinand_mtd_block_isbad;
	mtd->_block_markbad = spinand_mtd_block_markbad;
	mtd->_block_isreserved = spinand_mtd_block_isreserved;
	mtd->_erase = spinand_mtd_erase;

	if (spinand->eccinfo.ooblayout)
		mtd_set_ooblayout(mtd, spinand->eccinfo.ooblayout);
	else
		mtd_set_ooblayout(mtd, &spinand_noecc_ooblayout);

	ret = mtd_ooblayout_count_freebytes(mtd);
	if (ret < 0)
		goto err_cleanup_nanddev;

	mtd->oobavail = ret;

	/* Propagate ECC information to mtd_info */
	mtd->ecc_strength = nand->eccreq.strength;
	mtd->ecc_step_size = nand->eccreq.step_size;

	return 0;

err_cleanup_nanddev:
	nanddev_cleanup(nand);

err_manuf_cleanup:
	spinand_manufacturer_cleanup(spinand);

err_free_bufs:
	kfree(spinand->databuf);
	kfree(spinand->scratchbuf);
	return ret;
}
1281749af7cdSPeter Pan 
spinand_cleanup(struct spinand_device * spinand)1282749af7cdSPeter Pan static void spinand_cleanup(struct spinand_device *spinand)
1283749af7cdSPeter Pan {
1284749af7cdSPeter Pan 	struct nand_device *nand = spinand_to_nand(spinand);
1285749af7cdSPeter Pan 
1286749af7cdSPeter Pan 	nanddev_cleanup(nand);
1287749af7cdSPeter Pan 	spinand_manufacturer_cleanup(spinand);
1288749af7cdSPeter Pan 	kfree(spinand->databuf);
1289749af7cdSPeter Pan 	kfree(spinand->scratchbuf);
1290749af7cdSPeter Pan }
1291749af7cdSPeter Pan 
/*
 * DM bind hook: when MTD block support is compiled in, attach a block
 * device child so the chip is accessible through the block layer.
 */
static int spinand_bind(struct udevice *udev)
{
#ifdef CONFIG_MTD_BLK
	struct udevice *bdev;
	int ret;

	ret = blk_create_devicef(udev, "mtd_blk", "blk", IF_TYPE_MTD,
				 BLK_MTD_SPI_NAND, 512, 0, &bdev);
	if (ret) {
		printf("Cannot create block device\n");
		return ret;
	}
#endif
	return 0;
}
130605b7d884SJason Zhu 
/*
 * DM probe hook: wire the spinand/nand/mtd objects together, name the MTD
 * partition ("spi-nandN"), run full chip initialization and register the
 * MTD device. Returns 0 on success or a negative error code.
 */
static int spinand_probe(struct udevice *dev)
{
	struct spinand_device *spinand = dev_get_priv(dev);
	struct spi_slave *slave = dev_get_parent_priv(dev);
	struct mtd_info *mtd = dev_get_uclass_priv(dev);
	struct nand_device *nand = spinand_to_nand(spinand);
	int ret;

#ifndef __UBOOT__
	/*
	 * NOTE(review): this branch is never compiled in U-Boot and still
	 * references `mem` from the original Linux spi-mem probe signature —
	 * it would not build if enabled.
	 */
	spinand = devm_kzalloc(&mem->spi->dev, sizeof(*spinand),
			       GFP_KERNEL);
	if (!spinand)
		return -ENOMEM;

	spinand->spimem = mem;
	spi_mem_set_drvdata(mem, spinand);
	spinand_set_of_node(spinand, mem->spi->dev.of_node);
	mutex_init(&spinand->lock);

	mtd = spinand_to_mtd(spinand);
	mtd->dev.parent = &mem->spi->dev;
#else
	nand->mtd = mtd;
	mtd->priv = nand;
	mtd->dev = dev;
	/* Buffer for "spi-nand" + index; freed in spinand_remove(). */
	mtd->name = malloc(20);
	if (!mtd->name)
		return -ENOMEM;
	sprintf(mtd->name, "spi-nand%d", spi_nand_idx++);
	spinand->slave = slave;
	spinand_set_of_node(spinand, dev->node.np);
#endif

	ret = spinand_init(spinand);
	if (ret)
		return ret;

#ifndef __UBOOT__
	ret = mtd_device_register(mtd, NULL, 0);
#else
	ret = add_mtd_device(mtd);
#endif
	if (ret)
		goto err_spinand_cleanup;

	return 0;

err_spinand_cleanup:
	spinand_cleanup(spinand);

	return ret;
}
1359749af7cdSPeter Pan 
1360749af7cdSPeter Pan #ifndef __UBOOT__
spinand_remove(struct udevice * slave)1361749af7cdSPeter Pan static int spinand_remove(struct udevice *slave)
1362749af7cdSPeter Pan {
1363749af7cdSPeter Pan 	struct spinand_device *spinand;
1364749af7cdSPeter Pan 	struct mtd_info *mtd;
1365749af7cdSPeter Pan 	int ret;
1366749af7cdSPeter Pan 
1367749af7cdSPeter Pan 	spinand = spi_mem_get_drvdata(slave);
1368749af7cdSPeter Pan 	mtd = spinand_to_mtd(spinand);
1369749af7cdSPeter Pan 	free(mtd->name);
1370749af7cdSPeter Pan 
1371749af7cdSPeter Pan 	ret = mtd_device_unregister(mtd);
1372749af7cdSPeter Pan 	if (ret)
1373749af7cdSPeter Pan 		return ret;
1374749af7cdSPeter Pan 
1375749af7cdSPeter Pan 	spinand_cleanup(spinand);
1376749af7cdSPeter Pan 
1377749af7cdSPeter Pan 	return 0;
1378749af7cdSPeter Pan }
1379749af7cdSPeter Pan 
/* SPI device-id match table for the generic "spi-nand" device. */
static const struct spi_device_id spinand_ids[] = {
	{ .name = "spi-nand" },
	{ /* sentinel */ },
};

#ifdef CONFIG_OF
/* Device-tree compatible match table (only built with CONFIG_OF). */
static const struct of_device_id spinand_of_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
#endif

/*
 * Linux spi-mem driver glue: registers probe/remove for "spi-nand"
 * devices. This entire section is inside #ifndef __UBOOT__ and is
 * compiled out in U-Boot builds.
 */
static struct spi_mem_driver spinand_drv = {
	.spidrv = {
		.id_table = spinand_ids,
		.driver = {
			.name = "spi-nand",
			.of_match_table = of_match_ptr(spinand_of_ids),
		},
	},
	.probe = spinand_probe,
	.remove = spinand_remove,
};
module_spi_mem_driver(spinand_drv);

MODULE_DESCRIPTION("SPI NAND framework");
MODULE_AUTHOR("Peter Pan<peterpandong@micron.com>");
MODULE_LICENSE("GPL v2");
1408749af7cdSPeter Pan #endif /* __UBOOT__ */
1409749af7cdSPeter Pan 
/* U-Boot driver-model match table: binds to "spi-nand" DT nodes. */
static const struct udevice_id spinand_ids[] = {
	{ .compatible = "spi-nand" },
	{ /* sentinel */ },
};
1414749af7cdSPeter Pan 
/*
 * U-Boot driver declaration: MTD-uclass driver with per-device private
 * data sized for struct spinand_device. bind creates the optional block
 * device child; probe detects and registers the chip.
 */
U_BOOT_DRIVER(spinand) = {
	.name = "spi_nand",
	.id = UCLASS_MTD,
	.of_match = spinand_ids,
	.bind	= spinand_bind,
	.priv_auto_alloc_size = sizeof(struct spinand_device),
	.probe = spinand_probe,
};
1423