xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/raw/nand_base.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  Overview:
4*4882a593Smuzhiyun  *   This is the generic MTD driver for NAND flash devices. It should be
5*4882a593Smuzhiyun  *   capable of working with almost all NAND chips currently available.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  *	Additional technical information is available on
8*4882a593Smuzhiyun  *	http://www.linux-mtd.infradead.org/doc/nand.html
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11*4882a593Smuzhiyun  *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  *  Credits:
14*4882a593Smuzhiyun  *	David Woodhouse for adding multichip support
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17*4882a593Smuzhiyun  *	rework for 2K page size chips
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *  TODO:
20*4882a593Smuzhiyun  *	Enable cached programming for 2k page size chips
21*4882a593Smuzhiyun  *	Check, if mtd->ecctype should be set to MTD_ECC_HW
22*4882a593Smuzhiyun  *	if we have HW ECC support.
23*4882a593Smuzhiyun  *	BBT table is not serialized, has to be fixed
24*4882a593Smuzhiyun  */
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include <linux/module.h>
29*4882a593Smuzhiyun #include <linux/delay.h>
30*4882a593Smuzhiyun #include <linux/errno.h>
31*4882a593Smuzhiyun #include <linux/err.h>
32*4882a593Smuzhiyun #include <linux/sched.h>
33*4882a593Smuzhiyun #include <linux/slab.h>
34*4882a593Smuzhiyun #include <linux/mm.h>
35*4882a593Smuzhiyun #include <linux/types.h>
36*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
37*4882a593Smuzhiyun #include <linux/mtd/nand.h>
38*4882a593Smuzhiyun #include <linux/mtd/nand_ecc.h>
39*4882a593Smuzhiyun #include <linux/mtd/nand_bch.h>
40*4882a593Smuzhiyun #include <linux/interrupt.h>
41*4882a593Smuzhiyun #include <linux/bitops.h>
42*4882a593Smuzhiyun #include <linux/io.h>
43*4882a593Smuzhiyun #include <linux/mtd/partitions.h>
44*4882a593Smuzhiyun #include <linux/of.h>
45*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #include "internals.h"
48*4882a593Smuzhiyun 
nand_pairing_dist3_get_info(struct mtd_info * mtd,int page,struct mtd_pairing_info * info)49*4882a593Smuzhiyun static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
50*4882a593Smuzhiyun 				       struct mtd_pairing_info *info)
51*4882a593Smuzhiyun {
52*4882a593Smuzhiyun 	int lastpage = (mtd->erasesize / mtd->writesize) - 1;
53*4882a593Smuzhiyun 	int dist = 3;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	if (page == lastpage)
56*4882a593Smuzhiyun 		dist = 2;
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	if (!page || (page & 1)) {
59*4882a593Smuzhiyun 		info->group = 0;
60*4882a593Smuzhiyun 		info->pair = (page + 1) / 2;
61*4882a593Smuzhiyun 	} else {
62*4882a593Smuzhiyun 		info->group = 1;
63*4882a593Smuzhiyun 		info->pair = (page + 1 - dist) / 2;
64*4882a593Smuzhiyun 	}
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	return 0;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun 
nand_pairing_dist3_get_wunit(struct mtd_info * mtd,const struct mtd_pairing_info * info)69*4882a593Smuzhiyun static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
70*4882a593Smuzhiyun 					const struct mtd_pairing_info *info)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun 	int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
73*4882a593Smuzhiyun 	int page = info->pair * 2;
74*4882a593Smuzhiyun 	int dist = 3;
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun 	if (!info->group && !info->pair)
77*4882a593Smuzhiyun 		return 0;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	if (info->pair == lastpair && info->group)
80*4882a593Smuzhiyun 		dist = 2;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	if (!info->group)
83*4882a593Smuzhiyun 		page--;
84*4882a593Smuzhiyun 	else if (info->pair)
85*4882a593Smuzhiyun 		page += dist - 1;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	if (page >= mtd->erasesize / mtd->writesize)
88*4882a593Smuzhiyun 		return -EINVAL;
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	return page;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun 
/*
 * Pairing scheme where the two pages of a pair sit 3 pages apart (2 for the
 * last pair of an erase block); see nand_pairing_dist3_get_info() and
 * nand_pairing_dist3_get_wunit() above for the exact mapping.
 */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
98*4882a593Smuzhiyun 
/*
 * Validate that both the start offset and the length of an operation are
 * aligned on an erase-block boundary. Returns 0 when aligned, -EINVAL
 * otherwise (both checks are run so each misalignment gets its own debug
 * message).
 */
static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
	uint64_t blockmask = (1ULL << chip->phys_erase_shift) - 1;
	int ret = 0;

	if (ofs & blockmask) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	if (len & blockmask) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /**
119*4882a593Smuzhiyun  * nand_extract_bits - Copy unaligned bits from one buffer to another one
120*4882a593Smuzhiyun  * @dst: destination buffer
121*4882a593Smuzhiyun  * @dst_off: bit offset at which the writing starts
122*4882a593Smuzhiyun  * @src: source buffer
123*4882a593Smuzhiyun  * @src_off: bit offset at which the reading starts
124*4882a593Smuzhiyun  * @nbits: number of bits to copy from @src to @dst
125*4882a593Smuzhiyun  *
126*4882a593Smuzhiyun  * Copy bits from one memory region to another (overlap authorized).
127*4882a593Smuzhiyun  */
nand_extract_bits(u8 * dst,unsigned int dst_off,const u8 * src,unsigned int src_off,unsigned int nbits)128*4882a593Smuzhiyun void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
129*4882a593Smuzhiyun 		       unsigned int src_off, unsigned int nbits)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	unsigned int tmp, n;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	dst += dst_off / 8;
134*4882a593Smuzhiyun 	dst_off %= 8;
135*4882a593Smuzhiyun 	src += src_off / 8;
136*4882a593Smuzhiyun 	src_off %= 8;
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	while (nbits) {
139*4882a593Smuzhiyun 		n = min3(8 - dst_off, 8 - src_off, nbits);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
142*4882a593Smuzhiyun 		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
143*4882a593Smuzhiyun 		*dst |= tmp << dst_off;
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 		dst_off += n;
146*4882a593Smuzhiyun 		if (dst_off >= 8) {
147*4882a593Smuzhiyun 			dst++;
148*4882a593Smuzhiyun 			dst_off -= 8;
149*4882a593Smuzhiyun 		}
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 		src_off += n;
152*4882a593Smuzhiyun 		if (src_off >= 8) {
153*4882a593Smuzhiyun 			src++;
154*4882a593Smuzhiyun 			src_off -= 8;
155*4882a593Smuzhiyun 		}
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 		nbits -= n;
158*4882a593Smuzhiyun 	}
159*4882a593Smuzhiyun }
160*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_extract_bits);
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun /**
163*4882a593Smuzhiyun  * nand_select_target() - Select a NAND target (A.K.A. die)
164*4882a593Smuzhiyun  * @chip: NAND chip object
165*4882a593Smuzhiyun  * @cs: the CS line to select. Note that this CS id is always from the chip
166*4882a593Smuzhiyun  *	PoV, not the controller one
167*4882a593Smuzhiyun  *
168*4882a593Smuzhiyun  * Select a NAND target so that further operations executed on @chip go to the
169*4882a593Smuzhiyun  * selected NAND target.
170*4882a593Smuzhiyun  */
nand_select_target(struct nand_chip * chip,unsigned int cs)171*4882a593Smuzhiyun void nand_select_target(struct nand_chip *chip, unsigned int cs)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun 	/*
174*4882a593Smuzhiyun 	 * cs should always lie between 0 and nanddev_ntargets(), when that's
175*4882a593Smuzhiyun 	 * not the case it's a bug and the caller should be fixed.
176*4882a593Smuzhiyun 	 */
177*4882a593Smuzhiyun 	if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
178*4882a593Smuzhiyun 		return;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	chip->cur_cs = cs;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	if (chip->legacy.select_chip)
183*4882a593Smuzhiyun 		chip->legacy.select_chip(chip, cs);
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_select_target);
186*4882a593Smuzhiyun 
/**
 * nand_deselect_target() - Deselect the currently selected target
 * @chip: NAND chip object
 *
 * Deselect the currently selected NAND target. The result of operations
 * executed on @chip after the target has been deselected is undefined.
 */
void nand_deselect_target(struct nand_chip *chip)
{
	/* Let the controller driver release the CS line first (cs = -1)... */
	if (chip->legacy.select_chip)
		chip->legacy.select_chip(chip, -1);

	/* ...then record that no target is currently selected. */
	chip->cur_cs = -1;
}
EXPORT_SYMBOL_GPL(nand_deselect_target);
202*4882a593Smuzhiyun 
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct nand_chip *chip)
{
	/*
	 * Release the controller and the chip, in the reverse order of
	 * nand_get_device() (which takes chip->lock first, then
	 * chip->controller->lock).
	 */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun /**
217*4882a593Smuzhiyun  * nand_bbm_get_next_page - Get the next page for bad block markers
218*4882a593Smuzhiyun  * @chip: NAND chip object
219*4882a593Smuzhiyun  * @page: First page to start checking for bad block marker usage
220*4882a593Smuzhiyun  *
221*4882a593Smuzhiyun  * Returns an integer that corresponds to the page offset within a block, for
222*4882a593Smuzhiyun  * a page that is used to store bad block markers. If no more pages are
223*4882a593Smuzhiyun  * available, -EINVAL is returned.
224*4882a593Smuzhiyun  */
nand_bbm_get_next_page(struct nand_chip * chip,int page)225*4882a593Smuzhiyun int nand_bbm_get_next_page(struct nand_chip *chip, int page)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
228*4882a593Smuzhiyun 	int last_page = ((mtd->erasesize - mtd->writesize) >>
229*4882a593Smuzhiyun 			 chip->page_shift) & chip->pagemask;
230*4882a593Smuzhiyun 	unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
231*4882a593Smuzhiyun 		| NAND_BBM_LASTPAGE;
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	if (page == 0 && !(chip->options & bbm_flags))
234*4882a593Smuzhiyun 		return 0;
235*4882a593Smuzhiyun 	if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
236*4882a593Smuzhiyun 		return 0;
237*4882a593Smuzhiyun 	if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
238*4882a593Smuzhiyun 		return 1;
239*4882a593Smuzhiyun 	if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
240*4882a593Smuzhiyun 		return last_page;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	return -EINVAL;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun /**
246*4882a593Smuzhiyun  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
247*4882a593Smuzhiyun  * @chip: NAND chip object
248*4882a593Smuzhiyun  * @ofs: offset from device start
249*4882a593Smuzhiyun  *
250*4882a593Smuzhiyun  * Check, if the block is bad.
251*4882a593Smuzhiyun  */
nand_block_bad(struct nand_chip * chip,loff_t ofs)252*4882a593Smuzhiyun static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	int first_page, page_offset;
255*4882a593Smuzhiyun 	int res;
256*4882a593Smuzhiyun 	u8 bad;
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun 	first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
259*4882a593Smuzhiyun 	page_offset = nand_bbm_get_next_page(chip, 0);
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	while (page_offset >= 0) {
262*4882a593Smuzhiyun 		res = chip->ecc.read_oob(chip, first_page + page_offset);
263*4882a593Smuzhiyun 		if (res < 0)
264*4882a593Smuzhiyun 			return res;
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 		bad = chip->oob_poi[chip->badblockpos];
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 		if (likely(chip->badblockbits == 8))
269*4882a593Smuzhiyun 			res = bad != 0xFF;
270*4882a593Smuzhiyun 		else
271*4882a593Smuzhiyun 			res = hweight8(bad) < chip->badblockbits;
272*4882a593Smuzhiyun 		if (res)
273*4882a593Smuzhiyun 			return res;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
276*4882a593Smuzhiyun 	}
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	return 0;
279*4882a593Smuzhiyun }
280*4882a593Smuzhiyun 
/*
 * Check a block's bad block marker, preferring the driver's legacy hook over
 * the generic implementation. Chips flagged NAND_NO_BBM_QUIRK never report
 * bad via BBM.
 */
static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
{
	if (chip->options & NAND_NO_BBM_QUIRK)
		return 0;

	return chip->legacy.block_bad ? chip->legacy.block_bad(chip, ofs)
				      : nand_block_bad(chip, ofs);
}
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun /**
293*4882a593Smuzhiyun  * nand_get_device - [GENERIC] Get chip for selected access
294*4882a593Smuzhiyun  * @chip: NAND chip structure
295*4882a593Smuzhiyun  *
296*4882a593Smuzhiyun  * Lock the device and its controller for exclusive access
297*4882a593Smuzhiyun  *
298*4882a593Smuzhiyun  * Return: -EBUSY if the chip has been suspended, 0 otherwise
299*4882a593Smuzhiyun  */
nand_get_device(struct nand_chip * chip)300*4882a593Smuzhiyun static void nand_get_device(struct nand_chip *chip)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	/* Wait until the device is resumed. */
303*4882a593Smuzhiyun 	while (1) {
304*4882a593Smuzhiyun 		mutex_lock(&chip->lock);
305*4882a593Smuzhiyun 		if (!chip->suspended) {
306*4882a593Smuzhiyun 			mutex_lock(&chip->controller->lock);
307*4882a593Smuzhiyun 			return;
308*4882a593Smuzhiyun 		}
309*4882a593Smuzhiyun 		mutex_unlock(&chip->lock);
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 		wait_event(chip->resume_wq, !chip->suspended);
312*4882a593Smuzhiyun 	}
313*4882a593Smuzhiyun }
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun /**
316*4882a593Smuzhiyun  * nand_check_wp - [GENERIC] check if the chip is write protected
317*4882a593Smuzhiyun  * @chip: NAND chip object
318*4882a593Smuzhiyun  *
319*4882a593Smuzhiyun  * Check, if the device is write protected. The function expects, that the
320*4882a593Smuzhiyun  * device is already selected.
321*4882a593Smuzhiyun  */
nand_check_wp(struct nand_chip * chip)322*4882a593Smuzhiyun static int nand_check_wp(struct nand_chip *chip)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	u8 status;
325*4882a593Smuzhiyun 	int ret;
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	/* Broken xD cards report WP despite being writable */
328*4882a593Smuzhiyun 	if (chip->options & NAND_BROKEN_XD)
329*4882a593Smuzhiyun 		return 0;
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	/* Check the WP bit */
332*4882a593Smuzhiyun 	ret = nand_status_op(chip, &status);
333*4882a593Smuzhiyun 	if (ret)
334*4882a593Smuzhiyun 		return ret;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	return status & NAND_STATUS_WP ? 0 : 1;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun /**
340*4882a593Smuzhiyun  * nand_fill_oob - [INTERN] Transfer client buffer to oob
341*4882a593Smuzhiyun  * @chip: NAND chip object
342*4882a593Smuzhiyun  * @oob: oob data buffer
343*4882a593Smuzhiyun  * @len: oob data write length
344*4882a593Smuzhiyun  * @ops: oob ops structure
345*4882a593Smuzhiyun  */
nand_fill_oob(struct nand_chip * chip,uint8_t * oob,size_t len,struct mtd_oob_ops * ops)346*4882a593Smuzhiyun static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
347*4882a593Smuzhiyun 			      struct mtd_oob_ops *ops)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
350*4882a593Smuzhiyun 	int ret;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	/*
353*4882a593Smuzhiyun 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
354*4882a593Smuzhiyun 	 * data from a previous OOB read.
355*4882a593Smuzhiyun 	 */
356*4882a593Smuzhiyun 	memset(chip->oob_poi, 0xff, mtd->oobsize);
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	switch (ops->mode) {
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
361*4882a593Smuzhiyun 	case MTD_OPS_RAW:
362*4882a593Smuzhiyun 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
363*4882a593Smuzhiyun 		return oob + len;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB:
366*4882a593Smuzhiyun 		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
367*4882a593Smuzhiyun 						  ops->ooboffs, len);
368*4882a593Smuzhiyun 		BUG_ON(ret);
369*4882a593Smuzhiyun 		return oob + len;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	default:
372*4882a593Smuzhiyun 		BUG();
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 	return NULL;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun 
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Resets and selects the die containing @to, checks
 * write protection, stages the caller's OOB bytes with nand_fill_oob() and
 * writes them through the raw or ECC-aware OOB path depending on @ops->mode.
 *
 * Return: 0 on success, -EINVAL if the request spills past the available OOB
 * area, -EROFS if the device is write protected, or a negative error code
 * propagated from the lower layers.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* OOB bytes available per page for this access mode (raw vs auto). */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Which die (chip select) the target offset lives on. */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Stage the caller's OOB bytes into chip->oob_poi. */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun /**
448*4882a593Smuzhiyun  * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
449*4882a593Smuzhiyun  * @chip: NAND chip object
450*4882a593Smuzhiyun  * @ofs: offset from device start
451*4882a593Smuzhiyun  *
452*4882a593Smuzhiyun  * This is the default implementation, which can be overridden by a hardware
453*4882a593Smuzhiyun  * specific driver. It provides the details for writing a bad block marker to a
454*4882a593Smuzhiyun  * block.
455*4882a593Smuzhiyun  */
nand_default_block_markbad(struct nand_chip * chip,loff_t ofs)456*4882a593Smuzhiyun static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
457*4882a593Smuzhiyun {
458*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
459*4882a593Smuzhiyun 	struct mtd_oob_ops ops;
460*4882a593Smuzhiyun 	uint8_t buf[2] = { 0, 0 };
461*4882a593Smuzhiyun 	int ret = 0, res, page_offset;
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	memset(&ops, 0, sizeof(ops));
464*4882a593Smuzhiyun 	ops.oobbuf = buf;
465*4882a593Smuzhiyun 	ops.ooboffs = chip->badblockpos;
466*4882a593Smuzhiyun 	if (chip->options & NAND_BUSWIDTH_16) {
467*4882a593Smuzhiyun 		ops.ooboffs &= ~0x01;
468*4882a593Smuzhiyun 		ops.len = ops.ooblen = 2;
469*4882a593Smuzhiyun 	} else {
470*4882a593Smuzhiyun 		ops.len = ops.ooblen = 1;
471*4882a593Smuzhiyun 	}
472*4882a593Smuzhiyun 	ops.mode = MTD_OPS_PLACE_OOB;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	page_offset = nand_bbm_get_next_page(chip, 0);
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	while (page_offset >= 0) {
477*4882a593Smuzhiyun 		res = nand_do_write_oob(chip,
478*4882a593Smuzhiyun 					ofs + (page_offset * mtd->writesize),
479*4882a593Smuzhiyun 					&ops);
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 		if (!ret)
482*4882a593Smuzhiyun 			ret = res;
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 		page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
485*4882a593Smuzhiyun 	}
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	return ret;
488*4882a593Smuzhiyun }
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun /**
491*4882a593Smuzhiyun  * nand_markbad_bbm - mark a block by updating the BBM
492*4882a593Smuzhiyun  * @chip: NAND chip object
493*4882a593Smuzhiyun  * @ofs: offset of the block to mark bad
494*4882a593Smuzhiyun  */
nand_markbad_bbm(struct nand_chip * chip,loff_t ofs)495*4882a593Smuzhiyun int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
496*4882a593Smuzhiyun {
497*4882a593Smuzhiyun 	if (chip->legacy.block_markbad)
498*4882a593Smuzhiyun 		return chip->legacy.block_markbad(chip, ofs);
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	return nand_default_block_markbad(chip, ofs);
501*4882a593Smuzhiyun }
502*4882a593Smuzhiyun 
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB; its result is ignored. */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB, under the device lock. */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT; keep the earlier OOB error if there was one */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		if (!ret)
			ret = res;
	}

	/* Only count the block when every step succeeded. */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun /**
557*4882a593Smuzhiyun  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
558*4882a593Smuzhiyun  * @mtd: MTD device structure
559*4882a593Smuzhiyun  * @ofs: offset from device start
560*4882a593Smuzhiyun  *
561*4882a593Smuzhiyun  * Check if the block is marked as reserved.
562*4882a593Smuzhiyun  */
nand_block_isreserved(struct mtd_info * mtd,loff_t ofs)563*4882a593Smuzhiyun static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
564*4882a593Smuzhiyun {
565*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	if (!chip->bbt)
568*4882a593Smuzhiyun 		return 0;
569*4882a593Smuzhiyun 	/* Return info from the table */
570*4882a593Smuzhiyun 	return nand_isreserved_bbt(chip, ofs);
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun /**
574*4882a593Smuzhiyun  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
575*4882a593Smuzhiyun  * @chip: NAND chip object
576*4882a593Smuzhiyun  * @ofs: offset from device start
577*4882a593Smuzhiyun  * @allowbbt: 1, if its allowed to access the bbt area
578*4882a593Smuzhiyun  *
579*4882a593Smuzhiyun  * Check, if the block is bad. Either by reading the bad block table or
580*4882a593Smuzhiyun  * calling of the scan function.
581*4882a593Smuzhiyun  */
nand_block_checkbad(struct nand_chip * chip,loff_t ofs,int allowbbt)582*4882a593Smuzhiyun static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
583*4882a593Smuzhiyun {
584*4882a593Smuzhiyun 	/* Return info from the table */
585*4882a593Smuzhiyun 	if (chip->bbt)
586*4882a593Smuzhiyun 		return nand_isbad_bbt(chip, ofs, allowbbt);
587*4882a593Smuzhiyun 
588*4882a593Smuzhiyun 	return nand_isbad_bbm(chip, ofs);
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun /**
592*4882a593Smuzhiyun  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
593*4882a593Smuzhiyun  * @chip: NAND chip structure
594*4882a593Smuzhiyun  * @timeout_ms: Timeout in ms
595*4882a593Smuzhiyun  *
596*4882a593Smuzhiyun  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
597*4882a593Smuzhiyun  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
598*4882a593Smuzhiyun  * returned.
599*4882a593Smuzhiyun  *
600*4882a593Smuzhiyun  * This helper is intended to be used when the controller does not have access
601*4882a593Smuzhiyun  * to the NAND R/B pin.
602*4882a593Smuzhiyun  *
603*4882a593Smuzhiyun  * Be aware that calling this helper from an ->exec_op() implementation means
604*4882a593Smuzhiyun  * ->exec_op() must be re-entrant.
605*4882a593Smuzhiyun  *
606*4882a593Smuzhiyun  * Return 0 if the NAND chip is ready, a negative error otherwise.
607*4882a593Smuzhiyun  */
nand_soft_waitrdy(struct nand_chip * chip,unsigned long timeout_ms)608*4882a593Smuzhiyun int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
609*4882a593Smuzhiyun {
610*4882a593Smuzhiyun 	const struct nand_sdr_timings *timings;
611*4882a593Smuzhiyun 	u8 status = 0;
612*4882a593Smuzhiyun 	int ret;
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	if (!nand_has_exec_op(chip))
615*4882a593Smuzhiyun 		return -ENOTSUPP;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	/* Wait tWB before polling the STATUS reg. */
618*4882a593Smuzhiyun 	timings = nand_get_sdr_timings(nand_get_interface_config(chip));
619*4882a593Smuzhiyun 	ndelay(PSEC_TO_NSEC(timings->tWB_max));
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun 	ret = nand_status_op(chip, NULL);
622*4882a593Smuzhiyun 	if (ret)
623*4882a593Smuzhiyun 		return ret;
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	/*
626*4882a593Smuzhiyun 	 * +1 below is necessary because if we are now in the last fraction
627*4882a593Smuzhiyun 	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
628*4882a593Smuzhiyun 	 * small jiffy fraction - possibly leading to false timeout
629*4882a593Smuzhiyun 	 */
630*4882a593Smuzhiyun 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
631*4882a593Smuzhiyun 	do {
632*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, &status, sizeof(status), true,
633*4882a593Smuzhiyun 					false);
634*4882a593Smuzhiyun 		if (ret)
635*4882a593Smuzhiyun 			break;
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 		if (status & NAND_STATUS_READY)
638*4882a593Smuzhiyun 			break;
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 		/*
641*4882a593Smuzhiyun 		 * Typical lowest execution time for a tR on most NANDs is 10us,
642*4882a593Smuzhiyun 		 * use this as polling delay before doing something smarter (ie.
643*4882a593Smuzhiyun 		 * deriving a delay from the timeout value, timeout_ms/ratio).
644*4882a593Smuzhiyun 		 */
645*4882a593Smuzhiyun 		udelay(10);
646*4882a593Smuzhiyun 	} while	(time_before(jiffies, timeout_ms));
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	/*
649*4882a593Smuzhiyun 	 * We have to exit READ_STATUS mode in order to read real data on the
650*4882a593Smuzhiyun 	 * bus in case the WAITRDY instruction is preceding a DATA_IN
651*4882a593Smuzhiyun 	 * instruction.
652*4882a593Smuzhiyun 	 */
653*4882a593Smuzhiyun 	nand_exit_status_op(chip);
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	if (ret)
656*4882a593Smuzhiyun 		return ret;
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 	return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
659*4882a593Smuzhiyun };
660*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun /**
663*4882a593Smuzhiyun  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
664*4882a593Smuzhiyun  * @chip: NAND chip structure
665*4882a593Smuzhiyun  * @gpiod: GPIO descriptor of R/B pin
666*4882a593Smuzhiyun  * @timeout_ms: Timeout in ms
667*4882a593Smuzhiyun  *
668*4882a593Smuzhiyun  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
669*4882a593Smuzhiyun  * whitin the specified timeout, -ETIMEDOUT is returned.
670*4882a593Smuzhiyun  *
671*4882a593Smuzhiyun  * This helper is intended to be used when the controller has access to the
672*4882a593Smuzhiyun  * NAND R/B pin over GPIO.
673*4882a593Smuzhiyun  *
674*4882a593Smuzhiyun  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
675*4882a593Smuzhiyun  */
nand_gpio_waitrdy(struct nand_chip * chip,struct gpio_desc * gpiod,unsigned long timeout_ms)676*4882a593Smuzhiyun int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
677*4882a593Smuzhiyun 		      unsigned long timeout_ms)
678*4882a593Smuzhiyun {
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	/*
681*4882a593Smuzhiyun 	 * Wait until R/B pin indicates chip is ready or timeout occurs.
682*4882a593Smuzhiyun 	 * +1 below is necessary because if we are now in the last fraction
683*4882a593Smuzhiyun 	 * of jiffy and msecs_to_jiffies is 1 then we will wait only that
684*4882a593Smuzhiyun 	 * small jiffy fraction - possibly leading to false timeout.
685*4882a593Smuzhiyun 	 */
686*4882a593Smuzhiyun 	timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
687*4882a593Smuzhiyun 	do {
688*4882a593Smuzhiyun 		if (gpiod_get_value_cansleep(gpiod))
689*4882a593Smuzhiyun 			return 0;
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 		cond_resched();
692*4882a593Smuzhiyun 	} while	(time_before(jiffies, timeout_ms));
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
695*4882a593Smuzhiyun };
696*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun /**
699*4882a593Smuzhiyun  * panic_nand_wait - [GENERIC] wait until the command is done
700*4882a593Smuzhiyun  * @chip: NAND chip structure
701*4882a593Smuzhiyun  * @timeo: timeout
702*4882a593Smuzhiyun  *
703*4882a593Smuzhiyun  * Wait for command done. This is a helper function for nand_wait used when
704*4882a593Smuzhiyun  * we are in interrupt context. May happen when in panic and trying to write
705*4882a593Smuzhiyun  * an oops through mtdoops.
706*4882a593Smuzhiyun  */
panic_nand_wait(struct nand_chip * chip,unsigned long timeo)707*4882a593Smuzhiyun void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
708*4882a593Smuzhiyun {
709*4882a593Smuzhiyun 	int i;
710*4882a593Smuzhiyun 	for (i = 0; i < timeo; i++) {
711*4882a593Smuzhiyun 		if (chip->legacy.dev_ready) {
712*4882a593Smuzhiyun 			if (chip->legacy.dev_ready(chip))
713*4882a593Smuzhiyun 				break;
714*4882a593Smuzhiyun 		} else {
715*4882a593Smuzhiyun 			int ret;
716*4882a593Smuzhiyun 			u8 status;
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 			ret = nand_read_data_op(chip, &status, sizeof(status),
719*4882a593Smuzhiyun 						true, false);
720*4882a593Smuzhiyun 			if (ret)
721*4882a593Smuzhiyun 				return;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 			if (status & NAND_STATUS_READY)
724*4882a593Smuzhiyun 				break;
725*4882a593Smuzhiyun 		}
726*4882a593Smuzhiyun 		mdelay(1);
727*4882a593Smuzhiyun 	}
728*4882a593Smuzhiyun }
729*4882a593Smuzhiyun 
nand_supports_get_features(struct nand_chip * chip,int addr)730*4882a593Smuzhiyun static bool nand_supports_get_features(struct nand_chip *chip, int addr)
731*4882a593Smuzhiyun {
732*4882a593Smuzhiyun 	return (chip->parameters.supports_set_get_features &&
733*4882a593Smuzhiyun 		test_bit(addr, chip->parameters.get_feature_list));
734*4882a593Smuzhiyun }
735*4882a593Smuzhiyun 
nand_supports_set_features(struct nand_chip * chip,int addr)736*4882a593Smuzhiyun static bool nand_supports_set_features(struct nand_chip *chip, int addr)
737*4882a593Smuzhiyun {
738*4882a593Smuzhiyun 	return (chip->parameters.supports_set_get_features &&
739*4882a593Smuzhiyun 		test_bit(addr, chip->parameters.set_feature_list));
740*4882a593Smuzhiyun }
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun /**
743*4882a593Smuzhiyun  * nand_reset_interface - Reset data interface and timings
744*4882a593Smuzhiyun  * @chip: The NAND chip
745*4882a593Smuzhiyun  * @chipnr: Internal die id
746*4882a593Smuzhiyun  *
747*4882a593Smuzhiyun  * Reset the Data interface and timings to ONFI mode 0.
748*4882a593Smuzhiyun  *
749*4882a593Smuzhiyun  * Returns 0 for success or negative error code otherwise.
750*4882a593Smuzhiyun  */
nand_reset_interface(struct nand_chip * chip,int chipnr)751*4882a593Smuzhiyun static int nand_reset_interface(struct nand_chip *chip, int chipnr)
752*4882a593Smuzhiyun {
753*4882a593Smuzhiyun 	const struct nand_controller_ops *ops = chip->controller->ops;
754*4882a593Smuzhiyun 	int ret;
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 	if (!nand_controller_can_setup_interface(chip))
757*4882a593Smuzhiyun 		return 0;
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	/*
760*4882a593Smuzhiyun 	 * The ONFI specification says:
761*4882a593Smuzhiyun 	 * "
762*4882a593Smuzhiyun 	 * To transition from NV-DDR or NV-DDR2 to the SDR data
763*4882a593Smuzhiyun 	 * interface, the host shall use the Reset (FFh) command
764*4882a593Smuzhiyun 	 * using SDR timing mode 0. A device in any timing mode is
765*4882a593Smuzhiyun 	 * required to recognize Reset (FFh) command issued in SDR
766*4882a593Smuzhiyun 	 * timing mode 0.
767*4882a593Smuzhiyun 	 * "
768*4882a593Smuzhiyun 	 *
769*4882a593Smuzhiyun 	 * Configure the data interface in SDR mode and set the
770*4882a593Smuzhiyun 	 * timings to timing mode 0.
771*4882a593Smuzhiyun 	 */
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	chip->current_interface_config = nand_get_reset_interface_config();
774*4882a593Smuzhiyun 	ret = ops->setup_interface(chip, chipnr,
775*4882a593Smuzhiyun 				   chip->current_interface_config);
776*4882a593Smuzhiyun 	if (ret)
777*4882a593Smuzhiyun 		pr_err("Failed to configure data interface to SDR timing mode 0\n");
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	return ret;
780*4882a593Smuzhiyun }
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun /**
783*4882a593Smuzhiyun  * nand_setup_interface - Setup the best data interface and timings
784*4882a593Smuzhiyun  * @chip: The NAND chip
785*4882a593Smuzhiyun  * @chipnr: Internal die id
786*4882a593Smuzhiyun  *
787*4882a593Smuzhiyun  * Configure what has been reported to be the best data interface and NAND
788*4882a593Smuzhiyun  * timings supported by the chip and the driver.
789*4882a593Smuzhiyun  *
790*4882a593Smuzhiyun  * Returns 0 for success or negative error code otherwise.
791*4882a593Smuzhiyun  */
nand_setup_interface(struct nand_chip * chip,int chipnr)792*4882a593Smuzhiyun static int nand_setup_interface(struct nand_chip *chip, int chipnr)
793*4882a593Smuzhiyun {
794*4882a593Smuzhiyun 	const struct nand_controller_ops *ops = chip->controller->ops;
795*4882a593Smuzhiyun 	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
796*4882a593Smuzhiyun 	int ret;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 	if (!nand_controller_can_setup_interface(chip))
799*4882a593Smuzhiyun 		return 0;
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 	/*
802*4882a593Smuzhiyun 	 * A nand_reset_interface() put both the NAND chip and the NAND
803*4882a593Smuzhiyun 	 * controller in timings mode 0. If the default mode for this chip is
804*4882a593Smuzhiyun 	 * also 0, no need to proceed to the change again. Plus, at probe time,
805*4882a593Smuzhiyun 	 * nand_setup_interface() uses ->set/get_features() which would
806*4882a593Smuzhiyun 	 * fail anyway as the parameter page is not available yet.
807*4882a593Smuzhiyun 	 */
808*4882a593Smuzhiyun 	if (!chip->best_interface_config)
809*4882a593Smuzhiyun 		return 0;
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	tmode_param[0] = chip->best_interface_config->timings.mode;
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 	/* Change the mode on the chip side (if supported by the NAND chip) */
814*4882a593Smuzhiyun 	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
815*4882a593Smuzhiyun 		nand_select_target(chip, chipnr);
816*4882a593Smuzhiyun 		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
817*4882a593Smuzhiyun 					tmode_param);
818*4882a593Smuzhiyun 		nand_deselect_target(chip);
819*4882a593Smuzhiyun 		if (ret)
820*4882a593Smuzhiyun 			return ret;
821*4882a593Smuzhiyun 	}
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 	/* Change the mode on the controller side */
824*4882a593Smuzhiyun 	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
825*4882a593Smuzhiyun 	if (ret)
826*4882a593Smuzhiyun 		return ret;
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun 	/* Check the mode has been accepted by the chip, if supported */
829*4882a593Smuzhiyun 	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
830*4882a593Smuzhiyun 		goto update_interface_config;
831*4882a593Smuzhiyun 
832*4882a593Smuzhiyun 	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
833*4882a593Smuzhiyun 	nand_select_target(chip, chipnr);
834*4882a593Smuzhiyun 	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
835*4882a593Smuzhiyun 				tmode_param);
836*4882a593Smuzhiyun 	nand_deselect_target(chip);
837*4882a593Smuzhiyun 	if (ret)
838*4882a593Smuzhiyun 		goto err_reset_chip;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	if (tmode_param[0] != chip->best_interface_config->timings.mode) {
841*4882a593Smuzhiyun 		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
842*4882a593Smuzhiyun 			chip->best_interface_config->timings.mode);
843*4882a593Smuzhiyun 		goto err_reset_chip;
844*4882a593Smuzhiyun 	}
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun update_interface_config:
847*4882a593Smuzhiyun 	chip->current_interface_config = chip->best_interface_config;
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 	return 0;
850*4882a593Smuzhiyun 
851*4882a593Smuzhiyun err_reset_chip:
852*4882a593Smuzhiyun 	/*
853*4882a593Smuzhiyun 	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
854*4882a593Smuzhiyun 	 * timing mode.
855*4882a593Smuzhiyun 	 */
856*4882a593Smuzhiyun 	nand_reset_interface(chip, chipnr);
857*4882a593Smuzhiyun 	nand_select_target(chip, chipnr);
858*4882a593Smuzhiyun 	nand_reset_op(chip);
859*4882a593Smuzhiyun 	nand_deselect_target(chip);
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	return ret;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun /**
865*4882a593Smuzhiyun  * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
866*4882a593Smuzhiyun  *                                NAND controller and the NAND chip support
867*4882a593Smuzhiyun  * @chip: the NAND chip
868*4882a593Smuzhiyun  * @iface: the interface configuration (can eventually be updated)
869*4882a593Smuzhiyun  * @spec_timings: specific timings, when not fitting the ONFI specification
870*4882a593Smuzhiyun  *
871*4882a593Smuzhiyun  * If specific timings are provided, use them. Otherwise, retrieve supported
872*4882a593Smuzhiyun  * timing modes from ONFI information.
873*4882a593Smuzhiyun  */
nand_choose_best_sdr_timings(struct nand_chip * chip,struct nand_interface_config * iface,struct nand_sdr_timings * spec_timings)874*4882a593Smuzhiyun int nand_choose_best_sdr_timings(struct nand_chip *chip,
875*4882a593Smuzhiyun 				 struct nand_interface_config *iface,
876*4882a593Smuzhiyun 				 struct nand_sdr_timings *spec_timings)
877*4882a593Smuzhiyun {
878*4882a593Smuzhiyun 	const struct nand_controller_ops *ops = chip->controller->ops;
879*4882a593Smuzhiyun 	int best_mode = 0, mode, ret;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	iface->type = NAND_SDR_IFACE;
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	if (spec_timings) {
884*4882a593Smuzhiyun 		iface->timings.sdr = *spec_timings;
885*4882a593Smuzhiyun 		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 		/* Verify the controller supports the requested interface */
888*4882a593Smuzhiyun 		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
889*4882a593Smuzhiyun 					   iface);
890*4882a593Smuzhiyun 		if (!ret) {
891*4882a593Smuzhiyun 			chip->best_interface_config = iface;
892*4882a593Smuzhiyun 			return ret;
893*4882a593Smuzhiyun 		}
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 		/* Fallback to slower modes */
896*4882a593Smuzhiyun 		best_mode = iface->timings.mode;
897*4882a593Smuzhiyun 	} else if (chip->parameters.onfi) {
898*4882a593Smuzhiyun 		best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
899*4882a593Smuzhiyun 	}
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	for (mode = best_mode; mode >= 0; mode--) {
902*4882a593Smuzhiyun 		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
905*4882a593Smuzhiyun 					   iface);
906*4882a593Smuzhiyun 		if (!ret)
907*4882a593Smuzhiyun 			break;
908*4882a593Smuzhiyun 	}
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	chip->best_interface_config = iface;
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	return 0;
913*4882a593Smuzhiyun }
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun /**
916*4882a593Smuzhiyun  * nand_choose_interface_config - find the best data interface and timings
917*4882a593Smuzhiyun  * @chip: The NAND chip
918*4882a593Smuzhiyun  *
919*4882a593Smuzhiyun  * Find the best data interface and NAND timings supported by the chip
920*4882a593Smuzhiyun  * and the driver. Eventually let the NAND manufacturer driver propose his own
921*4882a593Smuzhiyun  * set of timings.
922*4882a593Smuzhiyun  *
923*4882a593Smuzhiyun  * After this function nand_chip->interface_config is initialized with the best
924*4882a593Smuzhiyun  * timing mode available.
925*4882a593Smuzhiyun  *
926*4882a593Smuzhiyun  * Returns 0 for success or negative error code otherwise.
927*4882a593Smuzhiyun  */
nand_choose_interface_config(struct nand_chip * chip)928*4882a593Smuzhiyun static int nand_choose_interface_config(struct nand_chip *chip)
929*4882a593Smuzhiyun {
930*4882a593Smuzhiyun 	struct nand_interface_config *iface;
931*4882a593Smuzhiyun 	int ret;
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 	if (!nand_controller_can_setup_interface(chip))
934*4882a593Smuzhiyun 		return 0;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	iface = kzalloc(sizeof(*iface), GFP_KERNEL);
937*4882a593Smuzhiyun 	if (!iface)
938*4882a593Smuzhiyun 		return -ENOMEM;
939*4882a593Smuzhiyun 
940*4882a593Smuzhiyun 	if (chip->ops.choose_interface_config)
941*4882a593Smuzhiyun 		ret = chip->ops.choose_interface_config(chip, iface);
942*4882a593Smuzhiyun 	else
943*4882a593Smuzhiyun 		ret = nand_choose_best_sdr_timings(chip, iface, NULL);
944*4882a593Smuzhiyun 
945*4882a593Smuzhiyun 	if (ret)
946*4882a593Smuzhiyun 		kfree(iface);
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	return ret;
949*4882a593Smuzhiyun }
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun /**
952*4882a593Smuzhiyun  * nand_fill_column_cycles - fill the column cycles of an address
953*4882a593Smuzhiyun  * @chip: The NAND chip
954*4882a593Smuzhiyun  * @addrs: Array of address cycles to fill
955*4882a593Smuzhiyun  * @offset_in_page: The offset in the page
956*4882a593Smuzhiyun  *
957*4882a593Smuzhiyun  * Fills the first or the first two bytes of the @addrs field depending
958*4882a593Smuzhiyun  * on the NAND bus width and the page size.
959*4882a593Smuzhiyun  *
960*4882a593Smuzhiyun  * Returns the number of cycles needed to encode the column, or a negative
961*4882a593Smuzhiyun  * error code in case one of the arguments is invalid.
962*4882a593Smuzhiyun  */
nand_fill_column_cycles(struct nand_chip * chip,u8 * addrs,unsigned int offset_in_page)963*4882a593Smuzhiyun static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
964*4882a593Smuzhiyun 				   unsigned int offset_in_page)
965*4882a593Smuzhiyun {
966*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	/* Make sure the offset is less than the actual page size. */
969*4882a593Smuzhiyun 	if (offset_in_page > mtd->writesize + mtd->oobsize)
970*4882a593Smuzhiyun 		return -EINVAL;
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	/*
973*4882a593Smuzhiyun 	 * On small page NANDs, there's a dedicated command to access the OOB
974*4882a593Smuzhiyun 	 * area, and the column address is relative to the start of the OOB
975*4882a593Smuzhiyun 	 * area, not the start of the page. Asjust the address accordingly.
976*4882a593Smuzhiyun 	 */
977*4882a593Smuzhiyun 	if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
978*4882a593Smuzhiyun 		offset_in_page -= mtd->writesize;
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun 	/*
981*4882a593Smuzhiyun 	 * The offset in page is expressed in bytes, if the NAND bus is 16-bit
982*4882a593Smuzhiyun 	 * wide, then it must be divided by 2.
983*4882a593Smuzhiyun 	 */
984*4882a593Smuzhiyun 	if (chip->options & NAND_BUSWIDTH_16) {
985*4882a593Smuzhiyun 		if (WARN_ON(offset_in_page % 2))
986*4882a593Smuzhiyun 			return -EINVAL;
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 		offset_in_page /= 2;
989*4882a593Smuzhiyun 	}
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	addrs[0] = offset_in_page;
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun 	/*
994*4882a593Smuzhiyun 	 * Small page NANDs use 1 cycle for the columns, while large page NANDs
995*4882a593Smuzhiyun 	 * need 2
996*4882a593Smuzhiyun 	 */
997*4882a593Smuzhiyun 	if (mtd->writesize <= 512)
998*4882a593Smuzhiyun 		return 1;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	addrs[1] = offset_in_page >> 8;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	return 2;
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun 
/*
 * Issue a READ PAGE operation on a small page NAND through the ->exec_op()
 * interface: READ0/READ1/READOOB opcode, 3 (or 4) address cycles, wait for
 * R/B, then read @len bytes into @buf.
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * Small page NANDs address the OOB area and the second 256-byte half
	 * of the page with dedicated opcodes; patch the command accordingly.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address follows the single column cycle. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	/* Large chips need a third row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1047*4882a593Smuzhiyun 
/*
 * Issue a READ PAGE operation on a large page NAND through the ->exec_op()
 * interface: READ0, 4 (or 5) address cycles, READSTART, wait for R/B, then
 * read @len bytes into @buf.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Row (page) address follows the two column cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	/* Large chips need a third row cycle. */
	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun /**
1086*4882a593Smuzhiyun  * nand_read_page_op - Do a READ PAGE operation
1087*4882a593Smuzhiyun  * @chip: The NAND chip
1088*4882a593Smuzhiyun  * @page: page to read
1089*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1090*4882a593Smuzhiyun  * @buf: buffer used to store the data
1091*4882a593Smuzhiyun  * @len: length of the buffer
1092*4882a593Smuzhiyun  *
1093*4882a593Smuzhiyun  * This function issues a READ PAGE operation.
1094*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1095*4882a593Smuzhiyun  *
1096*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1097*4882a593Smuzhiyun  */
nand_read_page_op(struct nand_chip * chip,unsigned int page,unsigned int offset_in_page,void * buf,unsigned int len)1098*4882a593Smuzhiyun int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1099*4882a593Smuzhiyun 		      unsigned int offset_in_page, void *buf, unsigned int len)
1100*4882a593Smuzhiyun {
1101*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	if (len && !buf)
1104*4882a593Smuzhiyun 		return -EINVAL;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1107*4882a593Smuzhiyun 		return -EINVAL;
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1110*4882a593Smuzhiyun 		if (mtd->writesize > 512)
1111*4882a593Smuzhiyun 			return nand_lp_exec_read_page_op(chip, page,
1112*4882a593Smuzhiyun 							 offset_in_page, buf,
1113*4882a593Smuzhiyun 							 len);
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun 		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1116*4882a593Smuzhiyun 						 buf, len);
1117*4882a593Smuzhiyun 	}
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1120*4882a593Smuzhiyun 	if (len)
1121*4882a593Smuzhiyun 		chip->legacy.read_buf(chip, buf, len);
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 	return 0;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_read_page_op);
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun /**
1128*4882a593Smuzhiyun  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1129*4882a593Smuzhiyun  * @chip: The NAND chip
1130*4882a593Smuzhiyun  * @page: parameter page to read
1131*4882a593Smuzhiyun  * @buf: buffer used to store the data
1132*4882a593Smuzhiyun  * @len: length of the buffer
1133*4882a593Smuzhiyun  *
1134*4882a593Smuzhiyun  * This function issues a READ PARAMETER PAGE operation.
1135*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1136*4882a593Smuzhiyun  *
1137*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1138*4882a593Smuzhiyun  */
nand_read_param_page_op(struct nand_chip * chip,u8 page,void * buf,unsigned int len)1139*4882a593Smuzhiyun int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1140*4882a593Smuzhiyun 			    unsigned int len)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	unsigned int i;
1143*4882a593Smuzhiyun 	u8 *p = buf;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	if (len && !buf)
1146*4882a593Smuzhiyun 		return -EINVAL;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1149*4882a593Smuzhiyun 		const struct nand_sdr_timings *sdr =
1150*4882a593Smuzhiyun 			nand_get_sdr_timings(nand_get_interface_config(chip));
1151*4882a593Smuzhiyun 		struct nand_op_instr instrs[] = {
1152*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_PARAM, 0),
1153*4882a593Smuzhiyun 			NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1154*4882a593Smuzhiyun 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1155*4882a593Smuzhiyun 					 PSEC_TO_NSEC(sdr->tRR_min)),
1156*4882a593Smuzhiyun 			NAND_OP_8BIT_DATA_IN(len, buf, 0),
1157*4882a593Smuzhiyun 		};
1158*4882a593Smuzhiyun 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 		/* Drop the DATA_IN instruction if len is set to 0. */
1161*4882a593Smuzhiyun 		if (!len)
1162*4882a593Smuzhiyun 			op.ninstrs--;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 		return nand_exec_op(chip, &op);
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1168*4882a593Smuzhiyun 	for (i = 0; i < len; i++)
1169*4882a593Smuzhiyun 		p[i] = chip->legacy.read_byte(chip);
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	return 0;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun /**
1175*4882a593Smuzhiyun  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1176*4882a593Smuzhiyun  * @chip: The NAND chip
1177*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1178*4882a593Smuzhiyun  * @buf: buffer used to store the data
1179*4882a593Smuzhiyun  * @len: length of the buffer
1180*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1181*4882a593Smuzhiyun  *
1182*4882a593Smuzhiyun  * This function issues a CHANGE READ COLUMN operation.
1183*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1184*4882a593Smuzhiyun  *
1185*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1186*4882a593Smuzhiyun  */
nand_change_read_column_op(struct nand_chip * chip,unsigned int offset_in_page,void * buf,unsigned int len,bool force_8bit)1187*4882a593Smuzhiyun int nand_change_read_column_op(struct nand_chip *chip,
1188*4882a593Smuzhiyun 			       unsigned int offset_in_page, void *buf,
1189*4882a593Smuzhiyun 			       unsigned int len, bool force_8bit)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	if (len && !buf)
1194*4882a593Smuzhiyun 		return -EINVAL;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1197*4882a593Smuzhiyun 		return -EINVAL;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/* Small page NANDs do not support column change. */
1200*4882a593Smuzhiyun 	if (mtd->writesize <= 512)
1201*4882a593Smuzhiyun 		return -ENOTSUPP;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1204*4882a593Smuzhiyun 		const struct nand_sdr_timings *sdr =
1205*4882a593Smuzhiyun 			nand_get_sdr_timings(nand_get_interface_config(chip));
1206*4882a593Smuzhiyun 		u8 addrs[2] = {};
1207*4882a593Smuzhiyun 		struct nand_op_instr instrs[] = {
1208*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1209*4882a593Smuzhiyun 			NAND_OP_ADDR(2, addrs, 0),
1210*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1211*4882a593Smuzhiyun 				    PSEC_TO_NSEC(sdr->tCCS_min)),
1212*4882a593Smuzhiyun 			NAND_OP_DATA_IN(len, buf, 0),
1213*4882a593Smuzhiyun 		};
1214*4882a593Smuzhiyun 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1215*4882a593Smuzhiyun 		int ret;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1218*4882a593Smuzhiyun 		if (ret < 0)
1219*4882a593Smuzhiyun 			return ret;
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 		/* Drop the DATA_IN instruction if len is set to 0. */
1222*4882a593Smuzhiyun 		if (!len)
1223*4882a593Smuzhiyun 			op.ninstrs--;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 		instrs[3].ctx.data.force_8bit = force_8bit;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 		return nand_exec_op(chip, &op);
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1231*4882a593Smuzhiyun 	if (len)
1232*4882a593Smuzhiyun 		chip->legacy.read_buf(chip, buf, len);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	return 0;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun /**
1239*4882a593Smuzhiyun  * nand_read_oob_op - Do a READ OOB operation
1240*4882a593Smuzhiyun  * @chip: The NAND chip
1241*4882a593Smuzhiyun  * @page: page to read
1242*4882a593Smuzhiyun  * @offset_in_oob: offset within the OOB area
1243*4882a593Smuzhiyun  * @buf: buffer used to store the data
1244*4882a593Smuzhiyun  * @len: length of the buffer
1245*4882a593Smuzhiyun  *
1246*4882a593Smuzhiyun  * This function issues a READ OOB operation.
1247*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1248*4882a593Smuzhiyun  *
1249*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1250*4882a593Smuzhiyun  */
nand_read_oob_op(struct nand_chip * chip,unsigned int page,unsigned int offset_in_oob,void * buf,unsigned int len)1251*4882a593Smuzhiyun int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1252*4882a593Smuzhiyun 		     unsigned int offset_in_oob, void *buf, unsigned int len)
1253*4882a593Smuzhiyun {
1254*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	if (len && !buf)
1257*4882a593Smuzhiyun 		return -EINVAL;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	if (offset_in_oob + len > mtd->oobsize)
1260*4882a593Smuzhiyun 		return -EINVAL;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	if (nand_has_exec_op(chip))
1263*4882a593Smuzhiyun 		return nand_read_page_op(chip, page,
1264*4882a593Smuzhiyun 					 mtd->writesize + offset_in_oob,
1265*4882a593Smuzhiyun 					 buf, len);
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1268*4882a593Smuzhiyun 	if (len)
1269*4882a593Smuzhiyun 		chip->legacy.read_buf(chip, buf, len);
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	return 0;
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_read_oob_op);
1274*4882a593Smuzhiyun 
/*
 * nand_exec_prog_page_op - ->exec_op() implementation of a page program
 * @chip: The NAND chip
 * @page: page to write
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to write, may be NULL when @len is 0
 * @len: length of the buffer
 * @prog: true to actually program the page (issue PAGEPROG and wait),
 *	  false to only load the data into the chip's page buffer
 *
 * Returns the NAND status byte (a positive value) when @prog is true and the
 * operation completed, 0 when @prog is false and the cycles were issued, or a
 * negative error code.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	/* naddrs is the number of column cycles filled in, or an error. */
	if (naddrs < 0)
		return naddrs;

	/* Append the row (page) address cycles after the column cycles. */
	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Programming was requested: read back the chip status byte. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun /**
1351*4882a593Smuzhiyun  * nand_prog_page_begin_op - starts a PROG PAGE operation
1352*4882a593Smuzhiyun  * @chip: The NAND chip
1353*4882a593Smuzhiyun  * @page: page to write
1354*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1355*4882a593Smuzhiyun  * @buf: buffer containing the data to write to the page
1356*4882a593Smuzhiyun  * @len: length of the buffer
1357*4882a593Smuzhiyun  *
1358*4882a593Smuzhiyun  * This function issues the first half of a PROG PAGE operation.
1359*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1360*4882a593Smuzhiyun  *
1361*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1362*4882a593Smuzhiyun  */
nand_prog_page_begin_op(struct nand_chip * chip,unsigned int page,unsigned int offset_in_page,const void * buf,unsigned int len)1363*4882a593Smuzhiyun int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1364*4882a593Smuzhiyun 			    unsigned int offset_in_page, const void *buf,
1365*4882a593Smuzhiyun 			    unsigned int len)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	if (len && !buf)
1370*4882a593Smuzhiyun 		return -EINVAL;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1373*4882a593Smuzhiyun 		return -EINVAL;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	if (nand_has_exec_op(chip))
1376*4882a593Smuzhiyun 		return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1377*4882a593Smuzhiyun 					      len, false);
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (buf)
1382*4882a593Smuzhiyun 		chip->legacy.write_buf(chip, buf, len);
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	return 0;
1385*4882a593Smuzhiyun }
1386*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1387*4882a593Smuzhiyun 
/**
 * nand_prog_page_end_op - ends a PROG PAGE operation
 * @chip: The NAND chip
 *
 * This function issues the second half of a PROG PAGE operation: the
 * PAGEPROG command, the busy-wait and the status check.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_prog_page_end_op(struct nand_chip *chip)
{
	int ret;
	u8 status;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* PAGEPROG, wait tWB, then poll ready/busy for up to tPROG. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_PAGEPROG,
				    PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		ret = nand_exec_op(chip, &op);
		if (ret)
			return ret;

		/* Read the status byte to learn whether programming worked. */
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} else {
		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
		/* Legacy ->waitfunc() returns the status byte or an errno. */
		ret = chip->legacy.waitfunc(chip);
		if (ret < 0)
			return ret;

		status = ret;
	}

	if (status & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun /**
1436*4882a593Smuzhiyun  * nand_prog_page_op - Do a full PROG PAGE operation
1437*4882a593Smuzhiyun  * @chip: The NAND chip
1438*4882a593Smuzhiyun  * @page: page to write
1439*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1440*4882a593Smuzhiyun  * @buf: buffer containing the data to write to the page
1441*4882a593Smuzhiyun  * @len: length of the buffer
1442*4882a593Smuzhiyun  *
1443*4882a593Smuzhiyun  * This function issues a full PROG PAGE operation.
1444*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1445*4882a593Smuzhiyun  *
1446*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1447*4882a593Smuzhiyun  */
nand_prog_page_op(struct nand_chip * chip,unsigned int page,unsigned int offset_in_page,const void * buf,unsigned int len)1448*4882a593Smuzhiyun int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1449*4882a593Smuzhiyun 		      unsigned int offset_in_page, const void *buf,
1450*4882a593Smuzhiyun 		      unsigned int len)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1453*4882a593Smuzhiyun 	int status;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (!len || !buf)
1456*4882a593Smuzhiyun 		return -EINVAL;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1459*4882a593Smuzhiyun 		return -EINVAL;
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1462*4882a593Smuzhiyun 		status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1463*4882a593Smuzhiyun 						len, true);
1464*4882a593Smuzhiyun 	} else {
1465*4882a593Smuzhiyun 		chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1466*4882a593Smuzhiyun 				     page);
1467*4882a593Smuzhiyun 		chip->legacy.write_buf(chip, buf, len);
1468*4882a593Smuzhiyun 		chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1469*4882a593Smuzhiyun 		status = chip->legacy.waitfunc(chip);
1470*4882a593Smuzhiyun 	}
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1473*4882a593Smuzhiyun 		return -EIO;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	return 0;
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_prog_page_op);
1478*4882a593Smuzhiyun 
/**
 * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
 * @chip: The NAND chip
 * @offset_in_page: offset within the page
 * @buf: buffer containing the data to send to the NAND
 * @len: length of the buffer
 * @force_8bit: force 8-bit bus access
 *
 * This function issues a CHANGE WRITE COLUMN operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_change_write_column_op(struct nand_chip *chip,
				unsigned int offset_in_page,
				const void *buf, unsigned int len,
				bool force_8bit)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* A non-empty transfer needs a source buffer. */
	if (len && !buf)
		return -EINVAL;

	/* The write must fit within the page + OOB area. */
	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	/* Small page NANDs do not support column change. */
	if (mtd->writesize <= 512)
		return -ENOTSUPP;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		u8 addrs[2];
		/* RNDIN + two column address cycles, then the data cycles. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RNDIN, 0),
			NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
		int ret;

		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
		if (ret < 0)
			return ret;

		instrs[2].ctx.data.force_8bit = force_8bit;

		/* Drop the DATA_OUT instruction if len is set to 0. */
		if (!len)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
	if (len)
		chip->legacy.write_buf(chip, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun /**
1543*4882a593Smuzhiyun  * nand_readid_op - Do a READID operation
1544*4882a593Smuzhiyun  * @chip: The NAND chip
1545*4882a593Smuzhiyun  * @addr: address cycle to pass after the READID command
1546*4882a593Smuzhiyun  * @buf: buffer used to store the ID
1547*4882a593Smuzhiyun  * @len: length of the buffer
1548*4882a593Smuzhiyun  *
1549*4882a593Smuzhiyun  * This function sends a READID command and reads back the ID returned by the
1550*4882a593Smuzhiyun  * NAND.
1551*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1552*4882a593Smuzhiyun  *
1553*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1554*4882a593Smuzhiyun  */
nand_readid_op(struct nand_chip * chip,u8 addr,void * buf,unsigned int len)1555*4882a593Smuzhiyun int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1556*4882a593Smuzhiyun 		   unsigned int len)
1557*4882a593Smuzhiyun {
1558*4882a593Smuzhiyun 	unsigned int i;
1559*4882a593Smuzhiyun 	u8 *id = buf;
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	if (len && !buf)
1562*4882a593Smuzhiyun 		return -EINVAL;
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1565*4882a593Smuzhiyun 		const struct nand_sdr_timings *sdr =
1566*4882a593Smuzhiyun 			nand_get_sdr_timings(nand_get_interface_config(chip));
1567*4882a593Smuzhiyun 		struct nand_op_instr instrs[] = {
1568*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_READID, 0),
1569*4882a593Smuzhiyun 			NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1570*4882a593Smuzhiyun 			NAND_OP_8BIT_DATA_IN(len, buf, 0),
1571*4882a593Smuzhiyun 		};
1572*4882a593Smuzhiyun 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 		/* Drop the DATA_IN instruction if len is set to 0. */
1575*4882a593Smuzhiyun 		if (!len)
1576*4882a593Smuzhiyun 			op.ninstrs--;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 		return nand_exec_op(chip, &op);
1579*4882a593Smuzhiyun 	}
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	for (i = 0; i < len; i++)
1584*4882a593Smuzhiyun 		id[i] = chip->legacy.read_byte(chip);
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	return 0;
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_readid_op);
1589*4882a593Smuzhiyun 
/**
 * nand_status_op - Do a STATUS operation
 * @chip: The NAND chip
 * @status: out variable to store the NAND status; may be NULL, in which case
 *	    only the STATUS command is issued and no byte is read back
 *
 * This function sends a STATUS command and reads back the status returned by
 * the NAND.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_status_op(struct nand_chip *chip, u8 *status)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_STATUS,
				    PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_IN(1, status, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* No out variable: drop the DATA_IN instruction. */
		if (!status)
			op.ninstrs--;

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
	if (status)
		*status = chip->legacy.read_byte(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_status_op);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun /**
1628*4882a593Smuzhiyun  * nand_exit_status_op - Exit a STATUS operation
1629*4882a593Smuzhiyun  * @chip: The NAND chip
1630*4882a593Smuzhiyun  *
1631*4882a593Smuzhiyun  * This function sends a READ0 command to cancel the effect of the STATUS
1632*4882a593Smuzhiyun  * command to avoid reading only the status until a new read command is sent.
1633*4882a593Smuzhiyun  *
1634*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1635*4882a593Smuzhiyun  *
1636*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1637*4882a593Smuzhiyun  */
nand_exit_status_op(struct nand_chip * chip)1638*4882a593Smuzhiyun int nand_exit_status_op(struct nand_chip *chip)
1639*4882a593Smuzhiyun {
1640*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1641*4882a593Smuzhiyun 		struct nand_op_instr instrs[] = {
1642*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_READ0, 0),
1643*4882a593Smuzhiyun 		};
1644*4882a593Smuzhiyun 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun 		return nand_exec_op(chip, &op);
1647*4882a593Smuzhiyun 	}
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	return 0;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun /**
1655*4882a593Smuzhiyun  * nand_erase_op - Do an erase operation
1656*4882a593Smuzhiyun  * @chip: The NAND chip
1657*4882a593Smuzhiyun  * @eraseblock: block to erase
1658*4882a593Smuzhiyun  *
1659*4882a593Smuzhiyun  * This function sends an ERASE command and waits for the NAND to be ready
1660*4882a593Smuzhiyun  * before returning.
1661*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1662*4882a593Smuzhiyun  *
1663*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1664*4882a593Smuzhiyun  */
nand_erase_op(struct nand_chip * chip,unsigned int eraseblock)1665*4882a593Smuzhiyun int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1666*4882a593Smuzhiyun {
1667*4882a593Smuzhiyun 	unsigned int page = eraseblock <<
1668*4882a593Smuzhiyun 			    (chip->phys_erase_shift - chip->page_shift);
1669*4882a593Smuzhiyun 	int ret;
1670*4882a593Smuzhiyun 	u8 status;
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	if (nand_has_exec_op(chip)) {
1673*4882a593Smuzhiyun 		const struct nand_sdr_timings *sdr =
1674*4882a593Smuzhiyun 			nand_get_sdr_timings(nand_get_interface_config(chip));
1675*4882a593Smuzhiyun 		u8 addrs[3] = {	page, page >> 8, page >> 16 };
1676*4882a593Smuzhiyun 		struct nand_op_instr instrs[] = {
1677*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1678*4882a593Smuzhiyun 			NAND_OP_ADDR(2, addrs, 0),
1679*4882a593Smuzhiyun 			NAND_OP_CMD(NAND_CMD_ERASE2,
1680*4882a593Smuzhiyun 				    PSEC_TO_MSEC(sdr->tWB_max)),
1681*4882a593Smuzhiyun 			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1682*4882a593Smuzhiyun 		};
1683*4882a593Smuzhiyun 		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 		if (chip->options & NAND_ROW_ADDR_3)
1686*4882a593Smuzhiyun 			instrs[1].ctx.addr.naddrs++;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 		ret = nand_exec_op(chip, &op);
1689*4882a593Smuzhiyun 		if (ret)
1690*4882a593Smuzhiyun 			return ret;
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 		ret = nand_status_op(chip, &status);
1693*4882a593Smuzhiyun 		if (ret)
1694*4882a593Smuzhiyun 			return ret;
1695*4882a593Smuzhiyun 	} else {
1696*4882a593Smuzhiyun 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1697*4882a593Smuzhiyun 		chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 		ret = chip->legacy.waitfunc(chip);
1700*4882a593Smuzhiyun 		if (ret < 0)
1701*4882a593Smuzhiyun 			return ret;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 		status = ret;
1704*4882a593Smuzhiyun 	}
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1707*4882a593Smuzhiyun 		return -EIO;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	return 0;
1710*4882a593Smuzhiyun }
1711*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_erase_op);
1712*4882a593Smuzhiyun 
/**
 * nand_set_features_op - Do a SET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * This function sends a SET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_set_features_op(struct nand_chip *chip, u8 feature,
				const void *data)
{
	const u8 *params = data;
	int i, ret;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* SET_FEATURES + feature address + parameter bytes + wait. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
			NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
					      PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		chip->legacy.write_byte(chip, params[i]);

	/* Legacy path only: also check the status byte from ->waitfunc(). */
	ret = chip->legacy.waitfunc(chip);
	if (ret < 0)
		return ret;

	if (ret & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}
1759*4882a593Smuzhiyun 
/**
 * nand_get_features_op - Do a GET FEATURES operation
 * @chip: The NAND chip
 * @feature: feature id
 * @data: 4 bytes of data (ONFI_SUBFEATURE_PARAM_LEN)
 *
 * This function sends a GET FEATURES command and waits for the NAND to be
 * ready before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* GET_FEATURES + feature address, wait, then read 4 bytes. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1800*4882a593Smuzhiyun 
/*
 * nand_wait_rdy_op - Wait for the chip to be ready again
 * @chip: The NAND chip
 * @timeout_ms: timeout, in milliseconds
 * @delay_ns: delay associated with the wait, in nanoseconds
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
			    unsigned int delay_ns)
{
	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			/*
			 * NOTE(review): @timeout_ms and @delay_ns already
			 * carry their target units, yet they are run through
			 * the picosecond conversion macros here — confirm
			 * the units NAND_OP_WAIT_RDY() expects against the
			 * callers before relying on these values.
			 */
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
					 PSEC_TO_NSEC(delay_ns)),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Apply delay or wait for ready/busy pin */
	if (!chip->legacy.dev_ready)
		udelay(chip->legacy.chip_delay);
	else
		nand_wait_ready(chip);

	return 0;
}
1822*4882a593Smuzhiyun 
/**
 * nand_reset_op - Do a reset operation
 * @chip: The NAND chip
 *
 * This function sends a RESET command and waits for the NAND to be ready
 * before returning.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* RESET, wait tWB, then poll ready/busy for up to tRST. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_reset_op);
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun /**
1854*4882a593Smuzhiyun  * nand_read_data_op - Read data from the NAND
1855*4882a593Smuzhiyun  * @chip: The NAND chip
1856*4882a593Smuzhiyun  * @buf: buffer used to store the data
1857*4882a593Smuzhiyun  * @len: length of the buffer
1858*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1859*4882a593Smuzhiyun  * @check_only: do not actually run the command, only checks if the
1860*4882a593Smuzhiyun  *              controller driver supports it
1861*4882a593Smuzhiyun  *
1862*4882a593Smuzhiyun  * This function does a raw data read on the bus. Usually used after launching
1863*4882a593Smuzhiyun  * another NAND operation like nand_read_page_op().
1864*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1865*4882a593Smuzhiyun  *
1866*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1867*4882a593Smuzhiyun  */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		/* Either validate the operation or actually run it. */
		return check_only ? nand_check_op(chip, &op) :
				    nand_exec_op(chip, &op);
	}

	/* Legacy path: there is no way to check support without executing. */
	if (check_only)
		return 0;

	if (!force_8bit) {
		chip->legacy.read_buf(chip, buf, len);
	} else {
		u8 *in = buf;
		unsigned int idx;

		/* Byte-per-byte reads force an 8-bit bus access. */
		for (idx = 0; idx < len; idx++)
			in[idx] = chip->legacy.read_byte(chip);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_data_op);
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun /**
 * nand_write_data_op - Write data to the NAND device
1907*4882a593Smuzhiyun  * @chip: The NAND chip
1908*4882a593Smuzhiyun  * @buf: buffer containing the data to send on the bus
1909*4882a593Smuzhiyun  * @len: length of the buffer
1910*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1911*4882a593Smuzhiyun  *
1912*4882a593Smuzhiyun  * This function does a raw data write on the bus. Usually used after launching
1913*4882a593Smuzhiyun  * another NAND operation like nand_write_page_begin_op().
1914*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1915*4882a593Smuzhiyun  *
1916*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1917*4882a593Smuzhiyun  */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!buf || !len)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	/* Legacy path. */
	if (!force_8bit) {
		chip->legacy.write_buf(chip, buf, len);
	} else {
		const u8 *out = buf;
		unsigned int idx;

		/* Byte-per-byte writes force an 8-bit bus access. */
		for (idx = 0; idx < len; idx++)
			chip->legacy.write_byte(chip, out[idx]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_write_data_op);
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun /**
1950*4882a593Smuzhiyun  * struct nand_op_parser_ctx - Context used by the parser
1951*4882a593Smuzhiyun  * @instrs: array of all the instructions that must be addressed
1952*4882a593Smuzhiyun  * @ninstrs: length of the @instrs array
1953*4882a593Smuzhiyun  * @subop: Sub-operation to be passed to the NAND controller
1954*4882a593Smuzhiyun  *
1955*4882a593Smuzhiyun  * This structure is used by the core to split NAND operations into
1956*4882a593Smuzhiyun  * sub-operations that can be handled by the NAND controller.
1957*4882a593Smuzhiyun  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction array of the operation */
	unsigned int ninstrs;			/* number of entries in @instrs */
	struct nand_subop subop;		/* current sub-operation window into @instrs */
};
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun /**
1965*4882a593Smuzhiyun  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1966*4882a593Smuzhiyun  * @pat: the parser pattern element that matches @instr
1967*4882a593Smuzhiyun  * @instr: pointer to the instruction to check
1968*4882a593Smuzhiyun  * @start_offset: this is an in/out parameter. If @instr has already been
1969*4882a593Smuzhiyun  *		  split, then @start_offset is the offset from which to start
1970*4882a593Smuzhiyun  *		  (either an address cycle or an offset in the data buffer).
1971*4882a593Smuzhiyun  *		  Conversely, if the function returns true (ie. instr must be
1972*4882a593Smuzhiyun  *		  split), this parameter is updated to point to the first
1973*4882a593Smuzhiyun  *		  data/address cycle that has not been taken care of.
1974*4882a593Smuzhiyun  *
1975*4882a593Smuzhiyun  * Some NAND controllers are limited and cannot send X address cycles with a
1976*4882a593Smuzhiyun  * unique operation, or cannot read/write more than Y bytes at the same time.
1977*4882a593Smuzhiyun  * In this case, split the instruction that does not fit in a single
1978*4882a593Smuzhiyun  * controller-operation into two or more chunks.
1979*4882a593Smuzhiyun  *
1980*4882a593Smuzhiyun  * Returns true if the instruction must be split, false otherwise.
1981*4882a593Smuzhiyun  * The @start_offset parameter is also updated to the offset at which the next
1982*4882a593Smuzhiyun  * bundle of instruction must start (if an address or a data instruction).
1983*4882a593Smuzhiyun  */
1984*4882a593Smuzhiyun static bool
nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem * pat,const struct nand_op_instr * instr,unsigned int * start_offset)1985*4882a593Smuzhiyun nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1986*4882a593Smuzhiyun 				const struct nand_op_instr *instr,
1987*4882a593Smuzhiyun 				unsigned int *start_offset)
1988*4882a593Smuzhiyun {
1989*4882a593Smuzhiyun 	switch (pat->type) {
1990*4882a593Smuzhiyun 	case NAND_OP_ADDR_INSTR:
1991*4882a593Smuzhiyun 		if (!pat->ctx.addr.maxcycles)
1992*4882a593Smuzhiyun 			break;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 		if (instr->ctx.addr.naddrs - *start_offset >
1995*4882a593Smuzhiyun 		    pat->ctx.addr.maxcycles) {
1996*4882a593Smuzhiyun 			*start_offset += pat->ctx.addr.maxcycles;
1997*4882a593Smuzhiyun 			return true;
1998*4882a593Smuzhiyun 		}
1999*4882a593Smuzhiyun 		break;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	case NAND_OP_DATA_IN_INSTR:
2002*4882a593Smuzhiyun 	case NAND_OP_DATA_OUT_INSTR:
2003*4882a593Smuzhiyun 		if (!pat->ctx.data.maxlen)
2004*4882a593Smuzhiyun 			break;
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 		if (instr->ctx.data.len - *start_offset >
2007*4882a593Smuzhiyun 		    pat->ctx.data.maxlen) {
2008*4882a593Smuzhiyun 			*start_offset += pat->ctx.data.maxlen;
2009*4882a593Smuzhiyun 			return true;
2010*4882a593Smuzhiyun 		}
2011*4882a593Smuzhiyun 		break;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	default:
2014*4882a593Smuzhiyun 		break;
2015*4882a593Smuzhiyun 	}
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	return false;
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun /**
2021*4882a593Smuzhiyun  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2022*4882a593Smuzhiyun  *			      remaining in the parser context
2023*4882a593Smuzhiyun  * @pat: the pattern to test
2024*4882a593Smuzhiyun  * @ctx: the parser context structure to match with the pattern @pat
2025*4882a593Smuzhiyun  *
2026*4882a593Smuzhiyun  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
2028*4882a593Smuzhiyun  * @ctx->subop is updated with the set of instructions to be passed to the
2029*4882a593Smuzhiyun  * controller driver.
2030*4882a593Smuzhiyun  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	/* Walk pattern elements and operation instructions in lockstep. */
	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/*
			 * The split instruction still belongs to this subop
			 * (hence ninstrs++ and i++ so the tail check below
			 * skips this element); its remainder will open the
			 * next subop.
			 */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		/* Only the first instruction of a subop may start mid-way. */
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2103*4882a593Smuzhiyun 
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/*
 * Dump all instructions of the operation being parsed, marking the ones
 * belonging to the current sub-operation with a "->" prefix so the split
 * points are visible in the debug output.
 */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix on the first subop instruction. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		/* And back to plain padding after the last one. */
		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
/* Debug output compiled out: no-op stub so callers need no #ifdef. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2131*4882a593Smuzhiyun 
nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx * a,const struct nand_op_parser_ctx * b)2132*4882a593Smuzhiyun static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2133*4882a593Smuzhiyun 				  const struct nand_op_parser_ctx *b)
2134*4882a593Smuzhiyun {
2135*4882a593Smuzhiyun 	if (a->subop.ninstrs < b->subop.ninstrs)
2136*4882a593Smuzhiyun 		return -1;
2137*4882a593Smuzhiyun 	else if (a->subop.ninstrs > b->subop.ninstrs)
2138*4882a593Smuzhiyun 		return 1;
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2141*4882a593Smuzhiyun 		return -1;
2142*4882a593Smuzhiyun 	else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2143*4882a593Smuzhiyun 		return 1;
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 	return 0;
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun /**
2149*4882a593Smuzhiyun  * nand_op_parser_exec_op - exec_op parser
2150*4882a593Smuzhiyun  * @chip: the NAND chip
2151*4882a593Smuzhiyun  * @parser: patterns description provided by the controller driver
2152*4882a593Smuzhiyun  * @op: the NAND operation to address
2153*4882a593Smuzhiyun  * @check_only: when true, the function only checks if @op can be handled but
2154*4882a593Smuzhiyun  *		does not execute the operation
2155*4882a593Smuzhiyun  *
2156*4882a593Smuzhiyun  * Helper function designed to ease integration of NAND controller drivers that
2157*4882a593Smuzhiyun  * only support a limited set of instruction sequences. The supported sequences
2158*4882a593Smuzhiyun  * are described in @parser, and the framework takes care of splitting @op into
2159*4882a593Smuzhiyun  * multiple sub-operations (if required) and pass them back to the ->exec()
2160*4882a593Smuzhiyun  * callback of the matching pattern if @check_only is set to false.
2161*4882a593Smuzhiyun  *
2162*4882a593Smuzhiyun  * NAND controller drivers should call this function from their own ->exec_op()
2163*4882a593Smuzhiyun  * implementation.
2164*4882a593Smuzhiyun  *
2165*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise. A failure can be
2166*4882a593Smuzhiyun  * caused by an unsupported operation (none of the supported patterns is able
2167*4882a593Smuzhiyun  * to handle the requested operation), or an error returned by one of the
2168*4882a593Smuzhiyun  * matching pattern->exec() hook.
2169*4882a593Smuzhiyun  */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the whole operation, one sub-operation per iteration. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/*
		 * Try every pattern and keep the best match, as ranked by
		 * nand_op_parser_cmp_ctx() (most instructions consumed wins).
		 */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		/*
		 * A non-zero last_instr_end_off means the last instruction was
		 * split: re-issue it (from that offset) as the first
		 * instruction of the next subop.
		 */
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2231*4882a593Smuzhiyun 
nand_instr_is_data(const struct nand_op_instr * instr)2232*4882a593Smuzhiyun static bool nand_instr_is_data(const struct nand_op_instr *instr)
2233*4882a593Smuzhiyun {
2234*4882a593Smuzhiyun 	return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2235*4882a593Smuzhiyun 			 instr->type == NAND_OP_DATA_OUT_INSTR);
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun 
nand_subop_instr_is_valid(const struct nand_subop * subop,unsigned int instr_idx)2238*4882a593Smuzhiyun static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2239*4882a593Smuzhiyun 				      unsigned int instr_idx)
2240*4882a593Smuzhiyun {
2241*4882a593Smuzhiyun 	return subop && instr_idx < subop->ninstrs;
2242*4882a593Smuzhiyun }
2243*4882a593Smuzhiyun 
nand_subop_get_start_off(const struct nand_subop * subop,unsigned int instr_idx)2244*4882a593Smuzhiyun static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2245*4882a593Smuzhiyun 					     unsigned int instr_idx)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	if (instr_idx)
2248*4882a593Smuzhiyun 		return 0;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	return subop->first_instr_start_off;
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun /**
2254*4882a593Smuzhiyun  * nand_subop_get_addr_start_off - Get the start offset in an address array
2255*4882a593Smuzhiyun  * @subop: The entire sub-operation
2256*4882a593Smuzhiyun  * @instr_idx: Index of the instruction inside the sub-operation
2257*4882a593Smuzhiyun  *
2258*4882a593Smuzhiyun  * During driver development, one could be tempted to directly use the
2259*4882a593Smuzhiyun  * ->addr.addrs field of address instructions. This is wrong as address
2260*4882a593Smuzhiyun  * instructions might be split.
2261*4882a593Smuzhiyun  *
2262*4882a593Smuzhiyun  * Given an address instruction, returns the offset of the first cycle to issue.
2263*4882a593Smuzhiyun  */
nand_subop_get_addr_start_off(const struct nand_subop * subop,unsigned int instr_idx)2264*4882a593Smuzhiyun unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2265*4882a593Smuzhiyun 					   unsigned int instr_idx)
2266*4882a593Smuzhiyun {
2267*4882a593Smuzhiyun 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2268*4882a593Smuzhiyun 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2269*4882a593Smuzhiyun 		return 0;
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	return nand_subop_get_start_off(subop, instr_idx);
2272*4882a593Smuzhiyun }
2273*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun /**
2276*4882a593Smuzhiyun  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2277*4882a593Smuzhiyun  * @subop: The entire sub-operation
2278*4882a593Smuzhiyun  * @instr_idx: Index of the instruction inside the sub-operation
2279*4882a593Smuzhiyun  *
2280*4882a593Smuzhiyun  * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2282*4882a593Smuzhiyun  * might be split.
2283*4882a593Smuzhiyun  *
2284*4882a593Smuzhiyun  * Given an address instruction, returns the number of address cycle to issue.
2285*4882a593Smuzhiyun  */
nand_subop_get_num_addr_cyc(const struct nand_subop * subop,unsigned int instr_idx)2286*4882a593Smuzhiyun unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2287*4882a593Smuzhiyun 					 unsigned int instr_idx)
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun 	int start_off, end_off;
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2292*4882a593Smuzhiyun 		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2293*4882a593Smuzhiyun 		return 0;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun 	if (instr_idx == subop->ninstrs - 1 &&
2298*4882a593Smuzhiyun 	    subop->last_instr_end_off)
2299*4882a593Smuzhiyun 		end_off = subop->last_instr_end_off;
2300*4882a593Smuzhiyun 	else
2301*4882a593Smuzhiyun 		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	return end_off - start_off;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2306*4882a593Smuzhiyun 
2307*4882a593Smuzhiyun /**
2308*4882a593Smuzhiyun  * nand_subop_get_data_start_off - Get the start offset in a data array
2309*4882a593Smuzhiyun  * @subop: The entire sub-operation
2310*4882a593Smuzhiyun  * @instr_idx: Index of the instruction inside the sub-operation
2311*4882a593Smuzhiyun  *
2312*4882a593Smuzhiyun  * During driver development, one could be tempted to directly use the
2313*4882a593Smuzhiyun  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2314*4882a593Smuzhiyun  * instructions might be split.
2315*4882a593Smuzhiyun  *
2316*4882a593Smuzhiyun  * Given a data instruction, returns the offset to start from.
2317*4882a593Smuzhiyun  */
nand_subop_get_data_start_off(const struct nand_subop * subop,unsigned int instr_idx)2318*4882a593Smuzhiyun unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2319*4882a593Smuzhiyun 					   unsigned int instr_idx)
2320*4882a593Smuzhiyun {
2321*4882a593Smuzhiyun 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2322*4882a593Smuzhiyun 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2323*4882a593Smuzhiyun 		return 0;
2324*4882a593Smuzhiyun 
2325*4882a593Smuzhiyun 	return nand_subop_get_start_off(subop, instr_idx);
2326*4882a593Smuzhiyun }
2327*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun /**
2330*4882a593Smuzhiyun  * nand_subop_get_data_len - Get the number of bytes to retrieve
2331*4882a593Smuzhiyun  * @subop: The entire sub-operation
2332*4882a593Smuzhiyun  * @instr_idx: Index of the instruction inside the sub-operation
2333*4882a593Smuzhiyun  *
2334*4882a593Smuzhiyun  * During driver development, one could be tempted to directly use the
2335*4882a593Smuzhiyun  * ->data->len field of a data instruction. This is wrong as data instructions
2336*4882a593Smuzhiyun  * might be split.
2337*4882a593Smuzhiyun  *
2338*4882a593Smuzhiyun  * Returns the length of the chunk of data to send/receive.
2339*4882a593Smuzhiyun  */
nand_subop_get_data_len(const struct nand_subop * subop,unsigned int instr_idx)2340*4882a593Smuzhiyun unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2341*4882a593Smuzhiyun 				     unsigned int instr_idx)
2342*4882a593Smuzhiyun {
2343*4882a593Smuzhiyun 	int start_off = 0, end_off;
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2346*4882a593Smuzhiyun 		    !nand_instr_is_data(&subop->instrs[instr_idx])))
2347*4882a593Smuzhiyun 		return 0;
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun 	start_off = nand_subop_get_data_start_off(subop, instr_idx);
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 	if (instr_idx == subop->ninstrs - 1 &&
2352*4882a593Smuzhiyun 	    subop->last_instr_end_off)
2353*4882a593Smuzhiyun 		end_off = subop->last_instr_end_off;
2354*4882a593Smuzhiyun 	else
2355*4882a593Smuzhiyun 		end_off = subop->instrs[instr_idx].ctx.data.len;
2356*4882a593Smuzhiyun 
2357*4882a593Smuzhiyun 	return end_off - start_off;
2358*4882a593Smuzhiyun }
2359*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun /**
2362*4882a593Smuzhiyun  * nand_reset - Reset and initialize a NAND device
2363*4882a593Smuzhiyun  * @chip: The NAND chip
2364*4882a593Smuzhiyun  * @chipnr: Internal die id
2365*4882a593Smuzhiyun  *
2366*4882a593Smuzhiyun  * Save the timings data structure, then apply SDR timings mode 0 (see
2367*4882a593Smuzhiyun  * nand_reset_interface for details), do the reset operation, and apply
2368*4882a593Smuzhiyun  * back the previous timings.
2369*4882a593Smuzhiyun  *
2370*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
2371*4882a593Smuzhiyun  */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Fall back to timing mode 0 before issuing the RESET command. */
	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/* Re-apply the timings saved before the reset. */
	return nand_setup_interface(chip, chipnr);
}
EXPORT_SYMBOL_GPL(nand_reset);
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun /**
2400*4882a593Smuzhiyun  * nand_get_features - wrapper to perform a GET_FEATURE
2401*4882a593Smuzhiyun  * @chip: NAND chip info structure
2402*4882a593Smuzhiyun  * @addr: feature address
2403*4882a593Smuzhiyun  * @subfeature_param: the subfeature parameters, a four bytes array
2404*4882a593Smuzhiyun  *
2405*4882a593Smuzhiyun  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2406*4882a593Smuzhiyun  * operation cannot be handled.
2407*4882a593Smuzhiyun  */
int nand_get_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_get_features(chip, addr))
		return -ENOTSUPP;

	/* Legacy hook takes precedence when the driver provides one. */
	if (!chip->legacy.get_features)
		return nand_get_features_op(chip, addr, subfeature_param);

	return chip->legacy.get_features(chip, addr, subfeature_param);
}
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun /**
2421*4882a593Smuzhiyun  * nand_set_features - wrapper to perform a SET_FEATURE
2422*4882a593Smuzhiyun  * @chip: NAND chip info structure
2423*4882a593Smuzhiyun  * @addr: feature address
2424*4882a593Smuzhiyun  * @subfeature_param: the subfeature parameters, a four bytes array
2425*4882a593Smuzhiyun  *
2426*4882a593Smuzhiyun  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2427*4882a593Smuzhiyun  * operation cannot be handled.
2428*4882a593Smuzhiyun  */
int nand_set_features(struct nand_chip *chip, int addr,
		      u8 *subfeature_param)
{
	if (!nand_supports_set_features(chip, addr))
		return -ENOTSUPP;

	/* Legacy hook takes precedence when the driver provides one. */
	if (!chip->legacy.set_features)
		return nand_set_features_op(chip, addr, subfeature_param);

	return chip->legacy.set_features(chip, addr, subfeature_param);
}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun /**
2442*4882a593Smuzhiyun  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2443*4882a593Smuzhiyun  * @buf: buffer to test
2444*4882a593Smuzhiyun  * @len: buffer length
2445*4882a593Smuzhiyun  * @bitflips_threshold: maximum number of bitflips
2446*4882a593Smuzhiyun  *
2447*4882a593Smuzhiyun  * Check if a buffer contains only 0xff, which means the underlying region
2448*4882a593Smuzhiyun  * has been erased and is ready to be programmed.
2449*4882a593Smuzhiyun  * The bitflips_threshold specify the maximum number of bitflips before
2450*4882a593Smuzhiyun  * considering the region is not erased.
2451*4882a593Smuzhiyun  * Note: The logic of this function has been extracted from the memweight
2452*4882a593Smuzhiyun  * implementation, except that nand_check_erased_buf function exit before
2453*4882a593Smuzhiyun  * testing the whole buffer if the number of bitflips exceed the
2454*4882a593Smuzhiyun  * bitflips_threshold value.
2455*4882a593Smuzhiyun  *
2456*4882a593Smuzhiyun  * Returns a positive number of bitflips less than or equal to
2457*4882a593Smuzhiyun  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2458*4882a593Smuzhiyun  * threshold.
2459*4882a593Smuzhiyun  */
nand_check_erased_buf(void * buf,int len,int bitflips_threshold)2460*4882a593Smuzhiyun static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
2461*4882a593Smuzhiyun {
2462*4882a593Smuzhiyun 	const unsigned char *bitmap = buf;
2463*4882a593Smuzhiyun 	int bitflips = 0;
2464*4882a593Smuzhiyun 	int weight;
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
2467*4882a593Smuzhiyun 	     len--, bitmap++) {
2468*4882a593Smuzhiyun 		weight = hweight8(*bitmap);
2469*4882a593Smuzhiyun 		bitflips += BITS_PER_BYTE - weight;
2470*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
2471*4882a593Smuzhiyun 			return -EBADMSG;
2472*4882a593Smuzhiyun 	}
2473*4882a593Smuzhiyun 
2474*4882a593Smuzhiyun 	for (; len >= sizeof(long);
2475*4882a593Smuzhiyun 	     len -= sizeof(long), bitmap += sizeof(long)) {
2476*4882a593Smuzhiyun 		unsigned long d = *((unsigned long *)bitmap);
2477*4882a593Smuzhiyun 		if (d == ~0UL)
2478*4882a593Smuzhiyun 			continue;
2479*4882a593Smuzhiyun 		weight = hweight_long(d);
2480*4882a593Smuzhiyun 		bitflips += BITS_PER_LONG - weight;
2481*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
2482*4882a593Smuzhiyun 			return -EBADMSG;
2483*4882a593Smuzhiyun 	}
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	for (; len > 0; len--, bitmap++) {
2486*4882a593Smuzhiyun 		weight = hweight8(*bitmap);
2487*4882a593Smuzhiyun 		bitflips += BITS_PER_BYTE - weight;
2488*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
2489*4882a593Smuzhiyun 			return -EBADMSG;
2490*4882a593Smuzhiyun 	}
2491*4882a593Smuzhiyun 
2492*4882a593Smuzhiyun 	return bitflips;
2493*4882a593Smuzhiyun }
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun /**
2496*4882a593Smuzhiyun  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2497*4882a593Smuzhiyun  *				 0xff data
2498*4882a593Smuzhiyun  * @data: data buffer to test
2499*4882a593Smuzhiyun  * @datalen: data length
2500*4882a593Smuzhiyun  * @ecc: ECC buffer
2501*4882a593Smuzhiyun  * @ecclen: ECC length
2502*4882a593Smuzhiyun  * @extraoob: extra OOB buffer
2503*4882a593Smuzhiyun  * @extraooblen: extra OOB length
2504*4882a593Smuzhiyun  * @bitflips_threshold: maximum number of bitflips
2505*4882a593Smuzhiyun  *
2506*4882a593Smuzhiyun  * Check if a data buffer and its associated ECC and OOB data contains only
2507*4882a593Smuzhiyun  * 0xff pattern, which means the underlying region has been erased and is
2508*4882a593Smuzhiyun  * ready to be programmed.
2509*4882a593Smuzhiyun  * The bitflips_threshold specify the maximum number of bitflips before
2510*4882a593Smuzhiyun  * considering the region as not erased.
2511*4882a593Smuzhiyun  *
2512*4882a593Smuzhiyun  * Note:
2513*4882a593Smuzhiyun  * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2514*4882a593Smuzhiyun  *    different from the NAND page size. When fixing bitflips, ECC engines will
2515*4882a593Smuzhiyun  *    report the number of errors per chunk, and the NAND core infrastructure
2516*4882a593Smuzhiyun  *    expect you to return the maximum number of bitflips for the whole page.
2517*4882a593Smuzhiyun  *    This is why you should always use this function on a single chunk and
2518*4882a593Smuzhiyun  *    not on the whole page. After checking each chunk you should update your
2519*4882a593Smuzhiyun  *    max_bitflips value accordingly.
2520*4882a593Smuzhiyun  * 2/ When checking for bitflips in erased pages you should not only check
2521*4882a593Smuzhiyun  *    the payload data but also their associated ECC data, because a user might
2522*4882a593Smuzhiyun  *    have programmed almost all bits to 1 but a few. In this case, we
2523*4882a593Smuzhiyun  *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
2524*4882a593Smuzhiyun  *    this case.
2525*4882a593Smuzhiyun  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2526*4882a593Smuzhiyun  *    data are protected by the ECC engine.
2527*4882a593Smuzhiyun  *    It could also be used if you support subpages and want to attach some
2528*4882a593Smuzhiyun  *    extra OOB data to an ECC chunk.
2529*4882a593Smuzhiyun  *
2530*4882a593Smuzhiyun  * Returns a positive number of bitflips less than or equal to
2531*4882a593Smuzhiyun  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2532*4882a593Smuzhiyun  * threshold. In case of success, the passed buffers are filled with 0xff.
2533*4882a593Smuzhiyun  */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	int data_bitflips, ecc_bitflips, extraoob_bitflips;

	/*
	 * Check the three regions in sequence, shrinking the remaining
	 * bitflip budget after each one so the grand total stays bounded
	 * by the caller's threshold.
	 */
	data_bitflips = nand_check_erased_buf(data, datalen,
					      bitflips_threshold);
	if (data_bitflips < 0)
		return data_bitflips;

	ecc_bitflips = nand_check_erased_buf(ecc, ecclen,
					     bitflips_threshold -
					     data_bitflips);
	if (ecc_bitflips < 0)
		return ecc_bitflips;

	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
						  bitflips_threshold -
						  data_bitflips -
						  ecc_bitflips);
	if (extraoob_bitflips < 0)
		return extraoob_bitflips;

	/* Chunk is considered erased: scrub flipped bits back to 0xff. */
	if (data_bitflips)
		memset(data, 0xff, datalen);
	if (ecc_bitflips)
		memset(ecc, 0xff, ecclen);
	if (extraoob_bitflips)
		memset(extraoob, 0xff, extraooblen);

	return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun /**
2573*4882a593Smuzhiyun  * nand_read_page_raw_notsupp - dummy read raw page function
2574*4882a593Smuzhiyun  * @chip: nand chip info structure
2575*4882a593Smuzhiyun  * @buf: buffer to store read data
2576*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2577*4882a593Smuzhiyun  * @page: page number to read
2578*4882a593Smuzhiyun  *
2579*4882a593Smuzhiyun  * Returns -ENOTSUPP unconditionally.
2580*4882a593Smuzhiyun  */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Stub for controllers that cannot do raw page reads. */
	return -ENOTSUPP;
}
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun /**
2588*4882a593Smuzhiyun  * nand_read_page_raw - [INTERN] read raw page data without ecc
2589*4882a593Smuzhiyun  * @chip: nand chip info structure
2590*4882a593Smuzhiyun  * @buf: buffer to store read data
2591*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2592*4882a593Smuzhiyun  * @page: page number to read
2593*4882a593Smuzhiyun  *
2594*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2595*4882a593Smuzhiyun  */
nand_read_page_raw(struct nand_chip * chip,uint8_t * buf,int oob_required,int page)2596*4882a593Smuzhiyun int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2597*4882a593Smuzhiyun 		       int page)
2598*4882a593Smuzhiyun {
2599*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
2600*4882a593Smuzhiyun 	int ret;
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2603*4882a593Smuzhiyun 	if (ret)
2604*4882a593Smuzhiyun 		return ret;
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun 	if (oob_required) {
2607*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2608*4882a593Smuzhiyun 					false, false);
2609*4882a593Smuzhiyun 		if (ret)
2610*4882a593Smuzhiyun 			return ret;
2611*4882a593Smuzhiyun 	}
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun 	return 0;
2614*4882a593Smuzhiyun }
2615*4882a593Smuzhiyun EXPORT_SYMBOL(nand_read_page_raw);
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun /**
2618*4882a593Smuzhiyun  * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2619*4882a593Smuzhiyun  * @chip: NAND chip info structure
2620*4882a593Smuzhiyun  * @buf: buffer to store read data
2621*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2622*4882a593Smuzhiyun  * @page: page number to read
2623*4882a593Smuzhiyun  *
2624*4882a593Smuzhiyun  * This is a raw page read, ie. without any error detection/correction.
2625*4882a593Smuzhiyun  * Monolithic means we are requesting all the relevant data (main plus
2626*4882a593Smuzhiyun  * eventually OOB) to be loaded in the NAND cache and sent over the
2627*4882a593Smuzhiyun  * bus (from the NAND chip to the NAND controller) in a single
2628*4882a593Smuzhiyun  * operation. This is an alternative to nand_read_page_raw(), which
2629*4882a593Smuzhiyun  * first reads the main data, and if the OOB data is requested too,
2630*4882a593Smuzhiyun  * then reads more data on the bus.
2631*4882a593Smuzhiyun  */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Data + OOB must land in one contiguous buffer: use the
		 * chip's internal buffer unless the caller already handed
		 * it to us.
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/*
	 * Copy the main data back to the caller's buffer only when we
	 * actually read into the internal buffer. The previous check
	 * (buf != chip->data_buf) could trigger a memcpy() with
	 * identical source and destination when OOB was not required,
	 * which is an overlapping copy and thus undefined behavior.
	 */
	if (read_buf != buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2657*4882a593Smuzhiyun 
2658*4882a593Smuzhiyun /**
2659*4882a593Smuzhiyun  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2660*4882a593Smuzhiyun  * @chip: nand chip info structure
2661*4882a593Smuzhiyun  * @buf: buffer to store read data
2662*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2663*4882a593Smuzhiyun  * @page: page number to read
2664*4882a593Smuzhiyun  *
2665*4882a593Smuzhiyun  * We need a special oob layout and handling even when OOB isn't used.
2666*4882a593Smuzhiyun  */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Load the page into the chip's cache; data is fetched below. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves data and OOB on the bus:
	 * [data | prepad | ECC | postpad] repeated for each ECC step,
	 * so read the page chunk by chunk in exactly that order.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain after the interleaved chunks. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun /**
2723*4882a593Smuzhiyun  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2724*4882a593Smuzhiyun  * @chip: nand chip info structure
2725*4882a593Smuzhiyun  * @buf: buffer to store read data
2726*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2727*4882a593Smuzhiyun  * @page: page number to read
2728*4882a593Smuzhiyun  */
nand_read_page_swecc(struct nand_chip * chip,uint8_t * buf,int oob_required,int page)2729*4882a593Smuzhiyun static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2730*4882a593Smuzhiyun 				int oob_required, int page)
2731*4882a593Smuzhiyun {
2732*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
2733*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size, ret;
2734*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2735*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2736*4882a593Smuzhiyun 	uint8_t *p = buf;
2737*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->ecc.calc_buf;
2738*4882a593Smuzhiyun 	uint8_t *ecc_code = chip->ecc.code_buf;
2739*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun 	chip->ecc.read_page_raw(chip, buf, 1, page);
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2744*4882a593Smuzhiyun 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2747*4882a593Smuzhiyun 					 chip->ecc.total);
2748*4882a593Smuzhiyun 	if (ret)
2749*4882a593Smuzhiyun 		return ret;
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 	eccsteps = chip->ecc.steps;
2752*4882a593Smuzhiyun 	p = buf;
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2755*4882a593Smuzhiyun 		int stat;
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2758*4882a593Smuzhiyun 		if (stat < 0) {
2759*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
2760*4882a593Smuzhiyun 		} else {
2761*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
2762*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
2763*4882a593Smuzhiyun 		}
2764*4882a593Smuzhiyun 	}
2765*4882a593Smuzhiyun 	return max_bitflips;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun /**
2769*4882a593Smuzhiyun  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2770*4882a593Smuzhiyun  * @chip: nand chip info structure
2771*4882a593Smuzhiyun  * @data_offs: offset of requested data within the page
2772*4882a593Smuzhiyun  * @readlen: data length
2773*4882a593Smuzhiyun  * @bufpoi: buffer to store read data
2774*4882a593Smuzhiyun  * @page: page number to read
2775*4882a593Smuzhiyun  */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	/* Byte offset of the first relevant ECC code inside the OOB layout */
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	/* A too-short region means the ECC bytes are scattered in OOB */
	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* Scattered ECC: simplest to re-read the whole OOB area */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Pad one byte at each end that buswidth rounding may clip */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Gather the relevant ECC codes out of the OOB layout */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each chunk of the requested sub-page range */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun /**
2880*4882a593Smuzhiyun  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2881*4882a593Smuzhiyun  * @chip: nand chip info structure
2882*4882a593Smuzhiyun  * @buf: buffer to store read data
2883*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2884*4882a593Smuzhiyun  * @page: page number to read
2885*4882a593Smuzhiyun  *
2886*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers which need a special oob layout.
2887*4882a593Smuzhiyun  */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Load the page into the chip's cache; data is fetched below. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Read each data chunk with the HW ECC engine armed, so the
	 * controller computes the ECC on the fly while the data streams by.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Then read the whole OOB area, which holds the stored ECC bytes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each chunk, accumulating per-page ECC statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun /**
2951*4882a593Smuzhiyun  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2952*4882a593Smuzhiyun  * @chip: nand chip info structure
2953*4882a593Smuzhiyun  * @buf: buffer to store read data
2954*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2955*4882a593Smuzhiyun  * @page: page number to read
2956*4882a593Smuzhiyun  *
2957*4882a593Smuzhiyun  * The hw generator calculates the error syndrome automatically. Therefore we
2958*4882a593Smuzhiyun  * need a special oob layout and handling.
2959*4882a593Smuzhiyun  */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Per-step OOB footprint: prepad + ECC bytes + postpad */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	/* Load the page into the chip's cache; data is fetched below. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves data and OOB on the bus:
	 * [data | prepad | ECC | postpad] per step. The HW engine computes
	 * the syndrome while data and ECC bytes stream past it.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Switch the engine to syndrome mode for the ECC bytes. */
		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun /**
3043*4882a593Smuzhiyun  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3044*4882a593Smuzhiyun  * @chip: NAND chip object
3045*4882a593Smuzhiyun  * @oob: oob destination address
3046*4882a593Smuzhiyun  * @ops: oob ops structure
3047*4882a593Smuzhiyun  * @len: size of oob to transfer
3048*4882a593Smuzhiyun  */
nand_transfer_oob(struct nand_chip * chip,uint8_t * oob,struct mtd_oob_ops * ops,size_t len)3049*4882a593Smuzhiyun static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3050*4882a593Smuzhiyun 				  struct mtd_oob_ops *ops, size_t len)
3051*4882a593Smuzhiyun {
3052*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3053*4882a593Smuzhiyun 	int ret;
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	switch (ops->mode) {
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
3058*4882a593Smuzhiyun 	case MTD_OPS_RAW:
3059*4882a593Smuzhiyun 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3060*4882a593Smuzhiyun 		return oob + len;
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB:
3063*4882a593Smuzhiyun 		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3064*4882a593Smuzhiyun 						  ops->ooboffs, len);
3065*4882a593Smuzhiyun 		BUG_ON(ret);
3066*4882a593Smuzhiyun 		return oob + len;
3067*4882a593Smuzhiyun 
3068*4882a593Smuzhiyun 	default:
3069*4882a593Smuzhiyun 		BUG();
3070*4882a593Smuzhiyun 	}
3071*4882a593Smuzhiyun 	return NULL;
3072*4882a593Smuzhiyun }
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun /**
3075*4882a593Smuzhiyun  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3076*4882a593Smuzhiyun  * @chip: NAND chip object
3077*4882a593Smuzhiyun  * @retry_mode: the retry mode to use
3078*4882a593Smuzhiyun  *
3079*4882a593Smuzhiyun  * Some vendors supply a special command to shift the Vt threshold, to be used
3080*4882a593Smuzhiyun  * when there are too many bitflips in a page (i.e., ECC error). After setting
3081*4882a593Smuzhiyun  * a new threshold, the host should retry reading the page.
3082*4882a593Smuzhiyun  */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* The chip only advertises chip->read_retries distinct modes. */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	/* Vendor-specific hook; not every chip provides one. */
	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}
3095*4882a593Smuzhiyun 
nand_wait_readrdy(struct nand_chip * chip)3096*4882a593Smuzhiyun static void nand_wait_readrdy(struct nand_chip *chip)
3097*4882a593Smuzhiyun {
3098*4882a593Smuzhiyun 	const struct nand_sdr_timings *sdr;
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	if (!(chip->options & NAND_NEED_READRDY))
3101*4882a593Smuzhiyun 		return;
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 	sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
3104*4882a593Smuzhiyun 	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
3105*4882a593Smuzhiyun }
3106*4882a593Smuzhiyun 
/**
 * nand_do_read_ops - [INTERN] Read data with ECC
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 *
 * Return: the maximum number of bitflips per ECC step seen during the read
 * (>= 0), -EBADMSG on an uncorrectable ECC error that survived all read-retry
 * modes, or another negative error code from the low-level operations.
 */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Select the die that contains the starting offset. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset within the first page (non-zero for unaligned reads). */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	while (1) {
		/* Snapshot ECC stats so per-page failures can be detected. */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Partial-page reads always go through the internal buffer;
		 * DMA-capable controllers additionally need it when the
		 * caller's buffer is not DMA-able or misaligned.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					/*
					 * A clean, ECC-corrected full page now
					 * sits in data_buf: remember it so a
					 * repeat read can be served from cache.
					 */
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/*
			 * Uncorrectable error on this page: advance to the
			 * next read-retry mode (if any) and re-read it.
			 */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page is cached: serve the read from data_buf. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3284*4882a593Smuzhiyun 
3285*4882a593Smuzhiyun /**
3286*4882a593Smuzhiyun  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3287*4882a593Smuzhiyun  * @chip: nand chip info structure
3288*4882a593Smuzhiyun  * @page: page number to read
3289*4882a593Smuzhiyun  */
nand_read_oob_std(struct nand_chip * chip,int page)3290*4882a593Smuzhiyun int nand_read_oob_std(struct nand_chip *chip, int page)
3291*4882a593Smuzhiyun {
3292*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
3295*4882a593Smuzhiyun }
3296*4882a593Smuzhiyun EXPORT_SYMBOL(nand_read_oob_std);
3297*4882a593Smuzhiyun 
3298*4882a593Smuzhiyun /**
3299*4882a593Smuzhiyun  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3300*4882a593Smuzhiyun  *			    with syndromes
3301*4882a593Smuzhiyun  * @chip: nand chip info structure
3302*4882a593Smuzhiyun  * @page: page number to read
3303*4882a593Smuzhiyun  */
nand_read_oob_syndrome(struct nand_chip * chip,int page)3304*4882a593Smuzhiyun static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3305*4882a593Smuzhiyun {
3306*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3307*4882a593Smuzhiyun 	int length = mtd->oobsize;
3308*4882a593Smuzhiyun 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3309*4882a593Smuzhiyun 	int eccsize = chip->ecc.size;
3310*4882a593Smuzhiyun 	uint8_t *bufpoi = chip->oob_poi;
3311*4882a593Smuzhiyun 	int i, toread, sndrnd = 0, pos, ret;
3312*4882a593Smuzhiyun 
3313*4882a593Smuzhiyun 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3314*4882a593Smuzhiyun 	if (ret)
3315*4882a593Smuzhiyun 		return ret;
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.steps; i++) {
3318*4882a593Smuzhiyun 		if (sndrnd) {
3319*4882a593Smuzhiyun 			int ret;
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun 			pos = eccsize + i * (eccsize + chunk);
3322*4882a593Smuzhiyun 			if (mtd->writesize > 512)
3323*4882a593Smuzhiyun 				ret = nand_change_read_column_op(chip, pos,
3324*4882a593Smuzhiyun 								 NULL, 0,
3325*4882a593Smuzhiyun 								 false);
3326*4882a593Smuzhiyun 			else
3327*4882a593Smuzhiyun 				ret = nand_read_page_op(chip, page, pos, NULL,
3328*4882a593Smuzhiyun 							0);
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun 			if (ret)
3331*4882a593Smuzhiyun 				return ret;
3332*4882a593Smuzhiyun 		} else
3333*4882a593Smuzhiyun 			sndrnd = 1;
3334*4882a593Smuzhiyun 		toread = min_t(int, length, chunk);
3335*4882a593Smuzhiyun 
3336*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3337*4882a593Smuzhiyun 		if (ret)
3338*4882a593Smuzhiyun 			return ret;
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 		bufpoi += toread;
3341*4882a593Smuzhiyun 		length -= toread;
3342*4882a593Smuzhiyun 	}
3343*4882a593Smuzhiyun 	if (length > 0) {
3344*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, bufpoi, length, false, false);
3345*4882a593Smuzhiyun 		if (ret)
3346*4882a593Smuzhiyun 			return ret;
3347*4882a593Smuzhiyun 	}
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun 	return 0;
3350*4882a593Smuzhiyun }
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun /**
3353*4882a593Smuzhiyun  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3354*4882a593Smuzhiyun  * @chip: nand chip info structure
3355*4882a593Smuzhiyun  * @page: page number to write
3356*4882a593Smuzhiyun  */
nand_write_oob_std(struct nand_chip * chip,int page)3357*4882a593Smuzhiyun int nand_write_oob_std(struct nand_chip *chip, int page)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3362*4882a593Smuzhiyun 				 mtd->oobsize);
3363*4882a593Smuzhiyun }
3364*4882a593Smuzhiyun EXPORT_SYMBOL(nand_write_oob_std);
3365*4882a593Smuzhiyun 
/**
 * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
 *			     with syndrome - only for large page flash
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Return: 0 on success or a negative error code.
 */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/*
		 * No pad bytes: the whole OOB sits after the last ECC chunk
		 * and can be written in one go, so skip the per-step loop.
		 */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		/* The first chunk needs no repositioning (sndcmd == 0). */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				uint32_t fill = 0xFFFFFFFF;

				/*
				 * Small-page parts cannot change the write
				 * column mid-program: pad the skipped data
				 * region with 0xFF instead (programming 0xFF
				 * leaves NAND cells untouched).
				 */
				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Jump to the OOB chunk of ECC step i. */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Write whatever trailing spare bytes remain after the last chunk. */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3438*4882a593Smuzhiyun 
/**
 * nand_do_read_oob - [INTERN] NAND read out-of-band
 * @chip: NAND chip object
 * @from: offset to read from
 * @ops: oob operations description structure
 *
 * NAND read out-of-band data from the spare area.
 *
 * Return: the maximum number of bitflips per ECC step seen (>= 0), -EBADMSG
 * if uncorrectable ECC errors occurred, or another negative error code.
 */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats to detect new failures at the end. */
	stats = mtd->ecc_stats;

	/* OOB bytes available per page for this ops mode (raw vs auto). */
	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		/* Copy from chip->oob_poi into the caller's buffer. */
		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun /**
3518*4882a593Smuzhiyun  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3519*4882a593Smuzhiyun  * @mtd: MTD device structure
3520*4882a593Smuzhiyun  * @from: offset to read from
3521*4882a593Smuzhiyun  * @ops: oob operation description structure
3522*4882a593Smuzhiyun  *
3523*4882a593Smuzhiyun  * NAND read data and/or out-of-band data.
3524*4882a593Smuzhiyun  */
nand_read_oob(struct mtd_info * mtd,loff_t from,struct mtd_oob_ops * ops)3525*4882a593Smuzhiyun static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3526*4882a593Smuzhiyun 			 struct mtd_oob_ops *ops)
3527*4882a593Smuzhiyun {
3528*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3529*4882a593Smuzhiyun 	int ret;
3530*4882a593Smuzhiyun 
3531*4882a593Smuzhiyun 	ops->retlen = 0;
3532*4882a593Smuzhiyun 
3533*4882a593Smuzhiyun 	if (ops->mode != MTD_OPS_PLACE_OOB &&
3534*4882a593Smuzhiyun 	    ops->mode != MTD_OPS_AUTO_OOB &&
3535*4882a593Smuzhiyun 	    ops->mode != MTD_OPS_RAW)
3536*4882a593Smuzhiyun 		return -ENOTSUPP;
3537*4882a593Smuzhiyun 
3538*4882a593Smuzhiyun 	nand_get_device(chip);
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun 	if (!ops->datbuf)
3541*4882a593Smuzhiyun 		ret = nand_do_read_oob(chip, from, ops);
3542*4882a593Smuzhiyun 	else
3543*4882a593Smuzhiyun 		ret = nand_do_read_ops(chip, from, ops);
3544*4882a593Smuzhiyun 
3545*4882a593Smuzhiyun 	nand_release_device(chip);
3546*4882a593Smuzhiyun 	return ret;
3547*4882a593Smuzhiyun }
3548*4882a593Smuzhiyun 
3549*4882a593Smuzhiyun /**
3550*4882a593Smuzhiyun  * nand_write_page_raw_notsupp - dummy raw page write function
3551*4882a593Smuzhiyun  * @chip: nand chip info structure
3552*4882a593Smuzhiyun  * @buf: data buffer
3553*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3554*4882a593Smuzhiyun  * @page: page number to write
3555*4882a593Smuzhiyun  *
3556*4882a593Smuzhiyun  * Returns -ENOTSUPP unconditionally.
3557*4882a593Smuzhiyun  */
nand_write_page_raw_notsupp(struct nand_chip * chip,const u8 * buf,int oob_required,int page)3558*4882a593Smuzhiyun int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
3559*4882a593Smuzhiyun 				int oob_required, int page)
3560*4882a593Smuzhiyun {
3561*4882a593Smuzhiyun 	return -ENOTSUPP;
3562*4882a593Smuzhiyun }
3563*4882a593Smuzhiyun 
3564*4882a593Smuzhiyun /**
3565*4882a593Smuzhiyun  * nand_write_page_raw - [INTERN] raw page write function
3566*4882a593Smuzhiyun  * @chip: nand chip info structure
3567*4882a593Smuzhiyun  * @buf: data buffer
3568*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3569*4882a593Smuzhiyun  * @page: page number to write
3570*4882a593Smuzhiyun  *
3571*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3572*4882a593Smuzhiyun  */
nand_write_page_raw(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)3573*4882a593Smuzhiyun int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3574*4882a593Smuzhiyun 			int oob_required, int page)
3575*4882a593Smuzhiyun {
3576*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3577*4882a593Smuzhiyun 	int ret;
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun 	ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3580*4882a593Smuzhiyun 	if (ret)
3581*4882a593Smuzhiyun 		return ret;
3582*4882a593Smuzhiyun 
3583*4882a593Smuzhiyun 	if (oob_required) {
3584*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3585*4882a593Smuzhiyun 					 false);
3586*4882a593Smuzhiyun 		if (ret)
3587*4882a593Smuzhiyun 			return ret;
3588*4882a593Smuzhiyun 	}
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
3591*4882a593Smuzhiyun }
3592*4882a593Smuzhiyun EXPORT_SYMBOL(nand_write_page_raw);
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun /**
3595*4882a593Smuzhiyun  * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3596*4882a593Smuzhiyun  * @chip: NAND chip info structure
3597*4882a593Smuzhiyun  * @buf: data buffer to write
3598*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3599*4882a593Smuzhiyun  * @page: page number to write
3600*4882a593Smuzhiyun  *
3601*4882a593Smuzhiyun  * This is a raw page write, ie. without any error detection/correction.
3602*4882a593Smuzhiyun  * Monolithic means we are requesting all the relevant data (main plus
3603*4882a593Smuzhiyun  * eventually OOB) to be sent over the bus and effectively programmed
3604*4882a593Smuzhiyun  * into the NAND chip arrays in a single operation. This is an
3605*4882a593Smuzhiyun  * alternative to nand_write_page_raw(), which first sends the main
3606*4882a593Smuzhiyun  * data, then eventually send the OOB data by latching more data
3607*4882a593Smuzhiyun  * cycles on the NAND bus, and finally sends the program command to
3608*4882a593Smuzhiyun  * synchronyze the NAND chip cache.
3609*4882a593Smuzhiyun  */
nand_monolithic_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)3610*4882a593Smuzhiyun int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3611*4882a593Smuzhiyun 				   int oob_required, int page)
3612*4882a593Smuzhiyun {
3613*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3614*4882a593Smuzhiyun 	unsigned int size = mtd->writesize;
3615*4882a593Smuzhiyun 	u8 *write_buf = (u8 *)buf;
3616*4882a593Smuzhiyun 
3617*4882a593Smuzhiyun 	if (oob_required) {
3618*4882a593Smuzhiyun 		size += mtd->oobsize;
3619*4882a593Smuzhiyun 
3620*4882a593Smuzhiyun 		if (buf != chip->data_buf) {
3621*4882a593Smuzhiyun 			write_buf = nand_get_data_buf(chip);
3622*4882a593Smuzhiyun 			memcpy(write_buf, buf, mtd->writesize);
3623*4882a593Smuzhiyun 		}
3624*4882a593Smuzhiyun 	}
3625*4882a593Smuzhiyun 
3626*4882a593Smuzhiyun 	return nand_prog_page_op(chip, page, 0, write_buf, size);
3627*4882a593Smuzhiyun }
3628*4882a593Smuzhiyun EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3629*4882a593Smuzhiyun 
/**
 * nand_write_page_raw_syndrome - [INTERN] raw page write function
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * We need a special oob layout and handling even when ECC isn't checked.
 *
 * Writes each ECC step as data followed by its (prepad, ecc, postpad) OOB
 * chunk, then any remaining spare bytes.
 *
 * Return: 0 on success or a negative error code.
 */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (steps = chip->ecc.steps; steps > 0; steps--) {
		/* Data chunk of this ECC step. */
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		/* Optional pad bytes before the ECC bytes. */
		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC bytes for this step (taken as-is from oob_poi). */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		/* Optional pad bytes after the ECC bytes. */
		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write whatever spare bytes were not consumed by the steps. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3694*4882a593Smuzhiyun /**
3695*4882a593Smuzhiyun  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3696*4882a593Smuzhiyun  * @chip: nand chip info structure
3697*4882a593Smuzhiyun  * @buf: data buffer
3698*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3699*4882a593Smuzhiyun  * @page: page number to write
3700*4882a593Smuzhiyun  */
nand_write_page_swecc(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)3701*4882a593Smuzhiyun static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3702*4882a593Smuzhiyun 				 int oob_required, int page)
3703*4882a593Smuzhiyun {
3704*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3705*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size, ret;
3706*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
3707*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
3708*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3709*4882a593Smuzhiyun 	const uint8_t *p = buf;
3710*4882a593Smuzhiyun 
3711*4882a593Smuzhiyun 	/* Software ECC calculation */
3712*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3713*4882a593Smuzhiyun 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3716*4882a593Smuzhiyun 					 chip->ecc.total);
3717*4882a593Smuzhiyun 	if (ret)
3718*4882a593Smuzhiyun 		return ret;
3719*4882a593Smuzhiyun 
3720*4882a593Smuzhiyun 	return chip->ecc.write_page_raw(chip, buf, 1, page);
3721*4882a593Smuzhiyun }
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun /**
3724*4882a593Smuzhiyun  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3725*4882a593Smuzhiyun  * @chip: nand chip info structure
3726*4882a593Smuzhiyun  * @buf: data buffer
3727*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3728*4882a593Smuzhiyun  * @page: page number to write
3729*4882a593Smuzhiyun  */
nand_write_page_hwecc(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)3730*4882a593Smuzhiyun static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
3731*4882a593Smuzhiyun 				 int oob_required, int page)
3732*4882a593Smuzhiyun {
3733*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
3734*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size, ret;
3735*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
3736*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
3737*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->ecc.calc_buf;
3738*4882a593Smuzhiyun 	const uint8_t *p = buf;
3739*4882a593Smuzhiyun 
3740*4882a593Smuzhiyun 	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3741*4882a593Smuzhiyun 	if (ret)
3742*4882a593Smuzhiyun 		return ret;
3743*4882a593Smuzhiyun 
3744*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3745*4882a593Smuzhiyun 		chip->ecc.hwctl(chip, NAND_ECC_WRITE);
3746*4882a593Smuzhiyun 
3747*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, p, eccsize, false);
3748*4882a593Smuzhiyun 		if (ret)
3749*4882a593Smuzhiyun 			return ret;
3750*4882a593Smuzhiyun 
3751*4882a593Smuzhiyun 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
3752*4882a593Smuzhiyun 	}
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun 	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3755*4882a593Smuzhiyun 					 chip->ecc.total);
3756*4882a593Smuzhiyun 	if (ret)
3757*4882a593Smuzhiyun 		return ret;
3758*4882a593Smuzhiyun 
3759*4882a593Smuzhiyun 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3760*4882a593Smuzhiyun 	if (ret)
3761*4882a593Smuzhiyun 		return ret;
3762*4882a593Smuzhiyun 
3763*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
3764*4882a593Smuzhiyun }
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun 
3767*4882a593Smuzhiyun /**
3768*4882a593Smuzhiyun  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3769*4882a593Smuzhiyun  * @chip:	nand chip info structure
3770*4882a593Smuzhiyun  * @offset:	column address of subpage within the page
3771*4882a593Smuzhiyun  * @data_len:	data length
3772*4882a593Smuzhiyun  * @buf:	data buffer
3773*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3774*4882a593Smuzhiyun  * @page: page number to write
3775*4882a593Smuzhiyun  */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	/* First and last ECC steps covered by [offset, offset + data_len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	/* OOB bytes that belong to each ECC step (even split of the OOB area) */
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/* The full page is streamed out; untouched subpages carry 0xff data. */
	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		/* Advance all three cursors to the next ECC step. */
		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun 
3837*4882a593Smuzhiyun /**
3838*4882a593Smuzhiyun  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3839*4882a593Smuzhiyun  * @chip: nand chip info structure
3840*4882a593Smuzhiyun  * @buf: data buffer
3841*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3842*4882a593Smuzhiyun  * @page: page number to write
3843*4882a593Smuzhiyun  *
3844*4882a593Smuzhiyun  * The hw generator calculates the error syndrome automatically. Therefore we
3845*4882a593Smuzhiyun  * need a special oob layout and handling.
3846*4882a593Smuzhiyun  */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Syndrome layout interleaves data and OOB on the bus: for every ECC
	 * step write the data chunk, then the prepad bytes, then the ECC
	 * bytes, and finally the postpad bytes.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* ECC is fetched from the hw generator into the oob buffer */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3906*4882a593Smuzhiyun 
3907*4882a593Smuzhiyun /**
3908*4882a593Smuzhiyun  * nand_write_page - write one page
3909*4882a593Smuzhiyun  * @chip: NAND chip descriptor
3910*4882a593Smuzhiyun  * @offset: address offset within the page
3911*4882a593Smuzhiyun  * @data_len: length of actual data to be written
3912*4882a593Smuzhiyun  * @buf: the data to write
3913*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3914*4882a593Smuzhiyun  * @page: page number to write
3915*4882a593Smuzhiyun  * @raw: use _raw version of write_page
3916*4882a593Smuzhiyun  */
static int nand_write_page(struct nand_chip *chip, uint32_t offset,
			   int data_len, const uint8_t *buf, int oob_required,
			   int page, int raw)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int subpage = 0;
	int status;

	/*
	 * A subpage write is only possible when the chip allows it and the
	 * controller implements it, and only needed when the span does not
	 * cover the whole page.
	 */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && chip->ecc.write_subpage)
		subpage = offset || (data_len < mtd->writesize);

	if (unlikely(raw))
		status = chip->ecc.write_page_raw(chip, buf, oob_required,
						  page);
	else if (subpage)
		status = chip->ecc.write_subpage(chip, offset, data_len, buf,
						 oob_required, page);
	else
		status = chip->ecc.write_page(chip, buf, oob_required, page);

	/* Positive status values are not propagated to the caller. */
	return status < 0 ? status : 0;
}
3944*4882a593Smuzhiyun 
3945*4882a593Smuzhiyun #define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
3946*4882a593Smuzhiyun 
3947*4882a593Smuzhiyun /**
3948*4882a593Smuzhiyun  * nand_do_write_ops - [INTERN] NAND write with ECC
3949*4882a593Smuzhiyun  * @chip: NAND chip object
3950*4882a593Smuzhiyun  * @to: offset to write to
3951*4882a593Smuzhiyun  * @ops: oob operations description structure
3952*4882a593Smuzhiyun  *
3953*4882a593Smuzhiyun  * NAND write with ECC.
3954*4882a593Smuzhiyun  */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Byte offset of the write start within the first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * Partial page writes always go through the bounce buffer so
		 * the untouched part of the page can be padded with 0xff.
		 * DMA-capable controllers also need it when the caller's
		 * buffer is not DMA-able or is misaligned.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages always start at column 0 */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4075*4882a593Smuzhiyun 
4076*4882a593Smuzhiyun /**
4077*4882a593Smuzhiyun  * panic_nand_write - [MTD Interface] NAND write with ECC
4078*4882a593Smuzhiyun  * @mtd: MTD device structure
4079*4882a593Smuzhiyun  * @to: offset to write to
4080*4882a593Smuzhiyun  * @len: number of bytes to write
4081*4882a593Smuzhiyun  * @retlen: pointer to variable to store the number of written bytes
4082*4882a593Smuzhiyun  * @buf: the data to write
4083*4882a593Smuzhiyun  *
4084*4882a593Smuzhiyun  * NAND write with ECC. Used when performing writes in interrupt context, this
4085*4882a593Smuzhiyun  * may for example be called by mtdoops when writing an oops while in panic.
4086*4882a593Smuzhiyun  */
static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, const uint8_t *buf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_oob_ops ops = { };
	int ret;

	/* No locking: we are in panic context and cannot sleep. */
	nand_select_target(chip, (int)(to >> chip->chip_shift));

	/* Wait for the device to get ready */
	panic_nand_wait(chip, 400);

	ops.mode = MTD_OPS_PLACE_OOB;
	ops.len = len;
	ops.datbuf = (uint8_t *)buf;

	ret = nand_do_write_ops(chip, to, &ops);
	*retlen = ops.retlen;

	return ret;
}
4110*4882a593Smuzhiyun 
4111*4882a593Smuzhiyun /**
4112*4882a593Smuzhiyun  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4113*4882a593Smuzhiyun  * @mtd: MTD device structure
4114*4882a593Smuzhiyun  * @to: offset to write to
4115*4882a593Smuzhiyun  * @ops: oob operation description structure
4116*4882a593Smuzhiyun  */
static int nand_write_oob(struct mtd_info *mtd, loff_t to,
			  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	ops->retlen = 0;

	nand_get_device(chip);

	/* Anything but the three supported OOB modes is silently ignored. */
	if (ops->mode == MTD_OPS_PLACE_OOB || ops->mode == MTD_OPS_AUTO_OOB ||
	    ops->mode == MTD_OPS_RAW) {
		if (ops->datbuf)
			ret = nand_do_write_ops(chip, to, ops);
		else
			ret = nand_do_write_oob(chip, to, ops);
	}

	nand_release_device(chip);
	return ret;
}
4146*4882a593Smuzhiyun 
4147*4882a593Smuzhiyun /**
4148*4882a593Smuzhiyun  * nand_erase - [MTD Interface] erase block(s)
4149*4882a593Smuzhiyun  * @mtd: MTD device structure
4150*4882a593Smuzhiyun  * @instr: erase instruction
4151*4882a593Smuzhiyun  *
4152*4882a593Smuzhiyun  * Erase one ore more blocks.
4153*4882a593Smuzhiyun  */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Regular erase path: never allow erasing the BBT area. */
	return nand_erase_nand(chip, instr, 0);
}
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun /**
4160*4882a593Smuzhiyun  * nand_erase_nand - [INTERN] erase block(s)
4161*4882a593Smuzhiyun  * @chip: NAND chip object
4162*4882a593Smuzhiyun  * @instr: erase instruction
4163*4882a593Smuzhiyun  * @allowbbt: allow erasing the bbt area
4164*4882a593Smuzhiyun  *
4165*4882a593Smuzhiyun  * Erase one ore more blocks.
4166*4882a593Smuzhiyun  */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	/* Reject unaligned or out-of-range erase requests */
	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* Erase op takes the block number relative to this die */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			/* Report the first failing block to the caller */
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun /**
4256*4882a593Smuzhiyun  * nand_sync - [MTD Interface] sync
4257*4882a593Smuzhiyun  * @mtd: MTD device structure
4258*4882a593Smuzhiyun  *
4259*4882a593Smuzhiyun  * Sync is actually a wait for chip ready function.
4260*4882a593Smuzhiyun  */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/*
	 * Taking the device lock only succeeds once pending operations have
	 * drained, so acquiring and releasing it is the whole sync.
	 */
	nand_get_device(chip);
	nand_release_device(chip);
}
4272*4882a593Smuzhiyun 
4273*4882a593Smuzhiyun /**
4274*4882a593Smuzhiyun  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4275*4882a593Smuzhiyun  * @mtd: MTD device structure
4276*4882a593Smuzhiyun  * @offs: offset relative to mtd start
4277*4882a593Smuzhiyun  */
static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret;

	/* Serialize access, then select the die that contains @offs. */
	nand_get_device(chip);
	nand_select_target(chip, (int)(offs >> chip->chip_shift));

	ret = nand_block_checkbad(chip, offs, 0);

	nand_deselect_target(chip);
	nand_release_device(chip);

	return ret;
}
4296*4882a593Smuzhiyun 
4297*4882a593Smuzhiyun /**
4298*4882a593Smuzhiyun  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4299*4882a593Smuzhiyun  * @mtd: MTD device structure
4300*4882a593Smuzhiyun  * @ofs: offset relative to mtd start
4301*4882a593Smuzhiyun  */
static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	int ret = nand_block_isbad(mtd, ofs);

	/* If it was bad already, return success and do nothing */
	if (ret > 0)
		return 0;
	/* Propagate a failing bad-block check */
	if (ret < 0)
		return ret;

	return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
}
4316*4882a593Smuzhiyun 
4317*4882a593Smuzhiyun /**
4318*4882a593Smuzhiyun  * nand_suspend - [MTD Interface] Suspend the NAND flash
4319*4882a593Smuzhiyun  * @mtd: MTD device structure
4320*4882a593Smuzhiyun  *
4321*4882a593Smuzhiyun  * Returns 0 for success or negative error code otherwise.
4322*4882a593Smuzhiyun  */
nand_suspend(struct mtd_info * mtd)4323*4882a593Smuzhiyun static int nand_suspend(struct mtd_info *mtd)
4324*4882a593Smuzhiyun {
4325*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
4326*4882a593Smuzhiyun 	int ret = 0;
4327*4882a593Smuzhiyun 
4328*4882a593Smuzhiyun 	mutex_lock(&chip->lock);
4329*4882a593Smuzhiyun 	if (chip->ops.suspend)
4330*4882a593Smuzhiyun 		ret = chip->ops.suspend(chip);
4331*4882a593Smuzhiyun 	if (!ret)
4332*4882a593Smuzhiyun 		chip->suspended = 1;
4333*4882a593Smuzhiyun 	mutex_unlock(&chip->lock);
4334*4882a593Smuzhiyun 
4335*4882a593Smuzhiyun 	return ret;
4336*4882a593Smuzhiyun }
4337*4882a593Smuzhiyun 
4338*4882a593Smuzhiyun /**
4339*4882a593Smuzhiyun  * nand_resume - [MTD Interface] Resume the NAND flash
4340*4882a593Smuzhiyun  * @mtd: MTD device structure
4341*4882a593Smuzhiyun  */
nand_resume(struct mtd_info * mtd)4342*4882a593Smuzhiyun static void nand_resume(struct mtd_info *mtd)
4343*4882a593Smuzhiyun {
4344*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
4345*4882a593Smuzhiyun 
4346*4882a593Smuzhiyun 	mutex_lock(&chip->lock);
4347*4882a593Smuzhiyun 	if (chip->suspended) {
4348*4882a593Smuzhiyun 		if (chip->ops.resume)
4349*4882a593Smuzhiyun 			chip->ops.resume(chip);
4350*4882a593Smuzhiyun 		chip->suspended = 0;
4351*4882a593Smuzhiyun 	} else {
4352*4882a593Smuzhiyun 		pr_err("%s called for a chip which is not in suspended state\n",
4353*4882a593Smuzhiyun 			__func__);
4354*4882a593Smuzhiyun 	}
4355*4882a593Smuzhiyun 	mutex_unlock(&chip->lock);
4356*4882a593Smuzhiyun 
4357*4882a593Smuzhiyun 	wake_up_all(&chip->resume_wq);
4358*4882a593Smuzhiyun }
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun /**
4361*4882a593Smuzhiyun  * nand_shutdown - [MTD Interface] Finish the current NAND operation and
4362*4882a593Smuzhiyun  *                 prevent further operations
4363*4882a593Smuzhiyun  * @mtd: MTD device structure
4364*4882a593Smuzhiyun  */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending the chip also prevents further operations from starting */
	nand_suspend(mtd);
}
4369*4882a593Smuzhiyun 
4370*4882a593Smuzhiyun /**
4371*4882a593Smuzhiyun  * nand_lock - [MTD Interface] Lock the NAND flash
4372*4882a593Smuzhiyun  * @mtd: MTD device structure
4373*4882a593Smuzhiyun  * @ofs: offset byte address
4374*4882a593Smuzhiyun  * @len: number of bytes to lock (must be a multiple of block/page size)
4375*4882a593Smuzhiyun  */
static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Block locking is optional controller functionality. */
	return chip->ops.lock_area ? chip->ops.lock_area(chip, ofs, len)
				   : -ENOTSUPP;
}
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun /**
4387*4882a593Smuzhiyun  * nand_unlock - [MTD Interface] Unlock the NAND flash
4388*4882a593Smuzhiyun  * @mtd: MTD device structure
4389*4882a593Smuzhiyun  * @ofs: offset byte address
4390*4882a593Smuzhiyun  * @len: number of bytes to unlock (must be a multiple of block/page size)
4391*4882a593Smuzhiyun  */
static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Block unlocking is optional controller functionality. */
	return chip->ops.unlock_area ? chip->ops.unlock_area(chip, ofs, len)
				     : -ENOTSUPP;
}
4401*4882a593Smuzhiyun 
4402*4882a593Smuzhiyun /* Set default functions */
nand_set_defaults(struct nand_chip * chip)4403*4882a593Smuzhiyun static void nand_set_defaults(struct nand_chip *chip)
4404*4882a593Smuzhiyun {
4405*4882a593Smuzhiyun 	/* If no controller is provided, use the dummy, legacy one. */
4406*4882a593Smuzhiyun 	if (!chip->controller) {
4407*4882a593Smuzhiyun 		chip->controller = &chip->legacy.dummy_controller;
4408*4882a593Smuzhiyun 		nand_controller_init(chip->controller);
4409*4882a593Smuzhiyun 	}
4410*4882a593Smuzhiyun 
4411*4882a593Smuzhiyun 	nand_legacy_set_defaults(chip);
4412*4882a593Smuzhiyun 
4413*4882a593Smuzhiyun 	if (!chip->buf_align)
4414*4882a593Smuzhiyun 		chip->buf_align = 1;
4415*4882a593Smuzhiyun }
4416*4882a593Smuzhiyun 
/*
 * sanitize_string - Sanitize ONFI strings so we can safely print them
 * @s: string buffer, modified in place
 * @len: size of @s in bytes
 *
 * Forces NUL-termination, replaces non-printable characters with '?' and
 * strips trailing whitespace.
 */
void sanitize_string(uint8_t *s, size_t len)
{
	ssize_t i;

	/* Guard the s[len - 1] write below against a zero-sized buffer. */
	if (!len)
		return;

	/* Null terminate */
	s[len - 1] = 0;

	/* Remove non printable chars */
	for (i = 0; i < len - 1; i++) {
		if (s[i] < ' ' || s[i] > 127)
			s[i] = '?';
	}

	/* Remove trailing spaces */
	strim(s);
}
4434*4882a593Smuzhiyun 
/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int base, pos;

	for (base = 0; base < period; base++) {
		for (pos = base + period; pos < arrlen; pos += period) {
			if (id_data[pos] != id_data[base])
				return 0;
		}
	}

	return 1;
}
4455*4882a593Smuzhiyun 
/*
 * nand_id_len - Get the length of an ID string returned by CMD_READID
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 *
 * Returns the length of the ID string, according to known wraparound/trailing
 * zero patterns. If no pattern exists, returns the length of the array.
 */
static int nand_id_len(u8 *id_data, int arrlen)
{
	int tail, p;

	/* Locate the last non-zero byte. */
	tail = arrlen - 1;
	while (tail >= 0 && !id_data[tail])
		tail--;

	/* The whole array is zero. */
	if (tail < 0)
		return 0;

	/* The shortest wraparound period, if any, is the ID length. */
	for (p = 1; p < arrlen; p++) {
		if (nand_id_has_period(id_data, arrlen, p))
			return p;
	}

	/* No period: strip trailing zeros when present. */
	if (tail + 1 < arrlen)
		return tail + 1;

	/* No pattern detected */
	return arrlen;
}
4493*4882a593Smuzhiyun 
/* Extract the bits per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
	int celltype = (cellinfo & NAND_CI_CELLTYPE_MSK) >>
		       NAND_CI_CELLTYPE_SHIFT;

	/* The field encodes bits-per-cell minus one. */
	return celltype + 1;
}
4503*4882a593Smuzhiyun 
4504*4882a593Smuzhiyun /*
4505*4882a593Smuzhiyun  * Many new NAND share similar device ID codes, which represent the size of the
4506*4882a593Smuzhiyun  * chip. The rest of the parameters must be decoded according to generic or
4507*4882a593Smuzhiyun  * manufacturer-specific "extended ID" decoding patterns.
4508*4882a593Smuzhiyun  */
nand_decode_ext_id(struct nand_chip * chip)4509*4882a593Smuzhiyun void nand_decode_ext_id(struct nand_chip *chip)
4510*4882a593Smuzhiyun {
4511*4882a593Smuzhiyun 	struct nand_memory_organization *memorg;
4512*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4513*4882a593Smuzhiyun 	int extid;
4514*4882a593Smuzhiyun 	u8 *id_data = chip->id.data;
4515*4882a593Smuzhiyun 
4516*4882a593Smuzhiyun 	memorg = nanddev_get_memorg(&chip->base);
4517*4882a593Smuzhiyun 
4518*4882a593Smuzhiyun 	/* The 3rd id byte holds MLC / multichip data */
4519*4882a593Smuzhiyun 	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4520*4882a593Smuzhiyun 	/* The 4th id byte is the important one */
4521*4882a593Smuzhiyun 	extid = id_data[3];
4522*4882a593Smuzhiyun 
4523*4882a593Smuzhiyun 	/* Calc pagesize */
4524*4882a593Smuzhiyun 	memorg->pagesize = 1024 << (extid & 0x03);
4525*4882a593Smuzhiyun 	mtd->writesize = memorg->pagesize;
4526*4882a593Smuzhiyun 	extid >>= 2;
4527*4882a593Smuzhiyun 	/* Calc oobsize */
4528*4882a593Smuzhiyun 	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
4529*4882a593Smuzhiyun 	mtd->oobsize = memorg->oobsize;
4530*4882a593Smuzhiyun 	extid >>= 2;
4531*4882a593Smuzhiyun 	/* Calc blocksize. Blocksize is multiples of 64KiB */
4532*4882a593Smuzhiyun 	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
4533*4882a593Smuzhiyun 				       memorg->pagesize;
4534*4882a593Smuzhiyun 	mtd->erasesize = (64 * 1024) << (extid & 0x03);
4535*4882a593Smuzhiyun 	extid >>= 2;
4536*4882a593Smuzhiyun 	/* Get buswidth information */
4537*4882a593Smuzhiyun 	if (extid & 0x1)
4538*4882a593Smuzhiyun 		chip->options |= NAND_BUSWIDTH_16;
4539*4882a593Smuzhiyun }
4540*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4541*4882a593Smuzhiyun 
4542*4882a593Smuzhiyun /*
4543*4882a593Smuzhiyun  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4544*4882a593Smuzhiyun  * decodes a matching ID table entry and assigns the MTD size parameters for
4545*4882a593Smuzhiyun  * the chip.
4546*4882a593Smuzhiyun  */
nand_decode_id(struct nand_chip * chip,struct nand_flash_dev * type)4547*4882a593Smuzhiyun static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4548*4882a593Smuzhiyun {
4549*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4550*4882a593Smuzhiyun 	struct nand_memory_organization *memorg;
4551*4882a593Smuzhiyun 
4552*4882a593Smuzhiyun 	memorg = nanddev_get_memorg(&chip->base);
4553*4882a593Smuzhiyun 
4554*4882a593Smuzhiyun 	memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4555*4882a593Smuzhiyun 	mtd->erasesize = type->erasesize;
4556*4882a593Smuzhiyun 	memorg->pagesize = type->pagesize;
4557*4882a593Smuzhiyun 	mtd->writesize = memorg->pagesize;
4558*4882a593Smuzhiyun 	memorg->oobsize = memorg->pagesize / 32;
4559*4882a593Smuzhiyun 	mtd->oobsize = memorg->oobsize;
4560*4882a593Smuzhiyun 
4561*4882a593Smuzhiyun 	/* All legacy ID NAND are small-page, SLC */
4562*4882a593Smuzhiyun 	memorg->bits_per_cell = 1;
4563*4882a593Smuzhiyun }
4564*4882a593Smuzhiyun 
4565*4882a593Smuzhiyun /*
4566*4882a593Smuzhiyun  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4567*4882a593Smuzhiyun  * heuristic patterns using various detected parameters (e.g., manufacturer,
4568*4882a593Smuzhiyun  * page size, cell-type information).
4569*4882a593Smuzhiyun  */
nand_decode_bbm_options(struct nand_chip * chip)4570*4882a593Smuzhiyun static void nand_decode_bbm_options(struct nand_chip *chip)
4571*4882a593Smuzhiyun {
4572*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4573*4882a593Smuzhiyun 
4574*4882a593Smuzhiyun 	/* Set the bad block position */
4575*4882a593Smuzhiyun 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4576*4882a593Smuzhiyun 		chip->badblockpos = NAND_BBM_POS_LARGE;
4577*4882a593Smuzhiyun 	else
4578*4882a593Smuzhiyun 		chip->badblockpos = NAND_BBM_POS_SMALL;
4579*4882a593Smuzhiyun }
4580*4882a593Smuzhiyun 
is_full_id_nand(struct nand_flash_dev * type)4581*4882a593Smuzhiyun static inline bool is_full_id_nand(struct nand_flash_dev *type)
4582*4882a593Smuzhiyun {
4583*4882a593Smuzhiyun 	return type->id_len;
4584*4882a593Smuzhiyun }
4585*4882a593Smuzhiyun 
find_full_id_nand(struct nand_chip * chip,struct nand_flash_dev * type)4586*4882a593Smuzhiyun static bool find_full_id_nand(struct nand_chip *chip,
4587*4882a593Smuzhiyun 			      struct nand_flash_dev *type)
4588*4882a593Smuzhiyun {
4589*4882a593Smuzhiyun 	struct nand_device *base = &chip->base;
4590*4882a593Smuzhiyun 	struct nand_ecc_props requirements;
4591*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4592*4882a593Smuzhiyun 	struct nand_memory_organization *memorg;
4593*4882a593Smuzhiyun 	u8 *id_data = chip->id.data;
4594*4882a593Smuzhiyun 
4595*4882a593Smuzhiyun 	memorg = nanddev_get_memorg(&chip->base);
4596*4882a593Smuzhiyun 
4597*4882a593Smuzhiyun 	if (!strncmp(type->id, id_data, type->id_len)) {
4598*4882a593Smuzhiyun 		memorg->pagesize = type->pagesize;
4599*4882a593Smuzhiyun 		mtd->writesize = memorg->pagesize;
4600*4882a593Smuzhiyun 		memorg->pages_per_eraseblock = type->erasesize /
4601*4882a593Smuzhiyun 					       type->pagesize;
4602*4882a593Smuzhiyun 		mtd->erasesize = type->erasesize;
4603*4882a593Smuzhiyun 		memorg->oobsize = type->oobsize;
4604*4882a593Smuzhiyun 		mtd->oobsize = memorg->oobsize;
4605*4882a593Smuzhiyun 
4606*4882a593Smuzhiyun 		memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4607*4882a593Smuzhiyun 		memorg->eraseblocks_per_lun =
4608*4882a593Smuzhiyun 			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4609*4882a593Smuzhiyun 					   memorg->pagesize *
4610*4882a593Smuzhiyun 					   memorg->pages_per_eraseblock);
4611*4882a593Smuzhiyun 		chip->options |= type->options;
4612*4882a593Smuzhiyun 		requirements.strength = NAND_ECC_STRENGTH(type);
4613*4882a593Smuzhiyun 		requirements.step_size = NAND_ECC_STEP(type);
4614*4882a593Smuzhiyun 		nanddev_set_ecc_requirements(base, &requirements);
4615*4882a593Smuzhiyun 
4616*4882a593Smuzhiyun 		chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4617*4882a593Smuzhiyun 		if (!chip->parameters.model)
4618*4882a593Smuzhiyun 			return false;
4619*4882a593Smuzhiyun 
4620*4882a593Smuzhiyun 		return true;
4621*4882a593Smuzhiyun 	}
4622*4882a593Smuzhiyun 	return false;
4623*4882a593Smuzhiyun }
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun /*
4626*4882a593Smuzhiyun  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4627*4882a593Smuzhiyun  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4628*4882a593Smuzhiyun  * table.
4629*4882a593Smuzhiyun  */
nand_manufacturer_detect(struct nand_chip * chip)4630*4882a593Smuzhiyun static void nand_manufacturer_detect(struct nand_chip *chip)
4631*4882a593Smuzhiyun {
4632*4882a593Smuzhiyun 	/*
4633*4882a593Smuzhiyun 	 * Try manufacturer detection if available and use
4634*4882a593Smuzhiyun 	 * nand_decode_ext_id() otherwise.
4635*4882a593Smuzhiyun 	 */
4636*4882a593Smuzhiyun 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4637*4882a593Smuzhiyun 	    chip->manufacturer.desc->ops->detect) {
4638*4882a593Smuzhiyun 		struct nand_memory_organization *memorg;
4639*4882a593Smuzhiyun 
4640*4882a593Smuzhiyun 		memorg = nanddev_get_memorg(&chip->base);
4641*4882a593Smuzhiyun 
4642*4882a593Smuzhiyun 		/* The 3rd id byte holds MLC / multichip data */
4643*4882a593Smuzhiyun 		memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4644*4882a593Smuzhiyun 		chip->manufacturer.desc->ops->detect(chip);
4645*4882a593Smuzhiyun 	} else {
4646*4882a593Smuzhiyun 		nand_decode_ext_id(chip);
4647*4882a593Smuzhiyun 	}
4648*4882a593Smuzhiyun }
4649*4882a593Smuzhiyun 
4650*4882a593Smuzhiyun /*
4651*4882a593Smuzhiyun  * Manufacturer initialization. This function is called for all NANDs including
4652*4882a593Smuzhiyun  * ONFI and JEDEC compliant ones.
4653*4882a593Smuzhiyun  * Manufacturer drivers should put all their specific initialization code in
4654*4882a593Smuzhiyun  * their ->init() hook.
4655*4882a593Smuzhiyun  */
nand_manufacturer_init(struct nand_chip * chip)4656*4882a593Smuzhiyun static int nand_manufacturer_init(struct nand_chip *chip)
4657*4882a593Smuzhiyun {
4658*4882a593Smuzhiyun 	if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4659*4882a593Smuzhiyun 	    !chip->manufacturer.desc->ops->init)
4660*4882a593Smuzhiyun 		return 0;
4661*4882a593Smuzhiyun 
4662*4882a593Smuzhiyun 	return chip->manufacturer.desc->ops->init(chip);
4663*4882a593Smuzhiyun }
4664*4882a593Smuzhiyun 
4665*4882a593Smuzhiyun /*
4666*4882a593Smuzhiyun  * Manufacturer cleanup. This function is called for all NANDs including
4667*4882a593Smuzhiyun  * ONFI and JEDEC compliant ones.
4668*4882a593Smuzhiyun  * Manufacturer drivers should put all their specific cleanup code in their
4669*4882a593Smuzhiyun  * ->cleanup() hook.
4670*4882a593Smuzhiyun  */
nand_manufacturer_cleanup(struct nand_chip * chip)4671*4882a593Smuzhiyun static void nand_manufacturer_cleanup(struct nand_chip *chip)
4672*4882a593Smuzhiyun {
4673*4882a593Smuzhiyun 	/* Release manufacturer private data */
4674*4882a593Smuzhiyun 	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4675*4882a593Smuzhiyun 	    chip->manufacturer.desc->ops->cleanup)
4676*4882a593Smuzhiyun 		chip->manufacturer.desc->ops->cleanup(chip);
4677*4882a593Smuzhiyun }
4678*4882a593Smuzhiyun 
/* Printable manufacturer name, with a fallback for unknown vendors. */
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
	if (!manufacturer_desc)
		return "Unknown";

	return manufacturer_desc->name;
}
4684*4882a593Smuzhiyun 
/*
 * nand_detect - Get the flash and manufacturer id and lookup if the type is
 * supported.
 *
 * Identification order: full-id table entries, plain dev_id table entries,
 * then ONFI/JEDEC parameter pages, then manufacturer-specific decoding.
 * On success the memory organization, MTD geometry and address shifts are
 * all populated; returns a negative errno otherwise.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/*
	 * Walk the ID table: full-id entries match the whole ID string and
	 * fully configure the chip (jumping to ident_done); a bare dev_id
	 * match stops the walk for legacy decoding below.
	 */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/* A zero table pagesize means geometry comes from the extended ID. */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32 bits; split the 64-bit size in two halves. */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	/* More than 64K pages per target needs a third row address cycle. */
	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4874*4882a593Smuzhiyun 
4875*4882a593Smuzhiyun static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node * np)4876*4882a593Smuzhiyun of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
4877*4882a593Smuzhiyun {
4878*4882a593Smuzhiyun 	enum nand_ecc_legacy_mode {
4879*4882a593Smuzhiyun 		NAND_ECC_INVALID,
4880*4882a593Smuzhiyun 		NAND_ECC_NONE,
4881*4882a593Smuzhiyun 		NAND_ECC_SOFT,
4882*4882a593Smuzhiyun 		NAND_ECC_SOFT_BCH,
4883*4882a593Smuzhiyun 		NAND_ECC_HW,
4884*4882a593Smuzhiyun 		NAND_ECC_HW_SYNDROME,
4885*4882a593Smuzhiyun 		NAND_ECC_ON_DIE,
4886*4882a593Smuzhiyun 	};
4887*4882a593Smuzhiyun 	const char * const nand_ecc_legacy_modes[] = {
4888*4882a593Smuzhiyun 		[NAND_ECC_NONE]		= "none",
4889*4882a593Smuzhiyun 		[NAND_ECC_SOFT]		= "soft",
4890*4882a593Smuzhiyun 		[NAND_ECC_SOFT_BCH]	= "soft_bch",
4891*4882a593Smuzhiyun 		[NAND_ECC_HW]		= "hw",
4892*4882a593Smuzhiyun 		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
4893*4882a593Smuzhiyun 		[NAND_ECC_ON_DIE]	= "on-die",
4894*4882a593Smuzhiyun 	};
4895*4882a593Smuzhiyun 	enum nand_ecc_legacy_mode eng_type;
4896*4882a593Smuzhiyun 	const char *pm;
4897*4882a593Smuzhiyun 	int err;
4898*4882a593Smuzhiyun 
4899*4882a593Smuzhiyun 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4900*4882a593Smuzhiyun 	if (err)
4901*4882a593Smuzhiyun 		return NAND_ECC_ENGINE_TYPE_INVALID;
4902*4882a593Smuzhiyun 
4903*4882a593Smuzhiyun 	for (eng_type = NAND_ECC_NONE;
4904*4882a593Smuzhiyun 	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
4905*4882a593Smuzhiyun 		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
4906*4882a593Smuzhiyun 			switch (eng_type) {
4907*4882a593Smuzhiyun 			case NAND_ECC_NONE:
4908*4882a593Smuzhiyun 				return NAND_ECC_ENGINE_TYPE_NONE;
4909*4882a593Smuzhiyun 			case NAND_ECC_SOFT:
4910*4882a593Smuzhiyun 			case NAND_ECC_SOFT_BCH:
4911*4882a593Smuzhiyun 				return NAND_ECC_ENGINE_TYPE_SOFT;
4912*4882a593Smuzhiyun 			case NAND_ECC_HW:
4913*4882a593Smuzhiyun 			case NAND_ECC_HW_SYNDROME:
4914*4882a593Smuzhiyun 				return NAND_ECC_ENGINE_TYPE_ON_HOST;
4915*4882a593Smuzhiyun 			case NAND_ECC_ON_DIE:
4916*4882a593Smuzhiyun 				return NAND_ECC_ENGINE_TYPE_ON_DIE;
4917*4882a593Smuzhiyun 			default:
4918*4882a593Smuzhiyun 				break;
4919*4882a593Smuzhiyun 			}
4920*4882a593Smuzhiyun 		}
4921*4882a593Smuzhiyun 	}
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 	return NAND_ECC_ENGINE_TYPE_INVALID;
4924*4882a593Smuzhiyun }
4925*4882a593Smuzhiyun 
4926*4882a593Smuzhiyun static enum nand_ecc_placement
of_get_rawnand_ecc_placement_legacy(struct device_node * np)4927*4882a593Smuzhiyun of_get_rawnand_ecc_placement_legacy(struct device_node *np)
4928*4882a593Smuzhiyun {
4929*4882a593Smuzhiyun 	const char *pm;
4930*4882a593Smuzhiyun 	int err;
4931*4882a593Smuzhiyun 
4932*4882a593Smuzhiyun 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4933*4882a593Smuzhiyun 	if (!err) {
4934*4882a593Smuzhiyun 		if (!strcasecmp(pm, "hw_syndrome"))
4935*4882a593Smuzhiyun 			return NAND_ECC_PLACEMENT_INTERLEAVED;
4936*4882a593Smuzhiyun 	}
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 	return NAND_ECC_PLACEMENT_UNKNOWN;
4939*4882a593Smuzhiyun }
4940*4882a593Smuzhiyun 
of_get_rawnand_ecc_algo_legacy(struct device_node * np)4941*4882a593Smuzhiyun static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
4942*4882a593Smuzhiyun {
4943*4882a593Smuzhiyun 	const char *pm;
4944*4882a593Smuzhiyun 	int err;
4945*4882a593Smuzhiyun 
4946*4882a593Smuzhiyun 	err = of_property_read_string(np, "nand-ecc-mode", &pm);
4947*4882a593Smuzhiyun 	if (!err) {
4948*4882a593Smuzhiyun 		if (!strcasecmp(pm, "soft"))
4949*4882a593Smuzhiyun 			return NAND_ECC_ALGO_HAMMING;
4950*4882a593Smuzhiyun 		else if (!strcasecmp(pm, "soft_bch"))
4951*4882a593Smuzhiyun 			return NAND_ECC_ALGO_BCH;
4952*4882a593Smuzhiyun 	}
4953*4882a593Smuzhiyun 
4954*4882a593Smuzhiyun 	return NAND_ECC_ALGO_UNKNOWN;
4955*4882a593Smuzhiyun }
4956*4882a593Smuzhiyun 
of_get_nand_ecc_legacy_user_config(struct nand_chip * chip)4957*4882a593Smuzhiyun static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
4958*4882a593Smuzhiyun {
4959*4882a593Smuzhiyun 	struct device_node *dn = nand_get_flash_node(chip);
4960*4882a593Smuzhiyun 	struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
4961*4882a593Smuzhiyun 
4962*4882a593Smuzhiyun 	if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
4963*4882a593Smuzhiyun 		user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
4964*4882a593Smuzhiyun 
4965*4882a593Smuzhiyun 	if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
4966*4882a593Smuzhiyun 		user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
4967*4882a593Smuzhiyun 
4968*4882a593Smuzhiyun 	if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
4969*4882a593Smuzhiyun 		user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
4970*4882a593Smuzhiyun }
4971*4882a593Smuzhiyun 
of_get_nand_bus_width(struct device_node * np)4972*4882a593Smuzhiyun static int of_get_nand_bus_width(struct device_node *np)
4973*4882a593Smuzhiyun {
4974*4882a593Smuzhiyun 	u32 val;
4975*4882a593Smuzhiyun 
4976*4882a593Smuzhiyun 	if (of_property_read_u32(np, "nand-bus-width", &val))
4977*4882a593Smuzhiyun 		return 8;
4978*4882a593Smuzhiyun 
4979*4882a593Smuzhiyun 	switch (val) {
4980*4882a593Smuzhiyun 	case 8:
4981*4882a593Smuzhiyun 	case 16:
4982*4882a593Smuzhiyun 		return val;
4983*4882a593Smuzhiyun 	default:
4984*4882a593Smuzhiyun 		return -EIO;
4985*4882a593Smuzhiyun 	}
4986*4882a593Smuzhiyun }
4987*4882a593Smuzhiyun 
/* True when the DT node requests storing the bad block table on flash. */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4992*4882a593Smuzhiyun 
/*
 * rawnand_dt_init() - Parse generic raw NAND DT properties into the chip
 * @chip: NAND chip object
 *
 * Translates the common device-tree properties (bus width, boot-medium
 * flag, on-flash BBT, user ECC configuration) into &struct nand_chip
 * fields and resolves the ECC engine type. Returns 0, also when the chip
 * has no DT node attached.
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);

	/* Nothing to parse without a flash DT node. */
	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* Fill nand->ecc.user_conf (and legacy chip->ecc fields) from DT. */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	/* Mirror the remaining user-requested ECC parameters. */
	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
5036*4882a593Smuzhiyun 
5037*4882a593Smuzhiyun /**
5038*4882a593Smuzhiyun  * nand_scan_ident - Scan for the NAND device
5039*4882a593Smuzhiyun  * @chip: NAND chip object
5040*4882a593Smuzhiyun  * @maxchips: number of chips to scan for
5041*4882a593Smuzhiyun  * @table: alternative NAND ID table
5042*4882a593Smuzhiyun  *
5043*4882a593Smuzhiyun  * This is the first phase of the normal nand_scan() function. It reads the
5044*4882a593Smuzhiyun  * flash ID and sets up MTD fields accordingly.
5045*4882a593Smuzhiyun  *
5046*4882a593Smuzhiyun  * This helper used to be called directly from controller drivers that needed
5047*4882a593Smuzhiyun  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5050*4882a593Smuzhiyun  */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	/* Parse generic DT properties (bus width, BBT, user ECC config). */
	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	/* Start from the caller-provided upper bound; trimmed below. */
	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	/* Remember die 0's IDs so the other CS lines can be compared. */
	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5131*4882a593Smuzhiyun 
nand_scan_ident_cleanup(struct nand_chip * chip)5132*4882a593Smuzhiyun static void nand_scan_ident_cleanup(struct nand_chip *chip)
5133*4882a593Smuzhiyun {
5134*4882a593Smuzhiyun 	kfree(chip->parameters.model);
5135*4882a593Smuzhiyun 	kfree(chip->parameters.onfi);
5136*4882a593Smuzhiyun }
5137*4882a593Smuzhiyun 
/*
 * nand_set_ecc_on_host_ops() - Fill in the default on-host ECC accessors
 * @chip: NAND chip object
 *
 * Depending on the ECC bytes placement (grouped in OOB vs. interleaved
 * syndrome layout), populate every chip->ecc hook the controller driver
 * left NULL with the matching generic implementation. Returns 0 on
 * success, -EINVAL when the placement is invalid or mandatory controller
 * callbacks are missing.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Generic subpage writes need both ->hwctl and ->calculate. */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The generic hwecc page helpers rely on ->calculate,
		 * ->correct and ->hwctl; without them the driver must
		 * supply its own page read/write implementations.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
5196*4882a593Smuzhiyun 
/*
 * nand_set_ecc_soft_ops() - Configure the software ECC implementation
 * @chip: NAND chip object
 *
 * Selects the generic software ECC helpers (Hamming or BCH) according to
 * chip->ecc.algo, fills in default step size/strength when the driver did
 * not provide them, and, for BCH, sets the OOB layout and allocates the
 * BCH control structure. Returns 0 on success, -EINVAL on misconfiguration.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	/* This helper must only run for the software ECC engine. */
	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		/* Hamming: 3 ECC bytes per 256-byte step, 1 bit correctable. */
		if (!ecc->size)
			ecc->size = 256;
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
		    nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5301*4882a593Smuzhiyun 
5302*4882a593Smuzhiyun /**
5303*4882a593Smuzhiyun  * nand_check_ecc_caps - check the sanity of preset ECC settings
5304*4882a593Smuzhiyun  * @chip: nand chip info structure
5305*4882a593Smuzhiyun  * @caps: ECC caps info structure
5306*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
5307*4882a593Smuzhiyun  *
5308*4882a593Smuzhiyun  * When ECC step size and strength are already set, check if they are supported
5309*4882a593Smuzhiyun  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5310*4882a593Smuzhiyun  * On success, the calculated ECC bytes is set.
5311*4882a593Smuzhiyun  */
5312*4882a593Smuzhiyun static int
nand_check_ecc_caps(struct nand_chip * chip,const struct nand_ecc_caps * caps,int oobavail)5313*4882a593Smuzhiyun nand_check_ecc_caps(struct nand_chip *chip,
5314*4882a593Smuzhiyun 		    const struct nand_ecc_caps *caps, int oobavail)
5315*4882a593Smuzhiyun {
5316*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
5317*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
5318*4882a593Smuzhiyun 	int preset_step = chip->ecc.size;
5319*4882a593Smuzhiyun 	int preset_strength = chip->ecc.strength;
5320*4882a593Smuzhiyun 	int ecc_bytes, nsteps = mtd->writesize / preset_step;
5321*4882a593Smuzhiyun 	int i, j;
5322*4882a593Smuzhiyun 
5323*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
5324*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
5325*4882a593Smuzhiyun 
5326*4882a593Smuzhiyun 		if (stepinfo->stepsize != preset_step)
5327*4882a593Smuzhiyun 			continue;
5328*4882a593Smuzhiyun 
5329*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
5330*4882a593Smuzhiyun 			if (stepinfo->strengths[j] != preset_strength)
5331*4882a593Smuzhiyun 				continue;
5332*4882a593Smuzhiyun 
5333*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
5334*4882a593Smuzhiyun 							 preset_strength);
5335*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
5336*4882a593Smuzhiyun 				return ecc_bytes;
5337*4882a593Smuzhiyun 
5338*4882a593Smuzhiyun 			if (ecc_bytes * nsteps > oobavail) {
5339*4882a593Smuzhiyun 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5340*4882a593Smuzhiyun 				       preset_step, preset_strength);
5341*4882a593Smuzhiyun 				return -ENOSPC;
5342*4882a593Smuzhiyun 			}
5343*4882a593Smuzhiyun 
5344*4882a593Smuzhiyun 			chip->ecc.bytes = ecc_bytes;
5345*4882a593Smuzhiyun 
5346*4882a593Smuzhiyun 			return 0;
5347*4882a593Smuzhiyun 		}
5348*4882a593Smuzhiyun 	}
5349*4882a593Smuzhiyun 
5350*4882a593Smuzhiyun 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5351*4882a593Smuzhiyun 	       preset_step, preset_strength);
5352*4882a593Smuzhiyun 
5353*4882a593Smuzhiyun 	return -ENOTSUPP;
5354*4882a593Smuzhiyun }
5355*4882a593Smuzhiyun 
5356*4882a593Smuzhiyun /**
5357*4882a593Smuzhiyun  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5358*4882a593Smuzhiyun  * @chip: nand chip info structure
5359*4882a593Smuzhiyun  * @caps: ECC engine caps info structure
5360*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
5361*4882a593Smuzhiyun  *
5362*4882a593Smuzhiyun  * If a chip's ECC requirement is provided, try to meet it with the least
5363*4882a593Smuzhiyun  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5364*4882a593Smuzhiyun  * On success, the chosen ECC settings are set.
5365*4882a593Smuzhiyun  */
5366*4882a593Smuzhiyun static int
nand_match_ecc_req(struct nand_chip * chip,const struct nand_ecc_caps * caps,int oobavail)5367*4882a593Smuzhiyun nand_match_ecc_req(struct nand_chip *chip,
5368*4882a593Smuzhiyun 		   const struct nand_ecc_caps *caps, int oobavail)
5369*4882a593Smuzhiyun {
5370*4882a593Smuzhiyun 	const struct nand_ecc_props *requirements =
5371*4882a593Smuzhiyun 		nanddev_get_ecc_requirements(&chip->base);
5372*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
5373*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
5374*4882a593Smuzhiyun 	int req_step = requirements->step_size;
5375*4882a593Smuzhiyun 	int req_strength = requirements->strength;
5376*4882a593Smuzhiyun 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
5377*4882a593Smuzhiyun 	int best_step, best_strength, best_ecc_bytes;
5378*4882a593Smuzhiyun 	int best_ecc_bytes_total = INT_MAX;
5379*4882a593Smuzhiyun 	int i, j;
5380*4882a593Smuzhiyun 
5381*4882a593Smuzhiyun 	/* No information provided by the NAND chip */
5382*4882a593Smuzhiyun 	if (!req_step || !req_strength)
5383*4882a593Smuzhiyun 		return -ENOTSUPP;
5384*4882a593Smuzhiyun 
5385*4882a593Smuzhiyun 	/* number of correctable bits the chip requires in a page */
5386*4882a593Smuzhiyun 	req_corr = mtd->writesize / req_step * req_strength;
5387*4882a593Smuzhiyun 
5388*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
5389*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
5390*4882a593Smuzhiyun 		step_size = stepinfo->stepsize;
5391*4882a593Smuzhiyun 
5392*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
5393*4882a593Smuzhiyun 			strength = stepinfo->strengths[j];
5394*4882a593Smuzhiyun 
5395*4882a593Smuzhiyun 			/*
5396*4882a593Smuzhiyun 			 * If both step size and strength are smaller than the
5397*4882a593Smuzhiyun 			 * chip's requirement, it is not easy to compare the
5398*4882a593Smuzhiyun 			 * resulted reliability.
5399*4882a593Smuzhiyun 			 */
5400*4882a593Smuzhiyun 			if (step_size < req_step && strength < req_strength)
5401*4882a593Smuzhiyun 				continue;
5402*4882a593Smuzhiyun 
5403*4882a593Smuzhiyun 			if (mtd->writesize % step_size)
5404*4882a593Smuzhiyun 				continue;
5405*4882a593Smuzhiyun 
5406*4882a593Smuzhiyun 			nsteps = mtd->writesize / step_size;
5407*4882a593Smuzhiyun 
5408*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5409*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
5410*4882a593Smuzhiyun 				continue;
5411*4882a593Smuzhiyun 			ecc_bytes_total = ecc_bytes * nsteps;
5412*4882a593Smuzhiyun 
5413*4882a593Smuzhiyun 			if (ecc_bytes_total > oobavail ||
5414*4882a593Smuzhiyun 			    strength * nsteps < req_corr)
5415*4882a593Smuzhiyun 				continue;
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun 			/*
5418*4882a593Smuzhiyun 			 * We assume the best is to meet the chip's requrement
5419*4882a593Smuzhiyun 			 * with the least number of ECC bytes.
5420*4882a593Smuzhiyun 			 */
5421*4882a593Smuzhiyun 			if (ecc_bytes_total < best_ecc_bytes_total) {
5422*4882a593Smuzhiyun 				best_ecc_bytes_total = ecc_bytes_total;
5423*4882a593Smuzhiyun 				best_step = step_size;
5424*4882a593Smuzhiyun 				best_strength = strength;
5425*4882a593Smuzhiyun 				best_ecc_bytes = ecc_bytes;
5426*4882a593Smuzhiyun 			}
5427*4882a593Smuzhiyun 		}
5428*4882a593Smuzhiyun 	}
5429*4882a593Smuzhiyun 
5430*4882a593Smuzhiyun 	if (best_ecc_bytes_total == INT_MAX)
5431*4882a593Smuzhiyun 		return -ENOTSUPP;
5432*4882a593Smuzhiyun 
5433*4882a593Smuzhiyun 	chip->ecc.size = best_step;
5434*4882a593Smuzhiyun 	chip->ecc.strength = best_strength;
5435*4882a593Smuzhiyun 	chip->ecc.bytes = best_ecc_bytes;
5436*4882a593Smuzhiyun 
5437*4882a593Smuzhiyun 	return 0;
5438*4882a593Smuzhiyun }
5439*4882a593Smuzhiyun 
5440*4882a593Smuzhiyun /**
5441*4882a593Smuzhiyun  * nand_maximize_ecc - choose the max ECC strength available
5442*4882a593Smuzhiyun  * @chip: nand chip info structure
5443*4882a593Smuzhiyun  * @caps: ECC engine caps info structure
5444*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
5445*4882a593Smuzhiyun  *
5446*4882a593Smuzhiyun  * Choose the max ECC strength that is supported on the controller, and can fit
5447*4882a593Smuzhiyun  * within the chip's OOB.  On success, the chosen ECC settings are set.
5448*4882a593Smuzhiyun  */
5449*4882a593Smuzhiyun static int
nand_maximize_ecc(struct nand_chip * chip,const struct nand_ecc_caps * caps,int oobavail)5450*4882a593Smuzhiyun nand_maximize_ecc(struct nand_chip *chip,
5451*4882a593Smuzhiyun 		  const struct nand_ecc_caps *caps, int oobavail)
5452*4882a593Smuzhiyun {
5453*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
5454*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
5455*4882a593Smuzhiyun 	int step_size, strength, nsteps, ecc_bytes, corr;
5456*4882a593Smuzhiyun 	int best_corr = 0;
5457*4882a593Smuzhiyun 	int best_step = 0;
5458*4882a593Smuzhiyun 	int best_strength, best_ecc_bytes;
5459*4882a593Smuzhiyun 	int i, j;
5460*4882a593Smuzhiyun 
5461*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
5462*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
5463*4882a593Smuzhiyun 		step_size = stepinfo->stepsize;
5464*4882a593Smuzhiyun 
5465*4882a593Smuzhiyun 		/* If chip->ecc.size is already set, respect it */
5466*4882a593Smuzhiyun 		if (chip->ecc.size && step_size != chip->ecc.size)
5467*4882a593Smuzhiyun 			continue;
5468*4882a593Smuzhiyun 
5469*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
5470*4882a593Smuzhiyun 			strength = stepinfo->strengths[j];
5471*4882a593Smuzhiyun 
5472*4882a593Smuzhiyun 			if (mtd->writesize % step_size)
5473*4882a593Smuzhiyun 				continue;
5474*4882a593Smuzhiyun 
5475*4882a593Smuzhiyun 			nsteps = mtd->writesize / step_size;
5476*4882a593Smuzhiyun 
5477*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
5478*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
5479*4882a593Smuzhiyun 				continue;
5480*4882a593Smuzhiyun 
5481*4882a593Smuzhiyun 			if (ecc_bytes * nsteps > oobavail)
5482*4882a593Smuzhiyun 				continue;
5483*4882a593Smuzhiyun 
5484*4882a593Smuzhiyun 			corr = strength * nsteps;
5485*4882a593Smuzhiyun 
5486*4882a593Smuzhiyun 			/*
5487*4882a593Smuzhiyun 			 * If the number of correctable bits is the same,
5488*4882a593Smuzhiyun 			 * bigger step_size has more reliability.
5489*4882a593Smuzhiyun 			 */
5490*4882a593Smuzhiyun 			if (corr > best_corr ||
5491*4882a593Smuzhiyun 			    (corr == best_corr && step_size > best_step)) {
5492*4882a593Smuzhiyun 				best_corr = corr;
5493*4882a593Smuzhiyun 				best_step = step_size;
5494*4882a593Smuzhiyun 				best_strength = strength;
5495*4882a593Smuzhiyun 				best_ecc_bytes = ecc_bytes;
5496*4882a593Smuzhiyun 			}
5497*4882a593Smuzhiyun 		}
5498*4882a593Smuzhiyun 	}
5499*4882a593Smuzhiyun 
5500*4882a593Smuzhiyun 	if (!best_corr)
5501*4882a593Smuzhiyun 		return -ENOTSUPP;
5502*4882a593Smuzhiyun 
5503*4882a593Smuzhiyun 	chip->ecc.size = best_step;
5504*4882a593Smuzhiyun 	chip->ecc.strength = best_strength;
5505*4882a593Smuzhiyun 	chip->ecc.bytes = best_ecc_bytes;
5506*4882a593Smuzhiyun 
5507*4882a593Smuzhiyun 	return 0;
5508*4882a593Smuzhiyun }
5509*4882a593Smuzhiyun 
5510*4882a593Smuzhiyun /**
5511*4882a593Smuzhiyun  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5512*4882a593Smuzhiyun  * @chip: nand chip info structure
5513*4882a593Smuzhiyun  * @caps: ECC engine caps info structure
5514*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
5515*4882a593Smuzhiyun  *
5516*4882a593Smuzhiyun  * Choose the ECC configuration according to following logic.
5517*4882a593Smuzhiyun  *
5518*4882a593Smuzhiyun  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5519*4882a593Smuzhiyun  *    then check if it is supported by this controller.
5520*4882a593Smuzhiyun  * 2. If the user provided the nand-ecc-maximize property, then select maximum
5521*4882a593Smuzhiyun  *    ECC strength.
5522*4882a593Smuzhiyun  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5523*4882a593Smuzhiyun  *    to the chip's requirement. If available OOB size can't fit the chip
5524*4882a593Smuzhiyun  *    requirement then fallback to the maximum ECC step size and ECC strength.
5525*4882a593Smuzhiyun  *
5526*4882a593Smuzhiyun  * On success, the chosen ECC settings are set.
5527*4882a593Smuzhiyun  */
nand_ecc_choose_conf(struct nand_chip * chip,const struct nand_ecc_caps * caps,int oobavail)5528*4882a593Smuzhiyun int nand_ecc_choose_conf(struct nand_chip *chip,
5529*4882a593Smuzhiyun 			 const struct nand_ecc_caps *caps, int oobavail)
5530*4882a593Smuzhiyun {
5531*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
5532*4882a593Smuzhiyun 	struct nand_device *nanddev = mtd_to_nanddev(mtd);
5533*4882a593Smuzhiyun 
5534*4882a593Smuzhiyun 	if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5535*4882a593Smuzhiyun 		return -EINVAL;
5536*4882a593Smuzhiyun 
5537*4882a593Smuzhiyun 	if (chip->ecc.size && chip->ecc.strength)
5538*4882a593Smuzhiyun 		return nand_check_ecc_caps(chip, caps, oobavail);
5539*4882a593Smuzhiyun 
5540*4882a593Smuzhiyun 	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5541*4882a593Smuzhiyun 		return nand_maximize_ecc(chip, caps, oobavail);
5542*4882a593Smuzhiyun 
5543*4882a593Smuzhiyun 	if (!nand_match_ecc_req(chip, caps, oobavail))
5544*4882a593Smuzhiyun 		return 0;
5545*4882a593Smuzhiyun 
5546*4882a593Smuzhiyun 	return nand_maximize_ecc(chip, caps, oobavail);
5547*4882a593Smuzhiyun }
5548*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5549*4882a593Smuzhiyun 
rawnand_erase(struct nand_device * nand,const struct nand_pos * pos)5550*4882a593Smuzhiyun static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5551*4882a593Smuzhiyun {
5552*4882a593Smuzhiyun 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5553*4882a593Smuzhiyun 					      base);
5554*4882a593Smuzhiyun 	unsigned int eb = nanddev_pos_to_row(nand, pos);
5555*4882a593Smuzhiyun 	int ret;
5556*4882a593Smuzhiyun 
5557*4882a593Smuzhiyun 	eb >>= nand->rowconv.eraseblock_addr_shift;
5558*4882a593Smuzhiyun 
5559*4882a593Smuzhiyun 	nand_select_target(chip, pos->target);
5560*4882a593Smuzhiyun 	ret = nand_erase_op(chip, eb);
5561*4882a593Smuzhiyun 	nand_deselect_target(chip);
5562*4882a593Smuzhiyun 
5563*4882a593Smuzhiyun 	return ret;
5564*4882a593Smuzhiyun }
5565*4882a593Smuzhiyun 
rawnand_markbad(struct nand_device * nand,const struct nand_pos * pos)5566*4882a593Smuzhiyun static int rawnand_markbad(struct nand_device *nand,
5567*4882a593Smuzhiyun 			   const struct nand_pos *pos)
5568*4882a593Smuzhiyun {
5569*4882a593Smuzhiyun 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5570*4882a593Smuzhiyun 					      base);
5571*4882a593Smuzhiyun 
5572*4882a593Smuzhiyun 	return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5573*4882a593Smuzhiyun }
5574*4882a593Smuzhiyun 
rawnand_isbad(struct nand_device * nand,const struct nand_pos * pos)5575*4882a593Smuzhiyun static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5576*4882a593Smuzhiyun {
5577*4882a593Smuzhiyun 	struct nand_chip *chip = container_of(nand, struct nand_chip,
5578*4882a593Smuzhiyun 					      base);
5579*4882a593Smuzhiyun 	int ret;
5580*4882a593Smuzhiyun 
5581*4882a593Smuzhiyun 	nand_select_target(chip, pos->target);
5582*4882a593Smuzhiyun 	ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5583*4882a593Smuzhiyun 	nand_deselect_target(chip);
5584*4882a593Smuzhiyun 
5585*4882a593Smuzhiyun 	return ret;
5586*4882a593Smuzhiyun }
5587*4882a593Smuzhiyun 
/* Raw NAND implementation of the generic NAND framework operations. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5593*4882a593Smuzhiyun 
5594*4882a593Smuzhiyun /**
5595*4882a593Smuzhiyun  * nand_scan_tail - Scan for the NAND device
5596*4882a593Smuzhiyun  * @chip: NAND chip object
5597*4882a593Smuzhiyun  *
5598*4882a593Smuzhiyun  * This is the second phase of the normal nand_scan() function. It fills out
5599*4882a593Smuzhiyun  * all the uninitialized function pointers with the defaults and scans for a
5600*4882a593Smuzhiyun  * bad block table if appropriate.
5601*4882a593Smuzhiyun  */
nand_scan_tail(struct nand_chip * chip)5602*4882a593Smuzhiyun static int nand_scan_tail(struct nand_chip *chip)
5603*4882a593Smuzhiyun {
5604*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
5605*4882a593Smuzhiyun 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5606*4882a593Smuzhiyun 	int ret, i;
5607*4882a593Smuzhiyun 
5608*4882a593Smuzhiyun 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
5609*4882a593Smuzhiyun 	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5610*4882a593Smuzhiyun 		   !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5611*4882a593Smuzhiyun 		return -EINVAL;
5612*4882a593Smuzhiyun 	}
5613*4882a593Smuzhiyun 
5614*4882a593Smuzhiyun 	chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5615*4882a593Smuzhiyun 	if (!chip->data_buf)
5616*4882a593Smuzhiyun 		return -ENOMEM;
5617*4882a593Smuzhiyun 
5618*4882a593Smuzhiyun 	/*
5619*4882a593Smuzhiyun 	 * FIXME: some NAND manufacturer drivers expect the first die to be
5620*4882a593Smuzhiyun 	 * selected when manufacturer->init() is called. They should be fixed
5621*4882a593Smuzhiyun 	 * to explictly select the relevant die when interacting with the NAND
5622*4882a593Smuzhiyun 	 * chip.
5623*4882a593Smuzhiyun 	 */
5624*4882a593Smuzhiyun 	nand_select_target(chip, 0);
5625*4882a593Smuzhiyun 	ret = nand_manufacturer_init(chip);
5626*4882a593Smuzhiyun 	nand_deselect_target(chip);
5627*4882a593Smuzhiyun 	if (ret)
5628*4882a593Smuzhiyun 		goto err_free_buf;
5629*4882a593Smuzhiyun 
5630*4882a593Smuzhiyun 	/* Set the internal oob buffer location, just after the page data */
5631*4882a593Smuzhiyun 	chip->oob_poi = chip->data_buf + mtd->writesize;
5632*4882a593Smuzhiyun 
5633*4882a593Smuzhiyun 	/*
5634*4882a593Smuzhiyun 	 * If no default placement scheme is given, select an appropriate one.
5635*4882a593Smuzhiyun 	 */
5636*4882a593Smuzhiyun 	if (!mtd->ooblayout &&
5637*4882a593Smuzhiyun 	    !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5638*4882a593Smuzhiyun 	      ecc->algo == NAND_ECC_ALGO_BCH)) {
5639*4882a593Smuzhiyun 		switch (mtd->oobsize) {
5640*4882a593Smuzhiyun 		case 8:
5641*4882a593Smuzhiyun 		case 16:
5642*4882a593Smuzhiyun 			mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
5643*4882a593Smuzhiyun 			break;
5644*4882a593Smuzhiyun 		case 64:
5645*4882a593Smuzhiyun 		case 128:
5646*4882a593Smuzhiyun 			mtd_set_ooblayout(mtd,
5647*4882a593Smuzhiyun 					  nand_get_large_page_hamming_ooblayout());
5648*4882a593Smuzhiyun 			break;
5649*4882a593Smuzhiyun 		default:
5650*4882a593Smuzhiyun 			/*
5651*4882a593Smuzhiyun 			 * Expose the whole OOB area to users if ECC_NONE
5652*4882a593Smuzhiyun 			 * is passed. We could do that for all kind of
5653*4882a593Smuzhiyun 			 * ->oobsize, but we must keep the old large/small
5654*4882a593Smuzhiyun 			 * page with ECC layout when ->oobsize <= 128 for
5655*4882a593Smuzhiyun 			 * compatibility reasons.
5656*4882a593Smuzhiyun 			 */
5657*4882a593Smuzhiyun 			if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
5658*4882a593Smuzhiyun 				mtd_set_ooblayout(mtd,
5659*4882a593Smuzhiyun 						  nand_get_large_page_ooblayout());
5660*4882a593Smuzhiyun 				break;
5661*4882a593Smuzhiyun 			}
5662*4882a593Smuzhiyun 
5663*4882a593Smuzhiyun 			WARN(1, "No oob scheme defined for oobsize %d\n",
5664*4882a593Smuzhiyun 				mtd->oobsize);
5665*4882a593Smuzhiyun 			ret = -EINVAL;
5666*4882a593Smuzhiyun 			goto err_nand_manuf_cleanup;
5667*4882a593Smuzhiyun 		}
5668*4882a593Smuzhiyun 	}
5669*4882a593Smuzhiyun 
5670*4882a593Smuzhiyun 	/*
5671*4882a593Smuzhiyun 	 * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5672*4882a593Smuzhiyun 	 * selected and we have 256 byte pagesize fallback to software ECC
5673*4882a593Smuzhiyun 	 */
5674*4882a593Smuzhiyun 
5675*4882a593Smuzhiyun 	switch (ecc->engine_type) {
5676*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_ON_HOST:
5677*4882a593Smuzhiyun 		ret = nand_set_ecc_on_host_ops(chip);
5678*4882a593Smuzhiyun 		if (ret)
5679*4882a593Smuzhiyun 			goto err_nand_manuf_cleanup;
5680*4882a593Smuzhiyun 
5681*4882a593Smuzhiyun 		if (mtd->writesize >= ecc->size) {
5682*4882a593Smuzhiyun 			if (!ecc->strength) {
5683*4882a593Smuzhiyun 				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5684*4882a593Smuzhiyun 				ret = -EINVAL;
5685*4882a593Smuzhiyun 				goto err_nand_manuf_cleanup;
5686*4882a593Smuzhiyun 			}
5687*4882a593Smuzhiyun 			break;
5688*4882a593Smuzhiyun 		}
5689*4882a593Smuzhiyun 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5690*4882a593Smuzhiyun 			ecc->size, mtd->writesize);
5691*4882a593Smuzhiyun 		ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5692*4882a593Smuzhiyun 		ecc->algo = NAND_ECC_ALGO_HAMMING;
5693*4882a593Smuzhiyun 		fallthrough;
5694*4882a593Smuzhiyun 
5695*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_SOFT:
5696*4882a593Smuzhiyun 		ret = nand_set_ecc_soft_ops(chip);
5697*4882a593Smuzhiyun 		if (ret)
5698*4882a593Smuzhiyun 			goto err_nand_manuf_cleanup;
5699*4882a593Smuzhiyun 		break;
5700*4882a593Smuzhiyun 
5701*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_ON_DIE:
5702*4882a593Smuzhiyun 		if (!ecc->read_page || !ecc->write_page) {
5703*4882a593Smuzhiyun 			WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5704*4882a593Smuzhiyun 			ret = -EINVAL;
5705*4882a593Smuzhiyun 			goto err_nand_manuf_cleanup;
5706*4882a593Smuzhiyun 		}
5707*4882a593Smuzhiyun 		if (!ecc->read_oob)
5708*4882a593Smuzhiyun 			ecc->read_oob = nand_read_oob_std;
5709*4882a593Smuzhiyun 		if (!ecc->write_oob)
5710*4882a593Smuzhiyun 			ecc->write_oob = nand_write_oob_std;
5711*4882a593Smuzhiyun 		break;
5712*4882a593Smuzhiyun 
5713*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_NONE:
5714*4882a593Smuzhiyun 		pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
5715*4882a593Smuzhiyun 		ecc->read_page = nand_read_page_raw;
5716*4882a593Smuzhiyun 		ecc->write_page = nand_write_page_raw;
5717*4882a593Smuzhiyun 		ecc->read_oob = nand_read_oob_std;
5718*4882a593Smuzhiyun 		ecc->read_page_raw = nand_read_page_raw;
5719*4882a593Smuzhiyun 		ecc->write_page_raw = nand_write_page_raw;
5720*4882a593Smuzhiyun 		ecc->write_oob = nand_write_oob_std;
5721*4882a593Smuzhiyun 		ecc->size = mtd->writesize;
5722*4882a593Smuzhiyun 		ecc->bytes = 0;
5723*4882a593Smuzhiyun 		ecc->strength = 0;
5724*4882a593Smuzhiyun 		break;
5725*4882a593Smuzhiyun 
5726*4882a593Smuzhiyun 	default:
5727*4882a593Smuzhiyun 		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
5728*4882a593Smuzhiyun 		ret = -EINVAL;
5729*4882a593Smuzhiyun 		goto err_nand_manuf_cleanup;
5730*4882a593Smuzhiyun 	}
5731*4882a593Smuzhiyun 
5732*4882a593Smuzhiyun 	if (ecc->correct || ecc->calculate) {
5733*4882a593Smuzhiyun 		ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5734*4882a593Smuzhiyun 		ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5735*4882a593Smuzhiyun 		if (!ecc->calc_buf || !ecc->code_buf) {
5736*4882a593Smuzhiyun 			ret = -ENOMEM;
5737*4882a593Smuzhiyun 			goto err_nand_manuf_cleanup;
5738*4882a593Smuzhiyun 		}
5739*4882a593Smuzhiyun 	}
5740*4882a593Smuzhiyun 
5741*4882a593Smuzhiyun 	/* For many systems, the standard OOB write also works for raw */
5742*4882a593Smuzhiyun 	if (!ecc->read_oob_raw)
5743*4882a593Smuzhiyun 		ecc->read_oob_raw = ecc->read_oob;
5744*4882a593Smuzhiyun 	if (!ecc->write_oob_raw)
5745*4882a593Smuzhiyun 		ecc->write_oob_raw = ecc->write_oob;
5746*4882a593Smuzhiyun 
5747*4882a593Smuzhiyun 	/* propagate ecc info to mtd_info */
5748*4882a593Smuzhiyun 	mtd->ecc_strength = ecc->strength;
5749*4882a593Smuzhiyun 	mtd->ecc_step_size = ecc->size;
5750*4882a593Smuzhiyun 
5751*4882a593Smuzhiyun 	/*
5752*4882a593Smuzhiyun 	 * Set the number of read / write steps for one page depending on ECC
5753*4882a593Smuzhiyun 	 * mode.
5754*4882a593Smuzhiyun 	 */
5755*4882a593Smuzhiyun 	ecc->steps = mtd->writesize / ecc->size;
5756*4882a593Smuzhiyun 	if (ecc->steps * ecc->size != mtd->writesize) {
5757*4882a593Smuzhiyun 		WARN(1, "Invalid ECC parameters\n");
5758*4882a593Smuzhiyun 		ret = -EINVAL;
5759*4882a593Smuzhiyun 		goto err_nand_manuf_cleanup;
5760*4882a593Smuzhiyun 	}
5761*4882a593Smuzhiyun 
5762*4882a593Smuzhiyun 	ecc->total = ecc->steps * ecc->bytes;
5763*4882a593Smuzhiyun 	chip->base.ecc.ctx.total = ecc->total;
5764*4882a593Smuzhiyun 
5765*4882a593Smuzhiyun 	if (ecc->total > mtd->oobsize) {
5766*4882a593Smuzhiyun 		WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5767*4882a593Smuzhiyun 		ret = -EINVAL;
5768*4882a593Smuzhiyun 		goto err_nand_manuf_cleanup;
5769*4882a593Smuzhiyun 	}
5770*4882a593Smuzhiyun 
5771*4882a593Smuzhiyun 	/*
5772*4882a593Smuzhiyun 	 * The number of bytes available for a client to place data into
5773*4882a593Smuzhiyun 	 * the out of band area.
5774*4882a593Smuzhiyun 	 */
5775*4882a593Smuzhiyun 	ret = mtd_ooblayout_count_freebytes(mtd);
5776*4882a593Smuzhiyun 	if (ret < 0)
5777*4882a593Smuzhiyun 		ret = 0;
5778*4882a593Smuzhiyun 
5779*4882a593Smuzhiyun 	mtd->oobavail = ret;
5780*4882a593Smuzhiyun 
5781*4882a593Smuzhiyun 	/* ECC sanity check: warn if it's too weak */
5782*4882a593Smuzhiyun 	if (!nand_ecc_is_strong_enough(&chip->base))
5783*4882a593Smuzhiyun 		pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
5784*4882a593Smuzhiyun 			mtd->name, chip->ecc.strength, chip->ecc.size,
5785*4882a593Smuzhiyun 			nanddev_get_ecc_requirements(&chip->base)->strength,
5786*4882a593Smuzhiyun 			nanddev_get_ecc_requirements(&chip->base)->step_size);
5787*4882a593Smuzhiyun 
5788*4882a593Smuzhiyun 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5789*4882a593Smuzhiyun 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5790*4882a593Smuzhiyun 		switch (ecc->steps) {
5791*4882a593Smuzhiyun 		case 2:
5792*4882a593Smuzhiyun 			mtd->subpage_sft = 1;
5793*4882a593Smuzhiyun 			break;
5794*4882a593Smuzhiyun 		case 4:
5795*4882a593Smuzhiyun 		case 8:
5796*4882a593Smuzhiyun 		case 16:
5797*4882a593Smuzhiyun 			mtd->subpage_sft = 2;
5798*4882a593Smuzhiyun 			break;
5799*4882a593Smuzhiyun 		}
5800*4882a593Smuzhiyun 	}
5801*4882a593Smuzhiyun 	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5802*4882a593Smuzhiyun 
5803*4882a593Smuzhiyun 	/* Invalidate the pagebuffer reference */
5804*4882a593Smuzhiyun 	chip->pagecache.page = -1;
5805*4882a593Smuzhiyun 
5806*4882a593Smuzhiyun 	/* Large page NAND with SOFT_ECC should support subpage reads */
5807*4882a593Smuzhiyun 	switch (ecc->engine_type) {
5808*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_SOFT:
5809*4882a593Smuzhiyun 		if (chip->page_shift > 9)
5810*4882a593Smuzhiyun 			chip->options |= NAND_SUBPAGE_READ;
5811*4882a593Smuzhiyun 		break;
5812*4882a593Smuzhiyun 
5813*4882a593Smuzhiyun 	default:
5814*4882a593Smuzhiyun 		break;
5815*4882a593Smuzhiyun 	}
5816*4882a593Smuzhiyun 
5817*4882a593Smuzhiyun 	ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5818*4882a593Smuzhiyun 	if (ret)
5819*4882a593Smuzhiyun 		goto err_nand_manuf_cleanup;
5820*4882a593Smuzhiyun 
5821*4882a593Smuzhiyun 	/* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5822*4882a593Smuzhiyun 	if (chip->options & NAND_ROM)
5823*4882a593Smuzhiyun 		mtd->flags = MTD_CAP_ROM;
5824*4882a593Smuzhiyun 
5825*4882a593Smuzhiyun 	/* Fill in remaining MTD driver data */
5826*4882a593Smuzhiyun 	mtd->_erase = nand_erase;
5827*4882a593Smuzhiyun 	mtd->_point = NULL;
5828*4882a593Smuzhiyun 	mtd->_unpoint = NULL;
5829*4882a593Smuzhiyun 	mtd->_panic_write = panic_nand_write;
5830*4882a593Smuzhiyun 	mtd->_read_oob = nand_read_oob;
5831*4882a593Smuzhiyun 	mtd->_write_oob = nand_write_oob;
5832*4882a593Smuzhiyun 	mtd->_sync = nand_sync;
5833*4882a593Smuzhiyun 	mtd->_lock = nand_lock;
5834*4882a593Smuzhiyun 	mtd->_unlock = nand_unlock;
5835*4882a593Smuzhiyun 	mtd->_suspend = nand_suspend;
5836*4882a593Smuzhiyun 	mtd->_resume = nand_resume;
5837*4882a593Smuzhiyun 	mtd->_reboot = nand_shutdown;
5838*4882a593Smuzhiyun 	mtd->_block_isreserved = nand_block_isreserved;
5839*4882a593Smuzhiyun 	mtd->_block_isbad = nand_block_isbad;
5840*4882a593Smuzhiyun 	mtd->_block_markbad = nand_block_markbad;
5841*4882a593Smuzhiyun 	mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5842*4882a593Smuzhiyun 
5843*4882a593Smuzhiyun 	/*
5844*4882a593Smuzhiyun 	 * Initialize bitflip_threshold to its default prior scan_bbt() call.
5845*4882a593Smuzhiyun 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5846*4882a593Smuzhiyun 	 * properly set.
5847*4882a593Smuzhiyun 	 */
5848*4882a593Smuzhiyun 	if (!mtd->bitflip_threshold)
5849*4882a593Smuzhiyun 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5850*4882a593Smuzhiyun 
5851*4882a593Smuzhiyun 	/* Find the fastest data interface for this chip */
5852*4882a593Smuzhiyun 	ret = nand_choose_interface_config(chip);
5853*4882a593Smuzhiyun 	if (ret)
5854*4882a593Smuzhiyun 		goto err_nanddev_cleanup;
5855*4882a593Smuzhiyun 
5856*4882a593Smuzhiyun 	/* Enter fastest possible mode on all dies. */
5857*4882a593Smuzhiyun 	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5858*4882a593Smuzhiyun 		ret = nand_setup_interface(chip, i);
5859*4882a593Smuzhiyun 		if (ret)
5860*4882a593Smuzhiyun 			goto err_free_interface_config;
5861*4882a593Smuzhiyun 	}
5862*4882a593Smuzhiyun 
5863*4882a593Smuzhiyun 	/* Check, if we should skip the bad block table scan */
5864*4882a593Smuzhiyun 	if (chip->options & NAND_SKIP_BBTSCAN)
5865*4882a593Smuzhiyun 		return 0;
5866*4882a593Smuzhiyun 
5867*4882a593Smuzhiyun 	/* Build bad block table */
5868*4882a593Smuzhiyun 	ret = nand_create_bbt(chip);
5869*4882a593Smuzhiyun 	if (ret)
5870*4882a593Smuzhiyun 		goto err_free_interface_config;
5871*4882a593Smuzhiyun 
5872*4882a593Smuzhiyun 	return 0;
5873*4882a593Smuzhiyun 
5874*4882a593Smuzhiyun err_free_interface_config:
5875*4882a593Smuzhiyun 	kfree(chip->best_interface_config);
5876*4882a593Smuzhiyun 
5877*4882a593Smuzhiyun err_nanddev_cleanup:
5878*4882a593Smuzhiyun 	nanddev_cleanup(&chip->base);
5879*4882a593Smuzhiyun 
5880*4882a593Smuzhiyun err_nand_manuf_cleanup:
5881*4882a593Smuzhiyun 	nand_manufacturer_cleanup(chip);
5882*4882a593Smuzhiyun 
5883*4882a593Smuzhiyun err_free_buf:
5884*4882a593Smuzhiyun 	kfree(chip->data_buf);
5885*4882a593Smuzhiyun 	kfree(ecc->code_buf);
5886*4882a593Smuzhiyun 	kfree(ecc->calc_buf);
5887*4882a593Smuzhiyun 
5888*4882a593Smuzhiyun 	return ret;
5889*4882a593Smuzhiyun }
5890*4882a593Smuzhiyun 
nand_attach(struct nand_chip * chip)5891*4882a593Smuzhiyun static int nand_attach(struct nand_chip *chip)
5892*4882a593Smuzhiyun {
5893*4882a593Smuzhiyun 	if (chip->controller->ops && chip->controller->ops->attach_chip)
5894*4882a593Smuzhiyun 		return chip->controller->ops->attach_chip(chip);
5895*4882a593Smuzhiyun 
5896*4882a593Smuzhiyun 	return 0;
5897*4882a593Smuzhiyun }
5898*4882a593Smuzhiyun 
nand_detach(struct nand_chip * chip)5899*4882a593Smuzhiyun static void nand_detach(struct nand_chip *chip)
5900*4882a593Smuzhiyun {
5901*4882a593Smuzhiyun 	if (chip->controller->ops && chip->controller->ops->detach_chip)
5902*4882a593Smuzhiyun 		chip->controller->ops->detach_chip(chip);
5903*4882a593Smuzhiyun }
5904*4882a593Smuzhiyun 
5905*4882a593Smuzhiyun /**
5906*4882a593Smuzhiyun  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5907*4882a593Smuzhiyun  * @chip: NAND chip object
5908*4882a593Smuzhiyun  * @maxchips: number of chips to scan for.
5909*4882a593Smuzhiyun  * @ids: optional flash IDs table
5910*4882a593Smuzhiyun  *
5911*4882a593Smuzhiyun  * This fills out all the uninitialized function pointers with the defaults.
5912*4882a593Smuzhiyun  * The flash ID is read and the mtd/chip structures are filled with the
5913*4882a593Smuzhiyun  * appropriate values.
5914*4882a593Smuzhiyun  */
nand_scan_with_ids(struct nand_chip * chip,unsigned int maxchips,struct nand_flash_dev * ids)5915*4882a593Smuzhiyun int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5916*4882a593Smuzhiyun 		       struct nand_flash_dev *ids)
5917*4882a593Smuzhiyun {
5918*4882a593Smuzhiyun 	int ret;
5919*4882a593Smuzhiyun 
5920*4882a593Smuzhiyun 	if (!maxchips)
5921*4882a593Smuzhiyun 		return -EINVAL;
5922*4882a593Smuzhiyun 
5923*4882a593Smuzhiyun 	ret = nand_scan_ident(chip, maxchips, ids);
5924*4882a593Smuzhiyun 	if (ret)
5925*4882a593Smuzhiyun 		return ret;
5926*4882a593Smuzhiyun 
5927*4882a593Smuzhiyun 	ret = nand_attach(chip);
5928*4882a593Smuzhiyun 	if (ret)
5929*4882a593Smuzhiyun 		goto cleanup_ident;
5930*4882a593Smuzhiyun 
5931*4882a593Smuzhiyun 	ret = nand_scan_tail(chip);
5932*4882a593Smuzhiyun 	if (ret)
5933*4882a593Smuzhiyun 		goto detach_chip;
5934*4882a593Smuzhiyun 
5935*4882a593Smuzhiyun 	return 0;
5936*4882a593Smuzhiyun 
5937*4882a593Smuzhiyun detach_chip:
5938*4882a593Smuzhiyun 	nand_detach(chip);
5939*4882a593Smuzhiyun cleanup_ident:
5940*4882a593Smuzhiyun 	nand_scan_ident_cleanup(chip);
5941*4882a593Smuzhiyun 
5942*4882a593Smuzhiyun 	return ret;
5943*4882a593Smuzhiyun }
5944*4882a593Smuzhiyun EXPORT_SYMBOL(nand_scan_with_ids);
5945*4882a593Smuzhiyun 
5946*4882a593Smuzhiyun /**
5947*4882a593Smuzhiyun  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5948*4882a593Smuzhiyun  * @chip: NAND chip object
5949*4882a593Smuzhiyun  */
nand_cleanup(struct nand_chip * chip)5950*4882a593Smuzhiyun void nand_cleanup(struct nand_chip *chip)
5951*4882a593Smuzhiyun {
5952*4882a593Smuzhiyun 	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5953*4882a593Smuzhiyun 	    chip->ecc.algo == NAND_ECC_ALGO_BCH)
5954*4882a593Smuzhiyun 		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5955*4882a593Smuzhiyun 
5956*4882a593Smuzhiyun 	nanddev_cleanup(&chip->base);
5957*4882a593Smuzhiyun 
5958*4882a593Smuzhiyun 	/* Free bad block table memory */
5959*4882a593Smuzhiyun 	kfree(chip->bbt);
5960*4882a593Smuzhiyun 	kfree(chip->data_buf);
5961*4882a593Smuzhiyun 	kfree(chip->ecc.code_buf);
5962*4882a593Smuzhiyun 	kfree(chip->ecc.calc_buf);
5963*4882a593Smuzhiyun 
5964*4882a593Smuzhiyun 	/* Free bad block descriptor memory */
5965*4882a593Smuzhiyun 	if (chip->badblock_pattern && chip->badblock_pattern->options
5966*4882a593Smuzhiyun 			& NAND_BBT_DYNAMICSTRUCT)
5967*4882a593Smuzhiyun 		kfree(chip->badblock_pattern);
5968*4882a593Smuzhiyun 
5969*4882a593Smuzhiyun 	/* Free the data interface */
5970*4882a593Smuzhiyun 	kfree(chip->best_interface_config);
5971*4882a593Smuzhiyun 
5972*4882a593Smuzhiyun 	/* Free manufacturer priv data. */
5973*4882a593Smuzhiyun 	nand_manufacturer_cleanup(chip);
5974*4882a593Smuzhiyun 
5975*4882a593Smuzhiyun 	/* Free controller specific allocations after chip identification */
5976*4882a593Smuzhiyun 	nand_detach(chip);
5977*4882a593Smuzhiyun 
5978*4882a593Smuzhiyun 	/* Free identification phase allocations */
5979*4882a593Smuzhiyun 	nand_scan_ident_cleanup(chip);
5980*4882a593Smuzhiyun }
5981*4882a593Smuzhiyun 
5982*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_cleanup);
5983*4882a593Smuzhiyun 
5984*4882a593Smuzhiyun MODULE_LICENSE("GPL");
5985*4882a593Smuzhiyun MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5986*4882a593Smuzhiyun MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5987*4882a593Smuzhiyun MODULE_DESCRIPTION("Generic NAND flash driver code");
5988