/*
 *  Overview:
 *   This is the generic MTD driver for NAND flash devices. It should be
 *   capable of working with almost all NAND chips currently available.
 *
 *	Additional technical information is available on
 *	http://www.linux-mtd.infradead.org/doc/nand.html
 *
 *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
 *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
 *
 *  Credits:
 *	David Woodhouse for adding multichip support
 *
 *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
 *	rework for 2K page size chips
 *
 *  TODO:
 *	Enable cached programming for 2k page size chips
 *	Check, if mtd->ecctype should be set to MTD_ECC_HW
 *	if we have HW ECC support.
 *	BBT table is not serialized, has to be fixed
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <common.h>
#if CONFIG_IS_ENABLED(OF_CONTROL)
#include <fdtdec.h>
#endif
#include <malloc.h>
#include <watchdog.h>
#include <linux/err.h>
#include <linux/compat.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/nand_bch.h>
#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
#endif
#include <asm/io.h>
#include <linux/errno.h>

/* Define default oob placement schemes for large and small page devices */
#ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
static struct nand_ecclayout nand_oob_8 = {
	.eccbytes = 3,
	.eccpos = {0, 1, 2},
	.oobfree = {
		{.offset = 3,
		 .length = 2},
		{.offset = 6,
		 .length = 2} }
};

static struct nand_ecclayout nand_oob_16 = {
	.eccbytes = 6,
	.eccpos = {0, 1, 2, 3, 6, 7},
	.oobfree = {
		{.offset = 8,
		 .length = 8} }
};

static struct nand_ecclayout nand_oob_64 = {
	.eccbytes = 24,
	.eccpos = {
		   40, 41, 42, 43, 44, 45, 46, 47,
		   48, 49, 50, 51, 52, 53, 54, 55,
		   56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = {
		{.offset = 2,
		 .length = 38} }
};

static struct nand_ecclayout nand_oob_128 = {
	.eccbytes = 48,
	.eccpos = {
		   80, 81, 82, 83, 84, 85, 86, 87,
		   88, 89, 90, 91, 92, 93, 94, 95,
		   96, 97, 98, 99, 100, 101, 102, 103,
		   104, 105, 106, 107, 108, 109, 110, 111,
		   112, 113, 114, 115, 116, 117, 118, 119,
		   120, 121, 122, 123, 124, 125, 126, 127},
	.oobfree = {
		{.offset = 2,
		 .length = 78} }
};
#endif
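
/*
 * Illustrative note (not part of the original driver): with the default
 * nand_oob_64 layout above, a 2 KiB page with a 64-byte OOB area keeps
 * bytes 0-1 for the bad block marker, leaves bytes 2-39 free for MTD
 * users (oobavail = 38) and places the 24 software ECC bytes at 40-63.
 * nand_scan_tail() normally picks one of these defaults by OOB size; the
 * hypothetical sketch below shows the equivalent explicit assignment a
 * board driver could make (assumes the nand_ecc_ctrl 'layout' field of
 * this U-Boot version; kept out of the build).
 */
#if 0
static void board_nand_pick_layout(struct nand_chip *chip)
{
	/* 64-byte OOB: use the default large-page software ECC layout */
	chip->ecc.layout = &nand_oob_64;
}
#endif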

static int nand_get_device(struct mtd_info *mtd, int new_state);

static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
			     struct mtd_oob_ops *ops);

/*
 * For devices which display every fart in the system on a separate LED. Is
 * compiled away when LED support is disabled.
 */
DEFINE_LED_TRIGGER(nand_led_trigger);

static int check_offs_len(struct mtd_info *mtd,
					loff_t ofs, uint64_t len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int ret = 0;

	/* Start address must align on block boundary */
	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: unaligned address\n", __func__);
		ret = -EINVAL;
	}

	/* Length must align on block boundary */
	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
		pr_debug("%s: length not block aligned\n", __func__);
		ret = -EINVAL;
	}

	return ret;
}
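
/*
 * Example (illustrative, not part of the original driver): with 128 KiB
 * erase blocks phys_erase_shift is 17, so check_offs_len() accepts only
 * offsets and lengths that are multiples of 0x20000. The hypothetical
 * sketch below is kept out of the build.
 */
#if 0
static void check_offs_len_example(struct mtd_info *mtd)
{
	/* 128 KiB blocks: a 0x20000-aligned request passes... */
	WARN_ON(check_offs_len(mtd, 0x20000, 0x40000) != 0);
	/* ...while an offset inside a block is rejected with -EINVAL */
	WARN_ON(check_offs_len(mtd, 0x21000, 0x20000) != -EINVAL);
}
#endif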

/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Release chip lock and wake up anyone waiting on the device.
 */
static void nand_release_device(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* De-select the NAND device */
	chip->select_chip(mtd, -1);
}

/**
 * nand_read_byte - [DEFAULT] read one byte from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 8bit buswidth
 */
uint8_t nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	return readb(chip->IO_ADDR_R);
}

/**
 * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 16bit buswidth with endianness conversion.
 *
 */
static uint8_t nand_read_byte16(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
}

/**
 * nand_read_word - [DEFAULT] read one word from the chip
 * @mtd: MTD device structure
 *
 * Default read function for 16bit buswidth without endianness conversion.
 */
static u16 nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	return readw(chip->IO_ADDR_R);
}

/**
 * nand_select_chip - [DEFAULT] control CE line
 * @mtd: MTD device structure
 * @chipnr: chipnumber to select, -1 for deselect
 *
 * Default select function for 1 chip devices.
 */
static void nand_select_chip(struct mtd_info *mtd, int chipnr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	switch (chipnr) {
	case -1:
		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
		break;
	case 0:
		break;

	default:
		BUG();
	}
}

/**
 * nand_write_byte - [DEFAULT] write single byte to chip
 * @mtd: MTD device structure
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0]
 */
static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	chip->write_buf(mtd, &byte, 1);
}

/**
 * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
 * @mtd: MTD device structure
 * @byte: value to write
 *
 * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
 */
static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	uint16_t word = byte;

	/*
	 * It's not entirely clear what should happen to I/O[15:8] when writing
	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
	 *
	 *    When the host supports a 16-bit bus width, only data is
	 *    transferred at the 16-bit width. All address and command line
	 *    transfers shall use only the lower 8-bits of the data bus. During
	 *    command transfers, the host may place any value on the upper
	 *    8-bits of the data bus. During address transfers, the host shall
	 *    set the upper 8-bits of the data bus to 00h.
	 *
	 * One user of the write_byte callback is nand_onfi_set_features. The
	 * four parameters are specified to be written to I/O[7:0], but this is
	 * neither an address nor a command transfer. Let's assume a 0 on the
	 * upper I/O lines is OK.
	 */
	chip->write_buf(mtd, (uint8_t *)&word, 2);
}

static void iowrite8_rep(void *addr, const uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		writeb(buf[i], addr);
}

static void ioread8_rep(void *addr, uint8_t *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		buf[i] = readb(addr);
}

static void ioread16_rep(void *addr, void *buf, int len)
{
	int i;
	u16 *p = (u16 *) buf;

	for (i = 0; i < len; i++)
		p[i] = readw(addr);
}

static void iowrite16_rep(void *addr, void *buf, int len)
{
	int i;
	u16 *p = (u16 *) buf;

	for (i = 0; i < len; i++)
		writew(p[i], addr);
}

/**
 * nand_write_buf - [DEFAULT] write buffer to chip
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 8bit buswidth.
 */
void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	iowrite8_rep(chip->IO_ADDR_W, buf, len);
}

/**
 * nand_read_buf - [DEFAULT] read chip data into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 8bit buswidth.
 */
void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	ioread8_rep(chip->IO_ADDR_R, buf, len);
}

/**
 * nand_write_buf16 - [DEFAULT] write buffer to chip
 * @mtd: MTD device structure
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Default write function for 16bit buswidth.
 */
void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u16 *p = (u16 *) buf;

	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
}

/**
 * nand_read_buf16 - [DEFAULT] read chip data into buffer
 * @mtd: MTD device structure
 * @buf: buffer to store data
 * @len: number of bytes to read
 *
 * Default read function for 16bit buswidth.
 */
void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u16 *p = (u16 *) buf;

	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
}

/**
 * nand_block_bad - [DEFAULT] Read bad block marker from the chip
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check, if the block is bad.
 */
static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	int page, res = 0, i = 0;
	struct nand_chip *chip = mtd_to_nand(mtd);
	u16 bad;

	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;

	page = (int)(ofs >> chip->page_shift) & chip->pagemask;

	do {
		if (chip->options & NAND_BUSWIDTH_16) {
			chip->cmdfunc(mtd, NAND_CMD_READOOB,
					chip->badblockpos & 0xFE, page);
			bad = cpu_to_le16(chip->read_word(mtd));
			if (chip->badblockpos & 0x1)
				bad >>= 8;
			else
				bad &= 0xFF;
		} else {
			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
					page);
			bad = chip->read_byte(mtd);
		}

		if (likely(chip->badblockbits == 8))
			res = bad != 0xFF;
		else
			res = hweight8(bad) < chip->badblockbits;
		ofs += mtd->writesize;
		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
		i++;
	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));

	return res;
}

/**
 * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This is the default implementation, which can be overridden by a hardware
 * specific driver. It provides the details for writing a bad block marker to a
 * block.
 */
static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mtd_oob_ops ops;
	uint8_t buf[2] = { 0, 0 };
	int ret = 0, res, i = 0;

	memset(&ops, 0, sizeof(ops));
	ops.oobbuf = buf;
	ops.ooboffs = chip->badblockpos;
	if (chip->options & NAND_BUSWIDTH_16) {
		ops.ooboffs &= ~0x01;
		ops.len = ops.ooblen = 2;
	} else {
		ops.len = ops.ooblen = 1;
	}
	ops.mode = MTD_OPS_PLACE_OOB;

	/* Write to first/last page(s) if necessary */
	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
		ofs += mtd->erasesize - mtd->writesize;
	do {
		res = nand_do_write_oob(mtd, ofs, &ops);
		if (!ret)
			ret = res;

		i++;
		ofs += mtd->writesize;
	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);

	return ret;
}

/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->block_markbad).
 *
 * We try operations in the following order:
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
 */
static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.mtd = mtd;
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		nand_erase_nand(mtd, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(mtd, FL_WRITING);
		ret = chip->block_markbad(mtd, ofs);
		nand_release_device(mtd);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(mtd, ofs);
		if (!ret)
			ret = res;
	}

	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}

/**
 * nand_check_wp - [GENERIC] check if the chip is write protected
 * @mtd: MTD device structure
 *
 * Check, if the device is write protected. The function expects, that the
 * device is already selected.
 */
static int nand_check_wp(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u8 status;
	int ret;

	/* Broken xD cards report WP despite being writable */
	if (chip->options & NAND_BROKEN_XD)
		return 0;

	/* Check the WP bit */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status & NAND_STATUS_WP ? 0 : 1;
}

/**
 * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
 * @mtd: MTD device structure
 * @ofs: offset from device start
 *
 * Check if the block is marked as reserved.
 */
static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!chip->bbt)
		return 0;
	/* Return info from the table */
	return nand_isreserved_bbt(mtd, ofs);
}

/**
 * nand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 * @allowbbt: 1, if it is allowed to access the bbt area
 *
 * Check, if the block is bad. Either by reading the bad block table or
 * calling of the scan function.
 */
static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (!(chip->options & NAND_SKIP_BBTSCAN) &&
	    !(chip->options & NAND_BBT_SCANNED)) {
		chip->options |= NAND_BBT_SCANNED;
		chip->scan_bbt(mtd);
	}

	if (!chip->bbt)
		return chip->block_bad(mtd, ofs);

	/* Return info from the table */
	return nand_isbad_bbt(mtd, ofs, allowbbt);
}

/**
 * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
 * @mtd: MTD device structure
 *
 * Wait for the ready pin after a command, and warn if a timeout occurs.
 */
void nand_wait_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	u32 timeo = (CONFIG_SYS_HZ * 400) / 1000;
	u32 time_start;

	time_start = get_timer(0);
	/* Wait until command is processed or timeout occurs */
	while (get_timer(time_start) < timeo) {
		if (chip->dev_ready)
			if (chip->dev_ready(mtd))
				break;
	}

	if (chip->dev_ready && !chip->dev_ready(mtd))
		pr_warn("timeout while waiting for chip to become ready\n");
}
EXPORT_SYMBOL_GPL(nand_wait_ready);
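
/*
 * Illustrative sketch (not part of the original driver): nand_wait_ready()
 * simply polls chip->dev_ready() until it returns non-zero or roughly
 * 400 ms have elapsed, so a controller driver only has to expose the
 * state of the R/B# line. The hypothetical callback below assumes a
 * memory-mapped status register with a ready bit; the register name,
 * address and bit are made up and the block is kept out of the build.
 */
#if 0
#define BOARD_NAND_STATUS_REG	((void __iomem *)0xfe000004)
#define BOARD_NAND_RDY		BIT(0)

static int board_nand_dev_ready(struct mtd_info *mtd)
{
	/* Non-zero means the chip has deasserted busy */
	return readl(BOARD_NAND_STATUS_REG) & BOARD_NAND_RDY;
}
#endif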

/**
 * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
 * @mtd: MTD device structure
 * @timeo: Timeout in ms
 *
 * Wait for status ready (i.e. command done) or timeout.
 */
static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	u32 time_start;
	int ret;

	timeo = (CONFIG_SYS_HZ * timeo) / 1000;
	time_start = get_timer(0);
	while (get_timer(time_start) < timeo) {
		u8 status;

		ret = nand_read_data_op(chip, &status, sizeof(status), true);
		if (ret)
			return;

		if (status & NAND_STATUS_READY)
			break;
		WATCHDOG_RESET();
	}
}

/**
 * nand_command - [DEFAULT] Send command to NAND device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This function is used for small page devices
 * (512 Bytes per page).
 */
static void nand_command(struct mtd_info *mtd, unsigned int command,
			 int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);
	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;

	/* Write out the command to the device */
	if (command == NAND_CMD_SEQIN) {
		int readcmd;

		if (column >= mtd->writesize) {
			/* OOB area */
			column -= mtd->writesize;
			readcmd = NAND_CMD_READOOB;
		} else if (column < 256) {
			/* First 256 bytes --> READ0 */
			readcmd = NAND_CMD_READ0;
		} else {
			column -= 256;
			readcmd = NAND_CMD_READ1;
		}
		chip->cmd_ctrl(mtd, readcmd, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	chip->cmd_ctrl(mtd, command, ctrl);

	/* Address cycle, when necessary */
	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
	/* Serially input address */
	if (column != -1) {
		/* Adjust columns for 16 bit buswidth */
		if (chip->options & NAND_BUSWIDTH_16 &&
				!nand_opcode_8bits(command))
			column >>= 1;
		chip->cmd_ctrl(mtd, column, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
	}
	if (page_addr != -1) {
		chip->cmd_ctrl(mtd, page_addr, ctrl);
		ctrl &= ~NAND_CTRL_CHANGE;
		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
		if (chip->options & NAND_ROW_ADDR_3)
			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
	 */
	switch (command) {

	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd,
			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}
	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}

/**
 * nand_command_lp - [DEFAULT] Send command to NAND large page device
 * @mtd: MTD device structure
 * @command: the command to be sent
 * @column: the column address for this command, -1 if none
 * @page_addr: the page address for this command, -1 if none
 *
 * Send command to NAND device. This is the version for the new large page
 * devices. We don't have the separate regions as we have in the small page
 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
 */
static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
			    int column, int page_addr)
{
	register struct nand_chip *chip = mtd_to_nand(mtd);

	/* Emulate NAND_CMD_READOOB */
	if (command == NAND_CMD_READOOB) {
		column += mtd->writesize;
		command = NAND_CMD_READ0;
	}

	/* Command latch cycle */
	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);

	if (column != -1 || page_addr != -1) {
		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;

		/* Serially input address */
		if (column != -1) {
			/* Adjust columns for 16 bit buswidth */
			if (chip->options & NAND_BUSWIDTH_16 &&
					!nand_opcode_8bits(command))
				column >>= 1;
			chip->cmd_ctrl(mtd, column, ctrl);
			ctrl &= ~NAND_CTRL_CHANGE;
			chip->cmd_ctrl(mtd, column >> 8, ctrl);
		}
		if (page_addr != -1) {
			chip->cmd_ctrl(mtd, page_addr, ctrl);
			chip->cmd_ctrl(mtd, page_addr >> 8,
				       NAND_NCE | NAND_ALE);
			if (chip->options & NAND_ROW_ADDR_3)
				chip->cmd_ctrl(mtd, page_addr >> 16,
					       NAND_NCE | NAND_ALE);
		}
	}
	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);

	/*
	 * Program and erase have their own busy handlers; status and
	 * sequential in need no delay.
	 */
	switch (command) {

	case NAND_CMD_CACHEDPROG:
	case NAND_CMD_PAGEPROG:
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
	case NAND_CMD_SEQIN:
	case NAND_CMD_RNDIN:
	case NAND_CMD_STATUS:
	case NAND_CMD_READID:
	case NAND_CMD_SET_FEATURES:
		return;

	case NAND_CMD_RESET:
		if (chip->dev_ready)
			break;
		udelay(chip->chip_delay);
		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
		nand_wait_status_ready(mtd, 250);
		return;

	case NAND_CMD_RNDOUT:
		/* No ready / busy check necessary */
		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);
		return;

	case NAND_CMD_READ0:
		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
			       NAND_NCE | NAND_CTRL_CHANGE);

		/* This applies to read commands */
	default:
		/*
		 * If we don't have access to the busy pin, we apply the given
		 * command delay.
		 */
		if (!chip->dev_ready) {
			udelay(chip->chip_delay);
			return;
		}
	}

	/*
	 * Apply this short delay always to ensure that we do wait tWB in
	 * any case on any machine.
	 */
	ndelay(100);

	nand_wait_ready(mtd);
}

/**
 * panic_nand_get_device - [GENERIC] Get chip for selected access
 * @chip: the nand chip descriptor
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Used when in panic, no locks are taken.
 */
static void panic_nand_get_device(struct nand_chip *chip,
		      struct mtd_info *mtd, int new_state)
{
	/* Hardware controller shared among independent devices */
	chip->controller->active = chip;
	chip->state = new_state;
}

/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @mtd: MTD device structure
 * @new_state: the state which is requested
 *
 * Get the device and lock it for exclusive access
 */
static int
nand_get_device(struct mtd_info *mtd, int new_state)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	chip->state = new_state;
	return 0;
}

/**
 * panic_nand_wait - [GENERIC] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 * @timeo: timeout
 *
 * Wait for command done. This is a helper function for nand_wait used when
 * we are in interrupt context. May happen when in panic and trying to write
 * an oops through mtdoops.
 */
static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
			    unsigned long timeo)
{
	int i;
	for (i = 0; i < timeo; i++) {
		if (chip->dev_ready) {
			if (chip->dev_ready(mtd))
				break;
		} else {
			int ret;
			u8 status;

			ret = nand_read_data_op(chip, &status, sizeof(status),
						true);
			if (ret)
				return;

			if (status & NAND_STATUS_READY)
				break;
		}
		mdelay(1);
	}
}

/**
 * nand_wait - [DEFAULT] wait until the command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 *
 * Wait for command done. This applies to erase and program only.
 */
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
	unsigned long timeo = 400;
	u8 status;
	int ret;

	led_trigger_event(nand_led_trigger, LED_FULL);

	/*
	 * Apply this short delay always to ensure that we do wait tWB in any
	 * case on any machine.
	 */
	ndelay(100);

	ret = nand_status_op(chip, NULL);
	if (ret)
		return ret;

	u32 timer = (CONFIG_SYS_HZ * timeo) / 1000;
	u32 time_start;

	time_start = get_timer(0);
	while (get_timer(time_start) < timer) {
		if (chip->dev_ready) {
			if (chip->dev_ready(mtd))
				break;
		} else {
			ret = nand_read_data_op(chip, &status,
						sizeof(status), true);
			if (ret)
				return ret;

			if (status & NAND_STATUS_READY)
				break;
		}
	}
	led_trigger_event(nand_led_trigger, LED_OFF);

	ret = nand_read_data_op(chip, &status, sizeof(status), true);
	if (ret)
		return ret;

	/* This can happen in case of a timeout or buggy dev_ready */
	WARN_ON(!(status & NAND_STATUS_READY));
	return status;
}

/**
 * nand_reset_data_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_data_interface *conf;
	int ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	conf = nand_get_default_data_interface();
	ret = chip->setup_data_interface(mtd, chipnr, conf);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}

/**
 * nand_setup_data_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Find and configure the best data interface and NAND timings supported by
 * the chip and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_data_interface(struct nand_chip *chip, int chipnr)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	if (!chip->setup_data_interface || !chip->data_interface)
		return 0;

	/*
	 * Ensure the timing mode has been changed on the chip side
	 * before changing timings on the controller side.
	 */
	if (chip->onfi_version) {
		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
			chip->onfi_timing_mode_default,
		};

		ret = chip->onfi_set_features(mtd, chip,
				ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
		if (ret)
			goto err;
	}

	ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface);
err:
	return ret;
}

/**
 * nand_init_data_interface - find the best data interface and timings
 * @chip: The NAND chip
 *
 * Find the best data interface and NAND timings supported by the chip
 * and the driver.
 * First tries to retrieve supported timing modes from ONFI information,
 * and if the NAND chip does not support ONFI, relies on the
 * ->onfi_timing_mode_default specified in the nand_ids table. After this
 * function nand_chip->data_interface is initialized with the best timing mode
 * available.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_init_data_interface(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int modes, mode, ret;

	if (!chip->setup_data_interface)
		return 0;

	/*
	 * First try to identify the best timings from ONFI parameters and
	 * if the NAND does not support ONFI, fallback to the default ONFI
	 * timing mode.
	 */
	modes = onfi_get_async_timing_mode(chip);
	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
		if (!chip->onfi_timing_mode_default)
			return 0;

		modes = GENMASK(chip->onfi_timing_mode_default, 0);
	}

	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
				       GFP_KERNEL);
	if (!chip->data_interface)
		return -ENOMEM;

	for (mode = fls(modes) - 1; mode >= 0; mode--) {
		ret = onfi_init_data_interface(chip, chip->data_interface,
					       NAND_SDR_IFACE, mode);
		if (ret)
			continue;

		/*
		 * Pass NAND_DATA_IFACE_CHECK_ONLY to only check whether the
		 * controller supports the requested timings.
		 */
		ret = chip->setup_data_interface(mtd,
						 NAND_DATA_IFACE_CHECK_ONLY,
						 chip->data_interface);
		if (!ret) {
			chip->onfi_timing_mode_default = mode;
			break;
		}
	}

	return 0;
}

static void __maybe_unused nand_release_data_interface(struct nand_chip *chip)
{
	kfree(chip->data_interface);
}

/**
 * nand_read_page_op - Do a READ PAGE operation
 * @chip: The NAND chip
 * @page: page to read
 * @offset_in_page: offset within the page
 * @buf: buffer used to store the data
 * @len: length of the buffer
 *
 * This function issues a READ PAGE operation.
 * This function does not select/unselect the CS line.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
		      unsigned int offset_in_page, void *buf, unsigned int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	if (len && !buf)
		return -EINVAL;

	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
		return -EINVAL;

	chip->cmdfunc(mtd, NAND_CMD_READ0, offset_in_page, page);
	if (len)
		chip->read_buf(mtd, buf, len);

	return 0;
}
EXPORT_SYMBOL_GPL(nand_read_page_op);
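
/*
 * Illustrative usage (not part of the original driver): a raw page read
 * can be built directly on top of nand_read_page_op(), in the same spirit
 * as the driver's raw page read helpers. The hypothetical helper below
 * assumes the caller has already selected the die and is kept out of the
 * build.
 */
#if 0
static int example_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				 uint8_t *buf, int oob_required, int page)
{
	int ret;

	/* Issue the READ PAGE sequence and transfer the main area */
	ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
	if (ret)
		return ret;

	/* Optionally continue with the OOB bytes that follow the page */
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}
#endif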
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun /**
1114*4882a593Smuzhiyun  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1115*4882a593Smuzhiyun  * @chip: The NAND chip
1116*4882a593Smuzhiyun  * @page: parameter page to read
1117*4882a593Smuzhiyun  * @buf: buffer used to store the data
1118*4882a593Smuzhiyun  * @len: length of the buffer
1119*4882a593Smuzhiyun  *
1120*4882a593Smuzhiyun  * This function issues a READ PARAMETER PAGE operation.
1121*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1122*4882a593Smuzhiyun  *
1123*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1124*4882a593Smuzhiyun  */
nand_read_param_page_op(struct nand_chip * chip,u8 page,void * buf,unsigned int len)1125*4882a593Smuzhiyun static int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1126*4882a593Smuzhiyun 				   unsigned int len)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1129*4882a593Smuzhiyun 	unsigned int i;
1130*4882a593Smuzhiyun 	u8 *p = buf;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	if (len && !buf)
1133*4882a593Smuzhiyun 		return -EINVAL;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_PARAM, page, -1);
1136*4882a593Smuzhiyun 	for (i = 0; i < len; i++)
1137*4882a593Smuzhiyun 		p[i] = chip->read_byte(mtd);
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	return 0;
1140*4882a593Smuzhiyun }
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun /**
1143*4882a593Smuzhiyun  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1144*4882a593Smuzhiyun  * @chip: The NAND chip
1145*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1146*4882a593Smuzhiyun  * @buf: buffer used to store the data
1147*4882a593Smuzhiyun  * @len: length of the buffer
1148*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1149*4882a593Smuzhiyun  *
1150*4882a593Smuzhiyun  * This function issues a CHANGE READ COLUMN operation.
1151*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1152*4882a593Smuzhiyun  *
1153*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1154*4882a593Smuzhiyun  */
1155*4882a593Smuzhiyun int nand_change_read_column_op(struct nand_chip *chip,
1156*4882a593Smuzhiyun 			       unsigned int offset_in_page, void *buf,
1157*4882a593Smuzhiyun 			       unsigned int len, bool force_8bit)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	if (len && !buf)
1162*4882a593Smuzhiyun 		return -EINVAL;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1165*4882a593Smuzhiyun 		return -EINVAL;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset_in_page, -1);
1168*4882a593Smuzhiyun 	if (len)
1169*4882a593Smuzhiyun 		chip->read_buf(mtd, buf, len);
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	return 0;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_change_read_column_op);
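
/*
 * Editor's note: a hedged sketch (not in the original source) of the typical
 * pairing of nand_read_page_op() and nand_change_read_column_op(): once the
 * page is in the NAND's read cache, the column pointer can be moved to the
 * OOB area without issuing another READ PAGE. The helper is hypothetical.
 */
static int example_read_data_then_oob(struct nand_chip *chip,
				      unsigned int page, void *databuf,
				      void *oobbuf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_read_page_op(chip, page, 0, databuf, mtd->writesize);
	if (ret)
		return ret;

	/* Jump to the start of the OOB area of the same page */
	return nand_change_read_column_op(chip, mtd->writesize, oobbuf,
					  mtd->oobsize, false);
}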
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun /**
1176*4882a593Smuzhiyun  * nand_read_oob_op - Do a READ OOB operation
1177*4882a593Smuzhiyun  * @chip: The NAND chip
1178*4882a593Smuzhiyun  * @page: page to read
1179*4882a593Smuzhiyun  * @offset_in_oob: offset within the OOB area
1180*4882a593Smuzhiyun  * @buf: buffer used to store the data
1181*4882a593Smuzhiyun  * @len: length of the buffer
1182*4882a593Smuzhiyun  *
1183*4882a593Smuzhiyun  * This function issues a READ OOB operation.
1184*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1185*4882a593Smuzhiyun  *
1186*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1187*4882a593Smuzhiyun  */
1188*4882a593Smuzhiyun int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1189*4882a593Smuzhiyun 		     unsigned int offset_in_oob, void *buf, unsigned int len)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	if (len && !buf)
1194*4882a593Smuzhiyun 		return -EINVAL;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	if (offset_in_oob + len > mtd->oobsize)
1197*4882a593Smuzhiyun 		return -EINVAL;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_READOOB, offset_in_oob, page);
1200*4882a593Smuzhiyun 	if (len)
1201*4882a593Smuzhiyun 		chip->read_buf(mtd, buf, len);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	return 0;
1204*4882a593Smuzhiyun }
1205*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_read_oob_op);
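
/*
 * Editor's note: an illustrative, hypothetical helper showing a common use of
 * nand_read_oob_op(): fetching the first OOB bytes of a page, e.g. to inspect
 * a factory bad block marker.
 */
static int example_read_bbm(struct nand_chip *chip, unsigned int page, u8 *bbm)
{
	/* Two bytes cover the marker position for both x8 and x16 devices */
	return nand_read_oob_op(chip, page, 0, bbm, 2);
}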
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun /**
1208*4882a593Smuzhiyun  * nand_prog_page_begin_op - starts a PROG PAGE operation
1209*4882a593Smuzhiyun  * @chip: The NAND chip
1210*4882a593Smuzhiyun  * @page: page to write
1211*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1212*4882a593Smuzhiyun  * @buf: buffer containing the data to write to the page
1213*4882a593Smuzhiyun  * @len: length of the buffer
1214*4882a593Smuzhiyun  *
1215*4882a593Smuzhiyun  * This function issues the first half of a PROG PAGE operation.
1216*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1217*4882a593Smuzhiyun  *
1218*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1219*4882a593Smuzhiyun  */
1220*4882a593Smuzhiyun int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1221*4882a593Smuzhiyun 			    unsigned int offset_in_page, const void *buf,
1222*4882a593Smuzhiyun 			    unsigned int len)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	if (len && !buf)
1227*4882a593Smuzhiyun 		return -EINVAL;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1230*4882a593Smuzhiyun 		return -EINVAL;
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	if (buf)
1235*4882a593Smuzhiyun 		chip->write_buf(mtd, buf, len);
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	return 0;
1238*4882a593Smuzhiyun }
1239*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun /**
1242*4882a593Smuzhiyun  * nand_prog_page_end_op - ends a PROG PAGE operation
1243*4882a593Smuzhiyun  * @chip: The NAND chip
1244*4882a593Smuzhiyun  *
1245*4882a593Smuzhiyun  * This function issues the second half of a PROG PAGE operation.
1246*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1247*4882a593Smuzhiyun  *
1248*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1249*4882a593Smuzhiyun  */
1250*4882a593Smuzhiyun int nand_prog_page_end_op(struct nand_chip *chip)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1253*4882a593Smuzhiyun 	int status;
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	status = chip->waitfunc(mtd, chip);
1258*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1259*4882a593Smuzhiyun 		return -EIO;
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	return 0;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun /**
1266*4882a593Smuzhiyun  * nand_prog_page_op - Do a full PROG PAGE operation
1267*4882a593Smuzhiyun  * @chip: The NAND chip
1268*4882a593Smuzhiyun  * @page: page to write
1269*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1270*4882a593Smuzhiyun  * @buf: buffer containing the data to write to the page
1271*4882a593Smuzhiyun  * @len: length of the buffer
1272*4882a593Smuzhiyun  *
1273*4882a593Smuzhiyun  * This function issues a full PROG PAGE operation.
1274*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1275*4882a593Smuzhiyun  *
1276*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1277*4882a593Smuzhiyun  */
1278*4882a593Smuzhiyun int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1279*4882a593Smuzhiyun 		      unsigned int offset_in_page, const void *buf,
1280*4882a593Smuzhiyun 		      unsigned int len)
1281*4882a593Smuzhiyun {
1282*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1283*4882a593Smuzhiyun 	int status;
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	if (!len || !buf)
1286*4882a593Smuzhiyun 		return -EINVAL;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1289*4882a593Smuzhiyun 		return -EINVAL;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_SEQIN, offset_in_page, page);
1292*4882a593Smuzhiyun 	chip->write_buf(mtd, buf, len);
1293*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	status = chip->waitfunc(mtd, chip);
1296*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1297*4882a593Smuzhiyun 		return -EIO;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	return 0;
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_prog_page_op);
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun /**
1304*4882a593Smuzhiyun  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1305*4882a593Smuzhiyun  * @chip: The NAND chip
1306*4882a593Smuzhiyun  * @offset_in_page: offset within the page
1307*4882a593Smuzhiyun  * @buf: buffer containing the data to send to the NAND
1308*4882a593Smuzhiyun  * @len: length of the buffer
1309*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1310*4882a593Smuzhiyun  *
1311*4882a593Smuzhiyun  * This function issues a CHANGE WRITE COLUMN operation.
1312*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1313*4882a593Smuzhiyun  *
1314*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1315*4882a593Smuzhiyun  */
1316*4882a593Smuzhiyun int nand_change_write_column_op(struct nand_chip *chip,
1317*4882a593Smuzhiyun 				unsigned int offset_in_page,
1318*4882a593Smuzhiyun 				const void *buf, unsigned int len,
1319*4882a593Smuzhiyun 				bool force_8bit)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	if (len && !buf)
1324*4882a593Smuzhiyun 		return -EINVAL;
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1327*4882a593Smuzhiyun 		return -EINVAL;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset_in_page, -1);
1330*4882a593Smuzhiyun 	if (len)
1331*4882a593Smuzhiyun 		chip->write_buf(mtd, buf, len);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	return 0;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_change_write_column_op);
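
/*
 * Editor's note: a sketch, not present in the original file, of how the split
 * nand_prog_page_begin_op()/nand_prog_page_end_op() pair can be combined with
 * nand_change_write_column_op() to write page data and OOB in one program
 * cycle. The helper name is hypothetical.
 */
static int example_prog_page_with_oob(struct nand_chip *chip,
				      unsigned int page, const void *databuf,
				      const void *oobbuf)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, databuf, mtd->writesize);
	if (ret)
		return ret;

	/* Move the write column to the OOB area before confirming */
	ret = nand_change_write_column_op(chip, mtd->writesize, oobbuf,
					  mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}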
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun /**
1338*4882a593Smuzhiyun  * nand_readid_op - Do a READID operation
1339*4882a593Smuzhiyun  * @chip: The NAND chip
1340*4882a593Smuzhiyun  * @addr: address cycle to pass after the READID command
1341*4882a593Smuzhiyun  * @buf: buffer used to store the ID
1342*4882a593Smuzhiyun  * @len: length of the buffer
1343*4882a593Smuzhiyun  *
1344*4882a593Smuzhiyun  * This function sends a READID command and reads back the ID returned by the
1345*4882a593Smuzhiyun  * NAND.
1346*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1347*4882a593Smuzhiyun  *
1348*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1349*4882a593Smuzhiyun  */
1350*4882a593Smuzhiyun int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1351*4882a593Smuzhiyun 		   unsigned int len)
1352*4882a593Smuzhiyun {
1353*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1354*4882a593Smuzhiyun 	unsigned int i;
1355*4882a593Smuzhiyun 	u8 *id = buf;
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	if (len && !buf)
1358*4882a593Smuzhiyun 		return -EINVAL;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_READID, addr, -1);
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	for (i = 0; i < len; i++)
1363*4882a593Smuzhiyun 		id[i] = chip->read_byte(mtd);
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	return 0;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_readid_op);
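
/*
 * Editor's note: a hypothetical sketch of the classic nand_readid_op() use,
 * reading the manufacturer and device ID bytes at address 0x00 as done during
 * chip detection.
 */
static int example_read_ids(struct nand_chip *chip, u8 *maf_id, u8 *dev_id)
{
	u8 id[2];
	int ret;

	ret = nand_readid_op(chip, 0, id, sizeof(id));
	if (ret)
		return ret;

	*maf_id = id[0];
	*dev_id = id[1];

	return 0;
}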
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun /**
1370*4882a593Smuzhiyun  * nand_status_op - Do a STATUS operation
1371*4882a593Smuzhiyun  * @chip: The NAND chip
1372*4882a593Smuzhiyun  * @status: out variable to store the NAND status
1373*4882a593Smuzhiyun  *
1374*4882a593Smuzhiyun  * This function sends a STATUS command and reads back the status returned by
1375*4882a593Smuzhiyun  * the NAND.
1376*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1377*4882a593Smuzhiyun  *
1378*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1379*4882a593Smuzhiyun  */
1380*4882a593Smuzhiyun int nand_status_op(struct nand_chip *chip, u8 *status)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1385*4882a593Smuzhiyun 	if (status)
1386*4882a593Smuzhiyun 		*status = chip->read_byte(mtd);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	return 0;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_status_op);
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun /**
1393*4882a593Smuzhiyun  * nand_exit_status_op - Exit a STATUS operation
1394*4882a593Smuzhiyun  * @chip: The NAND chip
1395*4882a593Smuzhiyun  *
1396*4882a593Smuzhiyun  * This function sends a READ0 command to cancel the effect of the STATUS
1397*4882a593Smuzhiyun  * command, so that reads return data again instead of only the status.
1398*4882a593Smuzhiyun  *
1399*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1400*4882a593Smuzhiyun  *
1401*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1402*4882a593Smuzhiyun  */
1403*4882a593Smuzhiyun int nand_exit_status_op(struct nand_chip *chip)
1404*4882a593Smuzhiyun {
1405*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	return 0;
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_exit_status_op);
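
/*
 * Editor's note: a hedged sketch (not part of the driver) combining
 * nand_status_op() and nand_exit_status_op(): poll the status register until
 * the chip reports ready, then leave status mode so later reads return array
 * data again. Timeout handling is omitted for brevity.
 */
static int example_wait_ready(struct nand_chip *chip)
{
	u8 status;
	int ret;

	do {
		ret = nand_status_op(chip, &status);
		if (ret)
			return ret;
	} while (!(status & NAND_STATUS_READY));

	return nand_exit_status_op(chip);
}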
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun /**
1414*4882a593Smuzhiyun  * nand_erase_op - Do an erase operation
1415*4882a593Smuzhiyun  * @chip: The NAND chip
1416*4882a593Smuzhiyun  * @eraseblock: block to erase
1417*4882a593Smuzhiyun  *
1418*4882a593Smuzhiyun  * This function sends an ERASE command and waits for the NAND to be ready
1419*4882a593Smuzhiyun  * before returning.
1420*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1421*4882a593Smuzhiyun  *
1422*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1423*4882a593Smuzhiyun  */
1424*4882a593Smuzhiyun int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1425*4882a593Smuzhiyun {
1426*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1427*4882a593Smuzhiyun 	unsigned int page = eraseblock <<
1428*4882a593Smuzhiyun 			    (chip->phys_erase_shift - chip->page_shift);
1429*4882a593Smuzhiyun 	int status;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
1432*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	status = chip->waitfunc(mtd, chip);
1435*4882a593Smuzhiyun 	if (status < 0)
1436*4882a593Smuzhiyun 		return status;
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1439*4882a593Smuzhiyun 		return -EIO;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	return 0;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_erase_op);
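
/*
 * Editor's note: a hypothetical helper illustrating the eraseblock argument
 * of nand_erase_op(): it is a block index, so a byte offset must first be
 * shifted down by the chip's erase-block shift.
 */
static int example_erase_block_at(struct nand_chip *chip, loff_t offs)
{
	unsigned int eraseblock = offs >> chip->phys_erase_shift;

	return nand_erase_op(chip, eraseblock);
}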
1444*4882a593Smuzhiyun 
1445*4882a593Smuzhiyun /**
1446*4882a593Smuzhiyun  * nand_set_features_op - Do a SET FEATURES operation
1447*4882a593Smuzhiyun  * @chip: The NAND chip
1448*4882a593Smuzhiyun  * @feature: feature id
1449*4882a593Smuzhiyun  * @data: 4 bytes of data
1450*4882a593Smuzhiyun  *
1451*4882a593Smuzhiyun  * This function sends a SET FEATURES command and waits for the NAND to be
1452*4882a593Smuzhiyun  * ready before returning.
1453*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1454*4882a593Smuzhiyun  *
1455*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1456*4882a593Smuzhiyun  */
1457*4882a593Smuzhiyun static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1458*4882a593Smuzhiyun 				const void *data)
1459*4882a593Smuzhiyun {
1460*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1461*4882a593Smuzhiyun 	const u8 *params = data;
1462*4882a593Smuzhiyun 	int i, status;
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
1465*4882a593Smuzhiyun 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1466*4882a593Smuzhiyun 		chip->write_byte(mtd, params[i]);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	status = chip->waitfunc(mtd, chip);
1469*4882a593Smuzhiyun 	if (status & NAND_STATUS_FAIL)
1470*4882a593Smuzhiyun 		return -EIO;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	return 0;
1473*4882a593Smuzhiyun }
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun /**
1476*4882a593Smuzhiyun  * nand_get_features_op - Do a GET FEATURES operation
1477*4882a593Smuzhiyun  * @chip: The NAND chip
1478*4882a593Smuzhiyun  * @feature: feature id
1479*4882a593Smuzhiyun  * @data: 4 bytes of data
1480*4882a593Smuzhiyun  *
1481*4882a593Smuzhiyun  * This function sends a GET FEATURES command and waits for the NAND to be
1482*4882a593Smuzhiyun  * ready before returning.
1483*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1484*4882a593Smuzhiyun  *
1485*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1486*4882a593Smuzhiyun  */
1487*4882a593Smuzhiyun static int nand_get_features_op(struct nand_chip *chip, u8 feature,
1488*4882a593Smuzhiyun 				void *data)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1491*4882a593Smuzhiyun 	u8 *params = data;
1492*4882a593Smuzhiyun 	int i;
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, feature, -1);
1495*4882a593Smuzhiyun 	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1496*4882a593Smuzhiyun 		params[i] = chip->read_byte(mtd);
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	return 0;
1499*4882a593Smuzhiyun }
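
/*
 * Editor's note: a sketch, not in the original file, of a round trip through
 * the two static helpers above. It sets the ONFI timing mode feature and
 * reads it back for confirmation. The helper itself is hypothetical;
 * ONFI_FEATURE_ADDR_TIMING_MODE and ONFI_SUBFEATURE_PARAM_LEN come from the
 * rawnand header.
 */
static int example_set_timing_mode(struct nand_chip *chip, u8 mode)
{
	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { mode, };
	int ret;

	ret = nand_set_features_op(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				   feature);
	if (ret)
		return ret;

	/* Read back to confirm the NAND accepted the requested mode */
	return nand_get_features_op(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				    feature);
}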
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun /**
1502*4882a593Smuzhiyun  * nand_reset_op - Do a reset operation
1503*4882a593Smuzhiyun  * @chip: The NAND chip
1504*4882a593Smuzhiyun  *
1505*4882a593Smuzhiyun  * This function sends a RESET command and waits for the NAND to be ready
1506*4882a593Smuzhiyun  * before returning.
1507*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1508*4882a593Smuzhiyun  *
1509*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1510*4882a593Smuzhiyun  */
1511*4882a593Smuzhiyun int nand_reset_op(struct nand_chip *chip)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	return 0;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_reset_op);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun /**
1522*4882a593Smuzhiyun  * nand_read_data_op - Read data from the NAND
1523*4882a593Smuzhiyun  * @chip: The NAND chip
1524*4882a593Smuzhiyun  * @buf: buffer used to store the data
1525*4882a593Smuzhiyun  * @len: length of the buffer
1526*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1527*4882a593Smuzhiyun  *
1528*4882a593Smuzhiyun  * This function does a raw data read on the bus. Usually used after launching
1529*4882a593Smuzhiyun  * another NAND operation like nand_read_page_op().
1530*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1531*4882a593Smuzhiyun  *
1532*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1533*4882a593Smuzhiyun  */
1534*4882a593Smuzhiyun int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
1535*4882a593Smuzhiyun 		      bool force_8bit)
1536*4882a593Smuzhiyun {
1537*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	if (!len || !buf)
1540*4882a593Smuzhiyun 		return -EINVAL;
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun 	if (force_8bit) {
1543*4882a593Smuzhiyun 		u8 *p = buf;
1544*4882a593Smuzhiyun 		unsigned int i;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
1547*4882a593Smuzhiyun 			p[i] = chip->read_byte(mtd);
1548*4882a593Smuzhiyun 	} else {
1549*4882a593Smuzhiyun 		chip->read_buf(mtd, buf, len);
1550*4882a593Smuzhiyun 	}
1551*4882a593Smuzhiyun 
1552*4882a593Smuzhiyun 	return 0;
1553*4882a593Smuzhiyun }
1554*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_read_data_op);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun /**
1557*4882a593Smuzhiyun  * nand_write_data_op - Write data to the NAND
1558*4882a593Smuzhiyun  * @chip: The NAND chip
1559*4882a593Smuzhiyun  * @buf: buffer containing the data to send on the bus
1560*4882a593Smuzhiyun  * @len: length of the buffer
1561*4882a593Smuzhiyun  * @force_8bit: force 8-bit bus access
1562*4882a593Smuzhiyun  *
1563*4882a593Smuzhiyun  * This function does a raw data write on the bus. Usually used after launching
1564*4882a593Smuzhiyun  * another NAND operation like nand_prog_page_begin_op().
1565*4882a593Smuzhiyun  * This function does not select/unselect the CS line.
1566*4882a593Smuzhiyun  *
1567*4882a593Smuzhiyun  * Returns 0 on success, a negative error code otherwise.
1568*4882a593Smuzhiyun  */
1569*4882a593Smuzhiyun int nand_write_data_op(struct nand_chip *chip, const void *buf,
1570*4882a593Smuzhiyun 		       unsigned int len, bool force_8bit)
1571*4882a593Smuzhiyun {
1572*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	if (!len || !buf)
1575*4882a593Smuzhiyun 		return -EINVAL;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	if (force_8bit) {
1578*4882a593Smuzhiyun 		const u8 *p = buf;
1579*4882a593Smuzhiyun 		unsigned int i;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
1582*4882a593Smuzhiyun 			chip->write_byte(mtd, p[i]);
1583*4882a593Smuzhiyun 	} else {
1584*4882a593Smuzhiyun 		chip->write_buf(mtd, buf, len);
1585*4882a593Smuzhiyun 	}
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	return 0;
1588*4882a593Smuzhiyun }
1589*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_write_data_op);
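
/*
 * Editor's note: a hypothetical example of the force_8bit argument shared by
 * nand_read_data_op() and nand_write_data_op(). On x16 chips some transfers
 * must still happen byte by byte, which this flag enforces.
 */
static int example_read_raw_bytes(struct nand_chip *chip, void *buf,
				  unsigned int len)
{
	bool force_8bit = chip->options & NAND_BUSWIDTH_16;

	return nand_read_data_op(chip, buf, len, force_8bit);
}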
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun /**
1592*4882a593Smuzhiyun  * nand_reset - Reset and initialize a NAND device
1593*4882a593Smuzhiyun  * @chip: The NAND chip
1594*4882a593Smuzhiyun  * @chipnr: Internal die id
1595*4882a593Smuzhiyun  *
1596*4882a593Smuzhiyun  * Returns 0 for success or negative error code otherwise
1597*4882a593Smuzhiyun  */
1598*4882a593Smuzhiyun int nand_reset(struct nand_chip *chip, int chipnr)
1599*4882a593Smuzhiyun {
1600*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1601*4882a593Smuzhiyun 	int ret;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	ret = nand_reset_data_interface(chip, chipnr);
1604*4882a593Smuzhiyun 	if (ret)
1605*4882a593Smuzhiyun 		return ret;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	/*
1608*4882a593Smuzhiyun 	 * The CS line has to be released before we can apply the new NAND
1609*4882a593Smuzhiyun 	 * interface settings, hence this weird ->select_chip() dance.
1610*4882a593Smuzhiyun 	 */
1611*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
1612*4882a593Smuzhiyun 	ret = nand_reset_op(chip);
1613*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
1614*4882a593Smuzhiyun 	if (ret)
1615*4882a593Smuzhiyun 		return ret;
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
1618*4882a593Smuzhiyun 	ret = nand_setup_data_interface(chip, chipnr);
1619*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
1620*4882a593Smuzhiyun 	if (ret)
1621*4882a593Smuzhiyun 		return ret;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	return 0;
1624*4882a593Smuzhiyun }
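
/*
 * Editor's note: a hedged sketch (not in the original source) showing
 * nand_reset() being applied to every die of a multi-die package, which is
 * what callers typically do after probing.
 */
static int example_reset_all_dies(struct nand_chip *chip)
{
	int i, ret;

	for (i = 0; i < chip->numchips; i++) {
		ret = nand_reset(chip, i);
		if (ret)
			return ret;
	}

	return 0;
}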
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun /**
1627*4882a593Smuzhiyun  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
1628*4882a593Smuzhiyun  * @buf: buffer to test
1629*4882a593Smuzhiyun  * @len: buffer length
1630*4882a593Smuzhiyun  * @bitflips_threshold: maximum number of bitflips
1631*4882a593Smuzhiyun  *
1632*4882a593Smuzhiyun  * Check if a buffer contains only 0xff, which means the underlying region
1633*4882a593Smuzhiyun  * has been erased and is ready to be programmed.
1634*4882a593Smuzhiyun  * The bitflips_threshold specifies the maximum number of bitflips before
1635*4882a593Smuzhiyun  * considering the region as not erased.
1636*4882a593Smuzhiyun  * Note: The logic of this function has been extracted from the memweight
1637*4882a593Smuzhiyun  * implementation, except that nand_check_erased_buf exits before testing
1638*4882a593Smuzhiyun  * the whole buffer if the number of bitflips exceeds the
1639*4882a593Smuzhiyun  * bitflips_threshold value.
1640*4882a593Smuzhiyun  *
1641*4882a593Smuzhiyun  * Returns a positive number of bitflips less than or equal to
1642*4882a593Smuzhiyun  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1643*4882a593Smuzhiyun  * threshold.
1644*4882a593Smuzhiyun  */
1645*4882a593Smuzhiyun static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun 	const unsigned char *bitmap = buf;
1648*4882a593Smuzhiyun 	int bitflips = 0;
1649*4882a593Smuzhiyun 	int weight;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	for (; len && ((uintptr_t)bitmap) % sizeof(long);
1652*4882a593Smuzhiyun 	     len--, bitmap++) {
1653*4882a593Smuzhiyun 		weight = hweight8(*bitmap);
1654*4882a593Smuzhiyun 		bitflips += BITS_PER_BYTE - weight;
1655*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
1656*4882a593Smuzhiyun 			return -EBADMSG;
1657*4882a593Smuzhiyun 	}
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	for (; len >= 4; len -= 4, bitmap += 4) {
1660*4882a593Smuzhiyun 		weight = hweight32(*((u32 *)bitmap));
1661*4882a593Smuzhiyun 		bitflips += 32 - weight;
1662*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
1663*4882a593Smuzhiyun 			return -EBADMSG;
1664*4882a593Smuzhiyun 	}
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	for (; len > 0; len--, bitmap++) {
1667*4882a593Smuzhiyun 		weight = hweight8(*bitmap);
1668*4882a593Smuzhiyun 		bitflips += BITS_PER_BYTE - weight;
1669*4882a593Smuzhiyun 		if (unlikely(bitflips > bitflips_threshold))
1670*4882a593Smuzhiyun 			return -EBADMSG;
1671*4882a593Smuzhiyun 	}
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	return bitflips;
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun /**
1677*4882a593Smuzhiyun  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
1678*4882a593Smuzhiyun  *				 0xff data
1679*4882a593Smuzhiyun  * @data: data buffer to test
1680*4882a593Smuzhiyun  * @datalen: data length
1681*4882a593Smuzhiyun  * @ecc: ECC buffer
1682*4882a593Smuzhiyun  * @ecclen: ECC length
1683*4882a593Smuzhiyun  * @extraoob: extra OOB buffer
1684*4882a593Smuzhiyun  * @extraooblen: extra OOB length
1685*4882a593Smuzhiyun  * @bitflips_threshold: maximum number of bitflips
1686*4882a593Smuzhiyun  *
1687*4882a593Smuzhiyun  * Check if a data buffer and its associated ECC and OOB data contains only
1688*4882a593Smuzhiyun  * 0xff pattern, which means the underlying region has been erased and is
1689*4882a593Smuzhiyun  * ready to be programmed.
1690*4882a593Smuzhiyun  * The bitflips_threshold specifies the maximum number of bitflips before
1691*4882a593Smuzhiyun  * considering the region as not erased.
1692*4882a593Smuzhiyun  *
1693*4882a593Smuzhiyun  * Note:
1694*4882a593Smuzhiyun  * 1/ ECC algorithms work on pre-defined block sizes which are usually
1695*4882a593Smuzhiyun  *    different from the NAND page size. When fixing bitflips, ECC engines will
1696*4882a593Smuzhiyun  *    report the number of errors per chunk, and the NAND core infrastructure
1697*4882a593Smuzhiyun  *    expects you to return the maximum number of bitflips for the whole page.
1698*4882a593Smuzhiyun  *    This is why you should always use this function on a single chunk and
1699*4882a593Smuzhiyun  *    not on the whole page. After checking each chunk you should update your
1700*4882a593Smuzhiyun  *    max_bitflips value accordingly.
1701*4882a593Smuzhiyun  * 2/ When checking for bitflips in erased pages you should not only check
1702*4882a593Smuzhiyun  *    the payload data but also the associated ECC data, because a user might
1703*4882a593Smuzhiyun  *    have programmed almost all bits to 1 except a few. In this case, we
1704*4882a593Smuzhiyun  *    shouldn't consider the chunk as erased, and checking the ECC bytes
1705*4882a593Smuzhiyun  *    prevents this case.
1706*4882a593Smuzhiyun  * 3/ The extraoob argument is optional, and should be used if some of your OOB
1707*4882a593Smuzhiyun  *    data are protected by the ECC engine.
1708*4882a593Smuzhiyun  *    It could also be used if you support subpages and want to attach some
1709*4882a593Smuzhiyun  *    extra OOB data to an ECC chunk.
1710*4882a593Smuzhiyun  *
1711*4882a593Smuzhiyun  * Returns a positive number of bitflips less than or equal to
1712*4882a593Smuzhiyun  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
1713*4882a593Smuzhiyun  * threshold. In case of success, the passed buffers are filled with 0xff.
1714*4882a593Smuzhiyun  */
1715*4882a593Smuzhiyun int nand_check_erased_ecc_chunk(void *data, int datalen,
1716*4882a593Smuzhiyun 				void *ecc, int ecclen,
1717*4882a593Smuzhiyun 				void *extraoob, int extraooblen,
1718*4882a593Smuzhiyun 				int bitflips_threshold)
1719*4882a593Smuzhiyun {
1720*4882a593Smuzhiyun 	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	data_bitflips = nand_check_erased_buf(data, datalen,
1723*4882a593Smuzhiyun 					      bitflips_threshold);
1724*4882a593Smuzhiyun 	if (data_bitflips < 0)
1725*4882a593Smuzhiyun 		return data_bitflips;
1726*4882a593Smuzhiyun 
1727*4882a593Smuzhiyun 	bitflips_threshold -= data_bitflips;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
1730*4882a593Smuzhiyun 	if (ecc_bitflips < 0)
1731*4882a593Smuzhiyun 		return ecc_bitflips;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	bitflips_threshold -= ecc_bitflips;
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
1736*4882a593Smuzhiyun 						  bitflips_threshold);
1737*4882a593Smuzhiyun 	if (extraoob_bitflips < 0)
1738*4882a593Smuzhiyun 		return extraoob_bitflips;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	if (data_bitflips)
1741*4882a593Smuzhiyun 		memset(data, 0xff, datalen);
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	if (ecc_bitflips)
1744*4882a593Smuzhiyun 		memset(ecc, 0xff, ecclen);
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 	if (extraoob_bitflips)
1747*4882a593Smuzhiyun 		memset(extraoob, 0xff, extraooblen);
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	return data_bitflips + ecc_bitflips + extraoob_bitflips;
1750*4882a593Smuzhiyun }
1751*4882a593Smuzhiyun EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
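
/*
 * Editor's note: an illustrative, hypothetical per-chunk pattern for
 * nand_check_erased_ecc_chunk(), mirroring what the read-page helpers further
 * down in this file do when the ECC engine reports an uncorrectable error.
 */
static int example_correct_or_erased(struct mtd_info *mtd,
				     struct nand_chip *chip,
				     u8 *data, u8 *ecc)
{
	int stat;

	stat = chip->ecc.correct(mtd, data, ecc, NULL);
	if (stat == -EBADMSG)
		/* Maybe the chunk is simply erased with a few bitflips */
		stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
						   ecc, chip->ecc.bytes,
						   NULL, 0,
						   chip->ecc.strength);

	if (stat < 0)
		mtd->ecc_stats.failed++;
	else
		mtd->ecc_stats.corrected += stat;

	return stat;
}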
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun /**
1754*4882a593Smuzhiyun  * nand_read_page_raw - [INTERN] read raw page data without ecc
1755*4882a593Smuzhiyun  * @mtd: mtd info structure
1756*4882a593Smuzhiyun  * @chip: nand chip info structure
1757*4882a593Smuzhiyun  * @buf: buffer to store read data
1758*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
1759*4882a593Smuzhiyun  * @page: page number to read
1760*4882a593Smuzhiyun  *
1761*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers, which use a special oob layout.
1762*4882a593Smuzhiyun  */
1763*4882a593Smuzhiyun static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1764*4882a593Smuzhiyun 			      uint8_t *buf, int oob_required, int page)
1765*4882a593Smuzhiyun {
1766*4882a593Smuzhiyun 	int ret;
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	ret = nand_read_data_op(chip, buf, mtd->writesize, false);
1769*4882a593Smuzhiyun 	if (ret)
1770*4882a593Smuzhiyun 		return ret;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	if (oob_required) {
1773*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
1774*4882a593Smuzhiyun 					false);
1775*4882a593Smuzhiyun 		if (ret)
1776*4882a593Smuzhiyun 			return ret;
1777*4882a593Smuzhiyun 	}
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 	return 0;
1780*4882a593Smuzhiyun }
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun /**
1783*4882a593Smuzhiyun  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
1784*4882a593Smuzhiyun  * @mtd: mtd info structure
1785*4882a593Smuzhiyun  * @chip: nand chip info structure
1786*4882a593Smuzhiyun  * @buf: buffer to store read data
1787*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
1788*4882a593Smuzhiyun  * @page: page number to read
1789*4882a593Smuzhiyun  *
1790*4882a593Smuzhiyun  * We need a special oob layout and handling even when OOB isn't used.
1791*4882a593Smuzhiyun  */
1792*4882a593Smuzhiyun static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1793*4882a593Smuzhiyun 				       struct nand_chip *chip, uint8_t *buf,
1794*4882a593Smuzhiyun 				       int oob_required, int page)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun 	int eccsize = chip->ecc.size;
1797*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
1798*4882a593Smuzhiyun 	uint8_t *oob = chip->oob_poi;
1799*4882a593Smuzhiyun 	int steps, size, ret;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	for (steps = chip->ecc.steps; steps > 0; steps--) {
1802*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, buf, eccsize, false);
1803*4882a593Smuzhiyun 		if (ret)
1804*4882a593Smuzhiyun 			return ret;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 		buf += eccsize;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 		if (chip->ecc.prepad) {
1809*4882a593Smuzhiyun 			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
1810*4882a593Smuzhiyun 						false);
1811*4882a593Smuzhiyun 			if (ret)
1812*4882a593Smuzhiyun 				return ret;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 			oob += chip->ecc.prepad;
1815*4882a593Smuzhiyun 		}
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, oob, eccbytes, false);
1818*4882a593Smuzhiyun 		if (ret)
1819*4882a593Smuzhiyun 			return ret;
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 		oob += eccbytes;
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 		if (chip->ecc.postpad) {
1824*4882a593Smuzhiyun 			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
1825*4882a593Smuzhiyun 						false);
1826*4882a593Smuzhiyun 			if (ret)
1827*4882a593Smuzhiyun 				return ret;
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 			oob += chip->ecc.postpad;
1830*4882a593Smuzhiyun 		}
1831*4882a593Smuzhiyun 	}
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	size = mtd->oobsize - (oob - chip->oob_poi);
1834*4882a593Smuzhiyun 	if (size) {
1835*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, oob, size, false);
1836*4882a593Smuzhiyun 		if (ret)
1837*4882a593Smuzhiyun 			return ret;
1838*4882a593Smuzhiyun 	}
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	return 0;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun /**
1844*4882a593Smuzhiyun  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
1845*4882a593Smuzhiyun  * @mtd: mtd info structure
1846*4882a593Smuzhiyun  * @chip: nand chip info structure
1847*4882a593Smuzhiyun  * @buf: buffer to store read data
1848*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
1849*4882a593Smuzhiyun  * @page: page number to read
1850*4882a593Smuzhiyun  */
1851*4882a593Smuzhiyun static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1852*4882a593Smuzhiyun 				uint8_t *buf, int oob_required, int page)
1853*4882a593Smuzhiyun {
1854*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
1855*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
1856*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
1857*4882a593Smuzhiyun 	uint8_t *p = buf;
1858*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
1859*4882a593Smuzhiyun 	uint8_t *ecc_code = chip->buffers->ecccode;
1860*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
1861*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1866*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
1869*4882a593Smuzhiyun 		ecc_code[i] = chip->oob_poi[eccpos[i]];
1870*4882a593Smuzhiyun 
1871*4882a593Smuzhiyun 	eccsteps = chip->ecc.steps;
1872*4882a593Smuzhiyun 	p = buf;
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1875*4882a593Smuzhiyun 		int stat;
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1878*4882a593Smuzhiyun 		if (stat < 0) {
1879*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
1880*4882a593Smuzhiyun 		} else {
1881*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
1882*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1883*4882a593Smuzhiyun 		}
1884*4882a593Smuzhiyun 	}
1885*4882a593Smuzhiyun 	return max_bitflips;
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun /**
1889*4882a593Smuzhiyun  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
1890*4882a593Smuzhiyun  * @mtd: mtd info structure
1891*4882a593Smuzhiyun  * @chip: nand chip info structure
1892*4882a593Smuzhiyun  * @data_offs: offset of requested data within the page
1893*4882a593Smuzhiyun  * @readlen: data length
1894*4882a593Smuzhiyun  * @bufpoi: buffer to store read data
1895*4882a593Smuzhiyun  * @page: page number to read
1896*4882a593Smuzhiyun  */
1897*4882a593Smuzhiyun static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1898*4882a593Smuzhiyun 			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
1899*4882a593Smuzhiyun 			int page)
1900*4882a593Smuzhiyun {
1901*4882a593Smuzhiyun 	int start_step, end_step, num_steps;
1902*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
1903*4882a593Smuzhiyun 	uint8_t *p;
1904*4882a593Smuzhiyun 	int data_col_addr, i, gaps = 0;
1905*4882a593Smuzhiyun 	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1906*4882a593Smuzhiyun 	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1907*4882a593Smuzhiyun 	int index;
1908*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
1909*4882a593Smuzhiyun 	int ret;
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	/* Column address within the page aligned to ECC size (256 bytes) */
1912*4882a593Smuzhiyun 	start_step = data_offs / chip->ecc.size;
1913*4882a593Smuzhiyun 	end_step = (data_offs + readlen - 1) / chip->ecc.size;
1914*4882a593Smuzhiyun 	num_steps = end_step - start_step + 1;
1915*4882a593Smuzhiyun 	index = start_step * chip->ecc.bytes;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	/* Data size aligned to ecc.size */
1918*4882a593Smuzhiyun 	datafrag_len = num_steps * chip->ecc.size;
1919*4882a593Smuzhiyun 	eccfrag_len = num_steps * chip->ecc.bytes;
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	data_col_addr = start_step * chip->ecc.size;
1922*4882a593Smuzhiyun 	/* If we are not reading page-aligned data */
1923*4882a593Smuzhiyun 	if (data_col_addr != 0)
1924*4882a593Smuzhiyun 		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	p = bufpoi + data_col_addr;
1927*4882a593Smuzhiyun 	ret = nand_read_data_op(chip, p, datafrag_len, false);
1928*4882a593Smuzhiyun 	if (ret)
1929*4882a593Smuzhiyun 		return ret;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	/* Calculate ECC */
1932*4882a593Smuzhiyun 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
1933*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	/*
1936*4882a593Smuzhiyun 	 * Performance is better if we position offsets according to
1937*4882a593Smuzhiyun 	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
1938*4882a593Smuzhiyun 	 */
1939*4882a593Smuzhiyun 	for (i = 0; i < eccfrag_len - 1; i++) {
1940*4882a593Smuzhiyun 		if (eccpos[i + index] + 1 != eccpos[i + index + 1]) {
1941*4882a593Smuzhiyun 			gaps = 1;
1942*4882a593Smuzhiyun 			break;
1943*4882a593Smuzhiyun 		}
1944*4882a593Smuzhiyun 	}
1945*4882a593Smuzhiyun 	if (gaps) {
1946*4882a593Smuzhiyun 		ret = nand_change_read_column_op(chip, mtd->writesize,
1947*4882a593Smuzhiyun 						 chip->oob_poi, mtd->oobsize,
1948*4882a593Smuzhiyun 						 false);
1949*4882a593Smuzhiyun 		if (ret)
1950*4882a593Smuzhiyun 			return ret;
1951*4882a593Smuzhiyun 	} else {
1952*4882a593Smuzhiyun 		/*
1953*4882a593Smuzhiyun 		 * Send the command to read the particular ECC bytes, taking
1954*4882a593Smuzhiyun 		 * care of buswidth alignment in read_buf.
1955*4882a593Smuzhiyun 		 */
1956*4882a593Smuzhiyun 		aligned_pos = eccpos[index] & ~(busw - 1);
1957*4882a593Smuzhiyun 		aligned_len = eccfrag_len;
1958*4882a593Smuzhiyun 		if (eccpos[index] & (busw - 1))
1959*4882a593Smuzhiyun 			aligned_len++;
1960*4882a593Smuzhiyun 		if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
1961*4882a593Smuzhiyun 			aligned_len++;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 		ret = nand_change_read_column_op(chip,
1964*4882a593Smuzhiyun 						 mtd->writesize + aligned_pos,
1965*4882a593Smuzhiyun 						 &chip->oob_poi[aligned_pos],
1966*4882a593Smuzhiyun 						 aligned_len, false);
1967*4882a593Smuzhiyun 		if (ret)
1968*4882a593Smuzhiyun 			return ret;
1969*4882a593Smuzhiyun 	}
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	for (i = 0; i < eccfrag_len; i++)
1972*4882a593Smuzhiyun 		chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	p = bufpoi + data_col_addr;
1975*4882a593Smuzhiyun 	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
1976*4882a593Smuzhiyun 		int stat;
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 		stat = chip->ecc.correct(mtd, p,
1979*4882a593Smuzhiyun 			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1980*4882a593Smuzhiyun 		if (stat == -EBADMSG &&
1981*4882a593Smuzhiyun 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
1982*4882a593Smuzhiyun 			/* check for empty pages with bitflips */
1983*4882a593Smuzhiyun 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
1984*4882a593Smuzhiyun 						&chip->buffers->ecccode[i],
1985*4882a593Smuzhiyun 						chip->ecc.bytes,
1986*4882a593Smuzhiyun 						NULL, 0,
1987*4882a593Smuzhiyun 						chip->ecc.strength);
1988*4882a593Smuzhiyun 		}
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 		if (stat < 0) {
1991*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
1992*4882a593Smuzhiyun 		} else {
1993*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
1994*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
1995*4882a593Smuzhiyun 		}
1996*4882a593Smuzhiyun 	}
1997*4882a593Smuzhiyun 	return max_bitflips;
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun /**
2001*4882a593Smuzhiyun  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2002*4882a593Smuzhiyun  * @mtd: mtd info structure
2003*4882a593Smuzhiyun  * @chip: nand chip info structure
2004*4882a593Smuzhiyun  * @buf: buffer to store read data
2005*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2006*4882a593Smuzhiyun  * @page: page number to read
2007*4882a593Smuzhiyun  *
2008*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers which need a special oob layout.
2009*4882a593Smuzhiyun  */
2010*4882a593Smuzhiyun static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2011*4882a593Smuzhiyun 				uint8_t *buf, int oob_required, int page)
2012*4882a593Smuzhiyun {
2013*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
2014*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2015*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2016*4882a593Smuzhiyun 	uint8_t *p = buf;
2017*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2018*4882a593Smuzhiyun 	uint8_t *ecc_code = chip->buffers->ecccode;
2019*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
2020*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
2021*4882a593Smuzhiyun 	int ret;
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2024*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, eccsize, false);
2027*4882a593Smuzhiyun 		if (ret)
2028*4882a593Smuzhiyun 			return ret;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2031*4882a593Smuzhiyun 	}
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false);
2034*4882a593Smuzhiyun 	if (ret)
2035*4882a593Smuzhiyun 		return ret;
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
2038*4882a593Smuzhiyun 		ecc_code[i] = chip->oob_poi[eccpos[i]];
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	eccsteps = chip->ecc.steps;
2041*4882a593Smuzhiyun 	p = buf;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2044*4882a593Smuzhiyun 		int stat;
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
2047*4882a593Smuzhiyun 		if (stat == -EBADMSG &&
2048*4882a593Smuzhiyun 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2049*4882a593Smuzhiyun 			/* check for empty pages with bitflips */
2050*4882a593Smuzhiyun 			stat = nand_check_erased_ecc_chunk(p, eccsize,
2051*4882a593Smuzhiyun 						&ecc_code[i], eccbytes,
2052*4882a593Smuzhiyun 						NULL, 0,
2053*4882a593Smuzhiyun 						chip->ecc.strength);
2054*4882a593Smuzhiyun 		}
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 		if (stat < 0) {
2057*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
2058*4882a593Smuzhiyun 		} else {
2059*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
2060*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
2061*4882a593Smuzhiyun 		}
2062*4882a593Smuzhiyun 	}
2063*4882a593Smuzhiyun 	return max_bitflips;
2064*4882a593Smuzhiyun }
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun /**
2067*4882a593Smuzhiyun  * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
2068*4882a593Smuzhiyun  * @mtd: mtd info structure
2069*4882a593Smuzhiyun  * @chip: nand chip info structure
2070*4882a593Smuzhiyun  * @buf: buffer to store read data
2071*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2072*4882a593Smuzhiyun  * @page: page number to read
2073*4882a593Smuzhiyun  *
2074*4882a593Smuzhiyun  * Hardware ECC for large page chips that requires the OOB to be read first.
2075*4882a593Smuzhiyun  * For this ECC mode, the write_page method is re-used from ECC_HW. These
2076*4882a593Smuzhiyun  * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support
2077*4882a593Smuzhiyun  * with multiple ECC steps, which follows the "infix ECC" scheme and reads/writes
2078*4882a593Smuzhiyun  * ECC from the data area, overwriting the NAND manufacturer bad block markings.
2079*4882a593Smuzhiyun  */
2080*4882a593Smuzhiyun static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
2081*4882a593Smuzhiyun 	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
2082*4882a593Smuzhiyun {
2083*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
2084*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2085*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2086*4882a593Smuzhiyun 	uint8_t *p = buf;
2087*4882a593Smuzhiyun 	uint8_t *ecc_code = chip->buffers->ecccode;
2088*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
2089*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2090*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
2091*4882a593Smuzhiyun 	int ret;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	/* Read the OOB area first */
2094*4882a593Smuzhiyun 	ret = nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2095*4882a593Smuzhiyun 	if (ret)
2096*4882a593Smuzhiyun 		return ret;
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	ret = nand_read_page_op(chip, page, 0, NULL, 0);
2099*4882a593Smuzhiyun 	if (ret)
2100*4882a593Smuzhiyun 		return ret;
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
2103*4882a593Smuzhiyun 		ecc_code[i] = chip->oob_poi[eccpos[i]];
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2106*4882a593Smuzhiyun 		int stat;
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, eccsize, false);
2111*4882a593Smuzhiyun 		if (ret)
2112*4882a593Smuzhiyun 			return ret;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
2117*4882a593Smuzhiyun 		if (stat == -EBADMSG &&
2118*4882a593Smuzhiyun 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2119*4882a593Smuzhiyun 			/* check for empty pages with bitflips */
2120*4882a593Smuzhiyun 			stat = nand_check_erased_ecc_chunk(p, eccsize,
2121*4882a593Smuzhiyun 						&ecc_code[i], eccbytes,
2122*4882a593Smuzhiyun 						NULL, 0,
2123*4882a593Smuzhiyun 						chip->ecc.strength);
2124*4882a593Smuzhiyun 		}
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 		if (stat < 0) {
2127*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
2128*4882a593Smuzhiyun 		} else {
2129*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
2130*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
2131*4882a593Smuzhiyun 		}
2132*4882a593Smuzhiyun 	}
2133*4882a593Smuzhiyun 	return max_bitflips;
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun /**
2137*4882a593Smuzhiyun  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2138*4882a593Smuzhiyun  * @mtd: mtd info structure
2139*4882a593Smuzhiyun  * @chip: nand chip info structure
2140*4882a593Smuzhiyun  * @buf: buffer to store read data
2141*4882a593Smuzhiyun  * @oob_required: caller requires OOB data read to chip->oob_poi
2142*4882a593Smuzhiyun  * @page: page number to read
2143*4882a593Smuzhiyun  *
2144*4882a593Smuzhiyun  * The hw generator calculates the error syndrome automatically. Therefore we
2145*4882a593Smuzhiyun  * need a special oob layout and handling.
2146*4882a593Smuzhiyun  */
2147*4882a593Smuzhiyun static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2148*4882a593Smuzhiyun 				   uint8_t *buf, int oob_required, int page)
2149*4882a593Smuzhiyun {
2150*4882a593Smuzhiyun 	int ret, i, eccsize = chip->ecc.size;
2151*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2152*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2153*4882a593Smuzhiyun 	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
2154*4882a593Smuzhiyun 	uint8_t *p = buf;
2155*4882a593Smuzhiyun 	uint8_t *oob = chip->oob_poi;
2156*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2159*4882a593Smuzhiyun 		int stat;
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_READ);
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, eccsize, false);
2164*4882a593Smuzhiyun 		if (ret)
2165*4882a593Smuzhiyun 			return ret;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 		if (chip->ecc.prepad) {
2168*4882a593Smuzhiyun 			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
2169*4882a593Smuzhiyun 						false);
2170*4882a593Smuzhiyun 			if (ret)
2171*4882a593Smuzhiyun 				return ret;
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 			oob += chip->ecc.prepad;
2174*4882a593Smuzhiyun 		}
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, oob, eccbytes, false);
2179*4882a593Smuzhiyun 		if (ret)
2180*4882a593Smuzhiyun 			return ret;
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 		stat = chip->ecc.correct(mtd, p, oob, NULL);
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun 		oob += eccbytes;
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 		if (chip->ecc.postpad) {
2187*4882a593Smuzhiyun 			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
2188*4882a593Smuzhiyun 						false);
2189*4882a593Smuzhiyun 			if (ret)
2190*4882a593Smuzhiyun 				return ret;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 			oob += chip->ecc.postpad;
2193*4882a593Smuzhiyun 		}
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 		if (stat == -EBADMSG &&
2196*4882a593Smuzhiyun 		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
2197*4882a593Smuzhiyun 			/* check for empty pages with bitflips */
2198*4882a593Smuzhiyun 			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
2199*4882a593Smuzhiyun 							   oob - eccpadbytes,
2200*4882a593Smuzhiyun 							   eccpadbytes,
2201*4882a593Smuzhiyun 							   NULL, 0,
2202*4882a593Smuzhiyun 							   chip->ecc.strength);
2203*4882a593Smuzhiyun 		}
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 		if (stat < 0) {
2206*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
2207*4882a593Smuzhiyun 		} else {
2208*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
2209*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
2210*4882a593Smuzhiyun 		}
2211*4882a593Smuzhiyun 	}
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	/* Calculate remaining oob bytes */
2214*4882a593Smuzhiyun 	i = mtd->oobsize - (oob - chip->oob_poi);
2215*4882a593Smuzhiyun 	if (i) {
2216*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, oob, i, false);
2217*4882a593Smuzhiyun 		if (ret)
2218*4882a593Smuzhiyun 			return ret;
2219*4882a593Smuzhiyun 	}
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 	return max_bitflips;
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun /**
2225*4882a593Smuzhiyun  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
2226*4882a593Smuzhiyun  * @chip: nand chip structure
2227*4882a593Smuzhiyun  * @oob: oob destination address
2228*4882a593Smuzhiyun  * @ops: oob ops structure
2229*4882a593Smuzhiyun  * @len: size of oob to transfer
2230*4882a593Smuzhiyun  */
2231*4882a593Smuzhiyun static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
2232*4882a593Smuzhiyun 				  struct mtd_oob_ops *ops, size_t len)
2233*4882a593Smuzhiyun {
2234*4882a593Smuzhiyun 	switch (ops->mode) {
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
2237*4882a593Smuzhiyun 	case MTD_OPS_RAW:
2238*4882a593Smuzhiyun 		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
2239*4882a593Smuzhiyun 		return oob + len;
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB: {
2242*4882a593Smuzhiyun 		struct nand_oobfree *free = chip->ecc.layout->oobfree;
2243*4882a593Smuzhiyun 		uint32_t boffs = 0, roffs = ops->ooboffs;
2244*4882a593Smuzhiyun 		size_t bytes = 0;
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 		for (; free->length && len; free++, len -= bytes) {
2247*4882a593Smuzhiyun 			/* Read request not from offset 0? */
2248*4882a593Smuzhiyun 			if (unlikely(roffs)) {
2249*4882a593Smuzhiyun 				if (roffs >= free->length) {
2250*4882a593Smuzhiyun 					roffs -= free->length;
2251*4882a593Smuzhiyun 					continue;
2252*4882a593Smuzhiyun 				}
2253*4882a593Smuzhiyun 				boffs = free->offset + roffs;
2254*4882a593Smuzhiyun 				bytes = min_t(size_t, len,
2255*4882a593Smuzhiyun 					      (free->length - roffs));
2256*4882a593Smuzhiyun 				roffs = 0;
2257*4882a593Smuzhiyun 			} else {
2258*4882a593Smuzhiyun 				bytes = min_t(size_t, len, free->length);
2259*4882a593Smuzhiyun 				boffs = free->offset;
2260*4882a593Smuzhiyun 			}
2261*4882a593Smuzhiyun 			memcpy(oob, chip->oob_poi + boffs, bytes);
2262*4882a593Smuzhiyun 			oob += bytes;
2263*4882a593Smuzhiyun 		}
2264*4882a593Smuzhiyun 		return oob;
2265*4882a593Smuzhiyun 	}
2266*4882a593Smuzhiyun 	default:
2267*4882a593Smuzhiyun 		BUG();
2268*4882a593Smuzhiyun 	}
2269*4882a593Smuzhiyun 	return NULL;
2270*4882a593Smuzhiyun }
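
/*
 * Illustrative usage sketch (not from the original source): callers reach
 * nand_transfer_oob() indirectly through mtd_read_oob().  With
 * MTD_OPS_AUTO_OOB only the regions listed in chip->ecc.layout->oobfree are
 * copied, so the client buffer ends up packed even though the on-flash OOB
 * is interleaved with ECC bytes.  A hypothetical caller might do:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = 8,
 *		.oobbuf = client_buf,		// placeholder buffer
 *	};
 *	err = mtd_read_oob(mtd, page_offset, &ops);	// page_offset is illustrative
 */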
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun /**
2273*4882a593Smuzhiyun  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
2274*4882a593Smuzhiyun  * @mtd: MTD device structure
2275*4882a593Smuzhiyun  * @retry_mode: the retry mode to use
2276*4882a593Smuzhiyun  *
2277*4882a593Smuzhiyun  * Some vendors supply a special command to shift the Vt threshold, to be used
2278*4882a593Smuzhiyun  * when there are too many bitflips in a page (i.e., ECC error). After setting
2279*4882a593Smuzhiyun  * a new threshold, the host should retry reading the page.
2280*4882a593Smuzhiyun  */
2281*4882a593Smuzhiyun static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
2282*4882a593Smuzhiyun {
2283*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	pr_debug("setting READ RETRY mode %d\n", retry_mode);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if (retry_mode >= chip->read_retries)
2288*4882a593Smuzhiyun 		return -EINVAL;
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	if (!chip->setup_read_retry)
2291*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	return chip->setup_read_retry(mtd, retry_mode);
2294*4882a593Smuzhiyun }
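
/*
 * Note (sketch, not from the original source): nand_do_read_ops() below is
 * the only caller.  On an uncorrectable page it steps through the vendor
 * retry modes and re-reads, roughly:
 *
 *	if (uncorrectable && retry_mode + 1 < chip->read_retries) {
 *		retry_mode++;
 *		nand_setup_read_retry(mtd, retry_mode);
 *		goto read_retry;
 *	}
 *
 * and restores mode 0 once the page read succeeds or the modes are
 * exhausted.
 */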
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun /**
2297*4882a593Smuzhiyun  * nand_do_read_ops - [INTERN] Read data with ECC
2298*4882a593Smuzhiyun  * @mtd: MTD device structure
2299*4882a593Smuzhiyun  * @from: offset to read from
2300*4882a593Smuzhiyun  * @ops: oob ops structure
2301*4882a593Smuzhiyun  *
2302*4882a593Smuzhiyun  * Internal function. Called with chip held.
2303*4882a593Smuzhiyun  */
2304*4882a593Smuzhiyun static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
2305*4882a593Smuzhiyun 			    struct mtd_oob_ops *ops)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun 	int chipnr, page, realpage, col, bytes, aligned, oob_required;
2308*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
2309*4882a593Smuzhiyun 	int ret = 0;
2310*4882a593Smuzhiyun 	uint32_t readlen = ops->len;
2311*4882a593Smuzhiyun 	uint32_t oobreadlen = ops->ooblen;
2312*4882a593Smuzhiyun 	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	uint8_t *bufpoi, *oob, *buf;
2315*4882a593Smuzhiyun 	int use_bufpoi;
2316*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
2317*4882a593Smuzhiyun 	int retry_mode = 0;
2318*4882a593Smuzhiyun 	bool ecc_fail = false;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	chipnr = (int)(from >> chip->chip_shift);
2321*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	realpage = (int)(from >> chip->page_shift);
2324*4882a593Smuzhiyun 	page = realpage & chip->pagemask;
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	col = (int)(from & (mtd->writesize - 1));
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	buf = ops->datbuf;
2329*4882a593Smuzhiyun 	oob = ops->oobbuf;
2330*4882a593Smuzhiyun 	oob_required = oob ? 1 : 0;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	while (1) {
2333*4882a593Smuzhiyun 		unsigned int ecc_failures = mtd->ecc_stats.failed;
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 		WATCHDOG_RESET();
2336*4882a593Smuzhiyun 		bytes = min(mtd->writesize - col, readlen);
2337*4882a593Smuzhiyun 		aligned = (bytes == mtd->writesize);
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 		if (!aligned)
2340*4882a593Smuzhiyun 			use_bufpoi = 1;
2341*4882a593Smuzhiyun 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
2342*4882a593Smuzhiyun 			use_bufpoi = !IS_ALIGNED((unsigned long)buf,
2343*4882a593Smuzhiyun 						 chip->buf_align);
2344*4882a593Smuzhiyun 		else
2345*4882a593Smuzhiyun 			use_bufpoi = 0;
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 		/* Is the current page in the buffer? */
2348*4882a593Smuzhiyun 		if (realpage != chip->pagebuf || oob) {
2349*4882a593Smuzhiyun 			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
2350*4882a593Smuzhiyun 
2351*4882a593Smuzhiyun 			if (use_bufpoi && aligned)
2352*4882a593Smuzhiyun 				pr_debug("%s: using read bounce buffer for buf@%p\n",
2353*4882a593Smuzhiyun 						 __func__, buf);
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun read_retry:
2356*4882a593Smuzhiyun 			if (nand_standard_page_accessors(&chip->ecc)) {
2357*4882a593Smuzhiyun 				ret = nand_read_page_op(chip, page, 0, NULL, 0);
2358*4882a593Smuzhiyun 				if (ret)
2359*4882a593Smuzhiyun 					break;
2360*4882a593Smuzhiyun 			}
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun 			/*
2363*4882a593Smuzhiyun 			 * Now read the page into the buffer.  Absent an error,
2364*4882a593Smuzhiyun 			 * the read methods return max bitflips per ecc step.
2365*4882a593Smuzhiyun 			 */
2366*4882a593Smuzhiyun 			if (unlikely(ops->mode == MTD_OPS_RAW))
2367*4882a593Smuzhiyun 				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
2368*4882a593Smuzhiyun 							      oob_required,
2369*4882a593Smuzhiyun 							      page);
2370*4882a593Smuzhiyun 			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
2371*4882a593Smuzhiyun 				 !oob)
2372*4882a593Smuzhiyun 				ret = chip->ecc.read_subpage(mtd, chip,
2373*4882a593Smuzhiyun 							col, bytes, bufpoi,
2374*4882a593Smuzhiyun 							page);
2375*4882a593Smuzhiyun 			else
2376*4882a593Smuzhiyun 				ret = chip->ecc.read_page(mtd, chip, bufpoi,
2377*4882a593Smuzhiyun 							  oob_required, page);
2378*4882a593Smuzhiyun 			if (ret < 0) {
2379*4882a593Smuzhiyun 				if (use_bufpoi)
2380*4882a593Smuzhiyun 					/* Invalidate page cache */
2381*4882a593Smuzhiyun 					chip->pagebuf = -1;
2382*4882a593Smuzhiyun 				break;
2383*4882a593Smuzhiyun 			}
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, ret);
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 			/* Transfer not aligned data */
2388*4882a593Smuzhiyun 			if (use_bufpoi) {
2389*4882a593Smuzhiyun 				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
2390*4882a593Smuzhiyun 				    !(mtd->ecc_stats.failed - ecc_failures) &&
2391*4882a593Smuzhiyun 				    (ops->mode != MTD_OPS_RAW)) {
2392*4882a593Smuzhiyun 					chip->pagebuf = realpage;
2393*4882a593Smuzhiyun 					chip->pagebuf_bitflips = ret;
2394*4882a593Smuzhiyun 				} else {
2395*4882a593Smuzhiyun 					/* Invalidate page cache */
2396*4882a593Smuzhiyun 					chip->pagebuf = -1;
2397*4882a593Smuzhiyun 				}
2398*4882a593Smuzhiyun 				memcpy(buf, chip->buffers->databuf + col, bytes);
2399*4882a593Smuzhiyun 			}
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 			if (unlikely(oob)) {
2402*4882a593Smuzhiyun 				int toread = min(oobreadlen, max_oobsize);
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 				if (toread) {
2405*4882a593Smuzhiyun 					oob = nand_transfer_oob(chip,
2406*4882a593Smuzhiyun 						oob, ops, toread);
2407*4882a593Smuzhiyun 					oobreadlen -= toread;
2408*4882a593Smuzhiyun 				}
2409*4882a593Smuzhiyun 			}
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 			if (chip->options & NAND_NEED_READRDY) {
2412*4882a593Smuzhiyun 				/* Apply delay or wait for ready/busy pin */
2413*4882a593Smuzhiyun 				if (!chip->dev_ready)
2414*4882a593Smuzhiyun 					udelay(chip->chip_delay);
2415*4882a593Smuzhiyun 				else
2416*4882a593Smuzhiyun 					nand_wait_ready(mtd);
2417*4882a593Smuzhiyun 			}
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 			if (mtd->ecc_stats.failed - ecc_failures) {
2420*4882a593Smuzhiyun 				if (retry_mode + 1 < chip->read_retries) {
2421*4882a593Smuzhiyun 					retry_mode++;
2422*4882a593Smuzhiyun 					ret = nand_setup_read_retry(mtd,
2423*4882a593Smuzhiyun 							retry_mode);
2424*4882a593Smuzhiyun 					if (ret < 0)
2425*4882a593Smuzhiyun 						break;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 					/* Reset failures; retry */
2428*4882a593Smuzhiyun 					mtd->ecc_stats.failed = ecc_failures;
2429*4882a593Smuzhiyun 					goto read_retry;
2430*4882a593Smuzhiyun 				} else {
2431*4882a593Smuzhiyun 					/* No more retry modes; real failure */
2432*4882a593Smuzhiyun 					ecc_fail = true;
2433*4882a593Smuzhiyun 				}
2434*4882a593Smuzhiyun 			}
2435*4882a593Smuzhiyun 
2436*4882a593Smuzhiyun 			buf += bytes;
2437*4882a593Smuzhiyun 		} else {
2438*4882a593Smuzhiyun 			memcpy(buf, chip->buffers->databuf + col, bytes);
2439*4882a593Smuzhiyun 			buf += bytes;
2440*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips,
2441*4882a593Smuzhiyun 					     chip->pagebuf_bitflips);
2442*4882a593Smuzhiyun 		}
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 		readlen -= bytes;
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 		/* Reset to retry mode 0 */
2447*4882a593Smuzhiyun 		if (retry_mode) {
2448*4882a593Smuzhiyun 			ret = nand_setup_read_retry(mtd, 0);
2449*4882a593Smuzhiyun 			if (ret < 0)
2450*4882a593Smuzhiyun 				break;
2451*4882a593Smuzhiyun 			retry_mode = 0;
2452*4882a593Smuzhiyun 		}
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun 		if (!readlen)
2455*4882a593Smuzhiyun 			break;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun 		/* For subsequent reads align to page boundary */
2458*4882a593Smuzhiyun 		col = 0;
2459*4882a593Smuzhiyun 		/* Increment page address */
2460*4882a593Smuzhiyun 		realpage++;
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 		page = realpage & chip->pagemask;
2463*4882a593Smuzhiyun 		/* Check if we cross a chip boundary */
2464*4882a593Smuzhiyun 		if (!page) {
2465*4882a593Smuzhiyun 			chipnr++;
2466*4882a593Smuzhiyun 			chip->select_chip(mtd, -1);
2467*4882a593Smuzhiyun 			chip->select_chip(mtd, chipnr);
2468*4882a593Smuzhiyun 		}
2469*4882a593Smuzhiyun 	}
2470*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
2471*4882a593Smuzhiyun 
2472*4882a593Smuzhiyun 	ops->retlen = ops->len - (size_t) readlen;
2473*4882a593Smuzhiyun 	if (oob)
2474*4882a593Smuzhiyun 		ops->oobretlen = ops->ooblen - oobreadlen;
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	if (ret < 0)
2477*4882a593Smuzhiyun 		return ret;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	if (ecc_fail)
2480*4882a593Smuzhiyun 		return -EBADMSG;
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	return max_bitflips;
2483*4882a593Smuzhiyun }
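
/*
 * Note (sketch, not from the original source): on success this returns the
 * maximum number of bitflips seen in any ECC step.  The MTD core is
 * expected to translate that into -EUCLEAN once it reaches
 * mtd->bitflip_threshold, roughly:
 *
 *	ret = nand_do_read_ops(mtd, from, &ops);
 *	if (ret >= mtd->bitflip_threshold)
 *		return -EUCLEAN;	// "block rewrite recommended" to the caller
 */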
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun /**
2486*4882a593Smuzhiyun  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
2487*4882a593Smuzhiyun  * @mtd: mtd info structure
2488*4882a593Smuzhiyun  * @chip: nand chip info structure
2489*4882a593Smuzhiyun  * @page: page number to read
2490*4882a593Smuzhiyun  */
2491*4882a593Smuzhiyun static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
2492*4882a593Smuzhiyun 			     int page)
2493*4882a593Smuzhiyun {
2494*4882a593Smuzhiyun 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
2495*4882a593Smuzhiyun }
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun /**
2498*4882a593Smuzhiyun  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
2499*4882a593Smuzhiyun  *			    with syndromes
2500*4882a593Smuzhiyun  * @mtd: mtd info structure
2501*4882a593Smuzhiyun  * @chip: nand chip info structure
2502*4882a593Smuzhiyun  * @page: page number to read
2503*4882a593Smuzhiyun  */
2504*4882a593Smuzhiyun static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
2505*4882a593Smuzhiyun 				  int page)
2506*4882a593Smuzhiyun {
2507*4882a593Smuzhiyun 	int length = mtd->oobsize;
2508*4882a593Smuzhiyun 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2509*4882a593Smuzhiyun 	int eccsize = chip->ecc.size;
2510*4882a593Smuzhiyun 	uint8_t *bufpoi = chip->oob_poi;
2511*4882a593Smuzhiyun 	int i, toread, sndrnd = 0, pos, ret;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 	ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
2514*4882a593Smuzhiyun 	if (ret)
2515*4882a593Smuzhiyun 		return ret;
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.steps; i++) {
2518*4882a593Smuzhiyun 		if (sndrnd) {
2519*4882a593Smuzhiyun 			int ret;
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun 			pos = eccsize + i * (eccsize + chunk);
2522*4882a593Smuzhiyun 			if (mtd->writesize > 512)
2523*4882a593Smuzhiyun 				ret = nand_change_read_column_op(chip, pos,
2524*4882a593Smuzhiyun 								 NULL, 0,
2525*4882a593Smuzhiyun 								 false);
2526*4882a593Smuzhiyun 			else
2527*4882a593Smuzhiyun 				ret = nand_read_page_op(chip, page, pos, NULL,
2528*4882a593Smuzhiyun 							0);
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 			if (ret)
2531*4882a593Smuzhiyun 				return ret;
2532*4882a593Smuzhiyun 		} else
2533*4882a593Smuzhiyun 			sndrnd = 1;
2534*4882a593Smuzhiyun 		toread = min_t(int, length, chunk);
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, bufpoi, toread, false);
2537*4882a593Smuzhiyun 		if (ret)
2538*4882a593Smuzhiyun 			return ret;
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun 		bufpoi += toread;
2541*4882a593Smuzhiyun 		length -= toread;
2542*4882a593Smuzhiyun 	}
2543*4882a593Smuzhiyun 	if (length > 0) {
2544*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, bufpoi, length, false);
2545*4882a593Smuzhiyun 		if (ret)
2546*4882a593Smuzhiyun 			return ret;
2547*4882a593Smuzhiyun 	}
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	return 0;
2550*4882a593Smuzhiyun }
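
/*
 * Layout sketch (illustrative, not from the original source): for syndrome
 * ECC each step on flash looks like
 *
 *	| data (ecc.size) | prepad | ECC (ecc.bytes) | postpad |
 *
 * so the OOB chunk of step i starts at eccsize + i * (eccsize + chunk),
 * which is the column the loop above seeks to before reading "chunk" bytes.
 */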
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun /**
2553*4882a593Smuzhiyun  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
2554*4882a593Smuzhiyun  * @mtd: mtd info structure
2555*4882a593Smuzhiyun  * @chip: nand chip info structure
2556*4882a593Smuzhiyun  * @page: page number to write
2557*4882a593Smuzhiyun  */
2558*4882a593Smuzhiyun static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
2559*4882a593Smuzhiyun 			      int page)
2560*4882a593Smuzhiyun {
2561*4882a593Smuzhiyun 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
2562*4882a593Smuzhiyun 				 mtd->oobsize);
2563*4882a593Smuzhiyun }
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun /**
2566*4882a593Smuzhiyun  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
2567*4882a593Smuzhiyun  *			     with syndrome - only for large page flash
2568*4882a593Smuzhiyun  * @mtd: mtd info structure
2569*4882a593Smuzhiyun  * @chip: nand chip info structure
2570*4882a593Smuzhiyun  * @page: page number to write
2571*4882a593Smuzhiyun  */
2572*4882a593Smuzhiyun static int nand_write_oob_syndrome(struct mtd_info *mtd,
2573*4882a593Smuzhiyun 				   struct nand_chip *chip, int page)
2574*4882a593Smuzhiyun {
2575*4882a593Smuzhiyun 	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
2576*4882a593Smuzhiyun 	int eccsize = chip->ecc.size, length = mtd->oobsize;
2577*4882a593Smuzhiyun 	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
2578*4882a593Smuzhiyun 	const uint8_t *bufpoi = chip->oob_poi;
2579*4882a593Smuzhiyun 
2580*4882a593Smuzhiyun 	/*
2581*4882a593Smuzhiyun 	 * data-ecc-data-ecc ... ecc-oob
2582*4882a593Smuzhiyun 	 * or
2583*4882a593Smuzhiyun 	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
2584*4882a593Smuzhiyun 	 */
2585*4882a593Smuzhiyun 	if (!chip->ecc.prepad && !chip->ecc.postpad) {
2586*4882a593Smuzhiyun 		pos = steps * (eccsize + chunk);
2587*4882a593Smuzhiyun 		steps = 0;
2588*4882a593Smuzhiyun 	} else
2589*4882a593Smuzhiyun 		pos = eccsize;
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun 	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
2592*4882a593Smuzhiyun 	if (ret)
2593*4882a593Smuzhiyun 		return ret;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	for (i = 0; i < steps; i++) {
2596*4882a593Smuzhiyun 		if (sndcmd) {
2597*4882a593Smuzhiyun 			if (mtd->writesize <= 512) {
2598*4882a593Smuzhiyun 				uint32_t fill = 0xFFFFFFFF;
2599*4882a593Smuzhiyun 
2600*4882a593Smuzhiyun 				len = eccsize;
2601*4882a593Smuzhiyun 				while (len > 0) {
2602*4882a593Smuzhiyun 					int num = min_t(int, len, 4);
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 					ret = nand_write_data_op(chip, &fill,
2605*4882a593Smuzhiyun 								 num, false);
2606*4882a593Smuzhiyun 					if (ret)
2607*4882a593Smuzhiyun 						return ret;
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 					len -= num;
2610*4882a593Smuzhiyun 				}
2611*4882a593Smuzhiyun 			} else {
2612*4882a593Smuzhiyun 				pos = eccsize + i * (eccsize + chunk);
2613*4882a593Smuzhiyun 				ret = nand_change_write_column_op(chip, pos,
2614*4882a593Smuzhiyun 								  NULL, 0,
2615*4882a593Smuzhiyun 								  false);
2616*4882a593Smuzhiyun 				if (ret)
2617*4882a593Smuzhiyun 					return ret;
2618*4882a593Smuzhiyun 			}
2619*4882a593Smuzhiyun 		} else
2620*4882a593Smuzhiyun 			sndcmd = 1;
2621*4882a593Smuzhiyun 		len = min_t(int, length, chunk);
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, bufpoi, len, false);
2624*4882a593Smuzhiyun 		if (ret)
2625*4882a593Smuzhiyun 			return ret;
2626*4882a593Smuzhiyun 
2627*4882a593Smuzhiyun 		bufpoi += len;
2628*4882a593Smuzhiyun 		length -= len;
2629*4882a593Smuzhiyun 	}
2630*4882a593Smuzhiyun 	if (length > 0) {
2631*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, bufpoi, length, false);
2632*4882a593Smuzhiyun 		if (ret)
2633*4882a593Smuzhiyun 			return ret;
2634*4882a593Smuzhiyun 	}
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun /**
2640*4882a593Smuzhiyun  * nand_do_read_oob - [INTERN] NAND read out-of-band
2641*4882a593Smuzhiyun  * @mtd: MTD device structure
2642*4882a593Smuzhiyun  * @from: offset to read from
2643*4882a593Smuzhiyun  * @ops: oob operations description structure
2644*4882a593Smuzhiyun  *
2645*4882a593Smuzhiyun  * NAND read out-of-band data from the spare area.
2646*4882a593Smuzhiyun  */
2647*4882a593Smuzhiyun static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
2648*4882a593Smuzhiyun 			    struct mtd_oob_ops *ops)
2649*4882a593Smuzhiyun {
2650*4882a593Smuzhiyun 	int page, realpage, chipnr;
2651*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
2652*4882a593Smuzhiyun 	struct mtd_ecc_stats stats;
2653*4882a593Smuzhiyun 	int readlen = ops->ooblen;
2654*4882a593Smuzhiyun 	int len;
2655*4882a593Smuzhiyun 	uint8_t *buf = ops->oobbuf;
2656*4882a593Smuzhiyun 	int ret = 0;
2657*4882a593Smuzhiyun 
2658*4882a593Smuzhiyun 	pr_debug("%s: from = 0x%08Lx, len = %i\n",
2659*4882a593Smuzhiyun 			__func__, (unsigned long long)from, readlen);
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun 	stats = mtd->ecc_stats;
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	len = mtd_oobavail(mtd, ops);
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	if (unlikely(ops->ooboffs >= len)) {
2666*4882a593Smuzhiyun 		pr_debug("%s: attempt to start read outside oob\n",
2667*4882a593Smuzhiyun 				__func__);
2668*4882a593Smuzhiyun 		return -EINVAL;
2669*4882a593Smuzhiyun 	}
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	/* Do not allow reads past end of device */
2672*4882a593Smuzhiyun 	if (unlikely(from >= mtd->size ||
2673*4882a593Smuzhiyun 		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
2674*4882a593Smuzhiyun 					(from >> chip->page_shift)) * len)) {
2675*4882a593Smuzhiyun 		pr_debug("%s: attempt to read beyond end of device\n",
2676*4882a593Smuzhiyun 				__func__);
2677*4882a593Smuzhiyun 		return -EINVAL;
2678*4882a593Smuzhiyun 	}
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun 	chipnr = (int)(from >> chip->chip_shift);
2681*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun 	/* Shift to get page */
2684*4882a593Smuzhiyun 	realpage = (int)(from >> chip->page_shift);
2685*4882a593Smuzhiyun 	page = realpage & chip->pagemask;
2686*4882a593Smuzhiyun 
2687*4882a593Smuzhiyun 	while (1) {
2688*4882a593Smuzhiyun 		WATCHDOG_RESET();
2689*4882a593Smuzhiyun 
2690*4882a593Smuzhiyun 		if (ops->mode == MTD_OPS_RAW)
2691*4882a593Smuzhiyun 			ret = chip->ecc.read_oob_raw(mtd, chip, page);
2692*4882a593Smuzhiyun 		else
2693*4882a593Smuzhiyun 			ret = chip->ecc.read_oob(mtd, chip, page);
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 		if (ret < 0)
2696*4882a593Smuzhiyun 			break;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 		len = min(len, readlen);
2699*4882a593Smuzhiyun 		buf = nand_transfer_oob(chip, buf, ops, len);
2700*4882a593Smuzhiyun 
2701*4882a593Smuzhiyun 		if (chip->options & NAND_NEED_READRDY) {
2702*4882a593Smuzhiyun 			/* Apply delay or wait for ready/busy pin */
2703*4882a593Smuzhiyun 			if (!chip->dev_ready)
2704*4882a593Smuzhiyun 				udelay(chip->chip_delay);
2705*4882a593Smuzhiyun 			else
2706*4882a593Smuzhiyun 				nand_wait_ready(mtd);
2707*4882a593Smuzhiyun 		}
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 		readlen -= len;
2710*4882a593Smuzhiyun 		if (!readlen)
2711*4882a593Smuzhiyun 			break;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 		/* Increment page address */
2714*4882a593Smuzhiyun 		realpage++;
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 		page = realpage & chip->pagemask;
2717*4882a593Smuzhiyun 		/* Check if we cross a chip boundary */
2718*4882a593Smuzhiyun 		if (!page) {
2719*4882a593Smuzhiyun 			chipnr++;
2720*4882a593Smuzhiyun 			chip->select_chip(mtd, -1);
2721*4882a593Smuzhiyun 			chip->select_chip(mtd, chipnr);
2722*4882a593Smuzhiyun 		}
2723*4882a593Smuzhiyun 	}
2724*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	ops->oobretlen = ops->ooblen - readlen;
2727*4882a593Smuzhiyun 
2728*4882a593Smuzhiyun 	if (ret < 0)
2729*4882a593Smuzhiyun 		return ret;
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun 	if (mtd->ecc_stats.failed - stats.failed)
2732*4882a593Smuzhiyun 		return -EBADMSG;
2733*4882a593Smuzhiyun 
2734*4882a593Smuzhiyun 	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
2735*4882a593Smuzhiyun }
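
/*
 * Note (illustrative, not from the original source): the "read beyond end
 * of device" check above bounds the request by the OOB space left between
 * the starting page and the end of the device, i.e. roughly
 *
 *	pages_left = (mtd->size >> chip->page_shift) - (from >> chip->page_shift);
 *	if (ops->ooboffs + readlen > pages_left * oobavail)
 *		return -EINVAL;
 *
 * where oobavail is mtd->oobsize for PLACE_OOB/RAW and mtd->oobavail for
 * AUTO_OOB (see mtd_oobavail()).
 */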
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun /**
2738*4882a593Smuzhiyun  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
2739*4882a593Smuzhiyun  * @mtd: MTD device structure
2740*4882a593Smuzhiyun  * @from: offset to read from
2741*4882a593Smuzhiyun  * @ops: oob operation description structure
2742*4882a593Smuzhiyun  *
2743*4882a593Smuzhiyun  * NAND read data and/or out-of-band data.
2744*4882a593Smuzhiyun  */
2745*4882a593Smuzhiyun static int nand_read_oob(struct mtd_info *mtd, loff_t from,
2746*4882a593Smuzhiyun 			 struct mtd_oob_ops *ops)
2747*4882a593Smuzhiyun {
2748*4882a593Smuzhiyun 	int ret = -ENOTSUPP;
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun 	ops->retlen = 0;
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun 	/* Do not allow reads past end of device */
2753*4882a593Smuzhiyun 	if (ops->datbuf && (from + ops->len) > mtd->size) {
2754*4882a593Smuzhiyun 		pr_debug("%s: attempt to read beyond end of device\n",
2755*4882a593Smuzhiyun 				__func__);
2756*4882a593Smuzhiyun 		return -EINVAL;
2757*4882a593Smuzhiyun 	}
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	nand_get_device(mtd, FL_READING);
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	switch (ops->mode) {
2762*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
2763*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB:
2764*4882a593Smuzhiyun 	case MTD_OPS_RAW:
2765*4882a593Smuzhiyun 		break;
2766*4882a593Smuzhiyun 
2767*4882a593Smuzhiyun 	default:
2768*4882a593Smuzhiyun 		goto out;
2769*4882a593Smuzhiyun 	}
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 	if (!ops->datbuf)
2772*4882a593Smuzhiyun 		ret = nand_do_read_oob(mtd, from, ops);
2773*4882a593Smuzhiyun 	else
2774*4882a593Smuzhiyun 		ret = nand_do_read_ops(mtd, from, ops);
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun out:
2777*4882a593Smuzhiyun 	nand_release_device(mtd);
2778*4882a593Smuzhiyun 	return ret;
2779*4882a593Smuzhiyun }
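
/*
 * Illustrative usage (not from the original source): this is typically
 * installed as the mtd->_read_oob hook and exercised via mtd_read_oob().
 * Passing a NULL datbuf reads OOB only; supplying datbuf reads data (and
 * optionally OOB) through nand_do_read_ops(), e.g.:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_PLACE_OOB,
 *		.len    = mtd->writesize,
 *		.datbuf = data_buf,		// placeholder buffers
 *		.ooblen = mtd->oobsize,
 *		.oobbuf = oob_buf,
 *	};
 *	err = mtd_read_oob(mtd, page_offset, &ops);
 */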
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun /**
2783*4882a593Smuzhiyun  * nand_write_page_raw - [INTERN] raw page write function
2784*4882a593Smuzhiyun  * @mtd: mtd info structure
2785*4882a593Smuzhiyun  * @chip: nand chip info structure
2786*4882a593Smuzhiyun  * @buf: data buffer
2787*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
2788*4882a593Smuzhiyun  * @page: page number to write
2789*4882a593Smuzhiyun  *
2790*4882a593Smuzhiyun  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2791*4882a593Smuzhiyun  */
2792*4882a593Smuzhiyun static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
2793*4882a593Smuzhiyun 			       const uint8_t *buf, int oob_required, int page)
2794*4882a593Smuzhiyun {
2795*4882a593Smuzhiyun 	int ret;
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	ret = nand_write_data_op(chip, buf, mtd->writesize, false);
2798*4882a593Smuzhiyun 	if (ret)
2799*4882a593Smuzhiyun 		return ret;
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	if (oob_required) {
2802*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
2803*4882a593Smuzhiyun 					 false);
2804*4882a593Smuzhiyun 		if (ret)
2805*4882a593Smuzhiyun 			return ret;
2806*4882a593Smuzhiyun 	}
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 	return 0;
2809*4882a593Smuzhiyun }
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun /**
2812*4882a593Smuzhiyun  * nand_write_page_raw_syndrome - [INTERN] raw page write function
2813*4882a593Smuzhiyun  * @mtd: mtd info structure
2814*4882a593Smuzhiyun  * @chip: nand chip info structure
2815*4882a593Smuzhiyun  * @buf: data buffer
2816*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
2817*4882a593Smuzhiyun  * @page: page number to write
2818*4882a593Smuzhiyun  *
2819*4882a593Smuzhiyun  * We need a special oob layout and handling even when ECC isn't checked.
2820*4882a593Smuzhiyun  */
2821*4882a593Smuzhiyun static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
2822*4882a593Smuzhiyun 					struct nand_chip *chip,
2823*4882a593Smuzhiyun 					const uint8_t *buf, int oob_required,
2824*4882a593Smuzhiyun 					int page)
2825*4882a593Smuzhiyun {
2826*4882a593Smuzhiyun 	int eccsize = chip->ecc.size;
2827*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2828*4882a593Smuzhiyun 	uint8_t *oob = chip->oob_poi;
2829*4882a593Smuzhiyun 	int steps, size, ret;
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun 	for (steps = chip->ecc.steps; steps > 0; steps--) {
2832*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, buf, eccsize, false);
2833*4882a593Smuzhiyun 		if (ret)
2834*4882a593Smuzhiyun 			return ret;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 		buf += eccsize;
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 		if (chip->ecc.prepad) {
2839*4882a593Smuzhiyun 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
2840*4882a593Smuzhiyun 						 false);
2841*4882a593Smuzhiyun 			if (ret)
2842*4882a593Smuzhiyun 				return ret;
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 			oob += chip->ecc.prepad;
2845*4882a593Smuzhiyun 		}
2846*4882a593Smuzhiyun 
2847*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, oob, eccbytes, false);
2848*4882a593Smuzhiyun 		if (ret)
2849*4882a593Smuzhiyun 			return ret;
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 		oob += eccbytes;
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 		if (chip->ecc.postpad) {
2854*4882a593Smuzhiyun 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
2855*4882a593Smuzhiyun 						 false);
2856*4882a593Smuzhiyun 			if (ret)
2857*4882a593Smuzhiyun 				return ret;
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 			oob += chip->ecc.postpad;
2860*4882a593Smuzhiyun 		}
2861*4882a593Smuzhiyun 	}
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 	size = mtd->oobsize - (oob - chip->oob_poi);
2864*4882a593Smuzhiyun 	if (size) {
2865*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, oob, size, false);
2866*4882a593Smuzhiyun 		if (ret)
2867*4882a593Smuzhiyun 			return ret;
2868*4882a593Smuzhiyun 	}
2869*4882a593Smuzhiyun 
2870*4882a593Smuzhiyun 	return 0;
2871*4882a593Smuzhiyun }
2872*4882a593Smuzhiyun /**
2873*4882a593Smuzhiyun  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
2874*4882a593Smuzhiyun  * @mtd: mtd info structure
2875*4882a593Smuzhiyun  * @chip: nand chip info structure
2876*4882a593Smuzhiyun  * @buf: data buffer
2877*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
2878*4882a593Smuzhiyun  * @page: page number to write
2879*4882a593Smuzhiyun  */
2880*4882a593Smuzhiyun static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
2881*4882a593Smuzhiyun 				 const uint8_t *buf, int oob_required,
2882*4882a593Smuzhiyun 				 int page)
2883*4882a593Smuzhiyun {
2884*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
2885*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2886*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2887*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2888*4882a593Smuzhiyun 	const uint8_t *p = buf;
2889*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	/* Software ECC calculation */
2892*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2893*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
2896*4882a593Smuzhiyun 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
2899*4882a593Smuzhiyun }
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun /**
2902*4882a593Smuzhiyun  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
2903*4882a593Smuzhiyun  * @mtd: mtd info structure
2904*4882a593Smuzhiyun  * @chip: nand chip info structure
2905*4882a593Smuzhiyun  * @buf: data buffer
2906*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
2907*4882a593Smuzhiyun  * @page: page number to write
2908*4882a593Smuzhiyun  */
2909*4882a593Smuzhiyun static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2910*4882a593Smuzhiyun 				  const uint8_t *buf, int oob_required,
2911*4882a593Smuzhiyun 				  int page)
2912*4882a593Smuzhiyun {
2913*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
2914*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
2915*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
2916*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2917*4882a593Smuzhiyun 	const uint8_t *p = buf;
2918*4882a593Smuzhiyun 	uint32_t *eccpos = chip->ecc.layout->eccpos;
2919*4882a593Smuzhiyun 	int ret;
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2922*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, p, eccsize, false);
2925*4882a593Smuzhiyun 		if (ret)
2926*4882a593Smuzhiyun 			return ret;
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
2929*4882a593Smuzhiyun 	}
2930*4882a593Smuzhiyun 
2931*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
2932*4882a593Smuzhiyun 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
2933*4882a593Smuzhiyun 
2934*4882a593Smuzhiyun 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
2935*4882a593Smuzhiyun 	if (ret)
2936*4882a593Smuzhiyun 		return ret;
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun 	return 0;
2939*4882a593Smuzhiyun }
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun 
2942*4882a593Smuzhiyun /**
2943*4882a593Smuzhiyun  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
2944*4882a593Smuzhiyun  * @mtd:	mtd info structure
2945*4882a593Smuzhiyun  * @chip:	nand chip info structure
2946*4882a593Smuzhiyun  * @offset:	column address of subpage within the page
2947*4882a593Smuzhiyun  * @data_len:	data length
2948*4882a593Smuzhiyun  * @buf:	data buffer
2949*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
2950*4882a593Smuzhiyun  * @page: page number to write
2951*4882a593Smuzhiyun  */
2952*4882a593Smuzhiyun static int nand_write_subpage_hwecc(struct mtd_info *mtd,
2953*4882a593Smuzhiyun 				struct nand_chip *chip, uint32_t offset,
2954*4882a593Smuzhiyun 				uint32_t data_len, const uint8_t *buf,
2955*4882a593Smuzhiyun 				int oob_required, int page)
2956*4882a593Smuzhiyun {
2957*4882a593Smuzhiyun 	uint8_t *oob_buf  = chip->oob_poi;
2958*4882a593Smuzhiyun 	uint8_t *ecc_calc = chip->buffers->ecccalc;
2959*4882a593Smuzhiyun 	int ecc_size      = chip->ecc.size;
2960*4882a593Smuzhiyun 	int ecc_bytes     = chip->ecc.bytes;
2961*4882a593Smuzhiyun 	int ecc_steps     = chip->ecc.steps;
2962*4882a593Smuzhiyun 	uint32_t *eccpos  = chip->ecc.layout->eccpos;
2963*4882a593Smuzhiyun 	uint32_t start_step = offset / ecc_size;
2964*4882a593Smuzhiyun 	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
2965*4882a593Smuzhiyun 	int oob_bytes       = mtd->oobsize / ecc_steps;
2966*4882a593Smuzhiyun 	int step, i;
2967*4882a593Smuzhiyun 	int ret;
2968*4882a593Smuzhiyun 
2969*4882a593Smuzhiyun 	for (step = 0; step < ecc_steps; step++) {
2970*4882a593Smuzhiyun 		/* configure controller for WRITE access */
2971*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun 		/* write data (untouched subpages already masked by 0xFF) */
2974*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, buf, ecc_size, false);
2975*4882a593Smuzhiyun 		if (ret)
2976*4882a593Smuzhiyun 			return ret;
2977*4882a593Smuzhiyun 
2978*4882a593Smuzhiyun 		/* Mask ECC of untouched subpages by padding 0xFF */
2979*4882a593Smuzhiyun 		if ((step < start_step) || (step > end_step))
2980*4882a593Smuzhiyun 			memset(ecc_calc, 0xff, ecc_bytes);
2981*4882a593Smuzhiyun 		else
2982*4882a593Smuzhiyun 			chip->ecc.calculate(mtd, buf, ecc_calc);
2983*4882a593Smuzhiyun 
2984*4882a593Smuzhiyun 		/* Mask OOB of untouched subpages by padding 0xFF */
2985*4882a593Smuzhiyun 		/* If oob_required, preserve OOB metadata of the written subpage */
2986*4882a593Smuzhiyun 		if (!oob_required || (step < start_step) || (step > end_step))
2987*4882a593Smuzhiyun 			memset(oob_buf, 0xff, oob_bytes);
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun 		buf += ecc_size;
2990*4882a593Smuzhiyun 		ecc_calc += ecc_bytes;
2991*4882a593Smuzhiyun 		oob_buf  += oob_bytes;
2992*4882a593Smuzhiyun 	}
2993*4882a593Smuzhiyun 
2994*4882a593Smuzhiyun 	/* Copy calculated ECC for the whole page into chip->oob_poi */
2995*4882a593Smuzhiyun 	/* This includes the 0xFF mask value for unwritten subpages */
2996*4882a593Smuzhiyun 	ecc_calc = chip->buffers->ecccalc;
2997*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.total; i++)
2998*4882a593Smuzhiyun 		chip->oob_poi[eccpos[i]] = ecc_calc[i];
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	/* write OOB buffer to NAND device */
3001*4882a593Smuzhiyun 	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
3002*4882a593Smuzhiyun 	if (ret)
3003*4882a593Smuzhiyun 		return ret;
3004*4882a593Smuzhiyun 
3005*4882a593Smuzhiyun 	return 0;
3006*4882a593Smuzhiyun }
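
/*
 * Worked example (illustrative, not from the original source): with
 * ecc.size = 512 and a 2048-byte page (4 ECC steps), a subpage write of
 * 512 bytes at offset 1024 gives
 *
 *	start_step = 1024 / 512 = 2
 *	end_step   = (1024 + 512 - 1) / 512 = 2
 *
 * so only step 2 gets real ECC; steps 0, 1 and 3 have their ECC and OOB
 * slots padded with 0xFF and are left effectively untouched on flash.
 */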
3007*4882a593Smuzhiyun 
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun /**
3010*4882a593Smuzhiyun  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3011*4882a593Smuzhiyun  * @mtd: mtd info structure
3012*4882a593Smuzhiyun  * @chip: nand chip info structure
3013*4882a593Smuzhiyun  * @buf: data buffer
3014*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3015*4882a593Smuzhiyun  * @page: page number to write
3016*4882a593Smuzhiyun  *
3017*4882a593Smuzhiyun  * The hw generator calculates the error syndrome automatically. Therefore we
3018*4882a593Smuzhiyun  * need a special oob layout and handling.
3019*4882a593Smuzhiyun  */
3020*4882a593Smuzhiyun static int nand_write_page_syndrome(struct mtd_info *mtd,
3021*4882a593Smuzhiyun 				    struct nand_chip *chip,
3022*4882a593Smuzhiyun 				    const uint8_t *buf, int oob_required,
3023*4882a593Smuzhiyun 				    int page)
3024*4882a593Smuzhiyun {
3025*4882a593Smuzhiyun 	int i, eccsize = chip->ecc.size;
3026*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
3027*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
3028*4882a593Smuzhiyun 	const uint8_t *p = buf;
3029*4882a593Smuzhiyun 	uint8_t *oob = chip->oob_poi;
3030*4882a593Smuzhiyun 	int ret;
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
3033*4882a593Smuzhiyun 		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, p, eccsize, false);
3036*4882a593Smuzhiyun 		if (ret)
3037*4882a593Smuzhiyun 			return ret;
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun 		if (chip->ecc.prepad) {
3040*4882a593Smuzhiyun 			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
3041*4882a593Smuzhiyun 						 false);
3042*4882a593Smuzhiyun 			if (ret)
3043*4882a593Smuzhiyun 				return ret;
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun 			oob += chip->ecc.prepad;
3046*4882a593Smuzhiyun 		}
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 		chip->ecc.calculate(mtd, p, oob);
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, oob, eccbytes, false);
3051*4882a593Smuzhiyun 		if (ret)
3052*4882a593Smuzhiyun 			return ret;
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun 		oob += eccbytes;
3055*4882a593Smuzhiyun 
3056*4882a593Smuzhiyun 		if (chip->ecc.postpad) {
3057*4882a593Smuzhiyun 			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
3058*4882a593Smuzhiyun 						 false);
3059*4882a593Smuzhiyun 			if (ret)
3060*4882a593Smuzhiyun 				return ret;
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 			oob += chip->ecc.postpad;
3063*4882a593Smuzhiyun 		}
3064*4882a593Smuzhiyun 	}
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun 	/* Calculate remaining oob bytes */
3067*4882a593Smuzhiyun 	i = mtd->oobsize - (oob - chip->oob_poi);
3068*4882a593Smuzhiyun 	if (i) {
3069*4882a593Smuzhiyun 		ret = nand_write_data_op(chip, oob, i, false);
3070*4882a593Smuzhiyun 		if (ret)
3071*4882a593Smuzhiyun 			return ret;
3072*4882a593Smuzhiyun 	}
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun 	return 0;
3075*4882a593Smuzhiyun }
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun /**
3078*4882a593Smuzhiyun  * nand_write_page - [REPLACEABLE] write one page
3079*4882a593Smuzhiyun  * @mtd: MTD device structure
3080*4882a593Smuzhiyun  * @chip: NAND chip descriptor
3081*4882a593Smuzhiyun  * @offset: address offset within the page
3082*4882a593Smuzhiyun  * @data_len: length of actual data to be written
3083*4882a593Smuzhiyun  * @buf: the data to write
3084*4882a593Smuzhiyun  * @oob_required: must write chip->oob_poi to OOB
3085*4882a593Smuzhiyun  * @page: page number to write
3086*4882a593Smuzhiyun  * @raw: use _raw version of write_page
3087*4882a593Smuzhiyun  */
3088*4882a593Smuzhiyun static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
3089*4882a593Smuzhiyun 		uint32_t offset, int data_len, const uint8_t *buf,
3090*4882a593Smuzhiyun 		int oob_required, int page, int raw)
3091*4882a593Smuzhiyun {
3092*4882a593Smuzhiyun 	int status, subpage;
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3095*4882a593Smuzhiyun 		chip->ecc.write_subpage)
3096*4882a593Smuzhiyun 		subpage = offset || (data_len < mtd->writesize);
3097*4882a593Smuzhiyun 	else
3098*4882a593Smuzhiyun 		subpage = 0;
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	if (nand_standard_page_accessors(&chip->ecc)) {
3101*4882a593Smuzhiyun 		status = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
3102*4882a593Smuzhiyun 		if (status)
3103*4882a593Smuzhiyun 			return status;
3104*4882a593Smuzhiyun 	}
3105*4882a593Smuzhiyun 
3106*4882a593Smuzhiyun 	if (unlikely(raw))
3107*4882a593Smuzhiyun 		status = chip->ecc.write_page_raw(mtd, chip, buf,
3108*4882a593Smuzhiyun 						  oob_required, page);
3109*4882a593Smuzhiyun 	else if (subpage)
3110*4882a593Smuzhiyun 		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
3111*4882a593Smuzhiyun 						 buf, oob_required, page);
3112*4882a593Smuzhiyun 	else
3113*4882a593Smuzhiyun 		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
3114*4882a593Smuzhiyun 					      page);
3115*4882a593Smuzhiyun 
3116*4882a593Smuzhiyun 	if (status < 0)
3117*4882a593Smuzhiyun 		return status;
3118*4882a593Smuzhiyun 
3119*4882a593Smuzhiyun 	if (nand_standard_page_accessors(&chip->ecc))
3120*4882a593Smuzhiyun 		return nand_prog_page_end_op(chip);
3121*4882a593Smuzhiyun 
3122*4882a593Smuzhiyun 	return 0;
3123*4882a593Smuzhiyun }
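
/*
 * Dispatch summary (illustrative, not from the original source):
 *
 *	raw set            -> ecc.write_page_raw()   (no ECC)
 *	partial page write -> ecc.write_subpage()    (if supported)
 *	otherwise          -> ecc.write_page()       (full page + ECC)
 *
 * With standard page accessors the program sequence is opened with
 * nand_prog_page_begin_op() and closed with nand_prog_page_end_op(), which
 * also checks the chip status for a program failure.
 */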
3124*4882a593Smuzhiyun 
3125*4882a593Smuzhiyun /**
3126*4882a593Smuzhiyun  * nand_fill_oob - [INTERN] Transfer client buffer to oob
3127*4882a593Smuzhiyun  * @mtd: MTD device structure
3128*4882a593Smuzhiyun  * @oob: oob data buffer
3129*4882a593Smuzhiyun  * @len: oob data write length
3130*4882a593Smuzhiyun  * @ops: oob ops structure
3131*4882a593Smuzhiyun  */
3132*4882a593Smuzhiyun static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
3133*4882a593Smuzhiyun 			      struct mtd_oob_ops *ops)
3134*4882a593Smuzhiyun {
3135*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	/*
3138*4882a593Smuzhiyun 	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
3139*4882a593Smuzhiyun 	 * data from a previous OOB read.
3140*4882a593Smuzhiyun 	 */
3141*4882a593Smuzhiyun 	memset(chip->oob_poi, 0xff, mtd->oobsize);
3142*4882a593Smuzhiyun 
3143*4882a593Smuzhiyun 	switch (ops->mode) {
3144*4882a593Smuzhiyun 
3145*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
3146*4882a593Smuzhiyun 	case MTD_OPS_RAW:
3147*4882a593Smuzhiyun 		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
3148*4882a593Smuzhiyun 		return oob + len;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB: {
3151*4882a593Smuzhiyun 		struct nand_oobfree *free = chip->ecc.layout->oobfree;
3152*4882a593Smuzhiyun 		uint32_t boffs = 0, woffs = ops->ooboffs;
3153*4882a593Smuzhiyun 		size_t bytes = 0;
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 		for (; free->length && len; free++, len -= bytes) {
3156*4882a593Smuzhiyun 			/* Write request not from offset 0? */
3157*4882a593Smuzhiyun 			if (unlikely(woffs)) {
3158*4882a593Smuzhiyun 				if (woffs >= free->length) {
3159*4882a593Smuzhiyun 					woffs -= free->length;
3160*4882a593Smuzhiyun 					continue;
3161*4882a593Smuzhiyun 				}
3162*4882a593Smuzhiyun 				boffs = free->offset + woffs;
3163*4882a593Smuzhiyun 				bytes = min_t(size_t, len,
3164*4882a593Smuzhiyun 					      (free->length - woffs));
3165*4882a593Smuzhiyun 				woffs = 0;
3166*4882a593Smuzhiyun 			} else {
3167*4882a593Smuzhiyun 				bytes = min_t(size_t, len, free->length);
3168*4882a593Smuzhiyun 				boffs = free->offset;
3169*4882a593Smuzhiyun 			}
3170*4882a593Smuzhiyun 			memcpy(chip->oob_poi + boffs, oob, bytes);
3171*4882a593Smuzhiyun 			oob += bytes;
3172*4882a593Smuzhiyun 		}
3173*4882a593Smuzhiyun 		return oob;
3174*4882a593Smuzhiyun 	}
3175*4882a593Smuzhiyun 	default:
3176*4882a593Smuzhiyun 		BUG();
3177*4882a593Smuzhiyun 	}
3178*4882a593Smuzhiyun 	return NULL;
3179*4882a593Smuzhiyun }
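
/*
 * Note (illustrative, not from the original source): this is the write-side
 * counterpart of nand_transfer_oob() above.  With MTD_OPS_AUTO_OOB the
 * client bytes are scattered into the chip->ecc.layout->oobfree regions of
 * chip->oob_poi; everything else stays at the 0xFF preset, so OOB bytes the
 * client did not touch are left effectively unmodified on flash
 * (programming 0xFF does not clear bits).
 */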
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun #define NOTALIGNED(x)	(((x) & (chip->subpagesize - 1)) != 0)
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun /**
3184*4882a593Smuzhiyun  * nand_do_write_ops - [INTERN] NAND write with ECC
3185*4882a593Smuzhiyun  * @mtd: MTD device structure
3186*4882a593Smuzhiyun  * @to: offset to write to
3187*4882a593Smuzhiyun  * @ops: oob operations description structure
3188*4882a593Smuzhiyun  *
3189*4882a593Smuzhiyun  * NAND write with ECC.
3190*4882a593Smuzhiyun  */
3191*4882a593Smuzhiyun static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
3192*4882a593Smuzhiyun 			     struct mtd_oob_ops *ops)
3193*4882a593Smuzhiyun {
3194*4882a593Smuzhiyun 	int chipnr, realpage, page, column;
3195*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3196*4882a593Smuzhiyun 	uint32_t writelen = ops->len;
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun 	uint32_t oobwritelen = ops->ooblen;
3199*4882a593Smuzhiyun 	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
3200*4882a593Smuzhiyun 
3201*4882a593Smuzhiyun 	uint8_t *oob = ops->oobbuf;
3202*4882a593Smuzhiyun 	uint8_t *buf = ops->datbuf;
3203*4882a593Smuzhiyun 	int ret;
3204*4882a593Smuzhiyun 	int oob_required = oob ? 1 : 0;
3205*4882a593Smuzhiyun 
3206*4882a593Smuzhiyun 	ops->retlen = 0;
3207*4882a593Smuzhiyun 	if (!writelen)
3208*4882a593Smuzhiyun 		return 0;
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	/* Reject writes which are not page aligned */
3211*4882a593Smuzhiyun 	if (NOTALIGNED(to)) {
3212*4882a593Smuzhiyun 		pr_notice("%s: attempt to write non page aligned data\n",
3213*4882a593Smuzhiyun 			   __func__);
3214*4882a593Smuzhiyun 		return -EINVAL;
3215*4882a593Smuzhiyun 	}
3216*4882a593Smuzhiyun 
3217*4882a593Smuzhiyun 	column = to & (mtd->writesize - 1);
3218*4882a593Smuzhiyun 
3219*4882a593Smuzhiyun 	chipnr = (int)(to >> chip->chip_shift);
3220*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun 	/* Check if it is write protected */
3223*4882a593Smuzhiyun 	if (nand_check_wp(mtd)) {
3224*4882a593Smuzhiyun 		ret = -EIO;
3225*4882a593Smuzhiyun 		goto err_out;
3226*4882a593Smuzhiyun 	}
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 	realpage = (int)(to >> chip->page_shift);
3229*4882a593Smuzhiyun 	page = realpage & chip->pagemask;
3230*4882a593Smuzhiyun 
3231*4882a593Smuzhiyun 	/* Invalidate the page cache when we write to the cached page */
3232*4882a593Smuzhiyun 	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
3233*4882a593Smuzhiyun 	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
3234*4882a593Smuzhiyun 		chip->pagebuf = -1;
3235*4882a593Smuzhiyun 
3236*4882a593Smuzhiyun 	/* Don't allow multipage oob writes with offset */
3237*4882a593Smuzhiyun 	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
3238*4882a593Smuzhiyun 		ret = -EINVAL;
3239*4882a593Smuzhiyun 		goto err_out;
3240*4882a593Smuzhiyun 	}
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	while (1) {
3243*4882a593Smuzhiyun 		int bytes = mtd->writesize;
3244*4882a593Smuzhiyun 		uint8_t *wbuf = buf;
3245*4882a593Smuzhiyun 		int use_bufpoi;
3246*4882a593Smuzhiyun 		int part_pagewr = (column || writelen < mtd->writesize);
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 		if (part_pagewr)
3249*4882a593Smuzhiyun 			use_bufpoi = 1;
3250*4882a593Smuzhiyun 		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
3251*4882a593Smuzhiyun 			use_bufpoi = !IS_ALIGNED((unsigned long)buf,
3252*4882a593Smuzhiyun 						 chip->buf_align);
3253*4882a593Smuzhiyun 		else
3254*4882a593Smuzhiyun 			use_bufpoi = 0;
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun 		WATCHDOG_RESET();
3257*4882a593Smuzhiyun 		/* Partial page write, or need to use the bounce buffer? */
3258*4882a593Smuzhiyun 		if (use_bufpoi) {
3259*4882a593Smuzhiyun 			pr_debug("%s: using write bounce buffer for buf@%p\n",
3260*4882a593Smuzhiyun 					 __func__, buf);
3261*4882a593Smuzhiyun 			if (part_pagewr)
3262*4882a593Smuzhiyun 				bytes = min_t(int, bytes - column, writelen);
3263*4882a593Smuzhiyun 			chip->pagebuf = -1;
3264*4882a593Smuzhiyun 			memset(chip->buffers->databuf, 0xff, mtd->writesize);
3265*4882a593Smuzhiyun 			memcpy(&chip->buffers->databuf[column], buf, bytes);
3266*4882a593Smuzhiyun 			wbuf = chip->buffers->databuf;
3267*4882a593Smuzhiyun 		}
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 		if (unlikely(oob)) {
3270*4882a593Smuzhiyun 			size_t len = min(oobwritelen, oobmaxlen);
3271*4882a593Smuzhiyun 			oob = nand_fill_oob(mtd, oob, len, ops);
3272*4882a593Smuzhiyun 			oobwritelen -= len;
3273*4882a593Smuzhiyun 		} else {
3274*4882a593Smuzhiyun 			/* We still need to erase leftover OOB data */
3275*4882a593Smuzhiyun 			memset(chip->oob_poi, 0xff, mtd->oobsize);
3276*4882a593Smuzhiyun 		}
3277*4882a593Smuzhiyun 		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
3278*4882a593Smuzhiyun 					oob_required, page,
3279*4882a593Smuzhiyun 					(ops->mode == MTD_OPS_RAW));
3280*4882a593Smuzhiyun 		if (ret)
3281*4882a593Smuzhiyun 			break;
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun 		writelen -= bytes;
3284*4882a593Smuzhiyun 		if (!writelen)
3285*4882a593Smuzhiyun 			break;
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 		column = 0;
3288*4882a593Smuzhiyun 		buf += bytes;
3289*4882a593Smuzhiyun 		realpage++;
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 		page = realpage & chip->pagemask;
3292*4882a593Smuzhiyun 		/* Check if we cross a chip boundary */
3293*4882a593Smuzhiyun 		if (!page) {
3294*4882a593Smuzhiyun 			chipnr++;
3295*4882a593Smuzhiyun 			chip->select_chip(mtd, -1);
3296*4882a593Smuzhiyun 			chip->select_chip(mtd, chipnr);
3297*4882a593Smuzhiyun 		}
3298*4882a593Smuzhiyun 	}
3299*4882a593Smuzhiyun 
3300*4882a593Smuzhiyun 	ops->retlen = ops->len - writelen;
3301*4882a593Smuzhiyun 	if (unlikely(oob))
3302*4882a593Smuzhiyun 		ops->oobretlen = ops->ooblen;
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun err_out:
3305*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
3306*4882a593Smuzhiyun 	return ret;
3307*4882a593Smuzhiyun }
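
/*
 * Illustrative usage (not from the original source): nand_write_oob() and
 * panic_nand_write() in this file funnel into this function, and a plain
 * data write via mtd_write() is expected to end up here as well with an
 * ops structure roughly like:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_PLACE_OOB,
 *		.len    = len,
 *		.datbuf = (uint8_t *)buf,	// placeholder caller buffer
 *	};
 *	ret = nand_do_write_ops(mtd, to, &ops);
 */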
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun /**
3310*4882a593Smuzhiyun  * panic_nand_write - [MTD Interface] NAND write with ECC
3311*4882a593Smuzhiyun  * @mtd: MTD device structure
3312*4882a593Smuzhiyun  * @to: offset to write to
3313*4882a593Smuzhiyun  * @len: number of bytes to write
3314*4882a593Smuzhiyun  * @retlen: pointer to variable to store the number of written bytes
3315*4882a593Smuzhiyun  * @buf: the data to write
3316*4882a593Smuzhiyun  *
3317*4882a593Smuzhiyun  * NAND write with ECC. Used when performing writes in interrupt context, this
3318*4882a593Smuzhiyun  * may for example be called by mtdoops when writing an oops while in panic.
3319*4882a593Smuzhiyun  */
3320*4882a593Smuzhiyun static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
3321*4882a593Smuzhiyun 			    size_t *retlen, const uint8_t *buf)
3322*4882a593Smuzhiyun {
3323*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3324*4882a593Smuzhiyun 	struct mtd_oob_ops ops;
3325*4882a593Smuzhiyun 	int ret;
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	/* Wait for the device to get ready */
3328*4882a593Smuzhiyun 	panic_nand_wait(mtd, chip, 400);
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun 	/* Grab the device */
3331*4882a593Smuzhiyun 	panic_nand_get_device(chip, mtd, FL_WRITING);
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun 	memset(&ops, 0, sizeof(ops));
3334*4882a593Smuzhiyun 	ops.len = len;
3335*4882a593Smuzhiyun 	ops.datbuf = (uint8_t *)buf;
3336*4882a593Smuzhiyun 	ops.mode = MTD_OPS_PLACE_OOB;
3337*4882a593Smuzhiyun 
3338*4882a593Smuzhiyun 	ret = nand_do_write_ops(mtd, to, &ops);
3339*4882a593Smuzhiyun 
3340*4882a593Smuzhiyun 	*retlen = ops.retlen;
3341*4882a593Smuzhiyun 	return ret;
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun /**
3345*4882a593Smuzhiyun  * nand_do_write_oob - [MTD Interface] NAND write out-of-band
3346*4882a593Smuzhiyun  * @mtd: MTD device structure
3347*4882a593Smuzhiyun  * @to: offset to write to
3348*4882a593Smuzhiyun  * @ops: oob operation description structure
3349*4882a593Smuzhiyun  *
3350*4882a593Smuzhiyun  * NAND write out-of-band.
3351*4882a593Smuzhiyun  */
3352*4882a593Smuzhiyun static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
3353*4882a593Smuzhiyun 			     struct mtd_oob_ops *ops)
3354*4882a593Smuzhiyun {
3355*4882a593Smuzhiyun 	int chipnr, page, status, len;
3356*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun 	pr_debug("%s: to = 0x%08x, len = %i\n",
3359*4882a593Smuzhiyun 			 __func__, (unsigned int)to, (int)ops->ooblen);
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	len = mtd_oobavail(mtd, ops);
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun 	/* Do not allow write past end of page */
3364*4882a593Smuzhiyun 	if ((ops->ooboffs + ops->ooblen) > len) {
3365*4882a593Smuzhiyun 		pr_debug("%s: attempt to write past end of page\n",
3366*4882a593Smuzhiyun 				__func__);
3367*4882a593Smuzhiyun 		return -EINVAL;
3368*4882a593Smuzhiyun 	}
3369*4882a593Smuzhiyun 
3370*4882a593Smuzhiyun 	if (unlikely(ops->ooboffs >= len)) {
3371*4882a593Smuzhiyun 		pr_debug("%s: attempt to start write outside oob\n",
3372*4882a593Smuzhiyun 				__func__);
3373*4882a593Smuzhiyun 		return -EINVAL;
3374*4882a593Smuzhiyun 	}
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 	/* Do not allow write past end of device */
3377*4882a593Smuzhiyun 	if (unlikely(to >= mtd->size ||
3378*4882a593Smuzhiyun 		     ops->ooboffs + ops->ooblen >
3379*4882a593Smuzhiyun 			((mtd->size >> chip->page_shift) -
3380*4882a593Smuzhiyun 			 (to >> chip->page_shift)) * len)) {
3381*4882a593Smuzhiyun 		pr_debug("%s: attempt to write beyond end of device\n",
3382*4882a593Smuzhiyun 				__func__);
3383*4882a593Smuzhiyun 		return -EINVAL;
3384*4882a593Smuzhiyun 	}
3385*4882a593Smuzhiyun 
3386*4882a593Smuzhiyun 	chipnr = (int)(to >> chip->chip_shift);
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun 	/*
3389*4882a593Smuzhiyun 	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
3390*4882a593Smuzhiyun 	 * of my DiskOnChip 2000 test units) will clear the whole data page too
3391*4882a593Smuzhiyun 	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
3392*4882a593Smuzhiyun 	 * it in the doc2000 driver in August 1999.  dwmw2.
3393*4882a593Smuzhiyun 	 */
3394*4882a593Smuzhiyun 	nand_reset(chip, chipnr);
3395*4882a593Smuzhiyun 
3396*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
3397*4882a593Smuzhiyun 
3398*4882a593Smuzhiyun 	/* Shift to get page */
3399*4882a593Smuzhiyun 	page = (int)(to >> chip->page_shift);
3400*4882a593Smuzhiyun 
3401*4882a593Smuzhiyun 	/* Check, if it is write protected */
3402*4882a593Smuzhiyun 	if (nand_check_wp(mtd)) {
3403*4882a593Smuzhiyun 		chip->select_chip(mtd, -1);
3404*4882a593Smuzhiyun 		return -EROFS;
3405*4882a593Smuzhiyun 	}
3406*4882a593Smuzhiyun 
3407*4882a593Smuzhiyun 	/* Invalidate the page cache, if we write to the cached page */
3408*4882a593Smuzhiyun 	if (page == chip->pagebuf)
3409*4882a593Smuzhiyun 		chip->pagebuf = -1;
3410*4882a593Smuzhiyun 
3411*4882a593Smuzhiyun 	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
3412*4882a593Smuzhiyun 
3413*4882a593Smuzhiyun 	if (ops->mode == MTD_OPS_RAW)
3414*4882a593Smuzhiyun 		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
3415*4882a593Smuzhiyun 	else
3416*4882a593Smuzhiyun 		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
3417*4882a593Smuzhiyun 
3418*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun 	if (status)
3421*4882a593Smuzhiyun 		return status;
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 	ops->oobretlen = ops->ooblen;
3424*4882a593Smuzhiyun 
3425*4882a593Smuzhiyun 	return 0;
3426*4882a593Smuzhiyun }
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun /**
3429*4882a593Smuzhiyun  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
3430*4882a593Smuzhiyun  * @mtd: MTD device structure
3431*4882a593Smuzhiyun  * @to: offset to write to
3432*4882a593Smuzhiyun  * @ops: oob operation description structure
3433*4882a593Smuzhiyun  */
3434*4882a593Smuzhiyun static int nand_write_oob(struct mtd_info *mtd, loff_t to,
3435*4882a593Smuzhiyun 			  struct mtd_oob_ops *ops)
3436*4882a593Smuzhiyun {
3437*4882a593Smuzhiyun 	int ret = -ENOTSUPP;
3438*4882a593Smuzhiyun 
3439*4882a593Smuzhiyun 	ops->retlen = 0;
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 	/* Do not allow writes past end of device */
3442*4882a593Smuzhiyun 	if (ops->datbuf && (to + ops->len) > mtd->size) {
3443*4882a593Smuzhiyun 		pr_debug("%s: attempt to write beyond end of device\n",
3444*4882a593Smuzhiyun 				__func__);
3445*4882a593Smuzhiyun 		return -EINVAL;
3446*4882a593Smuzhiyun 	}
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun 	nand_get_device(mtd, FL_WRITING);
3449*4882a593Smuzhiyun 
3450*4882a593Smuzhiyun 	switch (ops->mode) {
3451*4882a593Smuzhiyun 	case MTD_OPS_PLACE_OOB:
3452*4882a593Smuzhiyun 	case MTD_OPS_AUTO_OOB:
3453*4882a593Smuzhiyun 	case MTD_OPS_RAW:
3454*4882a593Smuzhiyun 		break;
3455*4882a593Smuzhiyun 
3456*4882a593Smuzhiyun 	default:
3457*4882a593Smuzhiyun 		goto out;
3458*4882a593Smuzhiyun 	}
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 	if (!ops->datbuf)
3461*4882a593Smuzhiyun 		ret = nand_do_write_oob(mtd, to, ops);
3462*4882a593Smuzhiyun 	else
3463*4882a593Smuzhiyun 		ret = nand_do_write_ops(mtd, to, ops);
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun out:
3466*4882a593Smuzhiyun 	nand_release_device(mtd);
3467*4882a593Smuzhiyun 	return ret;
3468*4882a593Smuzhiyun }
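
/*
 * Illustrative sketch only (not part of this driver): how an MTD client
 * could reach nand_write_oob() above through the mtd_write_oob() wrapper.
 * With MTD_OPS_AUTO_OOB the bytes are placed into the free OOB areas
 * described by the ECC layout. The example_* name is made up.
 */
static int __maybe_unused example_write_oob(struct mtd_info *mtd, loff_t to,
					    const u8 *oob, size_t ooblen)
{
	struct mtd_oob_ops ops;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_AUTO_OOB;	/* use only the free OOB bytes */
	ops.ooblen = ooblen;
	ops.oobbuf = (u8 *)oob;
	ops.datbuf = NULL;		/* OOB only, no main-area data */

	return mtd_write_oob(mtd, to, &ops);
}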
3469*4882a593Smuzhiyun 
3470*4882a593Smuzhiyun /**
3471*4882a593Smuzhiyun  * single_erase - [GENERIC] NAND standard block erase command function
3472*4882a593Smuzhiyun  * @mtd: MTD device structure
3473*4882a593Smuzhiyun  * @page: the page address of the block which will be erased
3474*4882a593Smuzhiyun  *
3475*4882a593Smuzhiyun  * Standard erase command for NAND chips. Returns NAND status.
3476*4882a593Smuzhiyun  */
3477*4882a593Smuzhiyun static int single_erase(struct mtd_info *mtd, int page)
3478*4882a593Smuzhiyun {
3479*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3480*4882a593Smuzhiyun 	unsigned int eraseblock;
3481*4882a593Smuzhiyun 
3482*4882a593Smuzhiyun 	/* Send commands to erase a block */
3483*4882a593Smuzhiyun 	eraseblock = page >> (chip->phys_erase_shift - chip->page_shift);
3484*4882a593Smuzhiyun 
3485*4882a593Smuzhiyun 	return nand_erase_op(chip, eraseblock);
3486*4882a593Smuzhiyun }
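
/*
 * Worked example (illustrative, assuming a 128KiB-block / 2KiB-page device):
 * phys_erase_shift = 17 and page_shift = 11, so a block spans
 * 1 << (17 - 11) = 64 pages, and page 0x140 (320) belongs to
 * eraseblock 320 >> 6 = 5.
 */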
3487*4882a593Smuzhiyun 
3488*4882a593Smuzhiyun /**
3489*4882a593Smuzhiyun  * nand_erase - [MTD Interface] erase block(s)
3490*4882a593Smuzhiyun  * @mtd: MTD device structure
3491*4882a593Smuzhiyun  * @instr: erase instruction
3492*4882a593Smuzhiyun  *
3493*4882a593Smuzhiyun  * Erase one or more blocks.
3494*4882a593Smuzhiyun  */
3495*4882a593Smuzhiyun static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
3496*4882a593Smuzhiyun {
3497*4882a593Smuzhiyun 	return nand_erase_nand(mtd, instr, 0);
3498*4882a593Smuzhiyun }
3499*4882a593Smuzhiyun 
3500*4882a593Smuzhiyun /**
3501*4882a593Smuzhiyun  * nand_erase_nand - [INTERN] erase block(s)
3502*4882a593Smuzhiyun  * @mtd: MTD device structure
3503*4882a593Smuzhiyun  * @instr: erase instruction
3504*4882a593Smuzhiyun  * @allowbbt: allow erasing the bbt area
3505*4882a593Smuzhiyun  *
3506*4882a593Smuzhiyun  * Erase one or more blocks.
3507*4882a593Smuzhiyun  */
3508*4882a593Smuzhiyun int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
3509*4882a593Smuzhiyun 		    int allowbbt)
3510*4882a593Smuzhiyun {
3511*4882a593Smuzhiyun 	int page, status, pages_per_block, ret, chipnr;
3512*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3513*4882a593Smuzhiyun 	loff_t len;
3514*4882a593Smuzhiyun 
3515*4882a593Smuzhiyun 	pr_debug("%s: start = 0x%012llx, len = %llu\n",
3516*4882a593Smuzhiyun 			__func__, (unsigned long long)instr->addr,
3517*4882a593Smuzhiyun 			(unsigned long long)instr->len);
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 	if (check_offs_len(mtd, instr->addr, instr->len))
3520*4882a593Smuzhiyun 		return -EINVAL;
3521*4882a593Smuzhiyun 
3522*4882a593Smuzhiyun 	/* Grab the lock and see if the device is available */
3523*4882a593Smuzhiyun 	nand_get_device(mtd, FL_ERASING);
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 	/* Shift to get first page */
3526*4882a593Smuzhiyun 	page = (int)(instr->addr >> chip->page_shift);
3527*4882a593Smuzhiyun 	chipnr = (int)(instr->addr >> chip->chip_shift);
3528*4882a593Smuzhiyun 
3529*4882a593Smuzhiyun 	/* Calculate pages in each block */
3530*4882a593Smuzhiyun 	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
3531*4882a593Smuzhiyun 
3532*4882a593Smuzhiyun 	/* Select the NAND device */
3533*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
3534*4882a593Smuzhiyun 
3535*4882a593Smuzhiyun 	/* Check, if it is write protected */
3536*4882a593Smuzhiyun 	if (nand_check_wp(mtd)) {
3537*4882a593Smuzhiyun 		pr_debug("%s: device is write protected!\n",
3538*4882a593Smuzhiyun 				__func__);
3539*4882a593Smuzhiyun 		instr->state = MTD_ERASE_FAILED;
3540*4882a593Smuzhiyun 		goto erase_exit;
3541*4882a593Smuzhiyun 	}
3542*4882a593Smuzhiyun 
3543*4882a593Smuzhiyun 	/* Loop through the pages */
3544*4882a593Smuzhiyun 	len = instr->len;
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	instr->state = MTD_ERASING;
3547*4882a593Smuzhiyun 
3548*4882a593Smuzhiyun 	while (len) {
3549*4882a593Smuzhiyun 		WATCHDOG_RESET();
3550*4882a593Smuzhiyun 
3551*4882a593Smuzhiyun 		/* Check if we have a bad block, we do not erase bad blocks! */
3552*4882a593Smuzhiyun 		if (!instr->scrub && nand_block_checkbad(mtd, ((loff_t) page) <<
3553*4882a593Smuzhiyun 					chip->page_shift, allowbbt)) {
3554*4882a593Smuzhiyun 			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
3555*4882a593Smuzhiyun 				    __func__, page);
3556*4882a593Smuzhiyun 			instr->state = MTD_ERASE_FAILED;
3557*4882a593Smuzhiyun 			goto erase_exit;
3558*4882a593Smuzhiyun 		}
3559*4882a593Smuzhiyun 
3560*4882a593Smuzhiyun 		/*
3561*4882a593Smuzhiyun 		 * Invalidate the page cache, if we erase the block which
3562*4882a593Smuzhiyun 		 * contains the current cached page.
3563*4882a593Smuzhiyun 		 */
3564*4882a593Smuzhiyun 		if (page <= chip->pagebuf && chip->pagebuf <
3565*4882a593Smuzhiyun 		    (page + pages_per_block))
3566*4882a593Smuzhiyun 			chip->pagebuf = -1;
3567*4882a593Smuzhiyun 
3568*4882a593Smuzhiyun 		status = chip->erase(mtd, page & chip->pagemask);
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun 		/* See if block erase succeeded */
3571*4882a593Smuzhiyun 		if (status & NAND_STATUS_FAIL) {
3572*4882a593Smuzhiyun 			pr_debug("%s: failed erase, page 0x%08x\n",
3573*4882a593Smuzhiyun 					__func__, page);
3574*4882a593Smuzhiyun 			instr->state = MTD_ERASE_FAILED;
3575*4882a593Smuzhiyun 			instr->fail_addr =
3576*4882a593Smuzhiyun 				((loff_t)page << chip->page_shift);
3577*4882a593Smuzhiyun 			goto erase_exit;
3578*4882a593Smuzhiyun 		}
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun 		/* Increment page address and decrement length */
3581*4882a593Smuzhiyun 		len -= (1ULL << chip->phys_erase_shift);
3582*4882a593Smuzhiyun 		page += pages_per_block;
3583*4882a593Smuzhiyun 
3584*4882a593Smuzhiyun 		/* Check, if we cross a chip boundary */
3585*4882a593Smuzhiyun 		if (len && !(page & chip->pagemask)) {
3586*4882a593Smuzhiyun 			chipnr++;
3587*4882a593Smuzhiyun 			chip->select_chip(mtd, -1);
3588*4882a593Smuzhiyun 			chip->select_chip(mtd, chipnr);
3589*4882a593Smuzhiyun 		}
3590*4882a593Smuzhiyun 	}
3591*4882a593Smuzhiyun 	instr->state = MTD_ERASE_DONE;
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun erase_exit:
3594*4882a593Smuzhiyun 
3595*4882a593Smuzhiyun 	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun 	/* Deselect and wake up anyone waiting on the device */
3598*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
3599*4882a593Smuzhiyun 	nand_release_device(mtd);
3600*4882a593Smuzhiyun 
3601*4882a593Smuzhiyun 	/* Do call back function */
3602*4882a593Smuzhiyun 	if (!ret)
3603*4882a593Smuzhiyun 		mtd_erase_callback(instr);
3604*4882a593Smuzhiyun 
3605*4882a593Smuzhiyun 	/* Return more or less happy */
3606*4882a593Smuzhiyun 	return ret;
3607*4882a593Smuzhiyun }
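
/*
 * Illustrative sketch only (not part of this driver): erasing a single
 * block through the MTD interface ends up in nand_erase() above. addr and
 * len must be block aligned or check_offs_len() rejects the request. The
 * example_* name is made up.
 */
static int __maybe_unused example_erase_block(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info instr;

	memset(&instr, 0, sizeof(instr));
	instr.mtd = mtd;
	instr.addr = ofs;		/* must be erasesize aligned */
	instr.len = mtd->erasesize;	/* exactly one erase block */

	return mtd_erase(mtd, &instr);
}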
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun /**
3610*4882a593Smuzhiyun  * nand_sync - [MTD Interface] sync
3611*4882a593Smuzhiyun  * @mtd: MTD device structure
3612*4882a593Smuzhiyun  *
3613*4882a593Smuzhiyun  * Sync is actually a wait for chip ready function.
3614*4882a593Smuzhiyun  */
3615*4882a593Smuzhiyun static void nand_sync(struct mtd_info *mtd)
3616*4882a593Smuzhiyun {
3617*4882a593Smuzhiyun 	pr_debug("%s: called\n", __func__);
3618*4882a593Smuzhiyun 
3619*4882a593Smuzhiyun 	/* Grab the lock and see if the device is available */
3620*4882a593Smuzhiyun 	nand_get_device(mtd, FL_SYNCING);
3621*4882a593Smuzhiyun 	/* Release it and go back */
3622*4882a593Smuzhiyun 	nand_release_device(mtd);
3623*4882a593Smuzhiyun }
3624*4882a593Smuzhiyun 
3625*4882a593Smuzhiyun /**
3626*4882a593Smuzhiyun  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
3627*4882a593Smuzhiyun  * @mtd: MTD device structure
3628*4882a593Smuzhiyun  * @offs: offset relative to mtd start
3629*4882a593Smuzhiyun  */
3630*4882a593Smuzhiyun static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
3631*4882a593Smuzhiyun {
3632*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3633*4882a593Smuzhiyun 	int chipnr = (int)(offs >> chip->chip_shift);
3634*4882a593Smuzhiyun 	int ret;
3635*4882a593Smuzhiyun 
3636*4882a593Smuzhiyun 	/* Select the NAND device */
3637*4882a593Smuzhiyun 	nand_get_device(mtd, FL_READING);
3638*4882a593Smuzhiyun 	chip->select_chip(mtd, chipnr);
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun 	ret = nand_block_checkbad(mtd, offs, 0);
3641*4882a593Smuzhiyun 
3642*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
3643*4882a593Smuzhiyun 	nand_release_device(mtd);
3644*4882a593Smuzhiyun 
3645*4882a593Smuzhiyun 	return ret;
3646*4882a593Smuzhiyun }
3647*4882a593Smuzhiyun 
3648*4882a593Smuzhiyun /**
3649*4882a593Smuzhiyun  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
3650*4882a593Smuzhiyun  * @mtd: MTD device structure
3651*4882a593Smuzhiyun  * @ofs: offset relative to mtd start
3652*4882a593Smuzhiyun  */
3653*4882a593Smuzhiyun static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
3654*4882a593Smuzhiyun {
3655*4882a593Smuzhiyun 	int ret;
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	ret = nand_block_isbad(mtd, ofs);
3658*4882a593Smuzhiyun 	if (ret) {
3659*4882a593Smuzhiyun 		/* If it was bad already, return success and do nothing */
3660*4882a593Smuzhiyun 		if (ret > 0)
3661*4882a593Smuzhiyun 			return 0;
3662*4882a593Smuzhiyun 		return ret;
3663*4882a593Smuzhiyun 	}
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	return nand_block_markbad_lowlevel(mtd, ofs);
3666*4882a593Smuzhiyun }
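
/*
 * Illustrative sketch only (not part of this driver): scanning a device
 * with the mtd_block_isbad() wrapper around nand_block_isbad() above. The
 * example_* name is made up.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize)
		if (mtd_block_isbad(mtd, ofs))
			bad++;

	return bad;
}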
3667*4882a593Smuzhiyun 
3668*4882a593Smuzhiyun /**
3669*4882a593Smuzhiyun  * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
3670*4882a593Smuzhiyun  * @mtd: MTD device structure
3671*4882a593Smuzhiyun  * @chip: nand chip info structure
3672*4882a593Smuzhiyun  * @addr: feature address.
3673*4882a593Smuzhiyun  * @subfeature_param: the subfeature parameters, a four bytes array.
3674*4882a593Smuzhiyun  */
3675*4882a593Smuzhiyun static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
3676*4882a593Smuzhiyun 			int addr, uint8_t *subfeature_param)
3677*4882a593Smuzhiyun {
3678*4882a593Smuzhiyun #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
3679*4882a593Smuzhiyun 	if (!chip->onfi_version ||
3680*4882a593Smuzhiyun 	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3681*4882a593Smuzhiyun 	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3682*4882a593Smuzhiyun 		return -ENOTSUPP;
3683*4882a593Smuzhiyun #endif
3684*4882a593Smuzhiyun 
3685*4882a593Smuzhiyun 	return nand_set_features_op(chip, addr, subfeature_param);
3686*4882a593Smuzhiyun }
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun /**
3689*4882a593Smuzhiyun  * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
3690*4882a593Smuzhiyun  * @mtd: MTD device structure
3691*4882a593Smuzhiyun  * @chip: nand chip info structure
3692*4882a593Smuzhiyun  * @addr: feature address.
3693*4882a593Smuzhiyun  * @subfeature_param: the subfeature parameters, a four bytes array.
3694*4882a593Smuzhiyun  */
3695*4882a593Smuzhiyun static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
3696*4882a593Smuzhiyun 			int addr, uint8_t *subfeature_param)
3697*4882a593Smuzhiyun {
3698*4882a593Smuzhiyun #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
3699*4882a593Smuzhiyun 	if (!chip->onfi_version ||
3700*4882a593Smuzhiyun 	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
3701*4882a593Smuzhiyun 	      & ONFI_OPT_CMD_SET_GET_FEATURES))
3702*4882a593Smuzhiyun 		return -ENOTSUPP;
3703*4882a593Smuzhiyun #endif
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun 	return nand_get_features_op(chip, addr, subfeature_param);
3706*4882a593Smuzhiyun }
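
/*
 * Illustrative sketch only (not part of this driver): reading a feature
 * through the hook installed below in nand_set_defaults(). The subfeature
 * parameter is always a four-byte array; ONFI_FEATURE_ADDR_READ_RETRY is
 * used here purely as an example address, and the example_* name is made up.
 */
static int __maybe_unused example_get_read_retry_mode(struct mtd_info *mtd,
						      struct nand_chip *chip)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { 0 };
	int ret;

	ret = chip->onfi_get_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
				      param);
	if (ret)
		return ret;

	return param[0];	/* currently selected read-retry mode */
}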
3707*4882a593Smuzhiyun 
3708*4882a593Smuzhiyun /* Set default functions */
3709*4882a593Smuzhiyun static void nand_set_defaults(struct nand_chip *chip, int busw)
3710*4882a593Smuzhiyun {
3711*4882a593Smuzhiyun 	/* check for proper chip_delay setup, set 20us if not */
3712*4882a593Smuzhiyun 	if (!chip->chip_delay)
3713*4882a593Smuzhiyun 		chip->chip_delay = 20;
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun 	/* check, if a user supplied command function is given */
3716*4882a593Smuzhiyun 	if (chip->cmdfunc == NULL)
3717*4882a593Smuzhiyun 		chip->cmdfunc = nand_command;
3718*4882a593Smuzhiyun 
3719*4882a593Smuzhiyun 	/* check, if a user supplied wait function is given */
3720*4882a593Smuzhiyun 	if (chip->waitfunc == NULL)
3721*4882a593Smuzhiyun 		chip->waitfunc = nand_wait;
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun 	if (!chip->select_chip)
3724*4882a593Smuzhiyun 		chip->select_chip = nand_select_chip;
3725*4882a593Smuzhiyun 
3726*4882a593Smuzhiyun 	/* set for ONFI nand */
3727*4882a593Smuzhiyun 	if (!chip->onfi_set_features)
3728*4882a593Smuzhiyun 		chip->onfi_set_features = nand_onfi_set_features;
3729*4882a593Smuzhiyun 	if (!chip->onfi_get_features)
3730*4882a593Smuzhiyun 		chip->onfi_get_features = nand_onfi_get_features;
3731*4882a593Smuzhiyun 
3732*4882a593Smuzhiyun 	/* If called twice, pointers that depend on busw may need to be reset */
3733*4882a593Smuzhiyun 	if (!chip->read_byte || chip->read_byte == nand_read_byte)
3734*4882a593Smuzhiyun 		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
3735*4882a593Smuzhiyun 	if (!chip->read_word)
3736*4882a593Smuzhiyun 		chip->read_word = nand_read_word;
3737*4882a593Smuzhiyun 	if (!chip->block_bad)
3738*4882a593Smuzhiyun 		chip->block_bad = nand_block_bad;
3739*4882a593Smuzhiyun 	if (!chip->block_markbad)
3740*4882a593Smuzhiyun 		chip->block_markbad = nand_default_block_markbad;
3741*4882a593Smuzhiyun 	if (!chip->write_buf || chip->write_buf == nand_write_buf)
3742*4882a593Smuzhiyun 		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
3743*4882a593Smuzhiyun 	if (!chip->write_byte || chip->write_byte == nand_write_byte)
3744*4882a593Smuzhiyun 		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
3745*4882a593Smuzhiyun 	if (!chip->read_buf || chip->read_buf == nand_read_buf)
3746*4882a593Smuzhiyun 		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
3747*4882a593Smuzhiyun 	if (!chip->scan_bbt)
3748*4882a593Smuzhiyun 		chip->scan_bbt = nand_default_bbt;
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun 	if (!chip->controller) {
3751*4882a593Smuzhiyun 		chip->controller = &chip->hwcontrol;
3752*4882a593Smuzhiyun 		spin_lock_init(&chip->controller->lock);
3753*4882a593Smuzhiyun 		init_waitqueue_head(&chip->controller->wq);
3754*4882a593Smuzhiyun 	}
3755*4882a593Smuzhiyun 
3756*4882a593Smuzhiyun 	if (!chip->buf_align)
3757*4882a593Smuzhiyun 		chip->buf_align = 1;
3758*4882a593Smuzhiyun }
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun /* Sanitize ONFI strings so we can safely print them */
3761*4882a593Smuzhiyun static void sanitize_string(char *s, size_t len)
3762*4882a593Smuzhiyun {
3763*4882a593Smuzhiyun 	ssize_t i;
3764*4882a593Smuzhiyun 
3765*4882a593Smuzhiyun 	/* Null terminate */
3766*4882a593Smuzhiyun 	s[len - 1] = 0;
3767*4882a593Smuzhiyun 
3768*4882a593Smuzhiyun 	/* Remove non printable chars */
3769*4882a593Smuzhiyun 	for (i = 0; i < len - 1; i++) {
3770*4882a593Smuzhiyun 		if (s[i] < ' ' || s[i] > 127)
3771*4882a593Smuzhiyun 			s[i] = '?';
3772*4882a593Smuzhiyun 	}
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	/* Remove trailing spaces */
3775*4882a593Smuzhiyun 	strim(s);
3776*4882a593Smuzhiyun }
3777*4882a593Smuzhiyun 
3778*4882a593Smuzhiyun static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
3779*4882a593Smuzhiyun {
3780*4882a593Smuzhiyun 	int i;
3781*4882a593Smuzhiyun 	while (len--) {
3782*4882a593Smuzhiyun 		crc ^= *p++ << 8;
3783*4882a593Smuzhiyun 		for (i = 0; i < 8; i++)
3784*4882a593Smuzhiyun 			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
3785*4882a593Smuzhiyun 	}
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun 	return crc;
3788*4882a593Smuzhiyun }
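
/*
 * Note: this is the ONFI CRC-16 (polynomial 0x8005, i.e.
 * x^16 + x^15 + x^2 + 1, processed MSB first). Callers below seed it with
 * ONFI_CRC_BASE and run it over the first 254 (ONFI) or 510 (JEDEC) bytes
 * of a parameter page, then compare the result against the little-endian
 * CRC stored at the end of that page.
 */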
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
3791*4882a593Smuzhiyun /* Parse the Extended Parameter Page. */
3792*4882a593Smuzhiyun static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
3793*4882a593Smuzhiyun 		struct nand_chip *chip, struct nand_onfi_params *p)
3794*4882a593Smuzhiyun {
3795*4882a593Smuzhiyun 	struct onfi_ext_param_page *ep;
3796*4882a593Smuzhiyun 	struct onfi_ext_section *s;
3797*4882a593Smuzhiyun 	struct onfi_ext_ecc_info *ecc;
3798*4882a593Smuzhiyun 	uint8_t *cursor;
3799*4882a593Smuzhiyun 	int ret;
3800*4882a593Smuzhiyun 	int len;
3801*4882a593Smuzhiyun 	int i;
3802*4882a593Smuzhiyun 
3803*4882a593Smuzhiyun 	len = le16_to_cpu(p->ext_param_page_length) * 16;
3804*4882a593Smuzhiyun 	ep = kmalloc(len, GFP_KERNEL);
3805*4882a593Smuzhiyun 	if (!ep)
3806*4882a593Smuzhiyun 		return -ENOMEM;
3807*4882a593Smuzhiyun 
3808*4882a593Smuzhiyun 	/* Send our own NAND_CMD_PARAM. */
3809*4882a593Smuzhiyun 	ret = nand_read_param_page_op(chip, 0, NULL, 0);
3810*4882a593Smuzhiyun 	if (ret)
3811*4882a593Smuzhiyun 		goto ext_out;
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun 	/* Use the Change Read Column command to skip the ONFI param pages. */
3814*4882a593Smuzhiyun 	ret = nand_change_read_column_op(chip,
3815*4882a593Smuzhiyun 					 sizeof(*p) * p->num_of_param_pages,
3816*4882a593Smuzhiyun 					 ep, len, true);
3817*4882a593Smuzhiyun 	if (ret)
3818*4882a593Smuzhiyun 		goto ext_out;
3819*4882a593Smuzhiyun 
3820*4882a593Smuzhiyun 	ret = -EINVAL;
3821*4882a593Smuzhiyun 	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
3822*4882a593Smuzhiyun 		!= le16_to_cpu(ep->crc))) {
3823*4882a593Smuzhiyun 		pr_debug("fail in the CRC.\n");
3824*4882a593Smuzhiyun 		goto ext_out;
3825*4882a593Smuzhiyun 	}
3826*4882a593Smuzhiyun 
3827*4882a593Smuzhiyun 	/*
3828*4882a593Smuzhiyun 	 * Check the signature.
3829*4882a593Smuzhiyun 	 * We do not strictly follow the ONFI spec here; this may change later.
3830*4882a593Smuzhiyun 	 */
3831*4882a593Smuzhiyun 	if (strncmp((char *)ep->sig, "EPPS", 4)) {
3832*4882a593Smuzhiyun 		pr_debug("The signature is invalid.\n");
3833*4882a593Smuzhiyun 		goto ext_out;
3834*4882a593Smuzhiyun 	}
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun 	/* find the ECC section. */
3837*4882a593Smuzhiyun 	cursor = (uint8_t *)(ep + 1);
3838*4882a593Smuzhiyun 	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
3839*4882a593Smuzhiyun 		s = ep->sections + i;
3840*4882a593Smuzhiyun 		if (s->type == ONFI_SECTION_TYPE_2)
3841*4882a593Smuzhiyun 			break;
3842*4882a593Smuzhiyun 		cursor += s->length * 16;
3843*4882a593Smuzhiyun 	}
3844*4882a593Smuzhiyun 	if (i == ONFI_EXT_SECTION_MAX) {
3845*4882a593Smuzhiyun 		pr_debug("We can not find the ECC section.\n");
3846*4882a593Smuzhiyun 		goto ext_out;
3847*4882a593Smuzhiyun 	}
3848*4882a593Smuzhiyun 
3849*4882a593Smuzhiyun 	/* get the info we want. */
3850*4882a593Smuzhiyun 	ecc = (struct onfi_ext_ecc_info *)cursor;
3851*4882a593Smuzhiyun 
3852*4882a593Smuzhiyun 	if (!ecc->codeword_size) {
3853*4882a593Smuzhiyun 		pr_debug("Invalid codeword size\n");
3854*4882a593Smuzhiyun 		goto ext_out;
3855*4882a593Smuzhiyun 	}
3856*4882a593Smuzhiyun 
3857*4882a593Smuzhiyun 	chip->ecc_strength_ds = ecc->ecc_bits;
3858*4882a593Smuzhiyun 	chip->ecc_step_ds = 1 << ecc->codeword_size;
3859*4882a593Smuzhiyun 	ret = 0;
3860*4882a593Smuzhiyun 
3861*4882a593Smuzhiyun ext_out:
3862*4882a593Smuzhiyun 	kfree(ep);
3863*4882a593Smuzhiyun 	return ret;
3864*4882a593Smuzhiyun }
3865*4882a593Smuzhiyun 
3866*4882a593Smuzhiyun static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
3867*4882a593Smuzhiyun {
3868*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
3869*4882a593Smuzhiyun 	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun 	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
3872*4882a593Smuzhiyun 			feature);
3873*4882a593Smuzhiyun }
3874*4882a593Smuzhiyun 
3875*4882a593Smuzhiyun /*
3876*4882a593Smuzhiyun  * Configure chip properties from Micron vendor-specific ONFI table
3877*4882a593Smuzhiyun  */
3878*4882a593Smuzhiyun static void nand_onfi_detect_micron(struct nand_chip *chip,
3879*4882a593Smuzhiyun 		struct nand_onfi_params *p)
3880*4882a593Smuzhiyun {
3881*4882a593Smuzhiyun 	struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
3882*4882a593Smuzhiyun 
3883*4882a593Smuzhiyun 	if (le16_to_cpu(p->vendor_revision) < 1)
3884*4882a593Smuzhiyun 		return;
3885*4882a593Smuzhiyun 
3886*4882a593Smuzhiyun 	chip->read_retries = micron->read_retry_options;
3887*4882a593Smuzhiyun 	chip->setup_read_retry = nand_setup_read_retry_micron;
3888*4882a593Smuzhiyun }
3889*4882a593Smuzhiyun 
3890*4882a593Smuzhiyun /*
3891*4882a593Smuzhiyun  * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
3892*4882a593Smuzhiyun  */
3893*4882a593Smuzhiyun static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3894*4882a593Smuzhiyun 					int *busw)
3895*4882a593Smuzhiyun {
3896*4882a593Smuzhiyun 	struct nand_onfi_params *p = &chip->onfi_params;
3897*4882a593Smuzhiyun 	char id[4];
3898*4882a593Smuzhiyun 	int i, ret, val;
3899*4882a593Smuzhiyun 
3900*4882a593Smuzhiyun 	/* Try ONFI for unknown chip or LP */
3901*4882a593Smuzhiyun 	ret = nand_readid_op(chip, 0x20, id, sizeof(id));
3902*4882a593Smuzhiyun 	if (ret || strncmp(id, "ONFI", 4))
3903*4882a593Smuzhiyun 		return 0;
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun 	ret = nand_read_param_page_op(chip, 0, NULL, 0);
3906*4882a593Smuzhiyun 	if (ret)
3907*4882a593Smuzhiyun 		return 0;
3908*4882a593Smuzhiyun 
3909*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
3910*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, sizeof(*p), true);
3911*4882a593Smuzhiyun 		if (ret)
3912*4882a593Smuzhiyun 			return 0;
3913*4882a593Smuzhiyun 
3914*4882a593Smuzhiyun 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
3915*4882a593Smuzhiyun 				le16_to_cpu(p->crc)) {
3916*4882a593Smuzhiyun 			break;
3917*4882a593Smuzhiyun 		}
3918*4882a593Smuzhiyun 	}
3919*4882a593Smuzhiyun 
3920*4882a593Smuzhiyun 	if (i == 3) {
3921*4882a593Smuzhiyun 		pr_err("Could not find valid ONFI parameter page; aborting\n");
3922*4882a593Smuzhiyun 		return 0;
3923*4882a593Smuzhiyun 	}
3924*4882a593Smuzhiyun 
3925*4882a593Smuzhiyun 	/* Check version */
3926*4882a593Smuzhiyun 	val = le16_to_cpu(p->revision);
3927*4882a593Smuzhiyun 	if (val & (1 << 5))
3928*4882a593Smuzhiyun 		chip->onfi_version = 23;
3929*4882a593Smuzhiyun 	else if (val & (1 << 4))
3930*4882a593Smuzhiyun 		chip->onfi_version = 22;
3931*4882a593Smuzhiyun 	else if (val & (1 << 3))
3932*4882a593Smuzhiyun 		chip->onfi_version = 21;
3933*4882a593Smuzhiyun 	else if (val & (1 << 2))
3934*4882a593Smuzhiyun 		chip->onfi_version = 20;
3935*4882a593Smuzhiyun 	else if (val & (1 << 1))
3936*4882a593Smuzhiyun 		chip->onfi_version = 10;
3937*4882a593Smuzhiyun 
3938*4882a593Smuzhiyun 	if (!chip->onfi_version) {
3939*4882a593Smuzhiyun 		pr_info("unsupported ONFI version: %d\n", val);
3940*4882a593Smuzhiyun 		return 0;
3941*4882a593Smuzhiyun 	}
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
3944*4882a593Smuzhiyun 	sanitize_string(p->model, sizeof(p->model));
3945*4882a593Smuzhiyun 	if (!mtd->name)
3946*4882a593Smuzhiyun 		mtd->name = p->model;
3947*4882a593Smuzhiyun 
3948*4882a593Smuzhiyun 	mtd->writesize = le32_to_cpu(p->byte_per_page);
3949*4882a593Smuzhiyun 
3950*4882a593Smuzhiyun 	/*
3951*4882a593Smuzhiyun 	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
3952*4882a593Smuzhiyun 	 * (don't ask me who thought of this...). MTD assumes that these
3953*4882a593Smuzhiyun 	 * dimensions will be power-of-2, so just truncate the remaining area.
3954*4882a593Smuzhiyun 	 */
3955*4882a593Smuzhiyun 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
3956*4882a593Smuzhiyun 	mtd->erasesize *= mtd->writesize;
3957*4882a593Smuzhiyun 
3958*4882a593Smuzhiyun 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
3959*4882a593Smuzhiyun 
3960*4882a593Smuzhiyun 	/* See erasesize comment */
3961*4882a593Smuzhiyun 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
3962*4882a593Smuzhiyun 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
3963*4882a593Smuzhiyun 	chip->bits_per_cell = p->bits_per_cell;
3964*4882a593Smuzhiyun 
3965*4882a593Smuzhiyun 	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
3966*4882a593Smuzhiyun 		*busw = NAND_BUSWIDTH_16;
3967*4882a593Smuzhiyun 	else
3968*4882a593Smuzhiyun 		*busw = 0;
3969*4882a593Smuzhiyun 
3970*4882a593Smuzhiyun 	if (p->ecc_bits != 0xff) {
3971*4882a593Smuzhiyun 		chip->ecc_strength_ds = p->ecc_bits;
3972*4882a593Smuzhiyun 		chip->ecc_step_ds = 512;
3973*4882a593Smuzhiyun 	} else if (chip->onfi_version >= 21 &&
3974*4882a593Smuzhiyun 		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
3975*4882a593Smuzhiyun 
3976*4882a593Smuzhiyun 		/*
3977*4882a593Smuzhiyun 		 * nand_flash_detect_ext_param_page() uses the Change Read
3978*4882a593Smuzhiyun 		 * Column command, which may not be supported by the current
3979*4882a593Smuzhiyun 		 * chip->cmdfunc, so try to update chip->cmdfunc now. We do not
3980*4882a593Smuzhiyun 		 * replace a user-supplied command function.
3981*4882a593Smuzhiyun 		 */
3982*4882a593Smuzhiyun 		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3983*4882a593Smuzhiyun 			chip->cmdfunc = nand_command_lp;
3984*4882a593Smuzhiyun 
3985*4882a593Smuzhiyun 		/* The Extended Parameter Page is supported since ONFI 2.1. */
3986*4882a593Smuzhiyun 		if (nand_flash_detect_ext_param_page(mtd, chip, p))
3987*4882a593Smuzhiyun 			pr_warn("Failed to detect ONFI extended param page\n");
3988*4882a593Smuzhiyun 	} else {
3989*4882a593Smuzhiyun 		pr_warn("Could not retrieve ONFI ECC requirements\n");
3990*4882a593Smuzhiyun 	}
3991*4882a593Smuzhiyun 
3992*4882a593Smuzhiyun 	if (p->jedec_id == NAND_MFR_MICRON)
3993*4882a593Smuzhiyun 		nand_onfi_detect_micron(chip, p);
3994*4882a593Smuzhiyun 
3995*4882a593Smuzhiyun 	return 1;
3996*4882a593Smuzhiyun }
3997*4882a593Smuzhiyun #else
3998*4882a593Smuzhiyun static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
3999*4882a593Smuzhiyun 					int *busw)
4000*4882a593Smuzhiyun {
4001*4882a593Smuzhiyun 	return 0;
4002*4882a593Smuzhiyun }
4003*4882a593Smuzhiyun #endif
4004*4882a593Smuzhiyun 
4005*4882a593Smuzhiyun /*
4006*4882a593Smuzhiyun  * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
4007*4882a593Smuzhiyun  */
4008*4882a593Smuzhiyun static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
4009*4882a593Smuzhiyun 					int *busw)
4010*4882a593Smuzhiyun {
4011*4882a593Smuzhiyun 	struct nand_jedec_params *p = &chip->jedec_params;
4012*4882a593Smuzhiyun 	struct jedec_ecc_info *ecc;
4013*4882a593Smuzhiyun 	char id[5];
4014*4882a593Smuzhiyun 	int i, val, ret;
4015*4882a593Smuzhiyun 
4016*4882a593Smuzhiyun 	/* Try JEDEC for unknown chip or LP */
4017*4882a593Smuzhiyun 	ret = nand_readid_op(chip, 0x40, id, sizeof(id));
4018*4882a593Smuzhiyun 	if (ret || strncmp(id, "JEDEC", sizeof(id)))
4019*4882a593Smuzhiyun 		return 0;
4020*4882a593Smuzhiyun 
4021*4882a593Smuzhiyun 	ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
4022*4882a593Smuzhiyun 	if (ret)
4023*4882a593Smuzhiyun 		return 0;
4024*4882a593Smuzhiyun 
4025*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
4026*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, sizeof(*p), true);
4027*4882a593Smuzhiyun 		if (ret)
4028*4882a593Smuzhiyun 			return 0;
4029*4882a593Smuzhiyun 
4030*4882a593Smuzhiyun 		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
4031*4882a593Smuzhiyun 				le16_to_cpu(p->crc))
4032*4882a593Smuzhiyun 			break;
4033*4882a593Smuzhiyun 	}
4034*4882a593Smuzhiyun 
4035*4882a593Smuzhiyun 	if (i == 3) {
4036*4882a593Smuzhiyun 		pr_err("Could not find valid JEDEC parameter page; aborting\n");
4037*4882a593Smuzhiyun 		return 0;
4038*4882a593Smuzhiyun 	}
4039*4882a593Smuzhiyun 
4040*4882a593Smuzhiyun 	/* Check version */
4041*4882a593Smuzhiyun 	val = le16_to_cpu(p->revision);
4042*4882a593Smuzhiyun 	if (val & (1 << 2))
4043*4882a593Smuzhiyun 		chip->jedec_version = 10;
4044*4882a593Smuzhiyun 	else if (val & (1 << 1))
4045*4882a593Smuzhiyun 		chip->jedec_version = 1; /* vendor specific version */
4046*4882a593Smuzhiyun 
4047*4882a593Smuzhiyun 	if (!chip->jedec_version) {
4048*4882a593Smuzhiyun 		pr_info("unsupported JEDEC version: %d\n", val);
4049*4882a593Smuzhiyun 		return 0;
4050*4882a593Smuzhiyun 	}
4051*4882a593Smuzhiyun 
4052*4882a593Smuzhiyun 	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
4053*4882a593Smuzhiyun 	sanitize_string(p->model, sizeof(p->model));
4054*4882a593Smuzhiyun 	if (!mtd->name)
4055*4882a593Smuzhiyun 		mtd->name = p->model;
4056*4882a593Smuzhiyun 
4057*4882a593Smuzhiyun 	mtd->writesize = le32_to_cpu(p->byte_per_page);
4058*4882a593Smuzhiyun 
4059*4882a593Smuzhiyun 	/* See the corresponding comment in nand_flash_detect_onfi(). */
4060*4882a593Smuzhiyun 	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
4061*4882a593Smuzhiyun 	mtd->erasesize *= mtd->writesize;
4062*4882a593Smuzhiyun 
4063*4882a593Smuzhiyun 	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
4064*4882a593Smuzhiyun 
4065*4882a593Smuzhiyun 	/* See the corresponding comment in nand_flash_detect_onfi(). */
4066*4882a593Smuzhiyun 	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
4067*4882a593Smuzhiyun 	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
4068*4882a593Smuzhiyun 	chip->bits_per_cell = p->bits_per_cell;
4069*4882a593Smuzhiyun 
4070*4882a593Smuzhiyun 	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
4071*4882a593Smuzhiyun 		*busw = NAND_BUSWIDTH_16;
4072*4882a593Smuzhiyun 	else
4073*4882a593Smuzhiyun 		*busw = 0;
4074*4882a593Smuzhiyun 
4075*4882a593Smuzhiyun 	/* ECC info */
4076*4882a593Smuzhiyun 	ecc = &p->ecc_info[0];
4077*4882a593Smuzhiyun 
4078*4882a593Smuzhiyun 	if (ecc->codeword_size >= 9) {
4079*4882a593Smuzhiyun 		chip->ecc_strength_ds = ecc->ecc_bits;
4080*4882a593Smuzhiyun 		chip->ecc_step_ds = 1 << ecc->codeword_size;
4081*4882a593Smuzhiyun 	} else {
4082*4882a593Smuzhiyun 		pr_warn("Invalid codeword size\n");
4083*4882a593Smuzhiyun 	}
4084*4882a593Smuzhiyun 
4085*4882a593Smuzhiyun 	return 1;
4086*4882a593Smuzhiyun }
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun /*
4089*4882a593Smuzhiyun  * nand_id_has_period - Check if an ID string has a given wraparound period
4090*4882a593Smuzhiyun  * @id_data: the ID string
4091*4882a593Smuzhiyun  * @arrlen: the length of the @id_data array
4092*4882a593Smuzhiyun  * @period: the period of repetition
4093*4882a593Smuzhiyun  *
4094*4882a593Smuzhiyun  * Check if an ID string is repeated within a given sequence of bytes at
4095*4882a593Smuzhiyun  * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
4096*4882a593Smuzhiyun  * period of 3). This is a helper function for nand_id_len(). Returns non-zero
4097*4882a593Smuzhiyun  * if the repetition has a period of @period; otherwise, returns zero.
4098*4882a593Smuzhiyun  */
4099*4882a593Smuzhiyun static int nand_id_has_period(u8 *id_data, int arrlen, int period)
4100*4882a593Smuzhiyun {
4101*4882a593Smuzhiyun 	int i, j;
4102*4882a593Smuzhiyun 	for (i = 0; i < period; i++)
4103*4882a593Smuzhiyun 		for (j = i + period; j < arrlen; j += period)
4104*4882a593Smuzhiyun 			if (id_data[i] != id_data[j])
4105*4882a593Smuzhiyun 				return 0;
4106*4882a593Smuzhiyun 	return 1;
4107*4882a593Smuzhiyun }
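
/*
 * Worked example: for id_data = {0x20, 0x01, 0x7F, 0x20} and arrlen = 4,
 * period 1 fails (0x20 != 0x01) and period 2 fails (0x20 != 0x7F), but
 * period 3 only compares id_data[0] with id_data[3] (both 0x20), so
 * nand_id_has_period() returns non-zero and nand_id_len() reports 3.
 */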
4108*4882a593Smuzhiyun 
4109*4882a593Smuzhiyun /*
4110*4882a593Smuzhiyun  * nand_id_len - Get the length of an ID string returned by CMD_READID
4111*4882a593Smuzhiyun  * @id_data: the ID string
4112*4882a593Smuzhiyun  * @arrlen: the length of the @id_data array
4113*4882a593Smuzhiyun  *
4114*4882a593Smuzhiyun  * Returns the length of the ID string, according to known wraparound/trailing
4115*4882a593Smuzhiyun  * zero patterns. If no pattern exists, returns the length of the array.
4116*4882a593Smuzhiyun  */
4117*4882a593Smuzhiyun static int nand_id_len(u8 *id_data, int arrlen)
4118*4882a593Smuzhiyun {
4119*4882a593Smuzhiyun 	int last_nonzero, period;
4120*4882a593Smuzhiyun 
4121*4882a593Smuzhiyun 	/* Find last non-zero byte */
4122*4882a593Smuzhiyun 	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4123*4882a593Smuzhiyun 		if (id_data[last_nonzero])
4124*4882a593Smuzhiyun 			break;
4125*4882a593Smuzhiyun 
4126*4882a593Smuzhiyun 	/* All zeros */
4127*4882a593Smuzhiyun 	if (last_nonzero < 0)
4128*4882a593Smuzhiyun 		return 0;
4129*4882a593Smuzhiyun 
4130*4882a593Smuzhiyun 	/* Calculate wraparound period */
4131*4882a593Smuzhiyun 	for (period = 1; period < arrlen; period++)
4132*4882a593Smuzhiyun 		if (nand_id_has_period(id_data, arrlen, period))
4133*4882a593Smuzhiyun 			break;
4134*4882a593Smuzhiyun 
4135*4882a593Smuzhiyun 	/* There's a repeated pattern */
4136*4882a593Smuzhiyun 	if (period < arrlen)
4137*4882a593Smuzhiyun 		return period;
4138*4882a593Smuzhiyun 
4139*4882a593Smuzhiyun 	/* There are trailing zeros */
4140*4882a593Smuzhiyun 	if (last_nonzero < arrlen - 1)
4141*4882a593Smuzhiyun 		return last_nonzero + 1;
4142*4882a593Smuzhiyun 
4143*4882a593Smuzhiyun 	/* No pattern detected */
4144*4882a593Smuzhiyun 	return arrlen;
4145*4882a593Smuzhiyun }
4146*4882a593Smuzhiyun 
4147*4882a593Smuzhiyun /* Extract the number of bits per cell from the 3rd byte of the extended ID */
4148*4882a593Smuzhiyun static int nand_get_bits_per_cell(u8 cellinfo)
4149*4882a593Smuzhiyun {
4150*4882a593Smuzhiyun 	int bits;
4151*4882a593Smuzhiyun 
4152*4882a593Smuzhiyun 	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
4153*4882a593Smuzhiyun 	bits >>= NAND_CI_CELLTYPE_SHIFT;
4154*4882a593Smuzhiyun 	return bits + 1;
4155*4882a593Smuzhiyun }
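
/*
 * Example (assuming the usual NAND_CI_CELLTYPE_MSK = 0x0C, shift = 2
 * definitions): a 3rd ID byte with bits [3:2] == 00b decodes to
 * 1 bit per cell (SLC); 01b decodes to 2 bits per cell (MLC).
 */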
4156*4882a593Smuzhiyun 
4157*4882a593Smuzhiyun /*
4158*4882a593Smuzhiyun  * Many newer NAND chips share similar device ID codes, which encode the size of the
4159*4882a593Smuzhiyun  * chip. The rest of the parameters must be decoded according to generic or
4160*4882a593Smuzhiyun  * manufacturer-specific "extended ID" decoding patterns.
4161*4882a593Smuzhiyun  */
4162*4882a593Smuzhiyun static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
4163*4882a593Smuzhiyun 				u8 id_data[8], int *busw)
4164*4882a593Smuzhiyun {
4165*4882a593Smuzhiyun 	int extid, id_len;
4166*4882a593Smuzhiyun 	/* The 3rd id byte holds MLC / multichip data */
4167*4882a593Smuzhiyun 	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4168*4882a593Smuzhiyun 	/* The 4th id byte is the important one */
4169*4882a593Smuzhiyun 	extid = id_data[3];
4170*4882a593Smuzhiyun 
4171*4882a593Smuzhiyun 	id_len = nand_id_len(id_data, 8);
4172*4882a593Smuzhiyun 
4173*4882a593Smuzhiyun 	/*
4174*4882a593Smuzhiyun 	 * Field definitions are in the following datasheets:
4175*4882a593Smuzhiyun 	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
4176*4882a593Smuzhiyun 	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
4177*4882a593Smuzhiyun 	 * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
4178*4882a593Smuzhiyun 	 *
4179*4882a593Smuzhiyun 	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
4180*4882a593Smuzhiyun 	 * ID to decide what to do.
4181*4882a593Smuzhiyun 	 */
4182*4882a593Smuzhiyun 	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
4183*4882a593Smuzhiyun 			!nand_is_slc(chip) && id_data[5] != 0x00) {
4184*4882a593Smuzhiyun 		/* Calc pagesize */
4185*4882a593Smuzhiyun 		mtd->writesize = 2048 << (extid & 0x03);
4186*4882a593Smuzhiyun 		extid >>= 2;
4187*4882a593Smuzhiyun 		/* Calc oobsize */
4188*4882a593Smuzhiyun 		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
4189*4882a593Smuzhiyun 		case 1:
4190*4882a593Smuzhiyun 			mtd->oobsize = 128;
4191*4882a593Smuzhiyun 			break;
4192*4882a593Smuzhiyun 		case 2:
4193*4882a593Smuzhiyun 			mtd->oobsize = 218;
4194*4882a593Smuzhiyun 			break;
4195*4882a593Smuzhiyun 		case 3:
4196*4882a593Smuzhiyun 			mtd->oobsize = 400;
4197*4882a593Smuzhiyun 			break;
4198*4882a593Smuzhiyun 		case 4:
4199*4882a593Smuzhiyun 			mtd->oobsize = 436;
4200*4882a593Smuzhiyun 			break;
4201*4882a593Smuzhiyun 		case 5:
4202*4882a593Smuzhiyun 			mtd->oobsize = 512;
4203*4882a593Smuzhiyun 			break;
4204*4882a593Smuzhiyun 		case 6:
4205*4882a593Smuzhiyun 			mtd->oobsize = 640;
4206*4882a593Smuzhiyun 			break;
4207*4882a593Smuzhiyun 		case 7:
4208*4882a593Smuzhiyun 		default: /* Other cases are "reserved" (unknown) */
4209*4882a593Smuzhiyun 			mtd->oobsize = 1024;
4210*4882a593Smuzhiyun 			break;
4211*4882a593Smuzhiyun 		}
4212*4882a593Smuzhiyun 		extid >>= 2;
4213*4882a593Smuzhiyun 		/* Calc blocksize */
4214*4882a593Smuzhiyun 		mtd->erasesize = (128 * 1024) <<
4215*4882a593Smuzhiyun 			(((extid >> 1) & 0x04) | (extid & 0x03));
4216*4882a593Smuzhiyun 		*busw = 0;
4217*4882a593Smuzhiyun 	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
4218*4882a593Smuzhiyun 			!nand_is_slc(chip)) {
4219*4882a593Smuzhiyun 		unsigned int tmp;
4220*4882a593Smuzhiyun 
4221*4882a593Smuzhiyun 		/* Calc pagesize */
4222*4882a593Smuzhiyun 		mtd->writesize = 2048 << (extid & 0x03);
4223*4882a593Smuzhiyun 		extid >>= 2;
4224*4882a593Smuzhiyun 		/* Calc oobsize */
4225*4882a593Smuzhiyun 		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
4226*4882a593Smuzhiyun 		case 0:
4227*4882a593Smuzhiyun 			mtd->oobsize = 128;
4228*4882a593Smuzhiyun 			break;
4229*4882a593Smuzhiyun 		case 1:
4230*4882a593Smuzhiyun 			mtd->oobsize = 224;
4231*4882a593Smuzhiyun 			break;
4232*4882a593Smuzhiyun 		case 2:
4233*4882a593Smuzhiyun 			mtd->oobsize = 448;
4234*4882a593Smuzhiyun 			break;
4235*4882a593Smuzhiyun 		case 3:
4236*4882a593Smuzhiyun 			mtd->oobsize = 64;
4237*4882a593Smuzhiyun 			break;
4238*4882a593Smuzhiyun 		case 4:
4239*4882a593Smuzhiyun 			mtd->oobsize = 32;
4240*4882a593Smuzhiyun 			break;
4241*4882a593Smuzhiyun 		case 5:
4242*4882a593Smuzhiyun 			mtd->oobsize = 16;
4243*4882a593Smuzhiyun 			break;
4244*4882a593Smuzhiyun 		default:
4245*4882a593Smuzhiyun 			mtd->oobsize = 640;
4246*4882a593Smuzhiyun 			break;
4247*4882a593Smuzhiyun 		}
4248*4882a593Smuzhiyun 		extid >>= 2;
4249*4882a593Smuzhiyun 		/* Calc blocksize */
4250*4882a593Smuzhiyun 		tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
4251*4882a593Smuzhiyun 		if (tmp < 0x03)
4252*4882a593Smuzhiyun 			mtd->erasesize = (128 * 1024) << tmp;
4253*4882a593Smuzhiyun 		else if (tmp == 0x03)
4254*4882a593Smuzhiyun 			mtd->erasesize = 768 * 1024;
4255*4882a593Smuzhiyun 		else
4256*4882a593Smuzhiyun 			mtd->erasesize = (64 * 1024) << tmp;
4257*4882a593Smuzhiyun 		*busw = 0;
4258*4882a593Smuzhiyun 	} else {
4259*4882a593Smuzhiyun 		/* Calc pagesize */
4260*4882a593Smuzhiyun 		mtd->writesize = 1024 << (extid & 0x03);
4261*4882a593Smuzhiyun 		extid >>= 2;
4262*4882a593Smuzhiyun 		/* Calc oobsize */
4263*4882a593Smuzhiyun 		mtd->oobsize = (8 << (extid & 0x01)) *
4264*4882a593Smuzhiyun 			(mtd->writesize >> 9);
4265*4882a593Smuzhiyun 		extid >>= 2;
4266*4882a593Smuzhiyun 		/* Calc blocksize. Blocksize is multiples of 64KiB */
4267*4882a593Smuzhiyun 		mtd->erasesize = (64 * 1024) << (extid & 0x03);
4268*4882a593Smuzhiyun 		extid >>= 2;
4269*4882a593Smuzhiyun 		/* Get buswidth information */
4270*4882a593Smuzhiyun 		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
4271*4882a593Smuzhiyun 
4272*4882a593Smuzhiyun 		/*
4273*4882a593Smuzhiyun 		 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
4274*4882a593Smuzhiyun 		 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
4275*4882a593Smuzhiyun 		 * follows:
4276*4882a593Smuzhiyun 		 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
4277*4882a593Smuzhiyun 		 *                         110b -> 24nm
4278*4882a593Smuzhiyun 		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
4279*4882a593Smuzhiyun 		 */
4280*4882a593Smuzhiyun 		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
4281*4882a593Smuzhiyun 				nand_is_slc(chip) &&
4282*4882a593Smuzhiyun 				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
4283*4882a593Smuzhiyun 				!(id_data[4] & 0x80) /* !BENAND */) {
4284*4882a593Smuzhiyun 			mtd->oobsize = 32 * mtd->writesize >> 9;
4285*4882a593Smuzhiyun 		}
4286*4882a593Smuzhiyun 
4287*4882a593Smuzhiyun 	}
4288*4882a593Smuzhiyun }
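
/*
 * Worked example for the generic (final) branch above: a 4th ID byte of
 * 0x95 decodes to writesize = 1024 << 1 = 2048, oobsize = (8 << 1) *
 * (2048 >> 9) = 64, erasesize = 64KiB << 1 = 128KiB and an 8-bit bus.
 */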
4289*4882a593Smuzhiyun 
4290*4882a593Smuzhiyun /*
4291*4882a593Smuzhiyun  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4292*4882a593Smuzhiyun  * decodes a matching ID table entry and assigns the MTD size parameters for
4293*4882a593Smuzhiyun  * the chip.
4294*4882a593Smuzhiyun  */
4295*4882a593Smuzhiyun static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
4296*4882a593Smuzhiyun 				struct nand_flash_dev *type, u8 id_data[8],
4297*4882a593Smuzhiyun 				int *busw)
4298*4882a593Smuzhiyun {
4299*4882a593Smuzhiyun 	int maf_id = id_data[0];
4300*4882a593Smuzhiyun 
4301*4882a593Smuzhiyun 	mtd->erasesize = type->erasesize;
4302*4882a593Smuzhiyun 	mtd->writesize = type->pagesize;
4303*4882a593Smuzhiyun 	mtd->oobsize = mtd->writesize / 32;
4304*4882a593Smuzhiyun 	*busw = type->options & NAND_BUSWIDTH_16;
4305*4882a593Smuzhiyun 
4306*4882a593Smuzhiyun 	/* All legacy ID NAND are small-page, SLC */
4307*4882a593Smuzhiyun 	chip->bits_per_cell = 1;
4308*4882a593Smuzhiyun 
4309*4882a593Smuzhiyun 	/*
4310*4882a593Smuzhiyun 	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
4311*4882a593Smuzhiyun 	 * some Spansion chips have erasesize that conflicts with size
4312*4882a593Smuzhiyun 	 * listed in nand_ids table.
4313*4882a593Smuzhiyun 	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
4314*4882a593Smuzhiyun 	 */
4315*4882a593Smuzhiyun 	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
4316*4882a593Smuzhiyun 			&& id_data[6] == 0x00 && id_data[7] == 0x00
4317*4882a593Smuzhiyun 			&& mtd->writesize == 512) {
4318*4882a593Smuzhiyun 		mtd->erasesize = 128 * 1024;
4319*4882a593Smuzhiyun 		mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
4320*4882a593Smuzhiyun 	}
4321*4882a593Smuzhiyun }
4322*4882a593Smuzhiyun 
4323*4882a593Smuzhiyun /*
4324*4882a593Smuzhiyun  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4325*4882a593Smuzhiyun  * heuristic patterns using various detected parameters (e.g., manufacturer,
4326*4882a593Smuzhiyun  * page size, cell-type information).
4327*4882a593Smuzhiyun  */
4328*4882a593Smuzhiyun static void nand_decode_bbm_options(struct mtd_info *mtd,
4329*4882a593Smuzhiyun 				    struct nand_chip *chip, u8 id_data[8])
4330*4882a593Smuzhiyun {
4331*4882a593Smuzhiyun 	int maf_id = id_data[0];
4332*4882a593Smuzhiyun 
4333*4882a593Smuzhiyun 	/* Set the bad block position */
4334*4882a593Smuzhiyun 	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4335*4882a593Smuzhiyun 		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
4336*4882a593Smuzhiyun 	else
4337*4882a593Smuzhiyun 		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
4338*4882a593Smuzhiyun 
4339*4882a593Smuzhiyun 	/*
4340*4882a593Smuzhiyun 	 * Bad block marker is stored in the last page of each block on Samsung
4341*4882a593Smuzhiyun 	 * and Hynix MLC devices; stored in first two pages of each block on
4342*4882a593Smuzhiyun 	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
4343*4882a593Smuzhiyun 	 * AMD/Spansion, and Macronix.  All others scan only the first page.
4344*4882a593Smuzhiyun 	 */
4345*4882a593Smuzhiyun 	if (!nand_is_slc(chip) &&
4346*4882a593Smuzhiyun 			(maf_id == NAND_MFR_SAMSUNG ||
4347*4882a593Smuzhiyun 			 maf_id == NAND_MFR_HYNIX))
4348*4882a593Smuzhiyun 		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
4349*4882a593Smuzhiyun 	else if ((nand_is_slc(chip) &&
4350*4882a593Smuzhiyun 				(maf_id == NAND_MFR_SAMSUNG ||
4351*4882a593Smuzhiyun 				 maf_id == NAND_MFR_HYNIX ||
4352*4882a593Smuzhiyun 				 maf_id == NAND_MFR_TOSHIBA ||
4353*4882a593Smuzhiyun 				 maf_id == NAND_MFR_AMD ||
4354*4882a593Smuzhiyun 				 maf_id == NAND_MFR_MACRONIX)) ||
4355*4882a593Smuzhiyun 			(mtd->writesize == 2048 &&
4356*4882a593Smuzhiyun 			 maf_id == NAND_MFR_MICRON))
4357*4882a593Smuzhiyun 		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
4358*4882a593Smuzhiyun }
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun static inline bool is_full_id_nand(struct nand_flash_dev *type)
4361*4882a593Smuzhiyun {
4362*4882a593Smuzhiyun 	return type->id_len;
4363*4882a593Smuzhiyun }
4364*4882a593Smuzhiyun 
4365*4882a593Smuzhiyun static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
4366*4882a593Smuzhiyun 		   struct nand_flash_dev *type, u8 *id_data, int *busw)
4367*4882a593Smuzhiyun {
4368*4882a593Smuzhiyun 	if (!strncmp((char *)type->id, (char *)id_data, type->id_len)) {
4369*4882a593Smuzhiyun 		mtd->writesize = type->pagesize;
4370*4882a593Smuzhiyun 		mtd->erasesize = type->erasesize;
4371*4882a593Smuzhiyun 		mtd->oobsize = type->oobsize;
4372*4882a593Smuzhiyun 
4373*4882a593Smuzhiyun 		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4374*4882a593Smuzhiyun 		chip->chipsize = (uint64_t)type->chipsize << 20;
4375*4882a593Smuzhiyun 		chip->options |= type->options;
4376*4882a593Smuzhiyun 		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
4377*4882a593Smuzhiyun 		chip->ecc_step_ds = NAND_ECC_STEP(type);
4378*4882a593Smuzhiyun 		chip->onfi_timing_mode_default =
4379*4882a593Smuzhiyun 					type->onfi_timing_mode_default;
4380*4882a593Smuzhiyun 
4381*4882a593Smuzhiyun 		*busw = type->options & NAND_BUSWIDTH_16;
4382*4882a593Smuzhiyun 
4383*4882a593Smuzhiyun 		if (!mtd->name)
4384*4882a593Smuzhiyun 			mtd->name = type->name;
4385*4882a593Smuzhiyun 
4386*4882a593Smuzhiyun 		return true;
4387*4882a593Smuzhiyun 	}
4388*4882a593Smuzhiyun 	return false;
4389*4882a593Smuzhiyun }
4390*4882a593Smuzhiyun 
4391*4882a593Smuzhiyun /*
4392*4882a593Smuzhiyun  * Get the flash and manufacturer id and lookup if the type is supported.
4393*4882a593Smuzhiyun  */
4394*4882a593Smuzhiyun struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
4395*4882a593Smuzhiyun 						  struct nand_chip *chip,
4396*4882a593Smuzhiyun 						  int *maf_id, int *dev_id,
4397*4882a593Smuzhiyun 						  struct nand_flash_dev *type)
4398*4882a593Smuzhiyun {
4399*4882a593Smuzhiyun 	int busw, ret;
4400*4882a593Smuzhiyun 	int maf_idx;
4401*4882a593Smuzhiyun 	u8 id_data[8];
4402*4882a593Smuzhiyun 
4403*4882a593Smuzhiyun 	/*
4404*4882a593Smuzhiyun 	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
4405*4882a593Smuzhiyun 	 * after power-up.
4406*4882a593Smuzhiyun 	 */
4407*4882a593Smuzhiyun 	ret = nand_reset(chip, 0);
4408*4882a593Smuzhiyun 	if (ret)
4409*4882a593Smuzhiyun 		return ERR_PTR(ret);
4410*4882a593Smuzhiyun 
4411*4882a593Smuzhiyun 	/* Select the device */
4412*4882a593Smuzhiyun 	chip->select_chip(mtd, 0);
4413*4882a593Smuzhiyun 
4414*4882a593Smuzhiyun 	/* Send the command for reading device ID */
4415*4882a593Smuzhiyun 	ret = nand_readid_op(chip, 0, id_data, 2);
4416*4882a593Smuzhiyun 	if (ret)
4417*4882a593Smuzhiyun 		return ERR_PTR(ret);
4418*4882a593Smuzhiyun 
4419*4882a593Smuzhiyun 	/* Read manufacturer and device IDs */
4420*4882a593Smuzhiyun 	*maf_id = id_data[0];
4421*4882a593Smuzhiyun 	*dev_id = id_data[1];
4422*4882a593Smuzhiyun 
4423*4882a593Smuzhiyun 	/*
4424*4882a593Smuzhiyun 	 * Try again to make sure: on some systems, bus-hold or other
4425*4882a593Smuzhiyun 	 * interface issues can produce random data that looks like a
4426*4882a593Smuzhiyun 	 * plausible NAND flash ID. If the two results do not match,
4427*4882a593Smuzhiyun 	 * ignore the device completely.
4428*4882a593Smuzhiyun 	 */
4429*4882a593Smuzhiyun 
4430*4882a593Smuzhiyun 	/* Read entire ID string */
4431*4882a593Smuzhiyun 	ret = nand_readid_op(chip, 0, id_data, 8);
4432*4882a593Smuzhiyun 	if (ret)
4433*4882a593Smuzhiyun 		return ERR_PTR(ret);
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun 	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
4436*4882a593Smuzhiyun 		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
4437*4882a593Smuzhiyun 			*maf_id, *dev_id, id_data[0], id_data[1]);
4438*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
4439*4882a593Smuzhiyun 	}
4440*4882a593Smuzhiyun 
4441*4882a593Smuzhiyun 	if (!type)
4442*4882a593Smuzhiyun 		type = nand_flash_ids;
4443*4882a593Smuzhiyun 
4444*4882a593Smuzhiyun 	for (; type->name != NULL; type++) {
4445*4882a593Smuzhiyun 		if (is_full_id_nand(type)) {
4446*4882a593Smuzhiyun 			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
4447*4882a593Smuzhiyun 				goto ident_done;
4448*4882a593Smuzhiyun 		} else if (*dev_id == type->dev_id) {
4449*4882a593Smuzhiyun 			break;
4450*4882a593Smuzhiyun 		}
4451*4882a593Smuzhiyun 	}
4452*4882a593Smuzhiyun 
4453*4882a593Smuzhiyun 	chip->onfi_version = 0;
4454*4882a593Smuzhiyun 	if (!type->name || !type->pagesize) {
4455*4882a593Smuzhiyun 		/* Check if the chip is ONFI compliant */
4456*4882a593Smuzhiyun 		if (nand_flash_detect_onfi(mtd, chip, &busw))
4457*4882a593Smuzhiyun 			goto ident_done;
4458*4882a593Smuzhiyun 
4459*4882a593Smuzhiyun 		/* Check if the chip is JEDEC compliant */
4460*4882a593Smuzhiyun 		if (nand_flash_detect_jedec(mtd, chip, &busw))
4461*4882a593Smuzhiyun 			goto ident_done;
4462*4882a593Smuzhiyun 	}
4463*4882a593Smuzhiyun 
4464*4882a593Smuzhiyun 	if (!type->name)
4465*4882a593Smuzhiyun 		return ERR_PTR(-ENODEV);
4466*4882a593Smuzhiyun 
4467*4882a593Smuzhiyun 	if (!mtd->name)
4468*4882a593Smuzhiyun 		mtd->name = type->name;
4469*4882a593Smuzhiyun 
4470*4882a593Smuzhiyun 	chip->chipsize = (uint64_t)type->chipsize << 20;
4471*4882a593Smuzhiyun 
4472*4882a593Smuzhiyun 	if (!type->pagesize) {
4473*4882a593Smuzhiyun 		/* Decode parameters from extended ID */
4474*4882a593Smuzhiyun 		nand_decode_ext_id(mtd, chip, id_data, &busw);
4475*4882a593Smuzhiyun 	} else {
4476*4882a593Smuzhiyun 		nand_decode_id(mtd, chip, type, id_data, &busw);
4477*4882a593Smuzhiyun 	}
4478*4882a593Smuzhiyun 	/* Get chip options */
4479*4882a593Smuzhiyun 	chip->options |= type->options;
4480*4882a593Smuzhiyun 
4481*4882a593Smuzhiyun 	/*
4482*4882a593Smuzhiyun 	 * Check if chip is not a Samsung device. Do not clear the
4483*4882a593Smuzhiyun 	 * options for chips which do not have an extended id.
4484*4882a593Smuzhiyun 	 */
4485*4882a593Smuzhiyun 	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
4486*4882a593Smuzhiyun 		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
4487*4882a593Smuzhiyun ident_done:
4488*4882a593Smuzhiyun 
4489*4882a593Smuzhiyun 	/* Try to identify manufacturer */
4490*4882a593Smuzhiyun 	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
4491*4882a593Smuzhiyun 		if (nand_manuf_ids[maf_idx].id == *maf_id)
4492*4882a593Smuzhiyun 			break;
4493*4882a593Smuzhiyun 	}
4494*4882a593Smuzhiyun 
4495*4882a593Smuzhiyun 	if (chip->options & NAND_BUSWIDTH_AUTO) {
4496*4882a593Smuzhiyun 		WARN_ON(chip->options & NAND_BUSWIDTH_16);
4497*4882a593Smuzhiyun 		chip->options |= busw;
4498*4882a593Smuzhiyun 		nand_set_defaults(chip, busw);
4499*4882a593Smuzhiyun 	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
4500*4882a593Smuzhiyun 		/*
4501*4882a593Smuzhiyun 		 * Check if the bus width is correct. Hardware drivers should
4502*4882a593Smuzhiyun 		 * set up the chip correctly!
4503*4882a593Smuzhiyun 		 */
4504*4882a593Smuzhiyun 		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4505*4882a593Smuzhiyun 			*maf_id, *dev_id);
4506*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
4507*4882a593Smuzhiyun 		pr_warn("bus width %d instead %d bit\n",
4508*4882a593Smuzhiyun 			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
4509*4882a593Smuzhiyun 			   busw ? 16 : 8);
4510*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
4511*4882a593Smuzhiyun 	}
4512*4882a593Smuzhiyun 
4513*4882a593Smuzhiyun 	nand_decode_bbm_options(mtd, chip, id_data);
4514*4882a593Smuzhiyun 
4515*4882a593Smuzhiyun 	/* Calculate the address shift from the page size */
4516*4882a593Smuzhiyun 	chip->page_shift = ffs(mtd->writesize) - 1;
4517*4882a593Smuzhiyun 	/* Convert chipsize to number of pages per chip -1 */
4518*4882a593Smuzhiyun 	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
4519*4882a593Smuzhiyun 
4520*4882a593Smuzhiyun 	chip->bbt_erase_shift = chip->phys_erase_shift =
4521*4882a593Smuzhiyun 		ffs(mtd->erasesize) - 1;
4522*4882a593Smuzhiyun 	if (chip->chipsize & 0xffffffff)
4523*4882a593Smuzhiyun 		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
4524*4882a593Smuzhiyun 	else {
4525*4882a593Smuzhiyun 		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
4526*4882a593Smuzhiyun 		chip->chip_shift += 32 - 1;
4527*4882a593Smuzhiyun 	}
4528*4882a593Smuzhiyun 
4529*4882a593Smuzhiyun 	if (chip->chip_shift - chip->page_shift > 16)
4530*4882a593Smuzhiyun 		chip->options |= NAND_ROW_ADDR_3;
4531*4882a593Smuzhiyun 
4532*4882a593Smuzhiyun 	chip->badblockbits = 8;
4533*4882a593Smuzhiyun 	chip->erase = single_erase;
4534*4882a593Smuzhiyun 
4535*4882a593Smuzhiyun 	/* Do not replace user supplied command function! */
4536*4882a593Smuzhiyun 	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
4537*4882a593Smuzhiyun 		chip->cmdfunc = nand_command_lp;
4538*4882a593Smuzhiyun 
4539*4882a593Smuzhiyun 	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
4540*4882a593Smuzhiyun 		*maf_id, *dev_id);
4541*4882a593Smuzhiyun 
4542*4882a593Smuzhiyun #ifdef CONFIG_SYS_NAND_ONFI_DETECTION
4543*4882a593Smuzhiyun 	if (chip->onfi_version)
4544*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4545*4882a593Smuzhiyun 				chip->onfi_params.model);
4546*4882a593Smuzhiyun 	else if (chip->jedec_version)
4547*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4548*4882a593Smuzhiyun 				chip->jedec_params.model);
4549*4882a593Smuzhiyun 	else
4550*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4551*4882a593Smuzhiyun 				type->name);
4552*4882a593Smuzhiyun #else
4553*4882a593Smuzhiyun 	if (chip->jedec_version)
4554*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4555*4882a593Smuzhiyun 				chip->jedec_params.model);
4556*4882a593Smuzhiyun 	else
4557*4882a593Smuzhiyun 		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
4558*4882a593Smuzhiyun 				type->name);
4559*4882a593Smuzhiyun 
4562*4882a593Smuzhiyun #endif
4563*4882a593Smuzhiyun 
4564*4882a593Smuzhiyun 	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
4565*4882a593Smuzhiyun 		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
4566*4882a593Smuzhiyun 		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
4567*4882a593Smuzhiyun 	return type;
4568*4882a593Smuzhiyun }
4569*4882a593Smuzhiyun EXPORT_SYMBOL(nand_get_flash_type);
4570*4882a593Smuzhiyun 
4571*4882a593Smuzhiyun #if CONFIG_IS_ENABLED(OF_CONTROL)
4572*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
4573*4882a593Smuzhiyun 
4574*4882a593Smuzhiyun static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node)
4575*4882a593Smuzhiyun {
4576*4882a593Smuzhiyun 	int ret, ecc_mode = -1, ecc_strength, ecc_step;
4577*4882a593Smuzhiyun 	const void *blob = gd->fdt_blob;
4578*4882a593Smuzhiyun 	const char *str;
4579*4882a593Smuzhiyun 
4580*4882a593Smuzhiyun 	ret = fdtdec_get_int(blob, node, "nand-bus-width", -1);
4581*4882a593Smuzhiyun 	if (ret == 16)
4582*4882a593Smuzhiyun 		chip->options |= NAND_BUSWIDTH_16;
4583*4882a593Smuzhiyun 
4584*4882a593Smuzhiyun 	if (fdtdec_get_bool(blob, node, "nand-on-flash-bbt"))
4585*4882a593Smuzhiyun 		chip->bbt_options |= NAND_BBT_USE_FLASH;
4586*4882a593Smuzhiyun 
4587*4882a593Smuzhiyun 	str = fdt_getprop(blob, node, "nand-ecc-mode", NULL);
4588*4882a593Smuzhiyun 	if (str) {
4589*4882a593Smuzhiyun 		if (!strcmp(str, "none"))
4590*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_NONE;
4591*4882a593Smuzhiyun 		else if (!strcmp(str, "soft"))
4592*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_SOFT;
4593*4882a593Smuzhiyun 		else if (!strcmp(str, "hw"))
4594*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_HW;
4595*4882a593Smuzhiyun 		else if (!strcmp(str, "hw_syndrome"))
4596*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_HW_SYNDROME;
4597*4882a593Smuzhiyun 		else if (!strcmp(str, "hw_oob_first"))
4598*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_HW_OOB_FIRST;
4599*4882a593Smuzhiyun 		else if (!strcmp(str, "soft_bch"))
4600*4882a593Smuzhiyun 			ecc_mode = NAND_ECC_SOFT_BCH;
4601*4882a593Smuzhiyun 	}
4602*4882a593Smuzhiyun 
4603*4882a593Smuzhiyun 
4604*4882a593Smuzhiyun 	ecc_strength = fdtdec_get_int(blob, node, "nand-ecc-strength", -1);
4605*4882a593Smuzhiyun 	ecc_step = fdtdec_get_int(blob, node, "nand-ecc-step-size", -1);
4606*4882a593Smuzhiyun 
4607*4882a593Smuzhiyun 	if ((ecc_step >= 0) != (ecc_strength >= 0)) {
4609*4882a593Smuzhiyun 		pr_err("must set both strength and step size in DT\n");
4610*4882a593Smuzhiyun 		return -EINVAL;
4611*4882a593Smuzhiyun 	}
4612*4882a593Smuzhiyun 
4613*4882a593Smuzhiyun 	if (ecc_mode >= 0)
4614*4882a593Smuzhiyun 		chip->ecc.mode = ecc_mode;
4615*4882a593Smuzhiyun 
4616*4882a593Smuzhiyun 	if (ecc_strength >= 0)
4617*4882a593Smuzhiyun 		chip->ecc.strength = ecc_strength;
4618*4882a593Smuzhiyun 
4619*4882a593Smuzhiyun 	if (ecc_step > 0)
4620*4882a593Smuzhiyun 		chip->ecc.size = ecc_step;
4621*4882a593Smuzhiyun 
4622*4882a593Smuzhiyun 	if (fdt_getprop(blob, node, "nand-ecc-maximize", NULL))
4623*4882a593Smuzhiyun 		chip->ecc.options |= NAND_ECC_MAXIMIZE;
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun 	return 0;
4626*4882a593Smuzhiyun }
4627*4882a593Smuzhiyun #else
4628*4882a593Smuzhiyun static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip, int node)
4629*4882a593Smuzhiyun {
4630*4882a593Smuzhiyun 	return 0;
4631*4882a593Smuzhiyun }
4632*4882a593Smuzhiyun #endif /* CONFIG_IS_ENABLED(OF_CONTROL) */
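
/*
 * Illustrative device tree fragment for the properties parsed by
 * nand_dt_init() above (a sketch only: the node name and values are made up,
 * the exact binding depends on the NAND controller driver):
 *
 *	nand@0 {
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *		nand-ecc-mode = "hw";
 *		// strength and step size must be given together, otherwise
 *		// nand_dt_init() returns -EINVAL
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */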
4633*4882a593Smuzhiyun 
4634*4882a593Smuzhiyun /**
4635*4882a593Smuzhiyun  * nand_scan_ident - [NAND Interface] Scan for the NAND device
4636*4882a593Smuzhiyun  * @mtd: MTD device structure
4637*4882a593Smuzhiyun  * @maxchips: number of chips to scan for
4638*4882a593Smuzhiyun  * @table: alternative NAND ID table
4639*4882a593Smuzhiyun  *
4640*4882a593Smuzhiyun  * This is the first phase of the normal nand_scan() function. It reads the
4641*4882a593Smuzhiyun  * flash ID and sets up MTD fields accordingly.
4642*4882a593Smuzhiyun  *
4643*4882a593Smuzhiyun  */
4644*4882a593Smuzhiyun int nand_scan_ident(struct mtd_info *mtd, int maxchips,
4645*4882a593Smuzhiyun 		    struct nand_flash_dev *table)
4646*4882a593Smuzhiyun {
4647*4882a593Smuzhiyun 	int i, nand_maf_id, nand_dev_id;
4648*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
4649*4882a593Smuzhiyun 	struct nand_flash_dev *type;
4650*4882a593Smuzhiyun 	int ret;
4651*4882a593Smuzhiyun 
4652*4882a593Smuzhiyun 	if (chip->flash_node) {
4653*4882a593Smuzhiyun 		ret = nand_dt_init(mtd, chip, chip->flash_node);
4654*4882a593Smuzhiyun 		if (ret)
4655*4882a593Smuzhiyun 			return ret;
4656*4882a593Smuzhiyun 	}
4657*4882a593Smuzhiyun 
4658*4882a593Smuzhiyun 	/* Set the default functions */
4659*4882a593Smuzhiyun 	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
4660*4882a593Smuzhiyun 
4661*4882a593Smuzhiyun 	/* Read the flash type */
4662*4882a593Smuzhiyun 	type = nand_get_flash_type(mtd, chip, &nand_maf_id,
4663*4882a593Smuzhiyun 				   &nand_dev_id, table);
4664*4882a593Smuzhiyun 
4665*4882a593Smuzhiyun 	if (IS_ERR(type)) {
4666*4882a593Smuzhiyun 		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
4667*4882a593Smuzhiyun 			pr_warn("No NAND device found\n");
4668*4882a593Smuzhiyun 		chip->select_chip(mtd, -1);
4669*4882a593Smuzhiyun 		return PTR_ERR(type);
4670*4882a593Smuzhiyun 	}
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun 	/* Initialize the ->data_interface field. */
4673*4882a593Smuzhiyun 	ret = nand_init_data_interface(chip);
4674*4882a593Smuzhiyun 	if (ret)
4675*4882a593Smuzhiyun 		return ret;
4676*4882a593Smuzhiyun 
4677*4882a593Smuzhiyun 	/*
4678*4882a593Smuzhiyun 	 * Setup the data interface correctly on the chip and controller side.
4679*4882a593Smuzhiyun 	 * This explicit call to nand_setup_data_interface() is only required
4680*4882a593Smuzhiyun 	 * for the first die, because nand_reset() has been called before
4681*4882a593Smuzhiyun 	 * ->data_interface and ->default_onfi_timing_mode were set.
4682*4882a593Smuzhiyun 	 * For the other dies, nand_reset() will automatically switch to the
4683*4882a593Smuzhiyun 	 * best mode for us.
4684*4882a593Smuzhiyun 	 */
4685*4882a593Smuzhiyun 	ret = nand_setup_data_interface(chip, 0);
4686*4882a593Smuzhiyun 	if (ret)
4687*4882a593Smuzhiyun 		return ret;
4688*4882a593Smuzhiyun 
4689*4882a593Smuzhiyun 	chip->select_chip(mtd, -1);
4690*4882a593Smuzhiyun 
4691*4882a593Smuzhiyun 	/* Check for a chip array */
4692*4882a593Smuzhiyun 	for (i = 1; i < maxchips; i++) {
4693*4882a593Smuzhiyun 		u8 id[2];
4694*4882a593Smuzhiyun 
4695*4882a593Smuzhiyun 		/* See comment in nand_get_flash_type for reset */
4696*4882a593Smuzhiyun 		nand_reset(chip, i);
4697*4882a593Smuzhiyun 
4698*4882a593Smuzhiyun 		chip->select_chip(mtd, i);
4699*4882a593Smuzhiyun 		/* Send the command for reading device ID */
4700*4882a593Smuzhiyun 		nand_readid_op(chip, 0, id, sizeof(id));
4701*4882a593Smuzhiyun 
4702*4882a593Smuzhiyun 		/* Read manufacturer and device IDs */
4703*4882a593Smuzhiyun 		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
4704*4882a593Smuzhiyun 			chip->select_chip(mtd, -1);
4705*4882a593Smuzhiyun 			break;
4706*4882a593Smuzhiyun 		}
4707*4882a593Smuzhiyun 		chip->select_chip(mtd, -1);
4708*4882a593Smuzhiyun 	}
4709*4882a593Smuzhiyun 
4710*4882a593Smuzhiyun #ifdef DEBUG
4711*4882a593Smuzhiyun 	if (i > 1)
4712*4882a593Smuzhiyun 		pr_info("%d chips detected\n", i);
4713*4882a593Smuzhiyun #endif
4714*4882a593Smuzhiyun 
4715*4882a593Smuzhiyun 	/* Store the number of chips and calc total size for mtd */
4716*4882a593Smuzhiyun 	chip->numchips = i;
4717*4882a593Smuzhiyun 	mtd->size = i * chip->chipsize;
4718*4882a593Smuzhiyun 
4719*4882a593Smuzhiyun 	return 0;
4720*4882a593Smuzhiyun }
4721*4882a593Smuzhiyun EXPORT_SYMBOL(nand_scan_ident);
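
/*
 * Usage sketch for the two-phase scan (illustrative only; the my_* names are
 * hypothetical controller hooks, not part of this file):
 *
 *	static int my_nand_probe(struct mtd_info *mtd, struct nand_chip *chip)
 *	{
 *		int ret;
 *
 *		ret = nand_scan_ident(mtd, 1, NULL);	// phase 1: identify chip
 *		if (ret)
 *			return ret;
 *
 *		// geometry (mtd->writesize, mtd->oobsize, ...) is known now,
 *		// so the ECC configuration can be chosen before phase 2
 *		chip->ecc.mode = NAND_ECC_HW;
 *		chip->ecc.size = 512;
 *		chip->ecc.strength = 8;
 *		chip->ecc.bytes = 13;		// e.g. 8-bit BCH over 512 bytes
 *		chip->ecc.hwctl = my_ecc_hwctl;
 *		chip->ecc.calculate = my_ecc_calculate;
 *		chip->ecc.correct = my_ecc_correct;
 *
 *		return nand_scan_tail(mtd);	// phase 2: fill in defaults
 *	}
 */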
4722*4882a593Smuzhiyun 
4723*4882a593Smuzhiyun /**
4724*4882a593Smuzhiyun  * nand_check_ecc_caps - check the sanity of preset ECC settings
4725*4882a593Smuzhiyun  * @chip: nand chip info structure
4726*4882a593Smuzhiyun  * @caps: ECC caps info structure
4727*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
4728*4882a593Smuzhiyun  *
4729*4882a593Smuzhiyun  * When ECC step size and strength are already set, check if they are supported
4730*4882a593Smuzhiyun  * by the controller and the calculated ECC bytes fit within the chip's OOB.
4731*4882a593Smuzhiyun  * On success, the calculated number of ECC bytes is set.
4732*4882a593Smuzhiyun  */
4733*4882a593Smuzhiyun int nand_check_ecc_caps(struct nand_chip *chip,
4734*4882a593Smuzhiyun 			const struct nand_ecc_caps *caps, int oobavail)
4735*4882a593Smuzhiyun {
4736*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4737*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
4738*4882a593Smuzhiyun 	int preset_step = chip->ecc.size;
4739*4882a593Smuzhiyun 	int preset_strength = chip->ecc.strength;
4740*4882a593Smuzhiyun 	int nsteps, ecc_bytes;
4741*4882a593Smuzhiyun 	int i, j;
4742*4882a593Smuzhiyun 
4743*4882a593Smuzhiyun 	if (WARN_ON(oobavail < 0))
4744*4882a593Smuzhiyun 		return -EINVAL;
4745*4882a593Smuzhiyun 
4746*4882a593Smuzhiyun 	if (!preset_step || !preset_strength)
4747*4882a593Smuzhiyun 		return -ENODATA;
4748*4882a593Smuzhiyun 
4749*4882a593Smuzhiyun 	nsteps = mtd->writesize / preset_step;
4750*4882a593Smuzhiyun 
4751*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
4752*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun 		if (stepinfo->stepsize != preset_step)
4755*4882a593Smuzhiyun 			continue;
4756*4882a593Smuzhiyun 
4757*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
4758*4882a593Smuzhiyun 			if (stepinfo->strengths[j] != preset_strength)
4759*4882a593Smuzhiyun 				continue;
4760*4882a593Smuzhiyun 
4761*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(preset_step,
4762*4882a593Smuzhiyun 							 preset_strength);
4763*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
4764*4882a593Smuzhiyun 				return ecc_bytes;
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 			if (ecc_bytes * nsteps > oobavail) {
4767*4882a593Smuzhiyun 				pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
4768*4882a593Smuzhiyun 				       preset_step, preset_strength);
4769*4882a593Smuzhiyun 				return -ENOSPC;
4770*4882a593Smuzhiyun 			}
4771*4882a593Smuzhiyun 
4772*4882a593Smuzhiyun 			chip->ecc.bytes = ecc_bytes;
4773*4882a593Smuzhiyun 
4774*4882a593Smuzhiyun 			return 0;
4775*4882a593Smuzhiyun 		}
4776*4882a593Smuzhiyun 	}
4777*4882a593Smuzhiyun 
4778*4882a593Smuzhiyun 	pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
4779*4882a593Smuzhiyun 	       preset_step, preset_strength);
4780*4882a593Smuzhiyun 
4781*4882a593Smuzhiyun 	return -ENOTSUPP;
4782*4882a593Smuzhiyun }
4783*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_check_ecc_caps);
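
/*
 * Usage sketch for nand_check_ecc_caps() (illustrative only: the my_* names
 * are hypothetical, and the ECC-bytes formula is just an example of what a
 * controller driver might implement):
 *
 *	static int my_calc_ecc_bytes(int step_size, int strength)
 *	{
 *		// 13 parity bits per corrected bit for 512-byte codewords
 *		return DIV_ROUND_UP(strength * 13, 8);
 *	}
 *
 *	static const int my_strengths[] = { 4, 8 };
 *	static const struct nand_ecc_step_info my_stepinfo = {
 *		.stepsize   = 512,
 *		.strengths  = my_strengths,
 *		.nstrengths = ARRAY_SIZE(my_strengths),
 *	};
 *	static const struct nand_ecc_caps my_caps = {
 *		.stepinfos      = &my_stepinfo,
 *		.nstepinfos     = 1,
 *		.calc_ecc_bytes = my_calc_ecc_bytes,
 *	};
 *
 *	// with chip->ecc.size and chip->ecc.strength preset (e.g. from the
 *	// device tree), validate them and fill in chip->ecc.bytes; oobavail
 *	// is the OOB space the ECC engine may use, e.g. mtd->oobsize minus
 *	// the bytes reserved for the bad block marker
 *	int ret = nand_check_ecc_caps(chip, &my_caps, oobavail);
 */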
4784*4882a593Smuzhiyun 
4785*4882a593Smuzhiyun /**
4786*4882a593Smuzhiyun  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
4787*4882a593Smuzhiyun  * @chip: nand chip info structure
4788*4882a593Smuzhiyun  * @caps: ECC engine caps info structure
4789*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
4790*4882a593Smuzhiyun  *
4791*4882a593Smuzhiyun  * If a chip's ECC requirement is provided, try to meet it with the least
4792*4882a593Smuzhiyun  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
4793*4882a593Smuzhiyun  * On success, the chosen ECC settings are set.
4794*4882a593Smuzhiyun  */
4795*4882a593Smuzhiyun int nand_match_ecc_req(struct nand_chip *chip,
4796*4882a593Smuzhiyun 		       const struct nand_ecc_caps *caps, int oobavail)
4797*4882a593Smuzhiyun {
4798*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4799*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
4800*4882a593Smuzhiyun 	int req_step = chip->ecc_step_ds;
4801*4882a593Smuzhiyun 	int req_strength = chip->ecc_strength_ds;
4802*4882a593Smuzhiyun 	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
4803*4882a593Smuzhiyun 	int best_step, best_strength, best_ecc_bytes;
4804*4882a593Smuzhiyun 	int best_ecc_bytes_total = INT_MAX;
4805*4882a593Smuzhiyun 	int i, j;
4806*4882a593Smuzhiyun 
4807*4882a593Smuzhiyun 	if (WARN_ON(oobavail < 0))
4808*4882a593Smuzhiyun 		return -EINVAL;
4809*4882a593Smuzhiyun 
4810*4882a593Smuzhiyun 	/* No information provided by the NAND chip */
4811*4882a593Smuzhiyun 	if (!req_step || !req_strength)
4812*4882a593Smuzhiyun 		return -ENOTSUPP;
4813*4882a593Smuzhiyun 
4814*4882a593Smuzhiyun 	/* number of correctable bits the chip requires in a page */
4815*4882a593Smuzhiyun 	req_corr = mtd->writesize / req_step * req_strength;
4816*4882a593Smuzhiyun 
4817*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
4818*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
4819*4882a593Smuzhiyun 		step_size = stepinfo->stepsize;
4820*4882a593Smuzhiyun 
4821*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
4822*4882a593Smuzhiyun 			strength = stepinfo->strengths[j];
4823*4882a593Smuzhiyun 
4824*4882a593Smuzhiyun 			/*
4825*4882a593Smuzhiyun 			 * If both step size and strength are smaller than the
4826*4882a593Smuzhiyun 			 * chip's requirement, it is not easy to compare the
4827*4882a593Smuzhiyun 			 * resulting reliability.
4828*4882a593Smuzhiyun 			 */
4829*4882a593Smuzhiyun 			if (step_size < req_step && strength < req_strength)
4830*4882a593Smuzhiyun 				continue;
4831*4882a593Smuzhiyun 
4832*4882a593Smuzhiyun 			if (mtd->writesize % step_size)
4833*4882a593Smuzhiyun 				continue;
4834*4882a593Smuzhiyun 
4835*4882a593Smuzhiyun 			nsteps = mtd->writesize / step_size;
4836*4882a593Smuzhiyun 
4837*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4838*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
4839*4882a593Smuzhiyun 				continue;
4840*4882a593Smuzhiyun 			ecc_bytes_total = ecc_bytes * nsteps;
4841*4882a593Smuzhiyun 
4842*4882a593Smuzhiyun 			if (ecc_bytes_total > oobavail ||
4843*4882a593Smuzhiyun 			    strength * nsteps < req_corr)
4844*4882a593Smuzhiyun 				continue;
4845*4882a593Smuzhiyun 
4846*4882a593Smuzhiyun 			/*
4847*4882a593Smuzhiyun 			 * We assume the best choice is the one that meets the
4848*4882a593Smuzhiyun 			 * chip's requirement with the fewest ECC bytes.
4849*4882a593Smuzhiyun 			 */
4850*4882a593Smuzhiyun 			if (ecc_bytes_total < best_ecc_bytes_total) {
4851*4882a593Smuzhiyun 				best_ecc_bytes_total = ecc_bytes_total;
4852*4882a593Smuzhiyun 				best_step = step_size;
4853*4882a593Smuzhiyun 				best_strength = strength;
4854*4882a593Smuzhiyun 				best_ecc_bytes = ecc_bytes;
4855*4882a593Smuzhiyun 			}
4856*4882a593Smuzhiyun 		}
4857*4882a593Smuzhiyun 	}
4858*4882a593Smuzhiyun 
4859*4882a593Smuzhiyun 	if (best_ecc_bytes_total == INT_MAX)
4860*4882a593Smuzhiyun 		return -ENOTSUPP;
4861*4882a593Smuzhiyun 
4862*4882a593Smuzhiyun 	chip->ecc.size = best_step;
4863*4882a593Smuzhiyun 	chip->ecc.strength = best_strength;
4864*4882a593Smuzhiyun 	chip->ecc.bytes = best_ecc_bytes;
4865*4882a593Smuzhiyun 
4866*4882a593Smuzhiyun 	return 0;
4867*4882a593Smuzhiyun }
4868*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_match_ecc_req);
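
/*
 * Worked example (numbers illustrative, using the my_caps sketch above):
 * a chip reporting ecc_step_ds = 512 and ecc_strength_ds = 8 on a 4096-byte
 * page needs req_corr = 4096 / 512 * 8 = 64 correctable bits per page.
 * Strength 4 gives 8 steps * 4 = 32 bits per page and is rejected;
 * strength 8 gives 64 bits per page at 8 * 13 = 104 ECC bytes, so it is
 * chosen provided those 104 bytes fit in oobavail.
 */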
4869*4882a593Smuzhiyun 
4870*4882a593Smuzhiyun /**
4871*4882a593Smuzhiyun  * nand_maximize_ecc - choose the max ECC strength available
4872*4882a593Smuzhiyun  * @chip: nand chip info structure
4873*4882a593Smuzhiyun  * @caps: ECC engine caps info structure
4874*4882a593Smuzhiyun  * @oobavail: OOB size that the ECC engine can use
4875*4882a593Smuzhiyun  *
4876*4882a593Smuzhiyun  * Choose the max ECC strength that is supported on the controller, and can fit
4877*4882a593Smuzhiyun  * within the chip's OOB.  On success, the chosen ECC settings are set.
4878*4882a593Smuzhiyun  */
4879*4882a593Smuzhiyun int nand_maximize_ecc(struct nand_chip *chip,
4880*4882a593Smuzhiyun 		      const struct nand_ecc_caps *caps, int oobavail)
4881*4882a593Smuzhiyun {
4882*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
4883*4882a593Smuzhiyun 	const struct nand_ecc_step_info *stepinfo;
4884*4882a593Smuzhiyun 	int step_size, strength, nsteps, ecc_bytes, corr;
4885*4882a593Smuzhiyun 	int best_corr = 0;
4886*4882a593Smuzhiyun 	int best_step = 0;
4887*4882a593Smuzhiyun 	int best_strength, best_ecc_bytes;
4888*4882a593Smuzhiyun 	int i, j;
4889*4882a593Smuzhiyun 
4890*4882a593Smuzhiyun 	if (WARN_ON(oobavail < 0))
4891*4882a593Smuzhiyun 		return -EINVAL;
4892*4882a593Smuzhiyun 
4893*4882a593Smuzhiyun 	for (i = 0; i < caps->nstepinfos; i++) {
4894*4882a593Smuzhiyun 		stepinfo = &caps->stepinfos[i];
4895*4882a593Smuzhiyun 		step_size = stepinfo->stepsize;
4896*4882a593Smuzhiyun 
4897*4882a593Smuzhiyun 		/* If chip->ecc.size is already set, respect it */
4898*4882a593Smuzhiyun 		if (chip->ecc.size && step_size != chip->ecc.size)
4899*4882a593Smuzhiyun 			continue;
4900*4882a593Smuzhiyun 
4901*4882a593Smuzhiyun 		for (j = 0; j < stepinfo->nstrengths; j++) {
4902*4882a593Smuzhiyun 			strength = stepinfo->strengths[j];
4903*4882a593Smuzhiyun 
4904*4882a593Smuzhiyun 			if (mtd->writesize % step_size)
4905*4882a593Smuzhiyun 				continue;
4906*4882a593Smuzhiyun 
4907*4882a593Smuzhiyun 			nsteps = mtd->writesize / step_size;
4908*4882a593Smuzhiyun 
4909*4882a593Smuzhiyun 			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
4910*4882a593Smuzhiyun 			if (WARN_ON_ONCE(ecc_bytes < 0))
4911*4882a593Smuzhiyun 				continue;
4912*4882a593Smuzhiyun 
4913*4882a593Smuzhiyun 			if (ecc_bytes * nsteps > oobavail)
4914*4882a593Smuzhiyun 				continue;
4915*4882a593Smuzhiyun 
4916*4882a593Smuzhiyun 			corr = strength * nsteps;
4917*4882a593Smuzhiyun 
4918*4882a593Smuzhiyun 			/*
4919*4882a593Smuzhiyun 			 * If the number of correctable bits is the same,
4920*4882a593Smuzhiyun 			 * bigger step_size has more reliability.
4921*4882a593Smuzhiyun 			 */
4922*4882a593Smuzhiyun 			if (corr > best_corr ||
4923*4882a593Smuzhiyun 			    (corr == best_corr && step_size > best_step)) {
4924*4882a593Smuzhiyun 				best_corr = corr;
4925*4882a593Smuzhiyun 				best_step = step_size;
4926*4882a593Smuzhiyun 				best_strength = strength;
4927*4882a593Smuzhiyun 				best_ecc_bytes = ecc_bytes;
4928*4882a593Smuzhiyun 			}
4929*4882a593Smuzhiyun 		}
4930*4882a593Smuzhiyun 	}
4931*4882a593Smuzhiyun 
4932*4882a593Smuzhiyun 	if (!best_corr)
4933*4882a593Smuzhiyun 		return -ENOTSUPP;
4934*4882a593Smuzhiyun 
4935*4882a593Smuzhiyun 	chip->ecc.size = best_step;
4936*4882a593Smuzhiyun 	chip->ecc.strength = best_strength;
4937*4882a593Smuzhiyun 	chip->ecc.bytes = best_ecc_bytes;
4938*4882a593Smuzhiyun 
4939*4882a593Smuzhiyun 	return 0;
4940*4882a593Smuzhiyun }
4941*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(nand_maximize_ecc);
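
/*
 * Sketch of how a controller driver might combine the three helpers above
 * (illustrative only; the selection policy is the driver's choice):
 *
 *	static int my_choose_ecc_conf(struct nand_chip *chip,
 *				      const struct nand_ecc_caps *caps,
 *				      int oobavail)
 *	{
 *		// 1) step size and strength already forced, e.g. via DT
 *		if (chip->ecc.size && chip->ecc.strength)
 *			return nand_check_ecc_caps(chip, caps, oobavail);
 *
 *		// 2) "nand-ecc-maximize" asked for the strongest scheme
 *		if (chip->ecc.options & NAND_ECC_MAXIMIZE)
 *			return nand_maximize_ecc(chip, caps, oobavail);
 *
 *		// 3) otherwise follow the chip's datasheet requirement
 *		return nand_match_ecc_req(chip, caps, oobavail);
 *	}
 */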
4942*4882a593Smuzhiyun 
4943*4882a593Smuzhiyun /*
4944*4882a593Smuzhiyun  * Check if the chip configuration meets the datasheet requirements.
4945*4882a593Smuzhiyun  *
4946*4882a593Smuzhiyun  * If our configuration corrects A bits per B bytes and the minimum
4947*4882a593Smuzhiyun  * required correction level is X bits per Y bytes, then we must ensure
4948*4882a593Smuzhiyun  * both of the following are true:
4949*4882a593Smuzhiyun  *
4950*4882a593Smuzhiyun  * (1) A / B >= X / Y
4951*4882a593Smuzhiyun  * (2) A >= X
4952*4882a593Smuzhiyun  *
4953*4882a593Smuzhiyun  * Requirement (1) ensures we can correct for the required bitflip density.
4954*4882a593Smuzhiyun  * Requirement (2) ensures we can correct even when all bitflips are clumped
4955*4882a593Smuzhiyun  * in the same sector.
4956*4882a593Smuzhiyun  */
4957*4882a593Smuzhiyun static bool nand_ecc_strength_good(struct mtd_info *mtd)
4958*4882a593Smuzhiyun {
4959*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
4960*4882a593Smuzhiyun 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4961*4882a593Smuzhiyun 	int corr, ds_corr;
4962*4882a593Smuzhiyun 
4963*4882a593Smuzhiyun 	if (ecc->size == 0 || chip->ecc_step_ds == 0)
4964*4882a593Smuzhiyun 		/* Not enough information */
4965*4882a593Smuzhiyun 		return true;
4966*4882a593Smuzhiyun 
4967*4882a593Smuzhiyun 	/*
4968*4882a593Smuzhiyun 	 * We get the number of correctable bits per page to compare
4969*4882a593Smuzhiyun 	 * the correction density.
4970*4882a593Smuzhiyun 	 */
4971*4882a593Smuzhiyun 	corr = (mtd->writesize * ecc->strength) / ecc->size;
4972*4882a593Smuzhiyun 	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
4973*4882a593Smuzhiyun 
4974*4882a593Smuzhiyun 	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
4975*4882a593Smuzhiyun }
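
/*
 * Worked example for the check above: with a datasheet requirement of
 * 8 bits per 512 bytes, an engine correcting 4 bits per 256 bytes matches
 * the overall density (requirement (1)) but is still reported as too weak,
 * because 8 bitflips clumped into a single 256-byte step would exceed its
 * 4-bit limit (requirement (2): ecc->strength >= chip->ecc_strength_ds).
 */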
4976*4882a593Smuzhiyun 
4977*4882a593Smuzhiyun static bool invalid_ecc_page_accessors(struct nand_chip *chip)
4978*4882a593Smuzhiyun {
4979*4882a593Smuzhiyun 	struct nand_ecc_ctrl *ecc = &chip->ecc;
4980*4882a593Smuzhiyun 
4981*4882a593Smuzhiyun 	if (nand_standard_page_accessors(ecc))
4982*4882a593Smuzhiyun 		return false;
4983*4882a593Smuzhiyun 
4984*4882a593Smuzhiyun 	/*
4985*4882a593Smuzhiyun 	 * NAND_ECC_CUSTOM_PAGE_ACCESS flag is set, make sure the NAND
4986*4882a593Smuzhiyun 	 * controller driver implements all the page accessors because
4987*4882a593Smuzhiyun 	 * default helpers are not suitable when the core does not
4988*4882a593Smuzhiyun 	 * send the READ0/PAGEPROG commands.
4989*4882a593Smuzhiyun 	 */
4990*4882a593Smuzhiyun 	return (!ecc->read_page || !ecc->write_page ||
4991*4882a593Smuzhiyun 		!ecc->read_page_raw || !ecc->write_page_raw ||
4992*4882a593Smuzhiyun 		(NAND_HAS_SUBPAGE_READ(chip) && !ecc->read_subpage) ||
4993*4882a593Smuzhiyun 		(NAND_HAS_SUBPAGE_WRITE(chip) && !ecc->write_subpage &&
4994*4882a593Smuzhiyun 		 ecc->hwctl && ecc->calculate));
4995*4882a593Smuzhiyun }
4996*4882a593Smuzhiyun 
4997*4882a593Smuzhiyun /**
4998*4882a593Smuzhiyun  * nand_scan_tail - [NAND Interface] Scan for the NAND device
4999*4882a593Smuzhiyun  * @mtd: MTD device structure
5000*4882a593Smuzhiyun  *
5001*4882a593Smuzhiyun  * This is the second phase of the normal nand_scan() function. It fills out
5002*4882a593Smuzhiyun  * all the uninitialized function pointers with the defaults and scans for a
5003*4882a593Smuzhiyun  * bad block table if appropriate.
5004*4882a593Smuzhiyun  */
5005*4882a593Smuzhiyun int nand_scan_tail(struct mtd_info *mtd)
5006*4882a593Smuzhiyun {
5007*4882a593Smuzhiyun 	int i;
5008*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
5009*4882a593Smuzhiyun 	struct nand_ecc_ctrl *ecc = &chip->ecc;
5010*4882a593Smuzhiyun 	struct nand_buffers *nbuf;
5011*4882a593Smuzhiyun 
5012*4882a593Smuzhiyun 	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
5013*4882a593Smuzhiyun 	BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5014*4882a593Smuzhiyun 			!(chip->bbt_options & NAND_BBT_USE_FLASH));
5015*4882a593Smuzhiyun 
5016*4882a593Smuzhiyun 	if (invalid_ecc_page_accessors(chip)) {
5017*4882a593Smuzhiyun 		pr_err("Invalid ECC page accessors setup\n");
5018*4882a593Smuzhiyun 		return -EINVAL;
5019*4882a593Smuzhiyun 	}
5020*4882a593Smuzhiyun 
5021*4882a593Smuzhiyun 	if (!(chip->options & NAND_OWN_BUFFERS)) {
5022*4882a593Smuzhiyun 		nbuf = kzalloc(sizeof(struct nand_buffers), GFP_KERNEL);
		if (!nbuf)
			return -ENOMEM;
5023*4882a593Smuzhiyun 		chip->buffers = nbuf;
5024*4882a593Smuzhiyun 	} else {
5025*4882a593Smuzhiyun 		if (!chip->buffers)
5026*4882a593Smuzhiyun 			return -ENOMEM;
5027*4882a593Smuzhiyun 	}
5028*4882a593Smuzhiyun 
5029*4882a593Smuzhiyun 	/* Set the internal oob buffer location, just after the page data */
5030*4882a593Smuzhiyun 	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
5031*4882a593Smuzhiyun 
5032*4882a593Smuzhiyun 	/*
5033*4882a593Smuzhiyun 	 * If no default placement scheme is given, select an appropriate one.
5034*4882a593Smuzhiyun 	 */
5035*4882a593Smuzhiyun 	if (!ecc->layout && (ecc->mode != NAND_ECC_SOFT_BCH)) {
5036*4882a593Smuzhiyun 		switch (mtd->oobsize) {
5037*4882a593Smuzhiyun #ifndef CONFIG_SYS_NAND_DRIVER_ECC_LAYOUT
5038*4882a593Smuzhiyun 		case 8:
5039*4882a593Smuzhiyun 			ecc->layout = &nand_oob_8;
5040*4882a593Smuzhiyun 			break;
5041*4882a593Smuzhiyun 		case 16:
5042*4882a593Smuzhiyun 			ecc->layout = &nand_oob_16;
5043*4882a593Smuzhiyun 			break;
5044*4882a593Smuzhiyun 		case 64:
5045*4882a593Smuzhiyun 			ecc->layout = &nand_oob_64;
5046*4882a593Smuzhiyun 			break;
5047*4882a593Smuzhiyun 		case 128:
5048*4882a593Smuzhiyun 			ecc->layout = &nand_oob_128;
5049*4882a593Smuzhiyun 			break;
5050*4882a593Smuzhiyun #endif
5051*4882a593Smuzhiyun 		default:
5052*4882a593Smuzhiyun 			pr_warn("No oob scheme defined for oobsize %d\n",
5053*4882a593Smuzhiyun 				   mtd->oobsize);
5054*4882a593Smuzhiyun 			BUG();
5055*4882a593Smuzhiyun 		}
5056*4882a593Smuzhiyun 	}
5057*4882a593Smuzhiyun 
5058*4882a593Smuzhiyun 	if (!chip->write_page)
5059*4882a593Smuzhiyun 		chip->write_page = nand_write_page;
5060*4882a593Smuzhiyun 
5061*4882a593Smuzhiyun 	/*
5062*4882a593Smuzhiyun 	 * Check the ECC mode. If hardware ECC is selected with a step size
5063*4882a593Smuzhiyun 	 * larger than the page (e.g. 3-byte/512-byte ECC on a 256-byte page),
	 * fall back to software ECC.
5064*4882a593Smuzhiyun 	 */
5065*4882a593Smuzhiyun 
5066*4882a593Smuzhiyun 	switch (ecc->mode) {
5067*4882a593Smuzhiyun 	case NAND_ECC_HW_OOB_FIRST:
5068*4882a593Smuzhiyun 		/* Similar to NAND_ECC_HW, but a separate read_page handle */
5069*4882a593Smuzhiyun 		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
5070*4882a593Smuzhiyun 			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
5071*4882a593Smuzhiyun 			BUG();
5072*4882a593Smuzhiyun 		}
5073*4882a593Smuzhiyun 		if (!ecc->read_page)
5074*4882a593Smuzhiyun 			ecc->read_page = nand_read_page_hwecc_oob_first;
5075*4882a593Smuzhiyun 
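		/* fall through to NAND_ECC_HW for the remaining defaults */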
5076*4882a593Smuzhiyun 	case NAND_ECC_HW:
5077*4882a593Smuzhiyun 		/* Use standard hwecc read page function? */
5078*4882a593Smuzhiyun 		if (!ecc->read_page)
5079*4882a593Smuzhiyun 			ecc->read_page = nand_read_page_hwecc;
5080*4882a593Smuzhiyun 		if (!ecc->write_page)
5081*4882a593Smuzhiyun 			ecc->write_page = nand_write_page_hwecc;
5082*4882a593Smuzhiyun 		if (!ecc->read_page_raw)
5083*4882a593Smuzhiyun 			ecc->read_page_raw = nand_read_page_raw;
5084*4882a593Smuzhiyun 		if (!ecc->write_page_raw)
5085*4882a593Smuzhiyun 			ecc->write_page_raw = nand_write_page_raw;
5086*4882a593Smuzhiyun 		if (!ecc->read_oob)
5087*4882a593Smuzhiyun 			ecc->read_oob = nand_read_oob_std;
5088*4882a593Smuzhiyun 		if (!ecc->write_oob)
5089*4882a593Smuzhiyun 			ecc->write_oob = nand_write_oob_std;
5090*4882a593Smuzhiyun 		if (!ecc->read_subpage)
5091*4882a593Smuzhiyun 			ecc->read_subpage = nand_read_subpage;
5092*4882a593Smuzhiyun 		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
5093*4882a593Smuzhiyun 			ecc->write_subpage = nand_write_subpage_hwecc;
5094*4882a593Smuzhiyun 
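		/* fall through: the checks and soft-ECC fallback below apply here too */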
5095*4882a593Smuzhiyun 	case NAND_ECC_HW_SYNDROME:
5096*4882a593Smuzhiyun 		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
5097*4882a593Smuzhiyun 		    (!ecc->read_page ||
5098*4882a593Smuzhiyun 		     ecc->read_page == nand_read_page_hwecc ||
5099*4882a593Smuzhiyun 		     !ecc->write_page ||
5100*4882a593Smuzhiyun 		     ecc->write_page == nand_write_page_hwecc)) {
5101*4882a593Smuzhiyun 			pr_warn("No ECC functions supplied; hardware ECC not possible\n");
5102*4882a593Smuzhiyun 			BUG();
5103*4882a593Smuzhiyun 		}
5104*4882a593Smuzhiyun 		/* Use standard syndrome read/write page function? */
5105*4882a593Smuzhiyun 		if (!ecc->read_page)
5106*4882a593Smuzhiyun 			ecc->read_page = nand_read_page_syndrome;
5107*4882a593Smuzhiyun 		if (!ecc->write_page)
5108*4882a593Smuzhiyun 			ecc->write_page = nand_write_page_syndrome;
5109*4882a593Smuzhiyun 		if (!ecc->read_page_raw)
5110*4882a593Smuzhiyun 			ecc->read_page_raw = nand_read_page_raw_syndrome;
5111*4882a593Smuzhiyun 		if (!ecc->write_page_raw)
5112*4882a593Smuzhiyun 			ecc->write_page_raw = nand_write_page_raw_syndrome;
5113*4882a593Smuzhiyun 		if (!ecc->read_oob)
5114*4882a593Smuzhiyun 			ecc->read_oob = nand_read_oob_syndrome;
5115*4882a593Smuzhiyun 		if (!ecc->write_oob)
5116*4882a593Smuzhiyun 			ecc->write_oob = nand_write_oob_syndrome;
5117*4882a593Smuzhiyun 
5118*4882a593Smuzhiyun 		if (mtd->writesize >= ecc->size) {
5119*4882a593Smuzhiyun 			if (!ecc->strength) {
5120*4882a593Smuzhiyun 				pr_warn("Driver must set ecc.strength when using hardware ECC\n");
5121*4882a593Smuzhiyun 				BUG();
5122*4882a593Smuzhiyun 			}
5123*4882a593Smuzhiyun 			break;
5124*4882a593Smuzhiyun 		}
5125*4882a593Smuzhiyun 		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5126*4882a593Smuzhiyun 			ecc->size, mtd->writesize);
5127*4882a593Smuzhiyun 		ecc->mode = NAND_ECC_SOFT;
5128*4882a593Smuzhiyun 
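		/* fall through to the software ECC setup */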
5129*4882a593Smuzhiyun 	case NAND_ECC_SOFT:
5130*4882a593Smuzhiyun 		ecc->calculate = nand_calculate_ecc;
5131*4882a593Smuzhiyun 		ecc->correct = nand_correct_data;
5132*4882a593Smuzhiyun 		ecc->read_page = nand_read_page_swecc;
5133*4882a593Smuzhiyun 		ecc->read_subpage = nand_read_subpage;
5134*4882a593Smuzhiyun 		ecc->write_page = nand_write_page_swecc;
5135*4882a593Smuzhiyun 		ecc->read_page_raw = nand_read_page_raw;
5136*4882a593Smuzhiyun 		ecc->write_page_raw = nand_write_page_raw;
5137*4882a593Smuzhiyun 		ecc->read_oob = nand_read_oob_std;
5138*4882a593Smuzhiyun 		ecc->write_oob = nand_write_oob_std;
5139*4882a593Smuzhiyun 		if (!ecc->size)
5140*4882a593Smuzhiyun 			ecc->size = 256;
5141*4882a593Smuzhiyun 		ecc->bytes = 3;
5142*4882a593Smuzhiyun 		ecc->strength = 1;
5143*4882a593Smuzhiyun 		break;
5144*4882a593Smuzhiyun 
5145*4882a593Smuzhiyun 	case NAND_ECC_SOFT_BCH:
5146*4882a593Smuzhiyun 		if (!mtd_nand_has_bch()) {
5147*4882a593Smuzhiyun 			pr_warn("CONFIG_MTD_NAND_ECC_BCH not enabled\n");
5148*4882a593Smuzhiyun 			BUG();
5149*4882a593Smuzhiyun 		}
5150*4882a593Smuzhiyun 		ecc->calculate = nand_bch_calculate_ecc;
5151*4882a593Smuzhiyun 		ecc->correct = nand_bch_correct_data;
5152*4882a593Smuzhiyun 		ecc->read_page = nand_read_page_swecc;
5153*4882a593Smuzhiyun 		ecc->read_subpage = nand_read_subpage;
5154*4882a593Smuzhiyun 		ecc->write_page = nand_write_page_swecc;
5155*4882a593Smuzhiyun 		ecc->read_page_raw = nand_read_page_raw;
5156*4882a593Smuzhiyun 		ecc->write_page_raw = nand_write_page_raw;
5157*4882a593Smuzhiyun 		ecc->read_oob = nand_read_oob_std;
5158*4882a593Smuzhiyun 		ecc->write_oob = nand_write_oob_std;
5159*4882a593Smuzhiyun 		/*
5160*4882a593Smuzhiyun 		 * Board driver should supply ecc.size and ecc.strength values
5161*4882a593Smuzhiyun 		 * to select how many bits are correctable. Otherwise, default
5162*4882a593Smuzhiyun 		 * to 4 bits for large page devices.
5163*4882a593Smuzhiyun 		 */
5164*4882a593Smuzhiyun 		if (!ecc->size && (mtd->oobsize >= 64)) {
5165*4882a593Smuzhiyun 			ecc->size = 512;
5166*4882a593Smuzhiyun 			ecc->strength = 4;
5167*4882a593Smuzhiyun 		}
5168*4882a593Smuzhiyun 
5169*4882a593Smuzhiyun 		/* See nand_bch_init() for details. */
5170*4882a593Smuzhiyun 		ecc->bytes = 0;
5171*4882a593Smuzhiyun 		ecc->priv = nand_bch_init(mtd);
5172*4882a593Smuzhiyun 		if (!ecc->priv) {
5173*4882a593Smuzhiyun 			pr_warn("BCH ECC initialization failed!\n");
5174*4882a593Smuzhiyun 			BUG();
5175*4882a593Smuzhiyun 		}
5176*4882a593Smuzhiyun 		break;
5177*4882a593Smuzhiyun 
5178*4882a593Smuzhiyun 	case NAND_ECC_NONE:
5179*4882a593Smuzhiyun 		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
5180*4882a593Smuzhiyun 		ecc->read_page = nand_read_page_raw;
5181*4882a593Smuzhiyun 		ecc->write_page = nand_write_page_raw;
5182*4882a593Smuzhiyun 		ecc->read_oob = nand_read_oob_std;
5183*4882a593Smuzhiyun 		ecc->read_page_raw = nand_read_page_raw;
5184*4882a593Smuzhiyun 		ecc->write_page_raw = nand_write_page_raw;
5185*4882a593Smuzhiyun 		ecc->write_oob = nand_write_oob_std;
5186*4882a593Smuzhiyun 		ecc->size = mtd->writesize;
5187*4882a593Smuzhiyun 		ecc->bytes = 0;
5188*4882a593Smuzhiyun 		ecc->strength = 0;
5189*4882a593Smuzhiyun 		break;
5190*4882a593Smuzhiyun 
5191*4882a593Smuzhiyun 	default:
5192*4882a593Smuzhiyun 		pr_warn("Invalid NAND_ECC_MODE %d\n", ecc->mode);
5193*4882a593Smuzhiyun 		BUG();
5194*4882a593Smuzhiyun 	}
5195*4882a593Smuzhiyun 
5196*4882a593Smuzhiyun 	/* For many systems, the standard OOB accessors also work for raw access */
5197*4882a593Smuzhiyun 	if (!ecc->read_oob_raw)
5198*4882a593Smuzhiyun 		ecc->read_oob_raw = ecc->read_oob;
5199*4882a593Smuzhiyun 	if (!ecc->write_oob_raw)
5200*4882a593Smuzhiyun 		ecc->write_oob_raw = ecc->write_oob;
5201*4882a593Smuzhiyun 
5202*4882a593Smuzhiyun 	/*
5203*4882a593Smuzhiyun 	 * The number of bytes available for a client to place data into
5204*4882a593Smuzhiyun 	 * the out of band area.
5205*4882a593Smuzhiyun 	 */
5206*4882a593Smuzhiyun 	mtd->oobavail = 0;
5207*4882a593Smuzhiyun 	if (ecc->layout) {
5208*4882a593Smuzhiyun 		for (i = 0; ecc->layout->oobfree[i].length; i++)
5209*4882a593Smuzhiyun 			mtd->oobavail += ecc->layout->oobfree[i].length;
5210*4882a593Smuzhiyun 	}
5211*4882a593Smuzhiyun 
5212*4882a593Smuzhiyun 	/* ECC sanity check: warn if it's too weak */
5213*4882a593Smuzhiyun 	if (!nand_ecc_strength_good(mtd))
5214*4882a593Smuzhiyun 		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
5215*4882a593Smuzhiyun 			mtd->name);
5216*4882a593Smuzhiyun 
5217*4882a593Smuzhiyun 	/*
5218*4882a593Smuzhiyun 	 * Set the number of read / write steps for one page depending on ECC
5219*4882a593Smuzhiyun 	 * mode.
5220*4882a593Smuzhiyun 	 */
5221*4882a593Smuzhiyun 	ecc->steps = mtd->writesize / ecc->size;
5222*4882a593Smuzhiyun 	if (ecc->steps * ecc->size != mtd->writesize) {
5223*4882a593Smuzhiyun 		pr_warn("Invalid ECC parameters\n");
5224*4882a593Smuzhiyun 		BUG();
5225*4882a593Smuzhiyun 	}
5226*4882a593Smuzhiyun 	ecc->total = ecc->steps * ecc->bytes;
5227*4882a593Smuzhiyun 
5228*4882a593Smuzhiyun 	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5229*4882a593Smuzhiyun 	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5230*4882a593Smuzhiyun 		switch (ecc->steps) {
5231*4882a593Smuzhiyun 		case 2:
5232*4882a593Smuzhiyun 			mtd->subpage_sft = 1;
5233*4882a593Smuzhiyun 			break;
5234*4882a593Smuzhiyun 		case 4:
5235*4882a593Smuzhiyun 		case 8:
5236*4882a593Smuzhiyun 		case 16:
5237*4882a593Smuzhiyun 			mtd->subpage_sft = 2;
5238*4882a593Smuzhiyun 			break;
5239*4882a593Smuzhiyun 		}
5240*4882a593Smuzhiyun 	}
5241*4882a593Smuzhiyun 	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
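	/*
	 * Example: a 2048-byte SLC page with four 512-byte ECC steps gets
	 * subpage_sft = 2, i.e. a 512-byte subpage write granularity.
	 */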
5242*4882a593Smuzhiyun 
5243*4882a593Smuzhiyun 	/* Initialize state */
5244*4882a593Smuzhiyun 	chip->state = FL_READY;
5245*4882a593Smuzhiyun 
5246*4882a593Smuzhiyun 	/* Invalidate the pagebuffer reference */
5247*4882a593Smuzhiyun 	chip->pagebuf = -1;
5248*4882a593Smuzhiyun 
5249*4882a593Smuzhiyun 	/* Large page NAND with SOFT_ECC should support subpage reads */
5250*4882a593Smuzhiyun 	switch (ecc->mode) {
5251*4882a593Smuzhiyun 	case NAND_ECC_SOFT:
5252*4882a593Smuzhiyun 	case NAND_ECC_SOFT_BCH:
5253*4882a593Smuzhiyun 		if (chip->page_shift > 9)
5254*4882a593Smuzhiyun 			chip->options |= NAND_SUBPAGE_READ;
5255*4882a593Smuzhiyun 		break;
5256*4882a593Smuzhiyun 
5257*4882a593Smuzhiyun 	default:
5258*4882a593Smuzhiyun 		break;
5259*4882a593Smuzhiyun 	}
5260*4882a593Smuzhiyun 
5261*4882a593Smuzhiyun 	/* Fill in remaining MTD driver data */
5262*4882a593Smuzhiyun 	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
5263*4882a593Smuzhiyun 	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
5264*4882a593Smuzhiyun 						MTD_CAP_NANDFLASH;
5265*4882a593Smuzhiyun 	mtd->_erase = nand_erase;
5266*4882a593Smuzhiyun 	mtd->_panic_write = panic_nand_write;
5267*4882a593Smuzhiyun 	mtd->_read_oob = nand_read_oob;
5268*4882a593Smuzhiyun 	mtd->_write_oob = nand_write_oob;
5269*4882a593Smuzhiyun 	mtd->_sync = nand_sync;
5270*4882a593Smuzhiyun 	mtd->_lock = NULL;
5271*4882a593Smuzhiyun 	mtd->_unlock = NULL;
5272*4882a593Smuzhiyun 	mtd->_block_isreserved = nand_block_isreserved;
5273*4882a593Smuzhiyun 	mtd->_block_isbad = nand_block_isbad;
5274*4882a593Smuzhiyun 	mtd->_block_markbad = nand_block_markbad;
5275*4882a593Smuzhiyun 	mtd->writebufsize = mtd->writesize;
5276*4882a593Smuzhiyun 
5277*4882a593Smuzhiyun 	/* propagate ecc info to mtd_info */
5278*4882a593Smuzhiyun 	mtd->ecclayout = ecc->layout;
5279*4882a593Smuzhiyun 	mtd->ecc_strength = ecc->strength;
5280*4882a593Smuzhiyun 	mtd->ecc_step_size = ecc->size;
5281*4882a593Smuzhiyun 	/*
5282*4882a593Smuzhiyun 	 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
5283*4882a593Smuzhiyun 	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5284*4882a593Smuzhiyun 	 * properly set.
5285*4882a593Smuzhiyun 	 */
5286*4882a593Smuzhiyun 	if (!mtd->bitflip_threshold)
5287*4882a593Smuzhiyun 		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
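	/* e.g. ecc_strength = 8 yields a default bitflip_threshold of 6 */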
5288*4882a593Smuzhiyun 
5289*4882a593Smuzhiyun 	return 0;
5290*4882a593Smuzhiyun }
5291*4882a593Smuzhiyun EXPORT_SYMBOL(nand_scan_tail);
5292*4882a593Smuzhiyun 
5293*4882a593Smuzhiyun /**
5294*4882a593Smuzhiyun  * nand_scan - [NAND Interface] Scan for the NAND device
5295*4882a593Smuzhiyun  * @mtd: MTD device structure
5296*4882a593Smuzhiyun  * @maxchips: number of chips to scan for
5297*4882a593Smuzhiyun  *
5298*4882a593Smuzhiyun  * This fills out all the uninitialized function pointers with the defaults.
5299*4882a593Smuzhiyun  * The flash ID is read and the mtd/chip structures are filled with the
5300*4882a593Smuzhiyun  * appropriate values.
5301*4882a593Smuzhiyun  */
5302*4882a593Smuzhiyun int nand_scan(struct mtd_info *mtd, int maxchips)
5303*4882a593Smuzhiyun {
5304*4882a593Smuzhiyun 	int ret;
5305*4882a593Smuzhiyun 
5306*4882a593Smuzhiyun 	ret = nand_scan_ident(mtd, maxchips, NULL);
5307*4882a593Smuzhiyun 	if (!ret)
5308*4882a593Smuzhiyun 		ret = nand_scan_tail(mtd);
5309*4882a593Smuzhiyun 	return ret;
5310*4882a593Smuzhiyun }
5311*4882a593Smuzhiyun EXPORT_SYMBOL(nand_scan);
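
/*
 * Usage sketch (illustrative; how the chip and its controller callbacks are
 * wired up before this call is board specific, and CONFIG_SYS_NAND_MAX_CHIPS
 * is assumed to be the board's usual chip-count setting):
 *
 *	struct mtd_info *mtd = nand_to_mtd(chip);
 *	int ret;
 *
 *	ret = nand_scan(mtd, CONFIG_SYS_NAND_MAX_CHIPS);
 *	if (ret)
 *		return ret;
 *	// mtd is now fully set up and can be registered with the MTD core
 */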
5312*4882a593Smuzhiyun 
5313*4882a593Smuzhiyun MODULE_LICENSE("GPL");
5314*4882a593Smuzhiyun MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5315*4882a593Smuzhiyun MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5316*4882a593Smuzhiyun MODULE_DESCRIPTION("Generic NAND flash driver code");