/*
 * LPC32xx MLC NAND flash controller driver
 *
 * (C) Copyright 2014 3ADEV <http://3adev.com>
 * Written by Albert ARIBAUD <albert.aribaud@3adev.fr>
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * NOTE:
 *
 * The MLC NAND flash controller provides hardware Reed-Solomon ECC
 * covering in- and out-of-band data together. Therefore, in- and out-
 * of-band data must be written together in order to have a valid ECC.
 *
 * Consequently, pages with meaningful in-band data are written with
 * blank (all-ones) out-of-band data and a valid ECC, and any later
 * out-of-band data write will void the ECC.
 *
 * Therefore, code which reads such late-written out-of-band data
 * should not rely on the ECC validity.
 */

#include <common.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clk.h>
#include <asm/arch/sys_proto.h>

/*
 * MLC NAND controller registers.
 */
struct lpc32xx_nand_mlc_registers {
	u8 buff[32768]; /* controller's serial data buffer */
	u8 data[32768]; /* NAND's raw data buffer */
	u32 cmd;
	u32 addr;
	u32 ecc_enc_reg;
	u32 ecc_dec_reg;
	u32 ecc_auto_enc_reg;
	u32 ecc_auto_dec_reg;
	u32 rpr;
	u32 wpr;
	u32 rubp;
	u32 robp;
	u32 sw_wp_add_low;
	u32 sw_wp_add_hig;
	u32 icr;
	u32 time_reg;
	u32 irq_mr;
	u32 irq_sr;
	u32 lock_pr;
	u32 isr;
	u32 ceh;
};

/* LOCK_PR register defines */
#define LOCK_PR_UNLOCK_KEY 0x0000A25E /* Magic unlock value */

/* ICR defines */
#define ICR_LARGE_BLOCKS 0x00000004 /* configure for 2KB blocks */
#define ICR_ADDR4 0x00000002 /* configure for 4-word addrs */

/* CEH defines */
#define CEH_NORMAL_CE 0x00000001 /* do not force CE ON */

/* ISR register defines */
#define ISR_NAND_READY 0x00000001
#define ISR_CONTROLLER_READY 0x00000002
#define ISR_ECC_READY 0x00000004
#define ISR_DECODER_ERRORS(s) ((((s) >> 4) & 3)+1)
#define ISR_DECODER_FAILURE 0x00000040
#define ISR_DECODER_ERROR 0x00000008
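
/*
 * Illustration only (the status value below is assumed, not taken from
 * hardware documentation): an ISR reading of 0x0A has
 * ISR_CONTROLLER_READY and ISR_DECODER_ERROR set with bits [5:4] at 0,
 * so ISR_DECODER_ERRORS(0x0A) evaluates to 1, i.e. one corrected symbol
 * error in the sub-page just decoded.
 */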

/* time-out for NAND chip / controller loops, in us */
#define LPC32X_NAND_TIMEOUT 5000

/*
 * There is a single instance of the NAND MLC controller
 */

static struct lpc32xx_nand_mlc_registers __iomem *lpc32xx_nand_mlc_registers
	= (struct lpc32xx_nand_mlc_registers __iomem *)MLC_NAND_BASE;

#if !defined(CONFIG_SYS_MAX_NAND_CHIPS)
#define CONFIG_SYS_MAX_NAND_CHIPS 1
#endif

#define clkdiv(v, w, o) (((1+(clk/v)) & w) << o)
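
/*
 * Worked example (HCLK rate and config value assumed for illustration
 * only): with clk = 104000000 (104 MHz HCLK) and a board setting of
 * CONFIG_LPC32XX_NAND_MLC_WR_LOW = 83333333 (a 12 ns minimum write-low
 * time expressed as a rate), clkdiv(83333333, 0x0F, 0) computes
 * (1 + 104000000/83333333) & 0x0F = 2 HCLK cycles, placed in bits [3:0]
 * of the timing register.
 */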

/**
 * The OOB data in each small page consist of 6 'free' bytes followed
 * by 10 ECC bytes. To make things easier, when reading a large page,
 * the four small pages' 'free' OOB bytes are grouped in the first
 * 24 bytes of the OOB buffer, while their ECC bytes are grouped in
 * its last 40 bytes.
 *
 * The struct below represents how free vs ecc oob bytes are stored
 * in the buffer.
 *
 * Note: the OOB bytes contain the bad block marker at offsets 0 and 1.
 */

struct lpc32xx_oob {
	struct {
		uint8_t free_oob_bytes[6];
	} free[4];
	struct {
		uint8_t ecc_oob_bytes[10];
	} ecc[4];
};
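
/*
 * Sanity-check sketch (not part of the original driver): the packed
 * layout above must cover exactly the 64-byte OOB area of a 2KB page
 * (4 * 6 free bytes + 4 * 10 ECC bytes). A compile-time check placed
 * inside any function, e.g. lpc32xx_nand_init(), could read:
 *
 *	BUILD_BUG_ON(sizeof(struct lpc32xx_oob) != 64);
 */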

/*
 * Initialize the controller
 */

static void lpc32xx_nand_init(void)
{
	unsigned int clk;

	/* Configure controller for no software write protection, x8 bus
	   width, large block device, and 4 address words */

	/* unlock controller registers with magic key */
	writel(LOCK_PR_UNLOCK_KEY,
	       &lpc32xx_nand_mlc_registers->lock_pr);

	/* enable large blocks and large NANDs */
	writel(ICR_LARGE_BLOCKS | ICR_ADDR4,
	       &lpc32xx_nand_mlc_registers->icr);

	/* Make sure MLC interrupts are disabled */
	writel(0, &lpc32xx_nand_mlc_registers->irq_mr);

	/* Normal chip enable operation */
	writel(CEH_NORMAL_CE,
	       &lpc32xx_nand_mlc_registers->ceh);

	/* Setup NAND timing */
	clk = get_hclk_clk_rate();

	writel(
		clkdiv(CONFIG_LPC32XX_NAND_MLC_TCEA_DELAY, 0x03, 24) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_BUSY_DELAY, 0x1F, 19) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_NAND_TA, 0x07, 16) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_RD_HIGH, 0x0F, 12) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_RD_LOW, 0x0F, 8) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_WR_HIGH, 0x0F, 4) |
		clkdiv(CONFIG_LPC32XX_NAND_MLC_WR_LOW, 0x0F, 0),
		&lpc32xx_nand_mlc_registers->time_reg);
}

#if !defined(CONFIG_SPL_BUILD)

/**
 * lpc32xx_cmd_ctrl - send a command or address byte to the controller
 *
 * Command bytes (CLE) go to the cmd register, address bytes (ALE) to
 * the addr register.
 */

static void lpc32xx_cmd_ctrl(struct mtd_info *mtd, int cmd,
			     unsigned int ctrl)
{
	if (cmd == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(cmd & 0xff, &lpc32xx_nand_mlc_registers->cmd);
	else if (ctrl & NAND_ALE)
		writeb(cmd & 0xff, &lpc32xx_nand_mlc_registers->addr);
}

/**
 * lpc32xx_read_byte - read a byte from the NAND
 * @mtd: MTD device structure
 */

static uint8_t lpc32xx_read_byte(struct mtd_info *mtd)
{
	return readb(&lpc32xx_nand_mlc_registers->data);
}

/**
 * lpc32xx_dev_ready - test if NAND device (actually controller) is ready
 * @mtd: MTD device structure
 */

static int lpc32xx_dev_ready(struct mtd_info *mtd)
{
	/* means *controller* ready for us */
	int status = readl(&lpc32xx_nand_mlc_registers->isr);
	return status & ISR_CONTROLLER_READY;
}

/**
 * ECC layout -- this is needed whatever ECC mode we are using.
 * In a 2KB (4*512B) page, R/S codes occupy 40 (4*10) bytes.
 * To make U-Boot's life easier, we pack 'usable' OOB at the
 * front and R/S ECC at the back.
 */

static struct nand_ecclayout lpc32xx_largepage_ecclayout = {
	.eccbytes = 40,
	.eccpos = {24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
		   34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
		   44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
		   54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
		  },
	.oobfree = {
		/* bytes 0 and 1 are used for the bad block marker */
		{
			.offset = 2,
			.length = 22
		},
	}
};
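
/*
 * Caller-side sketch (assumed usage, not code from this driver): with
 * the layout above, an MTD_OPS_AUTO_OOB transfer only touches the 22
 * 'free' bytes starting at offset 2, so reading the user OOB of one
 * page looks roughly like:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_AUTO_OOB,
 *		.ooblen = 22,
 *		.oobbuf = user_buf,
 *	};
 *	mtd_read_oob(mtd, page_addr, &ops);
 *
 * The 40 R/S ECC bytes at offsets 24..63 stay under controller control.
 */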

/**
 * lpc32xx_read_page_hwecc - read in- and out-of-band data with ECC
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Use large block Auto Decode Read Mode(1) as described in User Manual
 * section 8.6.2.1.
 *
 * The initial Read Mode and Read Start commands are sent by the caller.
 *
 * ECC will be false if out-of-band data has been updated since in-band
 * data was initially written.
 */

static int lpc32xx_read_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int oob_required,
	int page)
{
	unsigned int i, status, timeout, err, max_bitflips = 0;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	/* go through all four small pages */
	for (i = 0; i < 4; i++) {
		/* start auto decode (reads 528 NAND bytes) */
		writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
		/* wait for controller to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_CONTROLLER_READY)
				break;
			udelay(1);
		}
		/* if decoder failed, return failure */
		if (status & ISR_DECODER_FAILURE)
			return -1;
		/* keep count of maximum bitflips performed */
		if (status & ISR_DECODER_ERROR) {
			err = ISR_DECODER_ERRORS(status);
			if (err > max_bitflips)
				max_bitflips = err;
		}
		/* copy first 512 bytes into buffer */
		memcpy(buf+512*i, lpc32xx_nand_mlc_registers->buff, 512);
		/* copy next 6 bytes at front of OOB buffer */
		memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
		/* copy last 10 bytes (R/S ECC) at back of OOB buffer */
		memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->buff, 10);
	}
	return max_bitflips;
}

/**
 * lpc32xx_read_page_raw - read raw (in-band, out-of-band and ECC) data
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Read NAND directly; can read pages with invalid ECC.
 */

static int lpc32xx_read_page_raw(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int oob_required,
	int page)
{
	unsigned int i, status, timeout;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	/* when we get here we've already had the Read Mode(1) */

	/* go through all four small pages */
	for (i = 0; i < 4; i++) {
		/* wait for NAND to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_NAND_READY)
				break;
			udelay(1);
		}
		/* if NAND stalled, return failure */
		if (!(status & ISR_NAND_READY))
			return -1;
		/* copy first 512 bytes into buffer */
		memcpy(buf+512*i, lpc32xx_nand_mlc_registers->data, 512);
		/* copy next 6 bytes at front of OOB buffer */
		memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->data, 6);
		/* copy last 10 bytes (R/S ECC) at back of OOB buffer */
		memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->data, 10);
	}
	return 0;
}

/**
 * lpc32xx_read_oob - read out-of-band data
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to read
 *
 * Read out-of-band data. User Manual section 8.6.4 suggests using Read
 * Mode(3) which the controller will turn into a Read Mode(1) internally
 * but nand_base.c will turn Mode(3) into Mode(0), so let's use Mode(0)
 * directly.
 *
 * ECC covers in- and out-of-band data and was written when out-of-band
 * data was blank. Therefore, if the out-of-band being read here is not
 * blank, then the ECC will be false and the read will return bitflips,
 * even in case of ECC failure where we will return 5 bitflips. The
 * caller should be prepared to handle this.
 */

static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
	int page)
{
	unsigned int i, status, timeout, err, max_bitflips = 0;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	/* No command was sent before calling read_oob() so send one */

	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);

	/* go through all four small pages */
	for (i = 0; i < 4; i++) {
		/* start auto decode (reads 528 NAND bytes) */
		writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
		/* wait for controller to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_CONTROLLER_READY)
				break;
			udelay(1);
		}
		/* if decoder failure, count 'one too many' bitflips */
		if (status & ISR_DECODER_FAILURE)
			max_bitflips = 5;
		/* keep count of maximum bitflips performed */
		if (status & ISR_DECODER_ERROR) {
			err = ISR_DECODER_ERRORS(status);
			if (err > max_bitflips)
				max_bitflips = err;
		}
		/* set read pointer to OOB area */
		writel(0, &lpc32xx_nand_mlc_registers->robp);
		/* copy next 6 bytes at front of OOB buffer */
		memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
		/* copy next 10 bytes (R/S ECC) at back of OOB buffer */
		memcpy(&oob->ecc[i], lpc32xx_nand_mlc_registers->buff, 10);
	}
	return max_bitflips;
}

/**
 * lpc32xx_write_page_hwecc - write in- and out-of-band data with ECC
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Use large block Auto Encode as per User Manual section 8.6.4.
 *
 * The initial Write Serial Input and final Auto Program commands are
 * sent by the caller.
 */

static int lpc32xx_write_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, const uint8_t *buf, int oob_required,
	int page)
{
	unsigned int i, status, timeout;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	/* when we get here we've already had the SEQIN */
	for (i = 0; i < 4; i++) {
		/* start encode (expects 518 writes to buff) */
		writel(0, &lpc32xx_nand_mlc_registers->ecc_enc_reg);
		/* copy first 512 bytes from buffer */
		memcpy(&lpc32xx_nand_mlc_registers->buff, buf+512*i, 512);
		/* copy next 6 bytes from OOB buffer -- excluding ECC */
		memcpy(&lpc32xx_nand_mlc_registers->buff, &oob->free[i], 6);
		/* wait for ECC to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_ECC_READY)
				break;
			udelay(1);
		}
		/* if ECC stalled, return failure */
		if (!(status & ISR_ECC_READY))
			return -1;
		/* Trigger auto encode (writes 528 bytes to NAND) */
		writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_enc_reg);
		/* wait for controller to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_CONTROLLER_READY)
				break;
			udelay(1);
		}
		/* if controller stalled, return error */
		if (!(status & ISR_CONTROLLER_READY))
			return -1;
	}
	return 0;
}
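
/*
 * For context, a sketch of the generic caller (nand_base.c of this
 * U-Boot generation; shown for orientation, not code from this driver):
 * nand_write_page() brackets ecc.write_page() with the commands this
 * function relies on, roughly:
 *
 *	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
 *	chip->ecc.write_page(mtd, chip, buf, oob_required, page);
 *	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
 *	status = chip->waitfunc(mtd, chip);
 */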

/**
 * lpc32xx_write_page_raw - write raw (in-band, out-of-band and ECC) data
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Use large block write but without encode.
 *
 * The initial Write Serial Input and final Auto Program commands are
 * sent by the caller.
 *
 * This function will write the full out-of-band data, including the
 * ECC area. Therefore, it can write pages with valid *or* invalid ECC.
 */

static int lpc32xx_write_page_raw(struct mtd_info *mtd,
	struct nand_chip *chip, const uint8_t *buf, int oob_required,
	int page)
{
	unsigned int i;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	/* when we get here we've already had the SEQIN */
	for (i = 0; i < 4; i++) {
		/* copy first 512 bytes from buffer */
		memcpy(lpc32xx_nand_mlc_registers->buff, buf+512*i, 512);
		/* copy next 6 bytes into OOB buffer -- excluding ECC */
		memcpy(lpc32xx_nand_mlc_registers->buff, &oob->free[i], 6);
		/* copy next 10 bytes into OOB buffer -- that is 'ECC' */
		memcpy(lpc32xx_nand_mlc_registers->buff, &oob->ecc[i], 10);
	}
	return 0;
}

/**
 * lpc32xx_write_oob - write out-of-band data
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 *
 * Since ECC covers in- and out-of-band data, writing out-of-band data
 * with ECC will render the page ECC wrong -- or, if the page was blank,
 * then it will produce a good ECC but a later in-band data write will
 * render it wrong.
 *
 * Therefore, do not compute or write any ECC, and always return success.
 *
 * This implies that we do four writes, since non-ECC out-of-band data
 * are not contiguous in a large page.
 */

static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
	int page)
{
	/* update oob on all 4 subpages in sequence */
	unsigned int i, status, timeout;
	struct lpc32xx_oob *oob = (struct lpc32xx_oob *)chip->oob_poi;

	for (i = 0; i < 4; i++) {
		/* start data input */
		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x200+0x210*i, page);
		/* copy 6 non-ECC out-of-band bytes directly into NAND */
		memcpy(lpc32xx_nand_mlc_registers->data, &oob->free[i], 6);
		/* program page */
		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
		/* wait for NAND to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_NAND_READY)
				break;
			udelay(1);
		}
		/* if NAND stalled, return error */
		if (!(status & ISR_NAND_READY))
			return -1;
	}
	return 0;
}

/**
 * lpc32xx_waitfunc - wait until a command is done
 * @mtd: MTD device structure
 * @chip: NAND chip structure
 *
 * Wait for controller and FLASH to both be ready.
 */

static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	int status;
	unsigned int timeout;
	/* wait until both controller and NAND are ready */
	for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
		status = readl(&lpc32xx_nand_mlc_registers->isr);
		if ((status & (ISR_CONTROLLER_READY | ISR_NAND_READY))
		    == (ISR_CONTROLLER_READY | ISR_NAND_READY))
			break;
		udelay(1);
	}
	/* if controller or NAND stalled, return error */
	if ((status & (ISR_CONTROLLER_READY | ISR_NAND_READY))
	    != (ISR_CONTROLLER_READY | ISR_NAND_READY))
		return -1;
	/* write NAND status command */
	writel(NAND_CMD_STATUS, &lpc32xx_nand_mlc_registers->cmd);
	/* read back status and return it */
	return readb(&lpc32xx_nand_mlc_registers->data);
}

/*
 * We are self-initializing, so we need our own chip struct
 */

static struct nand_chip lpc32xx_chip;

/*
 * Initialize the controller
 */

void board_nand_init(void)
{
	struct mtd_info *mtd = nand_to_mtd(&lpc32xx_chip);
	int ret;

	/* Set all BOARDSPECIFIC (actually core-specific) fields */

	lpc32xx_chip.IO_ADDR_R = &lpc32xx_nand_mlc_registers->buff;
	lpc32xx_chip.IO_ADDR_W = &lpc32xx_nand_mlc_registers->buff;
	lpc32xx_chip.cmd_ctrl = lpc32xx_cmd_ctrl;
	/* do not set init_size: nand_base.c will read sizes from chip */
	lpc32xx_chip.dev_ready = lpc32xx_dev_ready;
	/* do not set setup_read_retry: this is NAND-chip-specific */
	/* do not set chip_delay: we have dev_ready defined. */
	lpc32xx_chip.options |= NAND_NO_SUBPAGE_WRITE;

	/* Set needed ECC fields */

	lpc32xx_chip.ecc.mode = NAND_ECC_HW;
	lpc32xx_chip.ecc.layout = &lpc32xx_largepage_ecclayout;
	lpc32xx_chip.ecc.size = 512;
	lpc32xx_chip.ecc.bytes = 10;
	lpc32xx_chip.ecc.strength = 4;
	lpc32xx_chip.ecc.read_page = lpc32xx_read_page_hwecc;
	lpc32xx_chip.ecc.read_page_raw = lpc32xx_read_page_raw;
	lpc32xx_chip.ecc.write_page = lpc32xx_write_page_hwecc;
	lpc32xx_chip.ecc.write_page_raw = lpc32xx_write_page_raw;
	lpc32xx_chip.ecc.read_oob = lpc32xx_read_oob;
	lpc32xx_chip.ecc.write_oob = lpc32xx_write_oob;
	lpc32xx_chip.waitfunc = lpc32xx_waitfunc;

	lpc32xx_chip.read_byte = lpc32xx_read_byte; /* FIXME: NEEDED? */

	/* BBT options: read from last two pages */
	lpc32xx_chip.bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_LASTBLOCK
		| NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE
		| NAND_BBT_WRITE;

	/* Initialize NAND interface */
	lpc32xx_nand_init();

	/* identify chip */
	ret = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_CHIPS, NULL);
	if (ret) {
		pr_err("nand_scan_ident returned %i", ret);
		return;
	}

	/* finish scanning the chip */
	ret = nand_scan_tail(mtd);
	if (ret) {
		pr_err("nand_scan_tail returned %i", ret);
		return;
	}

	/* chip is good, register it */
	ret = nand_register(0, mtd);
	if (ret)
		pr_err("nand_register returned %i", ret);
}

#else /* defined(CONFIG_SPL_BUILD) */

void nand_init(void)
{
	/* enable NAND controller */
	lpc32xx_mlc_nand_init();
	/* initialize NAND controller */
	lpc32xx_nand_init();
}

void nand_deselect(void)
{
	/* nothing to do, but SPL requires this function */
}

static int read_single_page(uint8_t *dest, int page,
	struct lpc32xx_oob *oob)
{
	int status, i, timeout, err, max_bitflips = 0;

	/* enter read mode */
	writel(NAND_CMD_READ0, &lpc32xx_nand_mlc_registers->cmd);
	/* send column (LSB then MSB) and page (LSB to MSB) */
	writel(0, &lpc32xx_nand_mlc_registers->addr);
	writel(0, &lpc32xx_nand_mlc_registers->addr);
	writel(page & 0xff, &lpc32xx_nand_mlc_registers->addr);
	writel((page>>8) & 0xff, &lpc32xx_nand_mlc_registers->addr);
	writel((page>>16) & 0xff, &lpc32xx_nand_mlc_registers->addr);
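	/*
	 * Worked example (offset chosen for illustration only): for
	 * offs = 0x60000, page = 0x60000 / 2048 = 0xC0, so the five
	 * address cycles above send 0x00, 0x00, 0xC0, 0x00, 0x00 --
	 * column 0 of page 192.
	 */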
	/* start reading */
	writel(NAND_CMD_READSTART, &lpc32xx_nand_mlc_registers->cmd);

	/* large page auto decode read */
	for (i = 0; i < 4; i++) {
		/* start auto decode (reads 528 NAND bytes) */
		writel(0, &lpc32xx_nand_mlc_registers->ecc_auto_dec_reg);
		/* wait for controller to return to ready state */
		for (timeout = LPC32X_NAND_TIMEOUT; timeout; timeout--) {
			status = readl(&lpc32xx_nand_mlc_registers->isr);
			if (status & ISR_CONTROLLER_READY)
				break;
			udelay(1);
		}
		/* if controller stalled, return error */
		if (!(status & ISR_CONTROLLER_READY))
			return -1;
		/* if decoder failure, return error */
		if (status & ISR_DECODER_FAILURE)
			return -1;
		/* keep count of maximum bitflips performed */
		if (status & ISR_DECODER_ERROR) {
			err = ISR_DECODER_ERRORS(status);
			if (err > max_bitflips)
				max_bitflips = err;
		}
		/* copy first 512 bytes into buffer */
		memcpy(dest+i*512, lpc32xx_nand_mlc_registers->buff, 512);
		/* copy next 6 bytes into OOB buffer */
		memcpy(&oob->free[i], lpc32xx_nand_mlc_registers->buff, 6);
	}
	return max_bitflips;
}

/*
 * Load U-Boot signed image.
 * This loads an image from NAND, skipping bad blocks.
 * A block is declared bad if at least one of its readable pages has
 * a bad block marker in either of the first two bytes of its OOB.
 * If all pages in a block are unreadable, the block is considered
 * bad (i.e., assumed not to be part of the image) and skipped.
 *
 * IMPORTANT NOTE:
 *
 * If the first block of the image is fully unreadable, it will be
 * ignored and skipped as if it had been marked bad. If it was not
 * actually marked bad at the time of writing the image, the resulting
 * image loaded will lack a header and magic number. It could thus be
 * considered as a raw, headerless, image and SPL might erroneously
 * jump into it.
 *
 * In order to avoid this risk, LPC32XX-based boards which use this
 * driver MUST define CONFIG_SPL_PANIC_ON_RAW_IMAGE.
 */

#define BYTES_PER_PAGE 2048
#define PAGES_PER_BLOCK 64
#define BYTES_PER_BLOCK (BYTES_PER_PAGE * PAGES_PER_BLOCK)
#define PAGES_PER_CHIP_MAX 524288
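
/*
 * Usage sketch (assumed caller, as in the common U-Boot SPL NAND path;
 * the CONFIG_SYS_NAND_U_BOOT_* names come from that framework, not from
 * this file): after nand_init(), SPL loads the payload with something
 * like
 *
 *	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
 *			    CONFIG_SYS_NAND_U_BOOT_SIZE,
 *			    (void *)CONFIG_SYS_NAND_U_BOOT_DST);
 *
 * offs is expected to be block-aligned so that the bad-block skipping
 * below operates on whole blocks.
 */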

int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
{
	int bytes_left = size;
	int pages_left = DIV_ROUND_UP(size, BYTES_PER_PAGE);
	int blocks_left = DIV_ROUND_UP(size, BYTES_PER_BLOCK);
	int block = 0;
	int page = offs / BYTES_PER_PAGE;
	/* perform reads block by block */
	while (blocks_left) {
		/* destination for data read from this block */
		void *block_page_dst = dst;
		/* read at most one block, possibly less */
		int block_bytes_left = bytes_left;
		if (block_bytes_left > BYTES_PER_BLOCK)
			block_bytes_left = BYTES_PER_BLOCK;
		/* keep track of good, failed, and "bad" pages */
		int block_pages_good = 0;
		int block_pages_bad = 0;
		int block_pages_err = 0;
		/* we shall read a full block of pages, maybe less */
		int block_pages_left = pages_left;
		if (block_pages_left > PAGES_PER_BLOCK)
			block_pages_left = PAGES_PER_BLOCK;
		int block_pages = block_pages_left;
		int block_page = page;
		/* while pages are left and the block is not known as bad */
		while ((block_pages > 0) && (block_pages_bad == 0)) {
			/* we will read OOB, too, for bad block markers */
			struct lpc32xx_oob oob;
			/* read page */
			int res = read_single_page(block_page_dst, block_page,
						   &oob);
			/* count readable pages */
			if (res >= 0) {
				/* this page is readable */
				block_pages_good++;
				/* check it for a bad block marker */
				if ((oob.free[0].free_oob_bytes[0] != 0xff) ||
				    (oob.free[0].free_oob_bytes[1] != 0xff))
					block_pages_bad++;
			} else
				/* count errors */
				block_pages_err++;
			/* we're done with this page */
			block_page++;
			block_page_dst += BYTES_PER_PAGE;
			if (block_pages)
				block_pages--;
		}
		/* a fully unreadable block is considered bad */
		if (block_pages_good == 0)
			block_pages_bad = block_pages_err;
		/* errors are fatal only in good blocks */
		if ((block_pages_err > 0) && (block_pages_bad == 0))
			return -1;
		/* we keep reads only of good blocks */
		if (block_pages_bad == 0) {
			dst += block_bytes_left;
			bytes_left -= block_bytes_left;
			pages_left -= block_pages_left;
			blocks_left--;
		}
		/* good or bad, we're done with this block */
		block++;
		page += PAGES_PER_BLOCK;
	}

	/* report success */
	return 0;
}

#endif /* CONFIG_SPL_BUILD */