// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for NAND MLC Controller in LPC32xx
 *
 * Author: Roland Stigge <stigge@antcom.de>
 *
 * Copyright © 2011 WORK Microwave GmbH
 * Copyright © 2011, 2012 Roland Stigge
 *
 * NAND Flash Controller Operation:
 * - Read: Auto Decode
 * - Write: Auto Encode
 * - Tested Page Sizes: 2048, 4096
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/mtd/lpc32xx_mlc.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mtd/nand_ecc.h>

#define DRV_NAME "lpc32xx_mlc"

/**********************************************************************
 * MLC NAND controller register offsets
 **********************************************************************/

#define MLC_BUFF(x)			(x + 0x00000)
#define MLC_DATA(x)			(x + 0x08000)
#define MLC_CMD(x)			(x + 0x10000)
#define MLC_ADDR(x)			(x + 0x10004)
#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
#define MLC_RPR(x)			(x + 0x10018)
#define MLC_WPR(x)			(x + 0x1001C)
#define MLC_RUBP(x)			(x + 0x10020)
#define MLC_ROBP(x)			(x + 0x10024)
#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
#define MLC_ICR(x)			(x + 0x10030)
#define MLC_TIME_REG(x)			(x + 0x10034)
#define MLC_IRQ_MR(x)			(x + 0x10038)
#define MLC_IRQ_SR(x)			(x + 0x1003C)
#define MLC_LOCK_PR(x)			(x + 0x10044)
#define MLC_ISR(x)			(x + 0x10048)
#define MLC_CEH(x)			(x + 0x1004C)

/**********************************************************************
 * MLC_CMD bit definitions
 **********************************************************************/
#define MLCCMD_RESET			0xFF

/**********************************************************************
 * MLC_ICR bit definitions
 **********************************************************************/
#define MLCICR_WPROT			(1 << 3)
#define MLCICR_LARGEBLOCK		(1 << 2)
#define MLCICR_LONGADDR			(1 << 1)
#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */

/**********************************************************************
 * MLC_TIME_REG bit definitions
 **********************************************************************/
#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)

/**********************************************************************
 * MLC_IRQ_MR and MLC_IRQ_SR bit definitions
 **********************************************************************/
#define MLCIRQ_NAND_READY		(1 << 5)
#define MLCIRQ_CONTROLLER_READY		(1 << 4)
#define MLCIRQ_DECODE_FAILURE		(1 << 3)
#define MLCIRQ_DECODE_ERROR		(1 << 2)
#define MLCIRQ_ECC_READY		(1 << 1)
#define MLCIRQ_WRPROT_FAULT		(1 << 0)

/**********************************************************************
 * MLC_LOCK_PR bit definitions
 **********************************************************************/
#define MLCLOCKPR_MAGIC			0xA25E

/**********************************************************************
 * MLC_ISR bit definitions
 **********************************************************************/
#define MLCISR_DECODER_FAILURE		(1 << 6)
#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
#define MLCISR_ERRORS_DETECTED		(1 << 3)
#define MLCISR_ECC_READY		(1 << 2)
#define MLCISR_CONTROLLER_READY		(1 << 1)
#define MLCISR_NAND_READY		(1 << 0)

/**********************************************************************
 * MLC_CEH bit definitions
 **********************************************************************/
#define MLCCEH_NORMAL			(1 << 0)

struct lpc32xx_nand_cfg_mlc {
	uint32_t tcea_delay;
	uint32_t busy_delay;
	uint32_t nand_ta;
	uint32_t rd_high;
	uint32_t rd_low;
	uint32_t wr_high;
	uint32_t wr_low;
	int wp_gpio;
	struct mtd_partition *parts;
	unsigned num_parts;
};

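/*
 * OOB layout: the controller handles 16 bytes of OOB per 512-byte subpage.
 * With the 10 ECC bytes per subpage configured in lpc32xx_nand_attach_chip()
 * below, each 16-byte chunk consists of 6 free bytes followed by 10 ECC
 * bytes, e.g. for a 2048-byte page: free bytes at offsets 0-5, 16-21, 32-37,
 * 48-53 and ECC bytes at offsets 6-15, 22-31, 38-47, 54-63.
 */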
static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
	oobregion->length = nand_chip->ecc.bytes;

	return 0;
}

static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *nand_chip = mtd_to_nand(mtd);

	if (section >= nand_chip->ecc.steps)
		return -ERANGE;

	oobregion->offset = 16 * section;
	oobregion->length = 16 - nand_chip->ecc.bytes;

	return 0;
}

static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};

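/*
 * Absolute BBT pages: 524224 and 524160 are the first pages of the last and
 * second-to-last 64-page blocks of a device with 524288 pages in total
 * (e.g. 1 GiB with 2048-byte pages); other chip sizes would need different
 * values here.
 */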
static struct nand_bbt_descr lpc32xx_nand_bbt = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
};

static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
		   NAND_BBT_WRITE,
	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
};

struct lpc32xx_nand_host {
	struct platform_device *pdev;
	struct nand_chip nand_chip;
	struct lpc32xx_mlc_platform_data *pdata;
	struct clk *clk;
	void __iomem *io_base;
	int irq;
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct completion comp_nand;
	struct completion comp_controller;
	uint32_t llptr;
	/*
	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	dma_addr_t oob_buf_phy;
	/*
	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
	 */
	uint8_t *oob_buf;
	/* Physical address of DMA base address */
	dma_addr_t io_base_phy;

	struct completion comp_dma;
	struct dma_chan *dma_chan;
	struct dma_slave_config dma_slave_config;
	struct scatterlist sgl;
	uint8_t *dma_buf;
	uint8_t *dummy_buf;
	int mlcsubpages; /* number of 512-byte subpages */
};

/*
 * Activate/Deactivate DMA Operation:
 *
 * Using the PL080 DMA Controller for transferring the 512-byte subpages
 * instead of doing readl() / writel() in a loop slows it down significantly.
 * Measurements via getnstimeofday() upon 512-byte subpage reads reveal:
 *
 * - readl() of 128 x 32 bits in a loop: ~20us
 * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
 * - DMA read of 512 bytes (32 bit, no bursts): ~100us
 *
 * This applies to the transfer itself. In the DMA case: only the
 * wait_for_completion() (DMA setup _not_ included).
 *
 * Note that the 512-byte subpage transfer is done directly from/to a
 * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
 * 2048-byte page) is spent waiting for the NAND IRQ, anyway. (The NAND
 * controller transferring data between its internal buffer and the NAND
 * chip.)
 *
 * Therefore, using the PL080 DMA is disabled by default, for now.
 */
static int use_dma;

static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
{
	uint32_t clkrate, tmp;

	/* Reset MLC controller */
	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
	udelay(1000);

	/* Get base clock for MLC block */
	clkrate = clk_get_rate(host->clk);
	if (clkrate == 0)
		clkrate = 104000000;

	/* Unlock MLC_ICR
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

	/* Configure MLC Controller: Large Block, 5 Byte Address */
	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
	writel(tmp, MLC_ICR(host->io_base));

	/* Unlock MLC_TIME_REG
	 * (among others, will be locked again automatically) */
	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));

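	/*
	 * The "nxp,*" device tree timing properties are given as frequencies;
	 * each field below is programmed with roughly clkrate / <property>
	 * HCLK cycles, with "+ 1" rounding where the manual asks for it. As
	 * an illustrative example, clkrate = 104 MHz together with
	 * nxp,rd-low = 26000000 would program MLCTIMEREG_RD_LOW with 4
	 * cycles.
	 */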
	/* Compute clock setup values, see LPC and NAND manual */
	tmp = 0;
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
	writel(tmp, MLC_TIME_REG(host->io_base));

	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
	       MLC_IRQ_MR(host->io_base));

	/* Normal nCE operation: nCE controlled by controller */
	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
}

/*
 * Hardware specific access to control lines
 */
static void lpc32xx_nand_cmd_ctrl(struct nand_chip *nand_chip, int cmd,
				  unsigned int ctrl)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if (cmd != NAND_CMD_NONE) {
		if (ctrl & NAND_CLE)
			writel(cmd, MLC_CMD(host->io_base));
		else
			writel(cmd, MLC_ADDR(host->io_base));
	}
}

/*
 * Read Device Ready (NAND device _and_ controller ready)
 */
static int lpc32xx_nand_device_ready(struct nand_chip *nand_chip)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);

	if ((readb(MLC_ISR(host->io_base)) &
	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
		return 1;

	return 0;
}

static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
{
	uint8_t sr;

	/* Clear interrupt flag by reading status */
	sr = readb(MLC_IRQ_SR(host->io_base));
	if (sr & MLCIRQ_NAND_READY)
		complete(&host->comp_nand);
	if (sr & MLCIRQ_CONTROLLER_READY)
		complete(&host->comp_controller);

	return IRQ_HANDLED;
}

static int lpc32xx_waitfunc_nand(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
		goto exit;

	wait_for_completion(&host->comp_nand);

	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
		/* Seems to be delayed sometimes by controller */
		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc_controller(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
		goto exit;

	wait_for_completion(&host->comp_controller);

	while (!(readb(MLC_ISR(host->io_base)) &
		 MLCISR_CONTROLLER_READY)) {
		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
		cpu_relax();
	}

exit:
	return NAND_STATUS_READY;
}

static int lpc32xx_waitfunc(struct nand_chip *chip)
{
	lpc32xx_waitfunc_nand(chip);
	lpc32xx_waitfunc_controller(chip);

	return NAND_STATUS_READY;
}

/*
 * Enable NAND write protect
 */
static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 0);
}

/*
 * Disable NAND write protect
 */
static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
{
	if (gpio_is_valid(host->ncfg->wp_gpio))
		gpio_set_value(host->ncfg->wp_gpio, 1);
}

static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}

static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
			    enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp_dma);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp_dma;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}

static int lpc32xx_read_page(struct nand_chip *chip, uint8_t *buf,
			     int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, j;
	uint8_t *oobbuf = chip->oob_poi;
	uint32_t mlc_isr;
	int res;
	uint8_t *dma_buf;
	bool dma_mapped;

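	/*
	 * Buffers above high_memory (e.g. vmalloc'ed ones) cannot be
	 * DMA-mapped directly, so fall back to the preallocated bounce
	 * buffer in that case.
	 */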
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->dma_buf;
		dma_mapped = false;
	}

	/* Writing Command and Address */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* For all sub-pages */
	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Auto Decode Command */
		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);

		/* Check ECC Error status */
		mlc_isr = readl(MLC_ISR(host->io_base));
		if (mlc_isr & MLCISR_DECODER_FAILURE) {
			mtd->ecc_stats.failed++;
			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
		}

		/* Read 512 + 16 Bytes */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_DEV_TO_MEM);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				*((uint32_t *)(buf)) =
					readl(MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		for (j = 0; j < (16 >> 2); j++) {
			*((uint32_t *)(oobbuf)) =
				readl(MLC_BUFF(host->io_base));
			oobbuf += 4;
		}
	}

	if (use_dma && !dma_mapped)
		memcpy(buf, dma_buf, mtd->writesize);

	return 0;
}

static int lpc32xx_write_page_lowlevel(struct nand_chip *chip,
				       const uint8_t *buf, int oob_required,
				       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	const uint8_t *oobbuf = chip->oob_poi;
	uint8_t *dma_buf = (uint8_t *)buf;
	int res;
	int i, j;

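	/*
	 * As in the read path, buffers above high_memory cannot be mapped
	 * for DMA, so copy the payload to the bounce buffer first.
	 */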
	if (use_dma && (void *)buf >= high_memory) {
		dma_buf = host->dma_buf;
		memcpy(dma_buf, buf, mtd->writesize);
	}

	nand_prog_page_begin_op(chip, page, 0, NULL, 0);

	for (i = 0; i < host->mlcsubpages; i++) {
		/* Start Encode */
		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));

		/* Write 512 + 6 Bytes to Buffer */
		if (use_dma) {
			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
					       DMA_MEM_TO_DEV);
			if (res)
				return res;
		} else {
			for (j = 0; j < (512 >> 2); j++) {
				writel(*((uint32_t *)(buf)),
				       MLC_BUFF(host->io_base));
				buf += 4;
			}
		}
		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 4;
		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
		oobbuf += 12;

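		/*
		 * Only the first 6 user bytes of each 16-byte OOB chunk are
		 * written above; the controller itself appends the 10 ECC
		 * bytes during the auto encode started below.
		 */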
		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));

		/* Wait for Controller Ready */
		lpc32xx_waitfunc_controller(chip);
	}

	return nand_prog_page_end_op(chip);
}

static int lpc32xx_read_oob(struct nand_chip *chip, int page)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	/* Read whole page - necessary with MLC controller! */
	lpc32xx_read_page(chip, host->dummy_buf, 1, page);

	return 0;
}

static int lpc32xx_write_oob(struct nand_chip *chip, int page)
{
	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
	return 0;
}

/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
static void lpc32xx_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Always enabled! */
}

static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-mlc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	/*
	 * Set direction to a sensible value even if the dmaengine driver
	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
	 * driver criticizes it as "alien transfer direction".
	 */
	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 128;
	host->dma_slave_config.dst_maxburst = 128;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		goto out1;
	}

	return 0;
out1:
	dma_release_channel(host->dma_chan);
	return -ENXIO;
}

static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
{
	struct lpc32xx_nand_cfg_mlc *ncfg;
	struct device_node *np = dev->of_node;

	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
	if (!ncfg)
		return NULL;

	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);

	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
	    !ncfg->wr_low) {
		dev_err(dev, "chip parameters not specified correctly\n");
		return NULL;
	}

	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);

	return ncfg;
}
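
/*
 * Illustrative device tree fragment for this driver; the property names
 * match the parsing above, while the node address and timing values are
 * examples only and board specific:
 *
 *	flash@200a8000 {
 *		compatible = "nxp,lpc3220-mlc";
 *		nxp,tcea-delay = <100000000>;
 *		nxp,busy-delay = <10000000>;
 *		nxp,nand-ta = <100000000>;
 *		nxp,rd-high = <40000000>;
 *		nxp,rd-low = <83000000>;
 *		nxp,wr-high = <40000000>;
 *		nxp,wr-low = <83000000>;
 *		gpios = <...>;		(optional write protect GPIO)
 *	};
 */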

static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct device *dev = &host->pdev->dev;

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	host->dma_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
	if (!host->dma_buf)
		return -ENOMEM;

	host->dummy_buf = devm_kzalloc(dev, mtd->writesize, GFP_KERNEL);
	if (!host->dummy_buf)
		return -ENOMEM;

	chip->ecc.size = 512;
	chip->ecc.hwctl = lpc32xx_ecc_enable;
	chip->ecc.read_page_raw = lpc32xx_read_page;
	chip->ecc.read_page = lpc32xx_read_page;
	chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
	chip->ecc.write_page = lpc32xx_write_page_lowlevel;
	chip->ecc.write_oob = lpc32xx_write_oob;
	chip->ecc.read_oob = lpc32xx_read_oob;
	chip->ecc.strength = 4;
	chip->ecc.bytes = 10;

	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
	host->mlcsubpages = mtd->writesize / 512;

	return 0;
}

static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};

/*
 * Probe for NAND controller
 */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	host->io_base_phy = rc->start;

	nand_chip = &host->nand_chip;
	mtd = nand_to_mtd(nand_chip);
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) &&
	    gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	/* link the private data structures */
	nand_set_controller_data(nand_chip, host);
	nand_set_flash_node(nand_chip, pdev->dev.of_node);
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock initialization failure\n");
		res = -ENOENT;
		goto free_gpio;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto put_clk;

	nand_chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	nand_chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	nand_chip->legacy.chip_delay = 25; /* us */
	nand_chip->legacy.IO_ADDR_R = MLC_DATA(host->io_base);
	nand_chip->legacy.IO_ADDR_W = MLC_DATA(host->io_base);

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* Initialize function pointers */
	nand_chip->legacy.waitfunc = lpc32xx_waitfunc;

	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
	nand_chip->bbt_td = &lpc32xx_nand_bbt;
	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;

	if (use_dma) {
		res = lpc32xx_dma_setup(host);
		if (res) {
			res = -EIO;
			goto unprepare_clk;
		}
	}

	/* initially clear interrupt status */
	readb(MLC_IRQ_SR(host->io_base));

	init_completion(&host->comp_nand);
	init_completion(&host->comp_controller);

	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0) {
		res = -EINVAL;
		goto release_dma_chan;
	}

	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
		res = -ENXIO;
		goto release_dma_chan;
	}

	/*
	 * Scan to find existence of the device and get the type of NAND device:
	 * SMALL block or LARGE block.
	 */
	nand_chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(nand_chip, 1);
	if (res)
		goto free_irq;

	mtd->name = DRV_NAME;

	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);
free_irq:
	free_irq(host->irq, host);
release_dma_chan:
	if (use_dma)
		dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
put_clk:
	clk_put(host->clk);
free_gpio:
	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return res;
}

/*
 * Remove NAND device
 */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	free_irq(host->irq, host);
	if (use_dma)
		dma_release_channel(host->dma_chan);

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);

	lpc32xx_wp_enable(host);
	gpio_free(host->ncfg->wp_gpio);

	return 0;
}

#ifdef CONFIG_PM
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}

static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);
	return 0;
}

#else
#define lpc32xx_nand_resume NULL
#define lpc32xx_nand_suspend NULL
#endif

static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-mlc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);

static struct platform_driver lpc32xx_nand_driver = {
	.probe = lpc32xx_nand_probe,
	.remove = lpc32xx_nand_remove,
	.resume = lpc32xx_nand_resume,
	.suspend = lpc32xx_nand_suspend,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");