xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/raw/lpc32xx_slc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * NXP LPC32XX NAND SLC driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Authors:
6*4882a593Smuzhiyun  *    Kevin Wells <kevin.wells@nxp.com>
7*4882a593Smuzhiyun  *    Roland Stigge <stigge@antcom.de>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Copyright © 2011 NXP Semiconductors
10*4882a593Smuzhiyun  * Copyright © 2012 Roland Stigge
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/slab.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
17*4882a593Smuzhiyun #include <linux/mtd/rawnand.h>
18*4882a593Smuzhiyun #include <linux/mtd/partitions.h>
19*4882a593Smuzhiyun #include <linux/clk.h>
20*4882a593Smuzhiyun #include <linux/err.h>
21*4882a593Smuzhiyun #include <linux/delay.h>
22*4882a593Smuzhiyun #include <linux/io.h>
23*4882a593Smuzhiyun #include <linux/mm.h>
24*4882a593Smuzhiyun #include <linux/dma-mapping.h>
25*4882a593Smuzhiyun #include <linux/dmaengine.h>
26*4882a593Smuzhiyun #include <linux/mtd/nand_ecc.h>
27*4882a593Smuzhiyun #include <linux/gpio.h>
28*4882a593Smuzhiyun #include <linux/of.h>
29*4882a593Smuzhiyun #include <linux/of_gpio.h>
30*4882a593Smuzhiyun #include <linux/mtd/lpc32xx_slc.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define LPC32XX_MODNAME		"lpc32xx-nand"
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun /**********************************************************************
35*4882a593Smuzhiyun * SLC NAND controller register offsets
36*4882a593Smuzhiyun **********************************************************************/
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #define SLC_DATA(x)		(x + 0x000)
39*4882a593Smuzhiyun #define SLC_ADDR(x)		(x + 0x004)
40*4882a593Smuzhiyun #define SLC_CMD(x)		(x + 0x008)
41*4882a593Smuzhiyun #define SLC_STOP(x)		(x + 0x00C)
42*4882a593Smuzhiyun #define SLC_CTRL(x)		(x + 0x010)
43*4882a593Smuzhiyun #define SLC_CFG(x)		(x + 0x014)
44*4882a593Smuzhiyun #define SLC_STAT(x)		(x + 0x018)
45*4882a593Smuzhiyun #define SLC_INT_STAT(x)		(x + 0x01C)
46*4882a593Smuzhiyun #define SLC_IEN(x)		(x + 0x020)
47*4882a593Smuzhiyun #define SLC_ISR(x)		(x + 0x024)
48*4882a593Smuzhiyun #define SLC_ICR(x)		(x + 0x028)
49*4882a593Smuzhiyun #define SLC_TAC(x)		(x + 0x02C)
50*4882a593Smuzhiyun #define SLC_TC(x)		(x + 0x030)
51*4882a593Smuzhiyun #define SLC_ECC(x)		(x + 0x034)
52*4882a593Smuzhiyun #define SLC_DMA_DATA(x)		(x + 0x038)
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun /**********************************************************************
55*4882a593Smuzhiyun * slc_ctrl register definitions
56*4882a593Smuzhiyun **********************************************************************/
57*4882a593Smuzhiyun #define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
58*4882a593Smuzhiyun #define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
59*4882a593Smuzhiyun #define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun /**********************************************************************
62*4882a593Smuzhiyun * slc_cfg register definitions
63*4882a593Smuzhiyun **********************************************************************/
64*4882a593Smuzhiyun #define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
65*4882a593Smuzhiyun #define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
66*4882a593Smuzhiyun #define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
67*4882a593Smuzhiyun #define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
68*4882a593Smuzhiyun #define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
69*4882a593Smuzhiyun #define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /**********************************************************************
72*4882a593Smuzhiyun * slc_stat register definitions
73*4882a593Smuzhiyun **********************************************************************/
74*4882a593Smuzhiyun #define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
75*4882a593Smuzhiyun #define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
76*4882a593Smuzhiyun #define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun /**********************************************************************
79*4882a593Smuzhiyun * slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
80*4882a593Smuzhiyun **********************************************************************/
81*4882a593Smuzhiyun #define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
82*4882a593Smuzhiyun #define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun /**********************************************************************
85*4882a593Smuzhiyun * slc_tac register definitions
86*4882a593Smuzhiyun **********************************************************************/
87*4882a593Smuzhiyun /* Computation of clock cycles on basis of controller and device clock rates */
88*4882a593Smuzhiyun #define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /* Clock setting for RDY write sample wait time in 2*n clocks */
91*4882a593Smuzhiyun #define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
92*4882a593Smuzhiyun /* Write pulse width in clock cycles, 1 to 16 clocks */
93*4882a593Smuzhiyun #define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
94*4882a593Smuzhiyun /* Write hold time of control and data signals, 1 to 16 clocks */
95*4882a593Smuzhiyun #define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
96*4882a593Smuzhiyun /* Write setup time of control and data signals, 1 to 16 clocks */
97*4882a593Smuzhiyun #define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
98*4882a593Smuzhiyun /* Clock setting for RDY read sample wait time in 2*n clocks */
99*4882a593Smuzhiyun #define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
100*4882a593Smuzhiyun /* Read pulse width in clock cycles, 1 to 16 clocks */
101*4882a593Smuzhiyun #define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
102*4882a593Smuzhiyun /* Read hold time of control and data signals, 1 to 16 clocks */
103*4882a593Smuzhiyun #define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
104*4882a593Smuzhiyun /* Read setup time of control and data signals, 1 to 16 clocks */
105*4882a593Smuzhiyun #define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /**********************************************************************
108*4882a593Smuzhiyun * slc_ecc register definitions
109*4882a593Smuzhiyun **********************************************************************/
/* ECC line parity fetch macro */
111*4882a593Smuzhiyun #define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
112*4882a593Smuzhiyun #define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun /*
115*4882a593Smuzhiyun  * DMA requires storage space for the DMA local buffer and the hardware ECC
116*4882a593Smuzhiyun  * storage area. The DMA local buffer is only used if DMA mapping fails
117*4882a593Smuzhiyun  * during runtime.
118*4882a593Smuzhiyun  */
119*4882a593Smuzhiyun #define LPC32XX_DMA_DATA_SIZE		4096
120*4882a593Smuzhiyun #define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun /* Number of bytes used for ECC stored in NAND per 256 bytes */
123*4882a593Smuzhiyun #define LPC32XX_SLC_DEV_ECC_BYTES	3
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /*
126*4882a593Smuzhiyun  * If the NAND base clock frequency can't be fetched, this frequency will be
127*4882a593Smuzhiyun  * used instead as the base. This rate is used to setup the timing registers
128*4882a593Smuzhiyun  * used for NAND accesses.
129*4882a593Smuzhiyun  */
130*4882a593Smuzhiyun #define LPC32XX_DEF_BUS_RATE		133250000
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun /* Milliseconds for DMA FIFO timeout (unlikely anyway) */
133*4882a593Smuzhiyun #define LPC32XX_DMA_TIMEOUT		100
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun /*
136*4882a593Smuzhiyun  * NAND ECC Layout for small page NAND devices
137*4882a593Smuzhiyun  * Note: For large and huge page devices, the default layouts are used
138*4882a593Smuzhiyun  */
lpc32xx_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)139*4882a593Smuzhiyun static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
140*4882a593Smuzhiyun 				 struct mtd_oob_region *oobregion)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun 	if (section)
143*4882a593Smuzhiyun 		return -ERANGE;
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	oobregion->length = 6;
146*4882a593Smuzhiyun 	oobregion->offset = 10;
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	return 0;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
lpc32xx_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)151*4882a593Smuzhiyun static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
152*4882a593Smuzhiyun 				  struct mtd_oob_region *oobregion)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun 	if (section > 1)
155*4882a593Smuzhiyun 		return -ERANGE;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	if (!section) {
158*4882a593Smuzhiyun 		oobregion->offset = 0;
159*4882a593Smuzhiyun 		oobregion->length = 4;
160*4882a593Smuzhiyun 	} else {
161*4882a593Smuzhiyun 		oobregion->offset = 6;
162*4882a593Smuzhiyun 		oobregion->length = 4;
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	return 0;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
/* OOB layout callbacks used for small page devices */
static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
	.ecc = lpc32xx_ooblayout_ecc,
	.free = lpc32xx_ooblayout_free,
};
172*4882a593Smuzhiyun 
/* Marker patterns identifying the main and mirror bad block tables */
static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };

/*
 * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
 * Note: Large page devices used the default layout
 */
static struct nand_bbt_descr bbt_smallpage_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	0,
	.len = 4,
	.veroffs = 6,
	.maxblocks = 4,
	.pattern = mirror_pattern
};
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun /*
201*4882a593Smuzhiyun  * NAND platform configuration structure
202*4882a593Smuzhiyun  */
struct lpc32xx_nand_cfg_slc {
	uint32_t wdr_clks;	/* RDY write sample wait, SLCTAC_WDR() field */
	uint32_t wwidth;	/* write pulse width rate, fed to SLCTAC_WWIDTH() */
	uint32_t whold;		/* write hold rate, fed to SLCTAC_WHOLD() */
	uint32_t wsetup;	/* write setup rate, fed to SLCTAC_WSETUP() */
	uint32_t rdr_clks;	/* RDY read sample wait, SLCTAC_RDR() field */
	uint32_t rwidth;	/* read pulse width rate, fed to SLCTAC_RWIDTH() */
	uint32_t rhold;		/* read hold rate, fed to SLCTAC_RHOLD() */
	uint32_t rsetup;	/* read setup rate, fed to SLCTAC_RSETUP() */
	int wp_gpio;		/* write-protect GPIO, checked with gpio_is_valid() */
	struct mtd_partition *parts;	/* NOTE(review): not referenced in this chunk */
	unsigned num_parts;		/* NOTE(review): not referenced in this chunk */
};
216*4882a593Smuzhiyun 
struct lpc32xx_nand_host {
	struct nand_chip	nand_chip;
	struct lpc32xx_slc_platform_data *pdata;
	struct clk		*clk;		/* SLC base clock, timing reference */
	void __iomem		*io_base;	/* mapped SLC register block */
	struct lpc32xx_nand_cfg_slc *ncfg;	/* timing and WP configuration */

	struct completion	comp;		/* signalled by DMA completion callback */
	struct dma_chan		*dma_chan;
	uint32_t		dma_buf_len;
	struct dma_slave_config	dma_slave_config;
	struct scatterlist	sgl;

	/*
	 * DMA and CPU addresses of ECC work area and data buffer
	 */
	uint32_t		*ecc_buf;	/* one hardware ECC word per ECC step */
	uint8_t			*data_buf;	/* bounce buffer for non-DMA-mappable callers */
	dma_addr_t		io_base_dma;	/* bus address of SLC registers, for DMA */
};
237*4882a593Smuzhiyun 
lpc32xx_nand_setup(struct lpc32xx_nand_host * host)238*4882a593Smuzhiyun static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun 	uint32_t clkrate, tmp;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	/* Reset SLC controller */
243*4882a593Smuzhiyun 	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
244*4882a593Smuzhiyun 	udelay(1000);
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	/* Basic setup */
247*4882a593Smuzhiyun 	writel(0, SLC_CFG(host->io_base));
248*4882a593Smuzhiyun 	writel(0, SLC_IEN(host->io_base));
249*4882a593Smuzhiyun 	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
250*4882a593Smuzhiyun 		SLC_ICR(host->io_base));
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	/* Get base clock for SLC block */
253*4882a593Smuzhiyun 	clkrate = clk_get_rate(host->clk);
254*4882a593Smuzhiyun 	if (clkrate == 0)
255*4882a593Smuzhiyun 		clkrate = LPC32XX_DEF_BUS_RATE;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	/* Compute clock setup values */
258*4882a593Smuzhiyun 	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
259*4882a593Smuzhiyun 		SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
260*4882a593Smuzhiyun 		SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
261*4882a593Smuzhiyun 		SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
262*4882a593Smuzhiyun 		SLCTAC_RDR(host->ncfg->rdr_clks) |
263*4882a593Smuzhiyun 		SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
264*4882a593Smuzhiyun 		SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
265*4882a593Smuzhiyun 		SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
266*4882a593Smuzhiyun 	writel(tmp, SLC_TAC(host->io_base));
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun /*
270*4882a593Smuzhiyun  * Hardware specific access to control lines
271*4882a593Smuzhiyun  */
lpc32xx_nand_cmd_ctrl(struct nand_chip * chip,int cmd,unsigned int ctrl)272*4882a593Smuzhiyun static void lpc32xx_nand_cmd_ctrl(struct nand_chip *chip, int cmd,
273*4882a593Smuzhiyun 				  unsigned int ctrl)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun 	uint32_t tmp;
276*4882a593Smuzhiyun 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	/* Does CE state need to be changed? */
279*4882a593Smuzhiyun 	tmp = readl(SLC_CFG(host->io_base));
280*4882a593Smuzhiyun 	if (ctrl & NAND_NCE)
281*4882a593Smuzhiyun 		tmp |= SLCCFG_CE_LOW;
282*4882a593Smuzhiyun 	else
283*4882a593Smuzhiyun 		tmp &= ~SLCCFG_CE_LOW;
284*4882a593Smuzhiyun 	writel(tmp, SLC_CFG(host->io_base));
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	if (cmd != NAND_CMD_NONE) {
287*4882a593Smuzhiyun 		if (ctrl & NAND_CLE)
288*4882a593Smuzhiyun 			writel(cmd, SLC_CMD(host->io_base));
289*4882a593Smuzhiyun 		else
290*4882a593Smuzhiyun 			writel(cmd, SLC_ADDR(host->io_base));
291*4882a593Smuzhiyun 	}
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /*
295*4882a593Smuzhiyun  * Read the Device Ready pin
296*4882a593Smuzhiyun  */
lpc32xx_nand_device_ready(struct nand_chip * chip)297*4882a593Smuzhiyun static int lpc32xx_nand_device_ready(struct nand_chip *chip)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
300*4882a593Smuzhiyun 	int rdy = 0;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
303*4882a593Smuzhiyun 		rdy = 1;
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	return rdy;
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun /*
309*4882a593Smuzhiyun  * Enable NAND write protect
310*4882a593Smuzhiyun  */
lpc32xx_wp_enable(struct lpc32xx_nand_host * host)311*4882a593Smuzhiyun static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	if (gpio_is_valid(host->ncfg->wp_gpio))
314*4882a593Smuzhiyun 		gpio_set_value(host->ncfg->wp_gpio, 0);
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun /*
318*4882a593Smuzhiyun  * Disable NAND write protect
319*4882a593Smuzhiyun  */
lpc32xx_wp_disable(struct lpc32xx_nand_host * host)320*4882a593Smuzhiyun static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun 	if (gpio_is_valid(host->ncfg->wp_gpio))
323*4882a593Smuzhiyun 		gpio_set_value(host->ncfg->wp_gpio, 1);
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun /*
327*4882a593Smuzhiyun  * Prepares SLC for transfers with H/W ECC enabled
328*4882a593Smuzhiyun  */
/* ECC hwctl hook: intentionally empty, the SLC enables ECC on its own */
static void lpc32xx_nand_ecc_enable(struct nand_chip *chip, int mode)
{
	/* Hardware ECC is enabled automatically in hardware as needed */
}
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun /*
335*4882a593Smuzhiyun  * Calculates the ECC for the data
336*4882a593Smuzhiyun  */
/* ECC calculate hook: a no-op, the ECC comes from the hardware engine */
static int lpc32xx_nand_ecc_calculate(struct nand_chip *chip,
				      const unsigned char *buf,
				      unsigned char *code)
{
	/*
	 * ECC is calculated automatically in hardware during syndrome read
	 * and write operations, so it doesn't need to be calculated here.
	 */
	return 0;
}
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun /*
349*4882a593Smuzhiyun  * Read a single byte from NAND device
350*4882a593Smuzhiyun  */
lpc32xx_nand_read_byte(struct nand_chip * chip)351*4882a593Smuzhiyun static uint8_t lpc32xx_nand_read_byte(struct nand_chip *chip)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	return (uint8_t)readl(SLC_DATA(host->io_base));
356*4882a593Smuzhiyun }
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun /*
359*4882a593Smuzhiyun  * Simple device read without ECC
360*4882a593Smuzhiyun  */
/*
 * Simple device read without ECC: fetch len bytes one at a time from the
 * SLC data register.
 */
static void lpc32xx_nand_read_buf(struct nand_chip *chip, u_char *buf, int len)
{
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = (uint8_t)readl(SLC_DATA(host->io_base));
}
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun /*
371*4882a593Smuzhiyun  * Simple device write without ECC
372*4882a593Smuzhiyun  */
lpc32xx_nand_write_buf(struct nand_chip * chip,const uint8_t * buf,int len)373*4882a593Smuzhiyun static void lpc32xx_nand_write_buf(struct nand_chip *chip, const uint8_t *buf,
374*4882a593Smuzhiyun 				   int len)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	/* Direct device write with no ECC */
379*4882a593Smuzhiyun 	while (len-- > 0)
380*4882a593Smuzhiyun 		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun /*
384*4882a593Smuzhiyun  * Read the OOB data from the device without ECC using FIFO method
385*4882a593Smuzhiyun  */
lpc32xx_nand_read_oob_syndrome(struct nand_chip * chip,int page)386*4882a593Smuzhiyun static int lpc32xx_nand_read_oob_syndrome(struct nand_chip *chip, int page)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
391*4882a593Smuzhiyun }
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun /*
394*4882a593Smuzhiyun  * Write the OOB data to the device without ECC using FIFO method
395*4882a593Smuzhiyun  */
lpc32xx_nand_write_oob_syndrome(struct nand_chip * chip,int page)396*4882a593Smuzhiyun static int lpc32xx_nand_write_oob_syndrome(struct nand_chip *chip, int page)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
401*4882a593Smuzhiyun 				 mtd->oobsize);
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun /*
405*4882a593Smuzhiyun  * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
406*4882a593Smuzhiyun  */
/*
 * Convert hardware ECC words into the 3-byte-per-step format stored in the
 * NAND spare area: each 22-bit ECC value is shifted left by 2, inverted,
 * and written big-endian into 3 consecutive spare bytes.
 */
static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
{
	int word;

	for (word = 0; word < count; word++) {
		uint32_t code = ~(ecc[word] << 2) & 0xFFFFFF;

		spare[word * 3 + 0] = (uint8_t)(code >> 16);
		spare[word * 3 + 1] = (uint8_t)(code >> 8);
		spare[word * 3 + 2] = (uint8_t)code;
	}
}
421*4882a593Smuzhiyun 
/* DMA completion callback: wakes the waiter blocked on the completion */
static void lpc32xx_dma_complete_func(void *completion)
{
	complete(completion);
}
426*4882a593Smuzhiyun 
/*
 * Run one DMA transfer between a memory buffer and an SLC register.
 *
 * @mtd: MTD device
 * @dma: bus address of the SLC register to transfer to/from
 * @mem: CPU address of the memory buffer
 * @len: transfer length in bytes
 * @dir: DMA_DEV_TO_MEM for reads, DMA_MEM_TO_DEV for writes
 *
 * Returns 0 on success, -ENXIO on DMA setup/mapping/prep failure, or
 * -ETIMEDOUT if the transfer does not complete within one second.
 *
 * Fix: the return value of wait_for_completion_timeout() was previously
 * ignored, so a hung DMA transfer was silently reported as success.
 */
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	unsigned long time_left;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}

	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		res = -ENXIO;
		goto out_unmap;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	time_left = wait_for_completion_timeout(&host->comp,
						msecs_to_jiffies(1000));
	if (!time_left) {
		/* Stop the stuck channel and report the failure to callers */
		dev_err(mtd->dev.parent, "DMA transfer timed out\n");
		dmaengine_terminate_all(host->dma_chan);
		res = -ETIMEDOUT;
		goto out_unmap;
	}

	res = 0;

out_unmap:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return res;
}
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun /*
485*4882a593Smuzhiyun  * DMA read/write transfers with ECC support
486*4882a593Smuzhiyun  */
/*
 * DMA read/write of the page data area with hardware ECC.
 *
 * The data area is moved in chip->ecc.steps chunks. After every chunk except
 * the last, the intermediate hardware ECC word is pulled into host->ecc_buf
 * by a second, 4-byte DMA transfer; the final ECC word is read directly from
 * the register once the FIFO has drained.
 *
 * Returns 0 on success, an error from lpc32xx_xmit_dma(), or -EIO on a
 * FIFO / transfer-count failure.
 */
static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
			int read)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	int i, status = 0;
	unsigned long timeout;
	int res;
	enum dma_transfer_direction dir =
		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
	uint8_t *dma_buf;
	bool dma_mapped;

	/*
	 * Use the caller's buffer directly when it can be DMA-mapped
	 * (lowmem); otherwise bounce through the preallocated data_buf,
	 * copying outgoing data into it before a write.
	 */
	if ((void *)buf <= high_memory) {
		dma_buf = buf;
		dma_mapped = true;
	} else {
		dma_buf = host->data_buf;
		dma_mapped = false;
		if (!read)
			memcpy(host->data_buf, buf, mtd->writesize);
	}

	/* Set DMA direction and enable ECC + burst in the SLC config */
	if (read) {
		writel(readl(SLC_CFG(host->io_base)) |
		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
	} else {
		writel((readl(SLC_CFG(host->io_base)) |
			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
		       ~SLCCFG_DMA_DIR,
			SLC_CFG(host->io_base));
	}

	/* Clear initial ECC */
	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));

	/* Transfer size is data area only */
	writel(mtd->writesize, SLC_TC(host->io_base));

	/* Start transfer in the NAND controller */
	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));

	/* Move one ECC-step-sized data chunk, then its ECC word, per pass */
	for (i = 0; i < chip->ecc.steps; i++) {
		/* Data */
		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
				       dma_buf + i * chip->ecc.size,
				       mtd->writesize / chip->ecc.steps, dir);
		if (res)
			return res;

		/* Always _read_ ECC */
		if (i == chip->ecc.steps - 1)
			break;
		if (!read) /* ECC availability delayed on write */
			udelay(10);
		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
		if (res)
			return res;
	}

	/*
	 * According to NXP, the DMA can be finished here, but the NAND
	 * controller may still have buffered data. After porting to using the
	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
	 * appears to be always true, according to tests. Keeping the check for
	 * safety reasons for now.
	 */
	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
		       time_before(jiffies, timeout))
			cpu_relax();
		if (!time_before(jiffies, timeout)) {
			dev_err(mtd->dev.parent, "FIFO held data too long\n");
			status = -EIO;
		}
	}

	/* Read last calculated ECC value */
	if (!read)
		udelay(10); /* ECC availability delayed on write */
	host->ecc_buf[chip->ecc.steps - 1] =
		readl(SLC_ECC(host->io_base));

	/* Flush DMA */
	dmaengine_terminate_all(host->dma_chan);

	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
	    readl(SLC_TC(host->io_base))) {
		/* Something is left in the FIFO, something is wrong */
		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
		status = -EIO;
	}

	/* Stop DMA & HW ECC */
	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
	       SLC_CTRL(host->io_base));
	writel(readl(SLC_CFG(host->io_base)) &
	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));

	/* Copy the bounce buffer back to the caller after a read */
	if (!dma_mapped && read)
		memcpy(buf, host->data_buf, mtd->writesize);

	return status;
}
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun /*
599*4882a593Smuzhiyun  * Read the data and OOB data from the device, use ECC correction with the
600*4882a593Smuzhiyun  * data, disable ECC for the OOB data
601*4882a593Smuzhiyun  */
/*
 * Read one page with hardware ECC correction: transfer data + gather ECC
 * via lpc32xx_xfer(), read the raw OOB, then correct each ECC step against
 * the ECC bytes stored in the spare area.
 */
static int lpc32xx_nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
					   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
	struct mtd_oob_region oobregion = { };
	int stat, i, status, error;
	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];

	/* Issue read command */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* Read data and oob, calculate ECC (fills host->ecc_buf) */
	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);

	/* Get OOB data */
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	/* Convert hardware ECC words to the 3-byte-per-step stored format */
	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);

	/* Pointer to ECC data retrieved from NAND spare area */
	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
	if (error)
		return error;

	oobecc = chip->oob_poi + oobregion.offset;

	/* Correct each step; record per-step failure/correction statistics */
	for (i = 0; i < chip->ecc.steps; i++) {
		stat = chip->ecc.correct(chip, buf, oobecc,
					 &tmpecc[i * chip->ecc.bytes]);
		if (stat < 0)
			mtd->ecc_stats.failed++;
		else
			mtd->ecc_stats.corrected += stat;

		buf += chip->ecc.size;
		oobecc += chip->ecc.bytes;
	}

	return status;
}
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun /*
646*4882a593Smuzhiyun  * Read the data and OOB data from the device, no ECC correction with the
647*4882a593Smuzhiyun  * data or OOB data
648*4882a593Smuzhiyun  */
/*
 * Raw page read: pull the main data and the spare area straight through
 * the SLC FIFO with no ECC calculation or correction applied.
 */
static int lpc32xx_nand_read_page_raw_syndrome(struct nand_chip *chip,
					       uint8_t *buf, int oob_required,
					       int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int data_len = chip->ecc.size * chip->ecc.steps;

	/* Start the page read */
	nand_read_page_op(chip, page, 0, NULL, 0);

	/* No ECC involved, so the plain FIFO interface is sufficient */
	chip->legacy.read_buf(chip, buf, data_len);
	chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);

	return 0;
}
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun /*
666*4882a593Smuzhiyun  * Write the data and OOB data to the device, use ECC with the data,
667*4882a593Smuzhiyun  * disable ECC for the OOB data
668*4882a593Smuzhiyun  */
lpc32xx_nand_write_page_syndrome(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)669*4882a593Smuzhiyun static int lpc32xx_nand_write_page_syndrome(struct nand_chip *chip,
670*4882a593Smuzhiyun 					    const uint8_t *buf,
671*4882a593Smuzhiyun 					    int oob_required, int page)
672*4882a593Smuzhiyun {
673*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
674*4882a593Smuzhiyun 	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
675*4882a593Smuzhiyun 	struct mtd_oob_region oobregion = { };
676*4882a593Smuzhiyun 	uint8_t *pb;
677*4882a593Smuzhiyun 	int error;
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	/* Write data, calculate ECC on outbound data */
682*4882a593Smuzhiyun 	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
683*4882a593Smuzhiyun 	if (error)
684*4882a593Smuzhiyun 		return error;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	/*
687*4882a593Smuzhiyun 	 * The calculated ECC needs some manual work done to it before
688*4882a593Smuzhiyun 	 * committing it to NAND. Process the calculated ECC and place
689*4882a593Smuzhiyun 	 * the resultant values directly into the OOB buffer. */
690*4882a593Smuzhiyun 	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
691*4882a593Smuzhiyun 	if (error)
692*4882a593Smuzhiyun 		return error;
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	pb = chip->oob_poi + oobregion.offset;
695*4882a593Smuzhiyun 	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	/* Write ECC data to device */
698*4882a593Smuzhiyun 	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun /*
704*4882a593Smuzhiyun  * Write the data and OOB data to the device, no ECC correction with the
705*4882a593Smuzhiyun  * data or OOB data
706*4882a593Smuzhiyun  */
lpc32xx_nand_write_page_raw_syndrome(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)707*4882a593Smuzhiyun static int lpc32xx_nand_write_page_raw_syndrome(struct nand_chip *chip,
708*4882a593Smuzhiyun 						const uint8_t *buf,
709*4882a593Smuzhiyun 						int oob_required, int page)
710*4882a593Smuzhiyun {
711*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	/* Raw writes can just use the FIFO interface */
714*4882a593Smuzhiyun 	nand_prog_page_begin_op(chip, page, 0, buf,
715*4882a593Smuzhiyun 				chip->ecc.size * chip->ecc.steps);
716*4882a593Smuzhiyun 	chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
719*4882a593Smuzhiyun }
720*4882a593Smuzhiyun 
/*
 * Acquire the DMA channel used for page data transfers. The channel is
 * selected by a filter callback supplied through platform data, so the
 * driver cannot operate without that callback.
 *
 * Returns 0 on success, -ENOENT if no platform data/filter is present,
 * or -EBUSY if no matching DMA channel could be requested.
 */
static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
{
	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
	dma_cap_mask_t mask;

	if (!host->pdata || !host->pdata->dma_filter) {
		dev_err(mtd->dev.parent, "no DMA platform data\n");
		return -ENOENT;
	}

	/* Request a slave channel; "nand-slc" is matched by the filter */
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
					     "nand-slc");
	if (!host->dma_chan) {
		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
		return -EBUSY;
	}

	return 0;
}
742*4882a593Smuzhiyun 
lpc32xx_parse_dt(struct device * dev)743*4882a593Smuzhiyun static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
744*4882a593Smuzhiyun {
745*4882a593Smuzhiyun 	struct lpc32xx_nand_cfg_slc *ncfg;
746*4882a593Smuzhiyun 	struct device_node *np = dev->of_node;
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
749*4882a593Smuzhiyun 	if (!ncfg)
750*4882a593Smuzhiyun 		return NULL;
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
753*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
754*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
755*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
756*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
757*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
758*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
759*4882a593Smuzhiyun 	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
762*4882a593Smuzhiyun 	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
763*4882a593Smuzhiyun 	    !ncfg->rhold || !ncfg->rsetup) {
764*4882a593Smuzhiyun 		dev_err(dev, "chip parameters not specified correctly\n");
765*4882a593Smuzhiyun 		return NULL;
766*4882a593Smuzhiyun 	}
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
769*4882a593Smuzhiyun 
770*4882a593Smuzhiyun 	return ncfg;
771*4882a593Smuzhiyun }
772*4882a593Smuzhiyun 
/*
 * Per-chip ECC and BBT configuration, invoked by the NAND core once the
 * chip has been detected. Only applies when the on-host (SLC hardware)
 * ECC engine was selected; otherwise the core's defaults are kept.
 */
static int lpc32xx_nand_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);

	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
		return 0;

	/* OOB and ECC CPU and DMA work areas */
	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);

	/*
	 * Small page FLASH has a unique OOB layout, but large and huge
	 * page FLASH use the standard layout. Small page FLASH uses a
	 * custom BBT marker layout.
	 */
	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);

	chip->ecc.placement = NAND_ECC_PLACEMENT_INTERLEAVED;
	/* These sizes remain the same regardless of page size */
	chip->ecc.size = 256;	/* 256-byte ECC step */
	chip->ecc.strength = 1;	/* 1-bit correction per step */
	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
	chip->ecc.prepad = 0;
	chip->ecc.postpad = 0;
	/* Page/OOB accessors implemented by this driver (syndrome variants) */
	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
	/* Hardware computes the ECC; correction uses the generic SW helper */
	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;

	/*
	 * Use a custom BBT marker setup for small page FLASH that
	 * won't interfere with the ECC layout. Large and huge page
	 * FLASH use the standard layout.
	 */
	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
	    mtd->writesize <= 512) {
		chip->bbt_td = &bbt_smallpage_main_descr;
		chip->bbt_md = &bbt_smallpage_mirror_descr;
	}

	return 0;
}
822*4882a593Smuzhiyun 
/* Controller hooks: only chip attach-time ECC/BBT setup is required */
static const struct nand_controller_ops lpc32xx_nand_controller_ops = {
	.attach_chip = lpc32xx_nand_attach_chip,
};
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun /*
828*4882a593Smuzhiyun  * Probe for NAND controller
829*4882a593Smuzhiyun  */
static int lpc32xx_nand_probe(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct resource *rc;
	int res;

	/* Allocate memory for the device structure (and zero it) */
	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* Map the controller registers */
	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
	if (IS_ERR(host->io_base))
		return PTR_ERR(host->io_base);

	/* Physical base: also used as the DMA FIFO address later on */
	host->io_base_dma = rc->start;
	if (pdev->dev.of_node)
		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
	if (!host->ncfg) {
		dev_err(&pdev->dev,
			"Missing or bad NAND config from device tree\n");
		return -ENOENT;
	}
	/* The WP GPIO may not be ready yet; defer the whole probe */
	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
			host->ncfg->wp_gpio, "NAND WP")) {
		dev_err(&pdev->dev, "GPIO not available\n");
		return -EBUSY;
	}
	lpc32xx_wp_disable(host);

	host->pdata = dev_get_platdata(&pdev->dev);

	chip = &host->nand_chip;
	mtd = nand_to_mtd(chip);
	nand_set_controller_data(chip, host);
	nand_set_flash_node(chip, pdev->dev.of_node);
	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;

	/* Get NAND clock */
	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "Clock failure\n");
		/*
		 * NOTE(review): hard-coded -ENOENT discards the clk error;
		 * PTR_ERR(host->clk) would propagate -EPROBE_DEFER — confirm
		 * whether deferral from the clock framework matters here.
		 */
		res = -ENOENT;
		goto enable_wp;
	}
	res = clk_prepare_enable(host->clk);
	if (res)
		goto enable_wp;

	/* Set NAND IO addresses and command/ready functions */
	chip->legacy.IO_ADDR_R = SLC_DATA(host->io_base);
	chip->legacy.IO_ADDR_W = SLC_DATA(host->io_base);
	chip->legacy.cmd_ctrl = lpc32xx_nand_cmd_ctrl;
	chip->legacy.dev_ready = lpc32xx_nand_device_ready;
	chip->legacy.chip_delay = 20; /* 20us command delay time */

	/* Init NAND controller */
	lpc32xx_nand_setup(host);

	platform_set_drvdata(pdev, host);

	/* NAND callbacks for LPC32xx SLC hardware */
	chip->legacy.read_byte = lpc32xx_nand_read_byte;
	chip->legacy.read_buf = lpc32xx_nand_read_buf;
	chip->legacy.write_buf = lpc32xx_nand_write_buf;

	/*
	 * Allocate a large enough buffer for a single huge page plus
	 * extra space for the spare area and ECC storage area
	 */
	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
				      GFP_KERNEL);
	if (host->data_buf == NULL) {
		res = -ENOMEM;
		goto unprepare_clk;
	}

	res = lpc32xx_nand_dma_setup(host);
	if (res) {
		/* NOTE(review): the specific setup error is replaced by -EIO */
		res = -EIO;
		goto unprepare_clk;
	}

	/* Find NAND device */
	chip->legacy.dummy_controller.ops = &lpc32xx_nand_controller_ops;
	res = nand_scan(chip, 1);
	if (res)
		goto release_dma;

	mtd->name = "nxp_lpc3220_slc";
	res = mtd_device_register(mtd, host->ncfg->parts,
				  host->ncfg->num_parts);
	if (res)
		goto cleanup_nand;

	return 0;

	/* Error unwind: release resources in reverse acquisition order */
cleanup_nand:
	nand_cleanup(chip);
release_dma:
	dma_release_channel(host->dma_chan);
unprepare_clk:
	clk_disable_unprepare(host->clk);
enable_wp:
	lpc32xx_wp_enable(host);

	return res;
}
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun /*
947*4882a593Smuzhiyun  * Remove NAND device.
948*4882a593Smuzhiyun  */
static int lpc32xx_nand_remove(struct platform_device *pdev)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	struct nand_chip *chip = &host->nand_chip;
	int ret;

	/* Unregister the MTD first so no new requests can arrive */
	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);
	dma_release_channel(host->dma_chan);

	/* Force CE high */
	/*
	 * NOTE(review): SLCCFG_CE_LOW is an SLC_CFG bit name but is cleared
	 * here in the SLC_CTRL register — confirm the register/bit pairing
	 * is intentional.
	 */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	/* Gate the clock and leave the array write-protected */
	clk_disable_unprepare(host->clk);
	lpc32xx_wp_enable(host);

	return 0;
}
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun #ifdef CONFIG_PM
/* Restore the controller after suspend: clock on, registers reprogrammed,
 * write protect released. */
static int lpc32xx_nand_resume(struct platform_device *pdev)
{
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
	int ret;

	/* Re-enable NAND clock */
	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* Fresh init of NAND controller */
	lpc32xx_nand_setup(host);

	/* Disable write protect */
	lpc32xx_wp_disable(host);

	return 0;
}
991*4882a593Smuzhiyun 
/* Quiesce the controller before suspend: deselect the chip, assert write
 * protect, gate the clock. */
static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
{
	uint32_t tmp;
	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);

	/* Force CE high */
	/*
	 * NOTE(review): SLCCFG_CE_LOW is an SLC_CFG bit name but is cleared
	 * here in the SLC_CTRL register (same pattern as remove) — confirm
	 * intended.
	 */
	tmp = readl(SLC_CTRL(host->io_base));
	tmp &= ~SLCCFG_CE_LOW;
	writel(tmp, SLC_CTRL(host->io_base));

	/* Enable write protect for safety */
	lpc32xx_wp_enable(host);

	/* Disable clock */
	clk_disable_unprepare(host->clk);

	return 0;
}
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun #else
1012*4882a593Smuzhiyun #define lpc32xx_nand_resume NULL
1013*4882a593Smuzhiyun #define lpc32xx_nand_suspend NULL
1014*4882a593Smuzhiyun #endif
1015*4882a593Smuzhiyun 
/* OF match table: binds to the SLC controller node in LPC32xx device trees */
static const struct of_device_id lpc32xx_nand_match[] = {
	{ .compatible = "nxp,lpc3220-slc" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
1021*4882a593Smuzhiyun 
/* Platform driver using legacy (non-dev_pm_ops) suspend/resume callbacks */
static struct platform_driver lpc32xx_nand_driver = {
	.probe		= lpc32xx_nand_probe,
	.remove		= lpc32xx_nand_remove,
	.resume		= lpc32xx_nand_resume,
	.suspend	= lpc32xx_nand_suspend,
	.driver		= {
		.name	= LPC32XX_MODNAME,
		.of_match_table = lpc32xx_nand_match,
	},
};

module_platform_driver(lpc32xx_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
1039