xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/raw/fsmc_nand.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * ST Microelectronics
4*4882a593Smuzhiyun  * Flexible Static Memory Controller (FSMC)
5*4882a593Smuzhiyun  * Driver for NAND portions
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Copyright © 2010 ST Microelectronics
8*4882a593Smuzhiyun  * Vipin Kumar <vipin.kumar@st.com>
9*4882a593Smuzhiyun  * Ashish Priyadarshi
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * Based on drivers/mtd/nand/nomadik_nand.c (removed in v3.8)
12*4882a593Smuzhiyun  *  Copyright © 2007 STMicroelectronics Pvt. Ltd.
13*4882a593Smuzhiyun  *  Copyright © 2009 Alessandro Rubini
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <linux/clk.h>
17*4882a593Smuzhiyun #include <linux/completion.h>
18*4882a593Smuzhiyun #include <linux/delay.h>
19*4882a593Smuzhiyun #include <linux/dmaengine.h>
20*4882a593Smuzhiyun #include <linux/dma-direction.h>
21*4882a593Smuzhiyun #include <linux/dma-mapping.h>
22*4882a593Smuzhiyun #include <linux/err.h>
23*4882a593Smuzhiyun #include <linux/init.h>
24*4882a593Smuzhiyun #include <linux/module.h>
25*4882a593Smuzhiyun #include <linux/resource.h>
26*4882a593Smuzhiyun #include <linux/sched.h>
27*4882a593Smuzhiyun #include <linux/types.h>
28*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
29*4882a593Smuzhiyun #include <linux/mtd/rawnand.h>
30*4882a593Smuzhiyun #include <linux/mtd/nand_ecc.h>
31*4882a593Smuzhiyun #include <linux/platform_device.h>
32*4882a593Smuzhiyun #include <linux/of.h>
33*4882a593Smuzhiyun #include <linux/mtd/partitions.h>
34*4882a593Smuzhiyun #include <linux/io.h>
35*4882a593Smuzhiyun #include <linux/slab.h>
36*4882a593Smuzhiyun #include <linux/amba/bus.h>
37*4882a593Smuzhiyun #include <mtd/mtd-abi.h>
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /* fsmc controller registers for NOR flash */
40*4882a593Smuzhiyun #define CTRL			0x0
41*4882a593Smuzhiyun 	/* ctrl register definitions */
42*4882a593Smuzhiyun 	#define BANK_ENABLE		BIT(0)
43*4882a593Smuzhiyun 	#define MUXED			BIT(1)
44*4882a593Smuzhiyun 	#define NOR_DEV			(2 << 2)
45*4882a593Smuzhiyun 	#define WIDTH_16		BIT(4)
46*4882a593Smuzhiyun 	#define RSTPWRDWN		BIT(6)
47*4882a593Smuzhiyun 	#define WPROT			BIT(7)
48*4882a593Smuzhiyun 	#define WRT_ENABLE		BIT(12)
49*4882a593Smuzhiyun 	#define WAIT_ENB		BIT(13)
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun #define CTRL_TIM		0x4
52*4882a593Smuzhiyun 	/* ctrl_tim register definitions */
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #define FSMC_NOR_BANK_SZ	0x8
55*4882a593Smuzhiyun #define FSMC_NOR_REG_SIZE	0x40
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun #define FSMC_NOR_REG(base, bank, reg)	((base) +			\
58*4882a593Smuzhiyun 					 (FSMC_NOR_BANK_SZ * (bank)) +	\
59*4882a593Smuzhiyun 					 (reg))
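/*
 * Illustrative expansions of the helper above (added for clarity, not part of
 * the original sources): FSMC_NOR_REG(base, 0, CTRL) is base + 0x00 and
 * FSMC_NOR_REG(base, 1, CTRL_TIM) is base + 0x0C, i.e. each NOR bank owns
 * FSMC_NOR_BANK_SZ (8) bytes of register space.
 */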
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun /* fsmc controller registers for NAND flash */
62*4882a593Smuzhiyun #define FSMC_PC			0x00
63*4882a593Smuzhiyun 	/* pc register definitions */
64*4882a593Smuzhiyun 	#define FSMC_RESET		BIT(0)
65*4882a593Smuzhiyun 	#define FSMC_WAITON		BIT(1)
66*4882a593Smuzhiyun 	#define FSMC_ENABLE		BIT(2)
67*4882a593Smuzhiyun 	#define FSMC_DEVTYPE_NAND	BIT(3)
68*4882a593Smuzhiyun 	#define FSMC_DEVWID_16		BIT(4)
69*4882a593Smuzhiyun 	#define FSMC_ECCEN		BIT(6)
70*4882a593Smuzhiyun 	#define FSMC_ECCPLEN_256	BIT(7)
71*4882a593Smuzhiyun 	#define FSMC_TCLR_SHIFT		(9)
72*4882a593Smuzhiyun 	#define FSMC_TCLR_MASK		(0xF)
73*4882a593Smuzhiyun 	#define FSMC_TAR_SHIFT		(13)
74*4882a593Smuzhiyun 	#define FSMC_TAR_MASK		(0xF)
75*4882a593Smuzhiyun #define STS			0x04
76*4882a593Smuzhiyun 	/* sts register definitions */
77*4882a593Smuzhiyun 	#define FSMC_CODE_RDY		BIT(15)
78*4882a593Smuzhiyun #define COMM			0x08
79*4882a593Smuzhiyun 	/* comm register definitions */
80*4882a593Smuzhiyun 	#define FSMC_TSET_SHIFT		0
81*4882a593Smuzhiyun 	#define FSMC_TSET_MASK		0xFF
82*4882a593Smuzhiyun 	#define FSMC_TWAIT_SHIFT	8
83*4882a593Smuzhiyun 	#define FSMC_TWAIT_MASK		0xFF
84*4882a593Smuzhiyun 	#define FSMC_THOLD_SHIFT	16
85*4882a593Smuzhiyun 	#define FSMC_THOLD_MASK		0xFF
86*4882a593Smuzhiyun 	#define FSMC_THIZ_SHIFT		24
87*4882a593Smuzhiyun 	#define FSMC_THIZ_MASK		0xFF
88*4882a593Smuzhiyun #define ATTRIB			0x0C
89*4882a593Smuzhiyun #define IOATA			0x10
90*4882a593Smuzhiyun #define ECC1			0x14
91*4882a593Smuzhiyun #define ECC2			0x18
92*4882a593Smuzhiyun #define ECC3			0x1C
93*4882a593Smuzhiyun #define FSMC_NAND_BANK_SZ	0x20
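/*
 * Example (illustrative): the per-bank NAND register window mapped later in
 * probe starts at fsmc_regs + FSMC_NOR_REG_SIZE + bank * FSMC_NAND_BANK_SZ,
 * so bank 0 registers begin at offset 0x40 and bank 1 at offset 0x60.
 */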
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun #define FSMC_BUSY_WAIT_TIMEOUT	(1 * HZ)
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /*
98*4882a593Smuzhiyun  * According to SPEAr300 Reference Manual (RM0082)
99*4882a593Smuzhiyun  *  TOUTDEL = 7ns (Output delay from the flip-flops to the board)
100*4882a593Smuzhiyun  *  TINDEL = 5ns (Input delay from the board to the flip-flops)
101*4882a593Smuzhiyun  */
102*4882a593Smuzhiyun #define TOUTDEL	7000
103*4882a593Smuzhiyun #define TINDEL	5000
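/*
 * Note: TOUTDEL and TINDEL are in picoseconds, matching the nand_sdr_timings
 * fields they are combined with in fsmc_calc_timings(), whereas hclkn there is
 * in nanoseconds (hence the /1000 conversions).
 */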
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun struct fsmc_nand_timings {
106*4882a593Smuzhiyun 	u8 tclr;
107*4882a593Smuzhiyun 	u8 tar;
108*4882a593Smuzhiyun 	u8 thiz;
109*4882a593Smuzhiyun 	u8 thold;
110*4882a593Smuzhiyun 	u8 twait;
111*4882a593Smuzhiyun 	u8 tset;
112*4882a593Smuzhiyun };
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun enum access_mode {
115*4882a593Smuzhiyun 	USE_DMA_ACCESS = 1,
116*4882a593Smuzhiyun 	USE_WORD_ACCESS,
117*4882a593Smuzhiyun };
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun /**
120*4882a593Smuzhiyun  * struct fsmc_nand_data - structure for FSMC NAND device state
121*4882a593Smuzhiyun  *
122*4882a593Smuzhiyun  * @base:		Inherit from the nand_controller struct
123*4882a593Smuzhiyun  * @pid:		Part ID in the AMBA PrimeCell format
124*4882a593Smuzhiyun  * @nand:		Chip related info for a NAND flash.
125*4882a593Smuzhiyun  *
126*4882a593Smuzhiyun  * @bank:		Bank number for probed device.
127*4882a593Smuzhiyun  * @dev:		Parent device
128*4882a593Smuzhiyun  * @mode:		Access mode
129*4882a593Smuzhiyun  * @clk:		Clock structure for FSMC.
130*4882a593Smuzhiyun  *
131*4882a593Smuzhiyun  * @read_dma_chan:	DMA channel for read access
132*4882a593Smuzhiyun  * @write_dma_chan:	DMA channel for write access to NAND
133*4882a593Smuzhiyun  * @dma_access_complete: Completion structure
134*4882a593Smuzhiyun  *
135*4882a593Smuzhiyun  * @dev_timings:	NAND timings
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * @data_pa:		NAND Physical port for Data.
138*4882a593Smuzhiyun  * @data_va:		NAND port for Data.
139*4882a593Smuzhiyun  * @cmd_va:		NAND port for Command.
140*4882a593Smuzhiyun  * @addr_va:		NAND port for Address.
141*4882a593Smuzhiyun  * @regs_va:		Registers base address for a given bank.
142*4882a593Smuzhiyun  */
143*4882a593Smuzhiyun struct fsmc_nand_data {
144*4882a593Smuzhiyun 	struct nand_controller	base;
145*4882a593Smuzhiyun 	u32			pid;
146*4882a593Smuzhiyun 	struct nand_chip	nand;
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	unsigned int		bank;
149*4882a593Smuzhiyun 	struct device		*dev;
150*4882a593Smuzhiyun 	enum access_mode	mode;
151*4882a593Smuzhiyun 	struct clk		*clk;
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	/* DMA related objects */
154*4882a593Smuzhiyun 	struct dma_chan		*read_dma_chan;
155*4882a593Smuzhiyun 	struct dma_chan		*write_dma_chan;
156*4882a593Smuzhiyun 	struct completion	dma_access_complete;
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	struct fsmc_nand_timings *dev_timings;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	dma_addr_t		data_pa;
161*4882a593Smuzhiyun 	void __iomem		*data_va;
162*4882a593Smuzhiyun 	void __iomem		*cmd_va;
163*4882a593Smuzhiyun 	void __iomem		*addr_va;
164*4882a593Smuzhiyun 	void __iomem		*regs_va;
165*4882a593Smuzhiyun };
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
168*4882a593Smuzhiyun 				   struct mtd_oob_region *oobregion)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	if (section >= chip->ecc.steps)
173*4882a593Smuzhiyun 		return -ERANGE;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	oobregion->offset = (section * 16) + 2;
176*4882a593Smuzhiyun 	oobregion->length = 3;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	return 0;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
182*4882a593Smuzhiyun 				    struct mtd_oob_region *oobregion)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	if (section >= chip->ecc.steps)
187*4882a593Smuzhiyun 		return -ERANGE;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	oobregion->offset = (section * 16) + 8;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	if (section < chip->ecc.steps - 1)
192*4882a593Smuzhiyun 		oobregion->length = 8;
193*4882a593Smuzhiyun 	else
194*4882a593Smuzhiyun 		oobregion->length = mtd->oobsize - oobregion->offset;
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	return 0;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
200*4882a593Smuzhiyun 	.ecc = fsmc_ecc1_ooblayout_ecc,
201*4882a593Smuzhiyun 	.free = fsmc_ecc1_ooblayout_free,
202*4882a593Smuzhiyun };
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /*
205*4882a593Smuzhiyun  * ECC placement definitions in oobfree type format.
206*4882a593Smuzhiyun  * There are 13 bytes of ECC for every 512-byte block, and they have to be read
207*4882a593Smuzhiyun  * consecutively and immediately after the 512-byte data block for the hardware
208*4882a593Smuzhiyun  * to generate the error bit offsets within the 512 bytes of data.
209*4882a593Smuzhiyun  */
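/*
 * Worked example (illustrative): on a device with 2048-byte pages and 64 bytes
 * of OOB (four 512-byte ECC steps), the layout below places the 13 ECC bytes
 * of each step at OOB offsets 2-14, 18-30, 34-46 and 50-62, leaving the free
 * bytes at 15-17, 31-33, 47-49 and 63.
 */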
210*4882a593Smuzhiyun static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
211*4882a593Smuzhiyun 				   struct mtd_oob_region *oobregion)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	if (section >= chip->ecc.steps)
216*4882a593Smuzhiyun 		return -ERANGE;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	oobregion->length = chip->ecc.bytes;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	if (!section && mtd->writesize <= 512)
221*4882a593Smuzhiyun 		oobregion->offset = 0;
222*4882a593Smuzhiyun 	else
223*4882a593Smuzhiyun 		oobregion->offset = (section * 16) + 2;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	return 0;
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
229*4882a593Smuzhiyun 				    struct mtd_oob_region *oobregion)
230*4882a593Smuzhiyun {
231*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	if (section >= chip->ecc.steps)
234*4882a593Smuzhiyun 		return -ERANGE;
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	oobregion->offset = (section * 16) + 15;
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	if (section < chip->ecc.steps - 1)
239*4882a593Smuzhiyun 		oobregion->length = 3;
240*4882a593Smuzhiyun 	else
241*4882a593Smuzhiyun 		oobregion->length = mtd->oobsize - oobregion->offset;
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	return 0;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
247*4882a593Smuzhiyun 	.ecc = fsmc_ecc4_ooblayout_ecc,
248*4882a593Smuzhiyun 	.free = fsmc_ecc4_ooblayout_free,
249*4882a593Smuzhiyun };
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun static inline struct fsmc_nand_data *nand_to_fsmc(struct nand_chip *chip)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun 	return container_of(chip, struct fsmc_nand_data, nand);
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun /*
257*4882a593Smuzhiyun  * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
258*4882a593Smuzhiyun  *
259*4882a593Smuzhiyun  * This routine initializes timing parameters related to NAND memory access in
260*4882a593Smuzhiyun  * FSMC registers
261*4882a593Smuzhiyun  */
262*4882a593Smuzhiyun static void fsmc_nand_setup(struct fsmc_nand_data *host,
263*4882a593Smuzhiyun 			    struct fsmc_nand_timings *tims)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun 	u32 value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
266*4882a593Smuzhiyun 	u32 tclr, tar, thiz, thold, twait, tset;
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
269*4882a593Smuzhiyun 	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
270*4882a593Smuzhiyun 	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
271*4882a593Smuzhiyun 	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
272*4882a593Smuzhiyun 	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
273*4882a593Smuzhiyun 	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	if (host->nand.options & NAND_BUSWIDTH_16)
276*4882a593Smuzhiyun 		value |= FSMC_DEVWID_16;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	writel_relaxed(value | tclr | tar, host->regs_va + FSMC_PC);
279*4882a593Smuzhiyun 	writel_relaxed(thiz | thold | twait | tset, host->regs_va + COMM);
280*4882a593Smuzhiyun 	writel_relaxed(thiz | thold | twait | tset, host->regs_va + ATTRIB);
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun static int fsmc_calc_timings(struct fsmc_nand_data *host,
284*4882a593Smuzhiyun 			     const struct nand_sdr_timings *sdrt,
285*4882a593Smuzhiyun 			     struct fsmc_nand_timings *tims)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun 	unsigned long hclk = clk_get_rate(host->clk);
288*4882a593Smuzhiyun 	unsigned long hclkn = NSEC_PER_SEC / hclk;
289*4882a593Smuzhiyun 	u32 thiz, thold, twait, tset, twait_min;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	if (sdrt->tRC_min < 30000)
292*4882a593Smuzhiyun 		return -EOPNOTSUPP;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1;
295*4882a593Smuzhiyun 	if (tims->tar > FSMC_TAR_MASK)
296*4882a593Smuzhiyun 		tims->tar = FSMC_TAR_MASK;
297*4882a593Smuzhiyun 	tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1;
298*4882a593Smuzhiyun 	if (tims->tclr > FSMC_TCLR_MASK)
299*4882a593Smuzhiyun 		tims->tclr = FSMC_TCLR_MASK;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	thiz = sdrt->tCS_min - sdrt->tWP_min;
302*4882a593Smuzhiyun 	tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn);
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	thold = sdrt->tDH_min;
305*4882a593Smuzhiyun 	if (thold < sdrt->tCH_min)
306*4882a593Smuzhiyun 		thold = sdrt->tCH_min;
307*4882a593Smuzhiyun 	if (thold < sdrt->tCLH_min)
308*4882a593Smuzhiyun 		thold = sdrt->tCLH_min;
309*4882a593Smuzhiyun 	if (thold < sdrt->tWH_min)
310*4882a593Smuzhiyun 		thold = sdrt->tWH_min;
311*4882a593Smuzhiyun 	if (thold < sdrt->tALH_min)
312*4882a593Smuzhiyun 		thold = sdrt->tALH_min;
313*4882a593Smuzhiyun 	if (thold < sdrt->tREH_min)
314*4882a593Smuzhiyun 		thold = sdrt->tREH_min;
315*4882a593Smuzhiyun 	tims->thold = DIV_ROUND_UP(thold / 1000, hclkn);
316*4882a593Smuzhiyun 	if (tims->thold == 0)
317*4882a593Smuzhiyun 		tims->thold = 1;
318*4882a593Smuzhiyun 	else if (tims->thold > FSMC_THOLD_MASK)
319*4882a593Smuzhiyun 		tims->thold = FSMC_THOLD_MASK;
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	tset = max(sdrt->tCS_min - sdrt->tWP_min,
322*4882a593Smuzhiyun 		   sdrt->tCEA_max - sdrt->tREA_max);
323*4882a593Smuzhiyun 	tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1;
324*4882a593Smuzhiyun 	if (tims->tset == 0)
325*4882a593Smuzhiyun 		tims->tset = 1;
326*4882a593Smuzhiyun 	else if (tims->tset > FSMC_TSET_MASK)
327*4882a593Smuzhiyun 		tims->tset = FSMC_TSET_MASK;
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	/*
330*4882a593Smuzhiyun 	 * According to SPEAr300 Reference Manual (RM0082) which gives more
331*4882a593Smuzhiyun 	 * information related to FSMC timings than the SPEAr600 one (RM0305),
332*4882a593Smuzhiyun 	 *   twait >= tCEA - (tset * TCLK) + TOUTDEL + TINDEL
333*4882a593Smuzhiyun 	 */
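	/*
	 * Purely illustrative numbers (assumptions, not taken from a
	 * datasheet): with hclk = 166 MHz (hclkn = 6 ns), tCEA_max = 100000 ps
	 * and tset = 1, twait_min = 100000 - 12000 + 7000 + 5000 = 100000 ps.
	 * Provided tRP_min and tWP_min do not exceed this, the result is
	 * tims->twait = DIV_ROUND_UP(100, 6) - 1 = 16.
	 */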
334*4882a593Smuzhiyun 	twait_min = sdrt->tCEA_max - ((tims->tset + 1) * hclkn * 1000)
335*4882a593Smuzhiyun 		    + TOUTDEL + TINDEL;
336*4882a593Smuzhiyun 	twait = max3(sdrt->tRP_min, sdrt->tWP_min, twait_min);
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1;
339*4882a593Smuzhiyun 	if (tims->twait == 0)
340*4882a593Smuzhiyun 		tims->twait = 1;
341*4882a593Smuzhiyun 	else if (tims->twait > FSMC_TWAIT_MASK)
342*4882a593Smuzhiyun 		tims->twait = FSMC_TWAIT_MASK;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	return 0;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun static int fsmc_setup_interface(struct nand_chip *nand, int csline,
348*4882a593Smuzhiyun 				const struct nand_interface_config *conf)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(nand);
351*4882a593Smuzhiyun 	struct fsmc_nand_timings tims;
352*4882a593Smuzhiyun 	const struct nand_sdr_timings *sdrt;
353*4882a593Smuzhiyun 	int ret;
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	sdrt = nand_get_sdr_timings(conf);
356*4882a593Smuzhiyun 	if (IS_ERR(sdrt))
357*4882a593Smuzhiyun 		return PTR_ERR(sdrt);
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	ret = fsmc_calc_timings(host, sdrt, &tims);
360*4882a593Smuzhiyun 	if (ret)
361*4882a593Smuzhiyun 		return ret;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
364*4882a593Smuzhiyun 		return 0;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	fsmc_nand_setup(host, &tims);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	return 0;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun /*
372*4882a593Smuzhiyun  * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
373*4882a593Smuzhiyun  */
374*4882a593Smuzhiyun static void fsmc_enable_hwecc(struct nand_chip *chip, int mode)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(chip);
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCPLEN_256,
379*4882a593Smuzhiyun 		       host->regs_va + FSMC_PC);
380*4882a593Smuzhiyun 	writel_relaxed(readl(host->regs_va + FSMC_PC) & ~FSMC_ECCEN,
381*4882a593Smuzhiyun 		       host->regs_va + FSMC_PC);
382*4882a593Smuzhiyun 	writel_relaxed(readl(host->regs_va + FSMC_PC) | FSMC_ECCEN,
383*4882a593Smuzhiyun 		       host->regs_va + FSMC_PC);
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun /*
387*4882a593Smuzhiyun  * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for ecc4 option supported by
388*4882a593Smuzhiyun  * FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction of up
389*4882a593Smuzhiyun  * to a maximum of 8 bits)
390*4882a593Smuzhiyun  */
391*4882a593Smuzhiyun static int fsmc_read_hwecc_ecc4(struct nand_chip *chip, const u8 *data,
392*4882a593Smuzhiyun 				u8 *ecc)
393*4882a593Smuzhiyun {
394*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(chip);
395*4882a593Smuzhiyun 	u32 ecc_tmp;
396*4882a593Smuzhiyun 	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	do {
399*4882a593Smuzhiyun 		if (readl_relaxed(host->regs_va + STS) & FSMC_CODE_RDY)
400*4882a593Smuzhiyun 			break;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 		cond_resched();
403*4882a593Smuzhiyun 	} while (!time_after_eq(jiffies, deadline));
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	if (time_after_eq(jiffies, deadline)) {
406*4882a593Smuzhiyun 		dev_err(host->dev, "calculate ecc timed out\n");
407*4882a593Smuzhiyun 		return -ETIMEDOUT;
408*4882a593Smuzhiyun 	}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
411*4882a593Smuzhiyun 	ecc[0] = ecc_tmp;
412*4882a593Smuzhiyun 	ecc[1] = ecc_tmp >> 8;
413*4882a593Smuzhiyun 	ecc[2] = ecc_tmp >> 16;
414*4882a593Smuzhiyun 	ecc[3] = ecc_tmp >> 24;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	ecc_tmp = readl_relaxed(host->regs_va + ECC2);
417*4882a593Smuzhiyun 	ecc[4] = ecc_tmp;
418*4882a593Smuzhiyun 	ecc[5] = ecc_tmp >> 8;
419*4882a593Smuzhiyun 	ecc[6] = ecc_tmp >> 16;
420*4882a593Smuzhiyun 	ecc[7] = ecc_tmp >> 24;
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	ecc_tmp = readl_relaxed(host->regs_va + ECC3);
423*4882a593Smuzhiyun 	ecc[8] = ecc_tmp;
424*4882a593Smuzhiyun 	ecc[9] = ecc_tmp >> 8;
425*4882a593Smuzhiyun 	ecc[10] = ecc_tmp >> 16;
426*4882a593Smuzhiyun 	ecc[11] = ecc_tmp >> 24;
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	ecc_tmp = readl_relaxed(host->regs_va + STS);
429*4882a593Smuzhiyun 	ecc[12] = ecc_tmp >> 16;
430*4882a593Smuzhiyun 
431*4882a593Smuzhiyun 	return 0;
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun /*
435*4882a593Smuzhiyun  * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for ecc1 option supported by
436*4882a593Smuzhiyun  * FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction of up
437*4882a593Smuzhiyun  * to a maximum of 1 bit)
438*4882a593Smuzhiyun  */
439*4882a593Smuzhiyun static int fsmc_read_hwecc_ecc1(struct nand_chip *chip, const u8 *data,
440*4882a593Smuzhiyun 				u8 *ecc)
441*4882a593Smuzhiyun {
442*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(chip);
443*4882a593Smuzhiyun 	u32 ecc_tmp;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	ecc_tmp = readl_relaxed(host->regs_va + ECC1);
446*4882a593Smuzhiyun 	ecc[0] = ecc_tmp;
447*4882a593Smuzhiyun 	ecc[1] = ecc_tmp >> 8;
448*4882a593Smuzhiyun 	ecc[2] = ecc_tmp >> 16;
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	return 0;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun /* Count the number of 0 bits in buff, up to a maximum of max_bits */
454*4882a593Smuzhiyun static int count_written_bits(u8 *buff, int size, int max_bits)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun 	int k, written_bits = 0;
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun 	for (k = 0; k < size; k++) {
459*4882a593Smuzhiyun 		written_bits += hweight8(~buff[k]);
460*4882a593Smuzhiyun 		if (written_bits > max_bits)
461*4882a593Smuzhiyun 			break;
462*4882a593Smuzhiyun 	}
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun 	return written_bits;
465*4882a593Smuzhiyun }
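/*
 * For a fully erased (all 0xFF) buffer this returns 0; the early break above
 * merely stops scanning once max_bits has already been exceeded.
 */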
466*4882a593Smuzhiyun 
467*4882a593Smuzhiyun static void dma_complete(void *param)
468*4882a593Smuzhiyun {
469*4882a593Smuzhiyun 	struct fsmc_nand_data *host = param;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	complete(&host->dma_access_complete);
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
475*4882a593Smuzhiyun 		    enum dma_data_direction direction)
476*4882a593Smuzhiyun {
477*4882a593Smuzhiyun 	struct dma_chan *chan;
478*4882a593Smuzhiyun 	struct dma_device *dma_dev;
479*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *tx;
480*4882a593Smuzhiyun 	dma_addr_t dma_dst, dma_src, dma_addr;
481*4882a593Smuzhiyun 	dma_cookie_t cookie;
482*4882a593Smuzhiyun 	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
483*4882a593Smuzhiyun 	int ret;
484*4882a593Smuzhiyun 	unsigned long time_left;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	if (direction == DMA_TO_DEVICE)
487*4882a593Smuzhiyun 		chan = host->write_dma_chan;
488*4882a593Smuzhiyun 	else if (direction == DMA_FROM_DEVICE)
489*4882a593Smuzhiyun 		chan = host->read_dma_chan;
490*4882a593Smuzhiyun 	else
491*4882a593Smuzhiyun 		return -EINVAL;
492*4882a593Smuzhiyun 
493*4882a593Smuzhiyun 	dma_dev = chan->device;
494*4882a593Smuzhiyun 	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	if (direction == DMA_TO_DEVICE) {
497*4882a593Smuzhiyun 		dma_src = dma_addr;
498*4882a593Smuzhiyun 		dma_dst = host->data_pa;
499*4882a593Smuzhiyun 	} else {
500*4882a593Smuzhiyun 		dma_src = host->data_pa;
501*4882a593Smuzhiyun 		dma_dst = dma_addr;
502*4882a593Smuzhiyun 	}
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
505*4882a593Smuzhiyun 			len, flags);
506*4882a593Smuzhiyun 	if (!tx) {
507*4882a593Smuzhiyun 		dev_err(host->dev, "device_prep_dma_memcpy error\n");
508*4882a593Smuzhiyun 		ret = -EIO;
509*4882a593Smuzhiyun 		goto unmap_dma;
510*4882a593Smuzhiyun 	}
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	tx->callback = dma_complete;
513*4882a593Smuzhiyun 	tx->callback_param = host;
514*4882a593Smuzhiyun 	cookie = tx->tx_submit(tx);
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	ret = dma_submit_error(cookie);
517*4882a593Smuzhiyun 	if (ret) {
518*4882a593Smuzhiyun 		dev_err(host->dev, "dma_submit_error %d\n", cookie);
519*4882a593Smuzhiyun 		goto unmap_dma;
520*4882a593Smuzhiyun 	}
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	dma_async_issue_pending(chan);
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	time_left =
525*4882a593Smuzhiyun 	wait_for_completion_timeout(&host->dma_access_complete,
526*4882a593Smuzhiyun 				    msecs_to_jiffies(3000));
527*4882a593Smuzhiyun 	if (time_left == 0) {
528*4882a593Smuzhiyun 		dmaengine_terminate_all(chan);
529*4882a593Smuzhiyun 		dev_err(host->dev, "wait_for_completion_timeout\n");
530*4882a593Smuzhiyun 		ret = -ETIMEDOUT;
531*4882a593Smuzhiyun 		goto unmap_dma;
532*4882a593Smuzhiyun 	}
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	ret = 0;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun unmap_dma:
537*4882a593Smuzhiyun 	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	return ret;
540*4882a593Smuzhiyun }
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun /*
543*4882a593Smuzhiyun  * fsmc_write_buf - write buffer to chip
544*4882a593Smuzhiyun  * @host:	FSMC NAND controller
545*4882a593Smuzhiyun  * @buf:	data buffer
546*4882a593Smuzhiyun  * @len:	number of bytes to write
547*4882a593Smuzhiyun  */
548*4882a593Smuzhiyun static void fsmc_write_buf(struct fsmc_nand_data *host, const u8 *buf,
549*4882a593Smuzhiyun 			   int len)
550*4882a593Smuzhiyun {
551*4882a593Smuzhiyun 	int i;
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
554*4882a593Smuzhiyun 	    IS_ALIGNED(len, sizeof(u32))) {
555*4882a593Smuzhiyun 		u32 *p = (u32 *)buf;
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 		len = len >> 2;
558*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
559*4882a593Smuzhiyun 			writel_relaxed(p[i], host->data_va);
560*4882a593Smuzhiyun 	} else {
561*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
562*4882a593Smuzhiyun 			writeb_relaxed(buf[i], host->data_va);
563*4882a593Smuzhiyun 	}
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun /*
567*4882a593Smuzhiyun  * fsmc_read_buf - read chip data into buffer
568*4882a593Smuzhiyun  * @host:	FSMC NAND controller
569*4882a593Smuzhiyun  * @buf:	buffer to store data
570*4882a593Smuzhiyun  * @len:	number of bytes to read
571*4882a593Smuzhiyun  */
572*4882a593Smuzhiyun static void fsmc_read_buf(struct fsmc_nand_data *host, u8 *buf, int len)
573*4882a593Smuzhiyun {
574*4882a593Smuzhiyun 	int i;
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	if (IS_ALIGNED((uintptr_t)buf, sizeof(u32)) &&
577*4882a593Smuzhiyun 	    IS_ALIGNED(len, sizeof(u32))) {
578*4882a593Smuzhiyun 		u32 *p = (u32 *)buf;
579*4882a593Smuzhiyun 
580*4882a593Smuzhiyun 		len = len >> 2;
581*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
582*4882a593Smuzhiyun 			p[i] = readl_relaxed(host->data_va);
583*4882a593Smuzhiyun 	} else {
584*4882a593Smuzhiyun 		for (i = 0; i < len; i++)
585*4882a593Smuzhiyun 			buf[i] = readb_relaxed(host->data_va);
586*4882a593Smuzhiyun 	}
587*4882a593Smuzhiyun }
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun /*
590*4882a593Smuzhiyun  * fsmc_read_buf_dma - read chip data into buffer
591*4882a593Smuzhiyun  * @host:	FSMC NAND controller
592*4882a593Smuzhiyun  * @buf:	buffer to store data
593*4882a593Smuzhiyun  * @len:	number of bytes to read
594*4882a593Smuzhiyun  */
595*4882a593Smuzhiyun static void fsmc_read_buf_dma(struct fsmc_nand_data *host, u8 *buf,
596*4882a593Smuzhiyun 			      int len)
597*4882a593Smuzhiyun {
598*4882a593Smuzhiyun 	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
599*4882a593Smuzhiyun }
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun /*
602*4882a593Smuzhiyun  * fsmc_write_buf_dma - write buffer to chip
603*4882a593Smuzhiyun  * @host:	FSMC NAND controller
604*4882a593Smuzhiyun  * @buf:	data buffer
605*4882a593Smuzhiyun  * @len:	number of bytes to write
606*4882a593Smuzhiyun  */
607*4882a593Smuzhiyun static void fsmc_write_buf_dma(struct fsmc_nand_data *host, const u8 *buf,
608*4882a593Smuzhiyun 			       int len)
609*4882a593Smuzhiyun {
610*4882a593Smuzhiyun 	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
611*4882a593Smuzhiyun }
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun /*
614*4882a593Smuzhiyun  * fsmc_exec_op - hook called by the core to execute NAND operations
615*4882a593Smuzhiyun  *
616*4882a593Smuzhiyun  * This controller is simple enough that it does not need the instruction
617*4882a593Smuzhiyun  * parser provided by the core; instead, every situation is handled here.
618*4882a593Smuzhiyun  */
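/*
 * For example, a page read built by the NAND core typically reaches this hook
 * as CMD -> ADDR -> CMD -> WAITRDY -> DATA_IN instructions: command opcodes
 * are written to cmd_va, address cycles to addr_va, and the payload is read
 * from data_va (optionally via DMA), as handled case by case below.
 */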
619*4882a593Smuzhiyun static int fsmc_exec_op(struct nand_chip *chip, const struct nand_operation *op,
620*4882a593Smuzhiyun 			bool check_only)
621*4882a593Smuzhiyun {
622*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(chip);
623*4882a593Smuzhiyun 	const struct nand_op_instr *instr = NULL;
624*4882a593Smuzhiyun 	int ret = 0;
625*4882a593Smuzhiyun 	unsigned int op_id;
626*4882a593Smuzhiyun 	int i;
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	if (check_only)
629*4882a593Smuzhiyun 		return 0;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	pr_debug("Executing operation [%d instructions]:\n", op->ninstrs);
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 	for (op_id = 0; op_id < op->ninstrs; op_id++) {
634*4882a593Smuzhiyun 		instr = &op->instrs[op_id];
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 		nand_op_trace("  ", instr);
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 		switch (instr->type) {
639*4882a593Smuzhiyun 		case NAND_OP_CMD_INSTR:
640*4882a593Smuzhiyun 			writeb_relaxed(instr->ctx.cmd.opcode, host->cmd_va);
641*4882a593Smuzhiyun 			break;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 		case NAND_OP_ADDR_INSTR:
644*4882a593Smuzhiyun 			for (i = 0; i < instr->ctx.addr.naddrs; i++)
645*4882a593Smuzhiyun 				writeb_relaxed(instr->ctx.addr.addrs[i],
646*4882a593Smuzhiyun 					       host->addr_va);
647*4882a593Smuzhiyun 			break;
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 		case NAND_OP_DATA_IN_INSTR:
650*4882a593Smuzhiyun 			if (host->mode == USE_DMA_ACCESS)
651*4882a593Smuzhiyun 				fsmc_read_buf_dma(host, instr->ctx.data.buf.in,
652*4882a593Smuzhiyun 						  instr->ctx.data.len);
653*4882a593Smuzhiyun 			else
654*4882a593Smuzhiyun 				fsmc_read_buf(host, instr->ctx.data.buf.in,
655*4882a593Smuzhiyun 					      instr->ctx.data.len);
656*4882a593Smuzhiyun 			break;
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 		case NAND_OP_DATA_OUT_INSTR:
659*4882a593Smuzhiyun 			if (host->mode == USE_DMA_ACCESS)
660*4882a593Smuzhiyun 				fsmc_write_buf_dma(host,
661*4882a593Smuzhiyun 						   instr->ctx.data.buf.out,
662*4882a593Smuzhiyun 						   instr->ctx.data.len);
663*4882a593Smuzhiyun 			else
664*4882a593Smuzhiyun 				fsmc_write_buf(host, instr->ctx.data.buf.out,
665*4882a593Smuzhiyun 					       instr->ctx.data.len);
666*4882a593Smuzhiyun 			break;
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 		case NAND_OP_WAITRDY_INSTR:
669*4882a593Smuzhiyun 			ret = nand_soft_waitrdy(chip,
670*4882a593Smuzhiyun 						instr->ctx.waitrdy.timeout_ms);
671*4882a593Smuzhiyun 			break;
672*4882a593Smuzhiyun 		}
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 		if (instr->delay_ns)
675*4882a593Smuzhiyun 			ndelay(instr->delay_ns);
676*4882a593Smuzhiyun 	}
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun 	return ret;
679*4882a593Smuzhiyun }
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun /*
682*4882a593Smuzhiyun  * fsmc_read_page_hwecc
683*4882a593Smuzhiyun  * @chip:	nand chip info structure
684*4882a593Smuzhiyun  * @buf:	buffer to store read data
685*4882a593Smuzhiyun  * @oob_required:	caller expects OOB data read to chip->oob_poi
686*4882a593Smuzhiyun  * @page:	page number to read
687*4882a593Smuzhiyun  *
688*4882a593Smuzhiyun  * This routine is needed for fsmc version 8 as reading from the NAND chip has
689*4882a593Smuzhiyun  * to be performed in a strict sequence as follows:
690*4882a593Smuzhiyun  * data (512 bytes) -> ecc (13 bytes)
691*4882a593Smuzhiyun  * After this read, the fsmc hardware generates and reports the erroneous data
692*4882a593Smuzhiyun  * bits (up to a maximum of 8 bits)
693*4882a593Smuzhiyun  */
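/*
 * Sketch of the per-step flow implemented below: position the read at column
 * s * eccsize, enable the hardware ECC engine via chip->ecc.hwctl(), read the
 * 512 data bytes, fetch the matching ECC bytes from the OOB regions, then let
 * chip->ecc.correct() fix up to 8 bitflips per step.
 */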
694*4882a593Smuzhiyun static int fsmc_read_page_hwecc(struct nand_chip *chip, u8 *buf,
695*4882a593Smuzhiyun 				int oob_required, int page)
696*4882a593Smuzhiyun {
697*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
698*4882a593Smuzhiyun 	int i, j, s, stat, eccsize = chip->ecc.size;
699*4882a593Smuzhiyun 	int eccbytes = chip->ecc.bytes;
700*4882a593Smuzhiyun 	int eccsteps = chip->ecc.steps;
701*4882a593Smuzhiyun 	u8 *p = buf;
702*4882a593Smuzhiyun 	u8 *ecc_calc = chip->ecc.calc_buf;
703*4882a593Smuzhiyun 	u8 *ecc_code = chip->ecc.code_buf;
704*4882a593Smuzhiyun 	int off, len, ret, group = 0;
705*4882a593Smuzhiyun 	/*
706*4882a593Smuzhiyun 	 * ecc_oob is intentionally declared as u16. On 16-bit devices, we
707*4882a593Smuzhiyun 	 * end up reading 14 bytes (7 words) from the OOB. The local array
708*4882a593Smuzhiyun 	 * maintains word alignment.
709*4882a593Smuzhiyun 	 */
710*4882a593Smuzhiyun 	u16 ecc_oob[7];
711*4882a593Smuzhiyun 	u8 *oob = (u8 *)&ecc_oob[0];
712*4882a593Smuzhiyun 	unsigned int max_bitflips = 0;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
715*4882a593Smuzhiyun 		nand_read_page_op(chip, page, s * eccsize, NULL, 0);
716*4882a593Smuzhiyun 		chip->ecc.hwctl(chip, NAND_ECC_READ);
717*4882a593Smuzhiyun 		ret = nand_read_data_op(chip, p, eccsize, false, false);
718*4882a593Smuzhiyun 		if (ret)
719*4882a593Smuzhiyun 			return ret;
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 		for (j = 0; j < eccbytes;) {
722*4882a593Smuzhiyun 			struct mtd_oob_region oobregion;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
725*4882a593Smuzhiyun 			if (ret)
726*4882a593Smuzhiyun 				return ret;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 			off = oobregion.offset;
729*4882a593Smuzhiyun 			len = oobregion.length;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 			/*
732*4882a593Smuzhiyun 			 * The length is intentionally rounded up to a multiple
733*4882a593Smuzhiyun 			 * of 2 so that at least 13 bytes are read even on
734*4882a593Smuzhiyun 			 * 16-bit NAND devices.
735*4882a593Smuzhiyun 			 */
736*4882a593Smuzhiyun 			if (chip->options & NAND_BUSWIDTH_16)
737*4882a593Smuzhiyun 				len = roundup(len, 2);
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 			nand_read_oob_op(chip, page, off, oob + j, len);
740*4882a593Smuzhiyun 			j += len;
741*4882a593Smuzhiyun 		}
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
744*4882a593Smuzhiyun 		chip->ecc.calculate(chip, p, &ecc_calc[i]);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
747*4882a593Smuzhiyun 		if (stat < 0) {
748*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
749*4882a593Smuzhiyun 		} else {
750*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += stat;
751*4882a593Smuzhiyun 			max_bitflips = max_t(unsigned int, max_bitflips, stat);
752*4882a593Smuzhiyun 		}
753*4882a593Smuzhiyun 	}
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 	return max_bitflips;
756*4882a593Smuzhiyun }
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun /*
759*4882a593Smuzhiyun  * fsmc_bch8_correct_data
760*4882a593Smuzhiyun  * @chip:	nand chip info structure
761*4882a593Smuzhiyun  * @dat:	buffer of read data
762*4882a593Smuzhiyun  * @read_ecc:	ecc read from device spare area
763*4882a593Smuzhiyun  * @calc_ecc:	ecc calculated from read data
764*4882a593Smuzhiyun  *
765*4882a593Smuzhiyun  * calc_ecc is 104 bits of information containing up to 8 error offsets of
766*4882a593Smuzhiyun  * 13 bits each within the 512 bytes of read data.
767*4882a593Smuzhiyun  */
768*4882a593Smuzhiyun static int fsmc_bch8_correct_data(struct nand_chip *chip, u8 *dat,
769*4882a593Smuzhiyun 				  u8 *read_ecc, u8 *calc_ecc)
770*4882a593Smuzhiyun {
771*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(chip);
772*4882a593Smuzhiyun 	u32 err_idx[8];
773*4882a593Smuzhiyun 	u32 num_err, i;
774*4882a593Smuzhiyun 	u32 ecc1, ecc2, ecc3, ecc4;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	num_err = (readl_relaxed(host->regs_va + STS) >> 10) & 0xF;
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun 	/* no bit flipping */
779*4882a593Smuzhiyun 	if (likely(num_err == 0))
780*4882a593Smuzhiyun 		return 0;
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	/* too many errors */
783*4882a593Smuzhiyun 	if (unlikely(num_err > 8)) {
784*4882a593Smuzhiyun 		/*
785*4882a593Smuzhiyun 		 * This is a temporary erase check. Reading a newly erased page
786*4882a593Smuzhiyun 		 * would result in an ECC error because the OOB data is also
787*4882a593Smuzhiyun 		 * erased to 0xFF and the ECC calculated for all-0xFF data is
788*4882a593Smuzhiyun 		 * not FF..FF.
789*4882a593Smuzhiyun 		 * This is a workaround to skip performing correction in case
790*4882a593Smuzhiyun 		 * data is FF..FF
791*4882a593Smuzhiyun 		 *
792*4882a593Smuzhiyun 		 * Logic:
793*4882a593Smuzhiyun 		 * For every page, each bit written as 0 is counted until these
794*4882a593Smuzhiyun 		 * number of bits are greater than 8 (the maximum correction
795*4882a593Smuzhiyun 		 * capability of FSMC for each 512 + 13 bytes)
796*4882a593Smuzhiyun 		 */
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
799*4882a593Smuzhiyun 		int bits_data = count_written_bits(dat, chip->ecc.size, 8);
800*4882a593Smuzhiyun 
801*4882a593Smuzhiyun 		if ((bits_ecc + bits_data) <= 8) {
802*4882a593Smuzhiyun 			if (bits_data)
803*4882a593Smuzhiyun 				memset(dat, 0xff, chip->ecc.size);
804*4882a593Smuzhiyun 			return bits_data;
805*4882a593Smuzhiyun 		}
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 		return -EBADMSG;
808*4882a593Smuzhiyun 	}
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	/*
811*4882a593Smuzhiyun 	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
812*4882a593Smuzhiyun 	 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
813*4882a593Smuzhiyun 	 *
814*4882a593Smuzhiyun 	 * calc_ecc is 104 bits of information containing up to 8 error
815*4882a593Smuzhiyun 	 * offsets of 13 bits each. The ECC registers are read below and the
816*4882a593Smuzhiyun 	 * error offset indexes are populated in the err_idx array.
818*4882a593Smuzhiyun 	 */
819*4882a593Smuzhiyun 	ecc1 = readl_relaxed(host->regs_va + ECC1);
820*4882a593Smuzhiyun 	ecc2 = readl_relaxed(host->regs_va + ECC2);
821*4882a593Smuzhiyun 	ecc3 = readl_relaxed(host->regs_va + ECC3);
822*4882a593Smuzhiyun 	ecc4 = readl_relaxed(host->regs_va + STS);
823*4882a593Smuzhiyun 
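	/*
	 * The 104 syndrome bits are packed LSB-first across ECC1, ECC2, ECC3
	 * and STS[23:16]; err_idx[2], err_idx[4] and err_idx[7] therefore have
	 * to be stitched together from two adjacent registers below.
	 */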
824*4882a593Smuzhiyun 	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
825*4882a593Smuzhiyun 	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
826*4882a593Smuzhiyun 	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
827*4882a593Smuzhiyun 	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
828*4882a593Smuzhiyun 	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
829*4882a593Smuzhiyun 	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
830*4882a593Smuzhiyun 	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
831*4882a593Smuzhiyun 	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 	i = 0;
834*4882a593Smuzhiyun 	while (num_err--) {
835*4882a593Smuzhiyun 		err_idx[i] ^= 3;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 		if (err_idx[i] < chip->ecc.size * 8) {
838*4882a593Smuzhiyun 			int err = err_idx[i];
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 			dat[err >> 3] ^= BIT(err & 7);
841*4882a593Smuzhiyun 			i++;
842*4882a593Smuzhiyun 		}
843*4882a593Smuzhiyun 	}
844*4882a593Smuzhiyun 	return i;
845*4882a593Smuzhiyun }
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun static bool filter(struct dma_chan *chan, void *slave)
848*4882a593Smuzhiyun {
849*4882a593Smuzhiyun 	chan->private = slave;
850*4882a593Smuzhiyun 	return true;
851*4882a593Smuzhiyun }
852*4882a593Smuzhiyun 
853*4882a593Smuzhiyun static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
854*4882a593Smuzhiyun 				     struct fsmc_nand_data *host,
855*4882a593Smuzhiyun 				     struct nand_chip *nand)
856*4882a593Smuzhiyun {
857*4882a593Smuzhiyun 	struct device_node *np = pdev->dev.of_node;
858*4882a593Smuzhiyun 	u32 val;
859*4882a593Smuzhiyun 	int ret;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	nand->options = 0;
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	if (!of_property_read_u32(np, "bank-width", &val)) {
864*4882a593Smuzhiyun 		if (val == 2) {
865*4882a593Smuzhiyun 			nand->options |= NAND_BUSWIDTH_16;
866*4882a593Smuzhiyun 		} else if (val != 1) {
867*4882a593Smuzhiyun 			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
868*4882a593Smuzhiyun 			return -EINVAL;
869*4882a593Smuzhiyun 		}
870*4882a593Smuzhiyun 	}
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	if (of_get_property(np, "nand-skip-bbtscan", NULL))
873*4882a593Smuzhiyun 		nand->options |= NAND_SKIP_BBTSCAN;
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	host->dev_timings = devm_kzalloc(&pdev->dev,
876*4882a593Smuzhiyun 					 sizeof(*host->dev_timings),
877*4882a593Smuzhiyun 					 GFP_KERNEL);
878*4882a593Smuzhiyun 	if (!host->dev_timings)
879*4882a593Smuzhiyun 		return -ENOMEM;
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings,
882*4882a593Smuzhiyun 					sizeof(*host->dev_timings));
883*4882a593Smuzhiyun 	if (ret)
884*4882a593Smuzhiyun 		host->dev_timings = NULL;
885*4882a593Smuzhiyun 
886*4882a593Smuzhiyun 	/* Set default NAND bank to 0 */
887*4882a593Smuzhiyun 	host->bank = 0;
888*4882a593Smuzhiyun 	if (!of_property_read_u32(np, "bank", &val)) {
889*4882a593Smuzhiyun 		if (val > 3) {
890*4882a593Smuzhiyun 			dev_err(&pdev->dev, "invalid bank %u\n", val);
891*4882a593Smuzhiyun 			return -EINVAL;
892*4882a593Smuzhiyun 		}
893*4882a593Smuzhiyun 		host->bank = val;
894*4882a593Smuzhiyun 	}
895*4882a593Smuzhiyun 	return 0;
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun static int fsmc_nand_attach_chip(struct nand_chip *nand)
899*4882a593Smuzhiyun {
900*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(nand);
901*4882a593Smuzhiyun 	struct fsmc_nand_data *host = nand_to_fsmc(nand);
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
904*4882a593Smuzhiyun 		nand->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	if (!nand->ecc.size)
907*4882a593Smuzhiyun 		nand->ecc.size = 512;
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 	if (AMBA_REV_BITS(host->pid) >= 8) {
910*4882a593Smuzhiyun 		nand->ecc.read_page = fsmc_read_page_hwecc;
911*4882a593Smuzhiyun 		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
912*4882a593Smuzhiyun 		nand->ecc.correct = fsmc_bch8_correct_data;
913*4882a593Smuzhiyun 		nand->ecc.bytes = 13;
914*4882a593Smuzhiyun 		nand->ecc.strength = 8;
915*4882a593Smuzhiyun 	}
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun 	if (AMBA_REV_BITS(host->pid) >= 8) {
918*4882a593Smuzhiyun 		switch (mtd->oobsize) {
919*4882a593Smuzhiyun 		case 16:
920*4882a593Smuzhiyun 		case 64:
921*4882a593Smuzhiyun 		case 128:
922*4882a593Smuzhiyun 		case 224:
923*4882a593Smuzhiyun 		case 256:
924*4882a593Smuzhiyun 			break;
925*4882a593Smuzhiyun 		default:
926*4882a593Smuzhiyun 			dev_warn(host->dev,
927*4882a593Smuzhiyun 				 "No oob scheme defined for oobsize %d\n",
928*4882a593Smuzhiyun 				 mtd->oobsize);
929*4882a593Smuzhiyun 			return -EINVAL;
930*4882a593Smuzhiyun 		}
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 		return 0;
935*4882a593Smuzhiyun 	}
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	switch (nand->ecc.engine_type) {
938*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_ON_HOST:
939*4882a593Smuzhiyun 		dev_info(host->dev, "Using 1-bit HW ECC scheme\n");
940*4882a593Smuzhiyun 		nand->ecc.calculate = fsmc_read_hwecc_ecc1;
941*4882a593Smuzhiyun 		nand->ecc.correct = nand_correct_data;
942*4882a593Smuzhiyun 		nand->ecc.hwctl = fsmc_enable_hwecc;
943*4882a593Smuzhiyun 		nand->ecc.bytes = 3;
944*4882a593Smuzhiyun 		nand->ecc.strength = 1;
945*4882a593Smuzhiyun 		nand->ecc.options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
946*4882a593Smuzhiyun 		break;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_SOFT:
949*4882a593Smuzhiyun 		if (nand->ecc.algo == NAND_ECC_ALGO_BCH) {
950*4882a593Smuzhiyun 			dev_info(host->dev,
951*4882a593Smuzhiyun 				 "Using 4-bit SW BCH ECC scheme\n");
952*4882a593Smuzhiyun 			break;
953*4882a593Smuzhiyun 		}
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_ON_DIE:
956*4882a593Smuzhiyun 		break;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	default:
959*4882a593Smuzhiyun 		dev_err(host->dev, "Unsupported ECC mode!\n");
960*4882a593Smuzhiyun 		return -ENOTSUPP;
961*4882a593Smuzhiyun 	}
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun 	/*
964*4882a593Smuzhiyun 	 * Don't set the layout for BCH4 SW ECC. It will be
965*4882a593Smuzhiyun 	 * generated later in nand_bch_init().
966*4882a593Smuzhiyun 	 */
967*4882a593Smuzhiyun 	if (nand->ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST) {
968*4882a593Smuzhiyun 		switch (mtd->oobsize) {
969*4882a593Smuzhiyun 		case 16:
970*4882a593Smuzhiyun 		case 64:
971*4882a593Smuzhiyun 		case 128:
972*4882a593Smuzhiyun 			mtd_set_ooblayout(mtd,
973*4882a593Smuzhiyun 					  &fsmc_ecc1_ooblayout_ops);
974*4882a593Smuzhiyun 			break;
975*4882a593Smuzhiyun 		default:
976*4882a593Smuzhiyun 			dev_warn(host->dev,
977*4882a593Smuzhiyun 				 "No oob scheme defined for oobsize %d\n",
978*4882a593Smuzhiyun 				 mtd->oobsize);
979*4882a593Smuzhiyun 			return -EINVAL;
980*4882a593Smuzhiyun 		}
981*4882a593Smuzhiyun 	}
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	return 0;
984*4882a593Smuzhiyun }
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun static const struct nand_controller_ops fsmc_nand_controller_ops = {
987*4882a593Smuzhiyun 	.attach_chip = fsmc_nand_attach_chip,
988*4882a593Smuzhiyun 	.exec_op = fsmc_exec_op,
989*4882a593Smuzhiyun 	.setup_interface = fsmc_setup_interface,
990*4882a593Smuzhiyun };
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun /**
993*4882a593Smuzhiyun  * fsmc_nand_disable() - Disables the NAND bank
994*4882a593Smuzhiyun  * @host: The instance to disable
995*4882a593Smuzhiyun  */
996*4882a593Smuzhiyun static void fsmc_nand_disable(struct fsmc_nand_data *host)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun 	u32 val;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	val = readl(host->regs_va + FSMC_PC);
1001*4882a593Smuzhiyun 	val &= ~FSMC_ENABLE;
1002*4882a593Smuzhiyun 	writel(val, host->regs_va + FSMC_PC);
1003*4882a593Smuzhiyun }
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun /*
1006*4882a593Smuzhiyun  * fsmc_nand_probe - Probe function
1007*4882a593Smuzhiyun  * @pdev:       platform device structure
1008*4882a593Smuzhiyun  */
1009*4882a593Smuzhiyun static int __init fsmc_nand_probe(struct platform_device *pdev)
1010*4882a593Smuzhiyun {
1011*4882a593Smuzhiyun 	struct fsmc_nand_data *host;
1012*4882a593Smuzhiyun 	struct mtd_info *mtd;
1013*4882a593Smuzhiyun 	struct nand_chip *nand;
1014*4882a593Smuzhiyun 	struct resource *res;
1015*4882a593Smuzhiyun 	void __iomem *base;
1016*4882a593Smuzhiyun 	dma_cap_mask_t mask;
1017*4882a593Smuzhiyun 	int ret = 0;
1018*4882a593Smuzhiyun 	u32 pid;
1019*4882a593Smuzhiyun 	int i;
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	/* Allocate memory for the device structure (and zero it) */
1022*4882a593Smuzhiyun 	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
1023*4882a593Smuzhiyun 	if (!host)
1024*4882a593Smuzhiyun 		return -ENOMEM;
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	nand = &host->nand;
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	ret = fsmc_nand_probe_config_dt(pdev, host, nand);
1029*4882a593Smuzhiyun 	if (ret)
1030*4882a593Smuzhiyun 		return ret;
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
1033*4882a593Smuzhiyun 	host->data_va = devm_ioremap_resource(&pdev->dev, res);
1034*4882a593Smuzhiyun 	if (IS_ERR(host->data_va))
1035*4882a593Smuzhiyun 		return PTR_ERR(host->data_va);
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun 	host->data_pa = (dma_addr_t)res->start;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
1040*4882a593Smuzhiyun 	host->addr_va = devm_ioremap_resource(&pdev->dev, res);
1041*4882a593Smuzhiyun 	if (IS_ERR(host->addr_va))
1042*4882a593Smuzhiyun 		return PTR_ERR(host->addr_va);
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
1045*4882a593Smuzhiyun 	host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
1046*4882a593Smuzhiyun 	if (IS_ERR(host->cmd_va))
1047*4882a593Smuzhiyun 		return PTR_ERR(host->cmd_va);
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
1050*4882a593Smuzhiyun 	base = devm_ioremap_resource(&pdev->dev, res);
1051*4882a593Smuzhiyun 	if (IS_ERR(base))
1052*4882a593Smuzhiyun 		return PTR_ERR(base);
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	host->regs_va = base + FSMC_NOR_REG_SIZE +
1055*4882a593Smuzhiyun 		(host->bank * FSMC_NAND_BANK_SZ);
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	host->clk = devm_clk_get(&pdev->dev, NULL);
1058*4882a593Smuzhiyun 	if (IS_ERR(host->clk)) {
1059*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to fetch block clock\n");
1060*4882a593Smuzhiyun 		return PTR_ERR(host->clk);
1061*4882a593Smuzhiyun 	}
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	ret = clk_prepare_enable(host->clk);
1064*4882a593Smuzhiyun 	if (ret)
1065*4882a593Smuzhiyun 		return ret;
1066*4882a593Smuzhiyun 
1067*4882a593Smuzhiyun 	/*
1068*4882a593Smuzhiyun 	 * This device ID is actually a common AMBA ID as used on the
1069*4882a593Smuzhiyun 	 * AMBA PrimeCell bus. However it is not a PrimeCell.
1070*4882a593Smuzhiyun 	 */
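	/*
	 * The four peripheral ID bytes live in the last 0x20 bytes of the
	 * register window (offsets size - 0x20 .. size - 0x14, one byte per
	 * 32-bit word) and are assembled LSB-first into host->pid below.
	 */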
1071*4882a593Smuzhiyun 	for (pid = 0, i = 0; i < 4; i++)
1072*4882a593Smuzhiyun 		pid |= (readl(base + resource_size(res) - 0x20 + 4 * i) &
1073*4882a593Smuzhiyun 			255) << (i * 8);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 	host->pid = pid;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	dev_info(&pdev->dev,
1078*4882a593Smuzhiyun 		 "FSMC device partno %03x, manufacturer %02x, revision %02x, config %02x\n",
1079*4882a593Smuzhiyun 		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
1080*4882a593Smuzhiyun 		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	host->dev = &pdev->dev;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	if (host->mode == USE_DMA_ACCESS)
1085*4882a593Smuzhiyun 		init_completion(&host->dma_access_complete);
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	/* Link all private pointers */
1088*4882a593Smuzhiyun 	mtd = nand_to_mtd(&host->nand);
1089*4882a593Smuzhiyun 	nand_set_flash_node(nand, pdev->dev.of_node);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	mtd->dev.parent = &pdev->dev;
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	nand->badblockbits = 7;
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	if (host->mode == USE_DMA_ACCESS) {
1096*4882a593Smuzhiyun 		dma_cap_zero(mask);
1097*4882a593Smuzhiyun 		dma_cap_set(DMA_MEMCPY, mask);
1098*4882a593Smuzhiyun 		host->read_dma_chan = dma_request_channel(mask, filter, NULL);
1099*4882a593Smuzhiyun 		if (!host->read_dma_chan) {
1100*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Unable to get read dma channel\n");
1101*4882a593Smuzhiyun 			ret = -ENODEV;
1102*4882a593Smuzhiyun 			goto disable_clk;
1103*4882a593Smuzhiyun 		}
1104*4882a593Smuzhiyun 		host->write_dma_chan = dma_request_channel(mask, filter, NULL);
1105*4882a593Smuzhiyun 		if (!host->write_dma_chan) {
1106*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Unable to get write dma channel\n");
1107*4882a593Smuzhiyun 			ret = -ENODEV;
1108*4882a593Smuzhiyun 			goto release_dma_read_chan;
1109*4882a593Smuzhiyun 		}
1110*4882a593Smuzhiyun 	}
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	if (host->dev_timings) {
1113*4882a593Smuzhiyun 		fsmc_nand_setup(host, host->dev_timings);
1114*4882a593Smuzhiyun 		nand->options |= NAND_KEEP_TIMINGS;
1115*4882a593Smuzhiyun 	}
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	nand_controller_init(&host->base);
1118*4882a593Smuzhiyun 	host->base.ops = &fsmc_nand_controller_ops;
1119*4882a593Smuzhiyun 	nand->controller = &host->base;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	/*
1122*4882a593Smuzhiyun 	 * Scan to find existence of the device
1123*4882a593Smuzhiyun 	 */
1124*4882a593Smuzhiyun 	ret = nand_scan(nand, 1);
1125*4882a593Smuzhiyun 	if (ret)
1126*4882a593Smuzhiyun 		goto release_dma_write_chan;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	mtd->name = "nand";
1129*4882a593Smuzhiyun 	ret = mtd_device_register(mtd, NULL, 0);
1130*4882a593Smuzhiyun 	if (ret)
1131*4882a593Smuzhiyun 		goto cleanup_nand;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	platform_set_drvdata(pdev, host);
1134*4882a593Smuzhiyun 	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	return 0;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun cleanup_nand:
1139*4882a593Smuzhiyun 	nand_cleanup(nand);
1140*4882a593Smuzhiyun release_dma_write_chan:
1141*4882a593Smuzhiyun 	if (host->mode == USE_DMA_ACCESS)
1142*4882a593Smuzhiyun 		dma_release_channel(host->write_dma_chan);
1143*4882a593Smuzhiyun release_dma_read_chan:
1144*4882a593Smuzhiyun 	if (host->mode == USE_DMA_ACCESS)
1145*4882a593Smuzhiyun 		dma_release_channel(host->read_dma_chan);
1146*4882a593Smuzhiyun disable_clk:
1147*4882a593Smuzhiyun 	fsmc_nand_disable(host);
1148*4882a593Smuzhiyun 	clk_disable_unprepare(host->clk);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	return ret;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun /*
1154*4882a593Smuzhiyun  * Clean up routine
1155*4882a593Smuzhiyun  */
1156*4882a593Smuzhiyun static int fsmc_nand_remove(struct platform_device *pdev)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun 	struct fsmc_nand_data *host = platform_get_drvdata(pdev);
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	if (host) {
1161*4882a593Smuzhiyun 		struct nand_chip *chip = &host->nand;
1162*4882a593Smuzhiyun 		int ret;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 		ret = mtd_device_unregister(nand_to_mtd(chip));
1165*4882a593Smuzhiyun 		WARN_ON(ret);
1166*4882a593Smuzhiyun 		nand_cleanup(chip);
1167*4882a593Smuzhiyun 		fsmc_nand_disable(host);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 		if (host->mode == USE_DMA_ACCESS) {
1170*4882a593Smuzhiyun 			dma_release_channel(host->write_dma_chan);
1171*4882a593Smuzhiyun 			dma_release_channel(host->read_dma_chan);
1172*4882a593Smuzhiyun 		}
1173*4882a593Smuzhiyun 		clk_disable_unprepare(host->clk);
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return 0;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1180*4882a593Smuzhiyun static int fsmc_nand_suspend(struct device *dev)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	if (host)
1185*4882a593Smuzhiyun 		clk_disable_unprepare(host->clk);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	return 0;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun static int fsmc_nand_resume(struct device *dev)
1191*4882a593Smuzhiyun {
1192*4882a593Smuzhiyun 	struct fsmc_nand_data *host = dev_get_drvdata(dev);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	if (host) {
1195*4882a593Smuzhiyun 		clk_prepare_enable(host->clk);
1196*4882a593Smuzhiyun 		if (host->dev_timings)
1197*4882a593Smuzhiyun 			fsmc_nand_setup(host, host->dev_timings);
1198*4882a593Smuzhiyun 		nand_reset(&host->nand, 0);
1199*4882a593Smuzhiyun 	}
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	return 0;
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun #endif
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun static const struct of_device_id fsmc_nand_id_table[] = {
1208*4882a593Smuzhiyun 	{ .compatible = "st,spear600-fsmc-nand" },
1209*4882a593Smuzhiyun 	{ .compatible = "stericsson,fsmc-nand" },
1210*4882a593Smuzhiyun 	{}
1211*4882a593Smuzhiyun };
1212*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun static struct platform_driver fsmc_nand_driver = {
1215*4882a593Smuzhiyun 	.remove = fsmc_nand_remove,
1216*4882a593Smuzhiyun 	.driver = {
1217*4882a593Smuzhiyun 		.name = "fsmc-nand",
1218*4882a593Smuzhiyun 		.of_match_table = fsmc_nand_id_table,
1219*4882a593Smuzhiyun 		.pm = &fsmc_nand_pm_ops,
1220*4882a593Smuzhiyun 	},
1221*4882a593Smuzhiyun };
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1226*4882a593Smuzhiyun MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
1227*4882a593Smuzhiyun MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
1228