xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/raw/cadence-nand-controller.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Cadence NAND flash controller driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2019 Cadence
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Author: Piotr Sroka <piotrs@cadence.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/bitfield.h>
11*4882a593Smuzhiyun #include <linux/clk.h>
12*4882a593Smuzhiyun #include <linux/dma-mapping.h>
13*4882a593Smuzhiyun #include <linux/dmaengine.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
17*4882a593Smuzhiyun #include <linux/mtd/rawnand.h>
18*4882a593Smuzhiyun #include <linux/of_device.h>
19*4882a593Smuzhiyun #include <linux/iopoll.h>
20*4882a593Smuzhiyun #include <linux/slab.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun /*
23*4882a593Smuzhiyun  * HPNFC can work in 3 modes:
24*4882a593Smuzhiyun  * -  PIO - can work in master or slave DMA
25*4882a593Smuzhiyun  * -  CDMA - needs Master DMA for accessing command descriptors.
26*4882a593Smuzhiyun  * -  Generic mode - can use only slave DMA.
27*4882a593Smuzhiyun  * CDMA and PIO modes can be used to execute only base commands.
28*4882a593Smuzhiyun  * Generic mode can be used to execute any command
29*4882a593Smuzhiyun  * on NAND flash memory. Driver uses CDMA mode for
30*4882a593Smuzhiyun  * block erasing, page reading, page programing.
31*4882a593Smuzhiyun  * Generic mode is used for executing rest of commands.
32*4882a593Smuzhiyun  */
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #define MAX_ADDRESS_CYC		6
35*4882a593Smuzhiyun #define MAX_ERASE_ADDRESS_CYC	3
36*4882a593Smuzhiyun #define MAX_DATA_SIZE		0xFFFC
37*4882a593Smuzhiyun #define DMA_DATA_SIZE_ALIGN	8
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /* Register definition. */
40*4882a593Smuzhiyun /*
41*4882a593Smuzhiyun  * Command register 0.
42*4882a593Smuzhiyun  * Writing data to this register will initiate a new transaction
43*4882a593Smuzhiyun  * of the NF controller.
44*4882a593Smuzhiyun  */
45*4882a593Smuzhiyun #define CMD_REG0			0x0000
46*4882a593Smuzhiyun /* Command type field mask. */
47*4882a593Smuzhiyun #define		CMD_REG0_CT		GENMASK(31, 30)
48*4882a593Smuzhiyun /* Command type CDMA. */
49*4882a593Smuzhiyun #define		CMD_REG0_CT_CDMA	0uL
50*4882a593Smuzhiyun /* Command type generic. */
51*4882a593Smuzhiyun #define		CMD_REG0_CT_GEN		3uL
52*4882a593Smuzhiyun /* Command thread number field mask. */
53*4882a593Smuzhiyun #define		CMD_REG0_TN		GENMASK(27, 24)
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun /* Command register 2. */
56*4882a593Smuzhiyun #define CMD_REG2			0x0008
57*4882a593Smuzhiyun /* Command register 3. */
58*4882a593Smuzhiyun #define CMD_REG3			0x000C
59*4882a593Smuzhiyun /* Pointer register to select which thread status will be selected. */
60*4882a593Smuzhiyun #define CMD_STATUS_PTR			0x0010
61*4882a593Smuzhiyun /* Command status register for selected thread. */
62*4882a593Smuzhiyun #define CMD_STATUS			0x0014
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun /* Interrupt status register. */
65*4882a593Smuzhiyun #define INTR_STATUS			0x0110
66*4882a593Smuzhiyun #define		INTR_STATUS_SDMA_ERR	BIT(22)
67*4882a593Smuzhiyun #define		INTR_STATUS_SDMA_TRIGG	BIT(21)
68*4882a593Smuzhiyun #define		INTR_STATUS_UNSUPP_CMD	BIT(19)
69*4882a593Smuzhiyun #define		INTR_STATUS_DDMA_TERR	BIT(18)
70*4882a593Smuzhiyun #define		INTR_STATUS_CDMA_TERR	BIT(17)
71*4882a593Smuzhiyun #define		INTR_STATUS_CDMA_IDL	BIT(16)
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun /* Interrupt enable register. */
74*4882a593Smuzhiyun #define INTR_ENABLE				0x0114
75*4882a593Smuzhiyun #define		INTR_ENABLE_INTR_EN		BIT(31)
76*4882a593Smuzhiyun #define		INTR_ENABLE_SDMA_ERR_EN		BIT(22)
77*4882a593Smuzhiyun #define		INTR_ENABLE_SDMA_TRIGG_EN	BIT(21)
78*4882a593Smuzhiyun #define		INTR_ENABLE_UNSUPP_CMD_EN	BIT(19)
79*4882a593Smuzhiyun #define		INTR_ENABLE_DDMA_TERR_EN	BIT(18)
80*4882a593Smuzhiyun #define		INTR_ENABLE_CDMA_TERR_EN	BIT(17)
81*4882a593Smuzhiyun #define		INTR_ENABLE_CDMA_IDLE_EN	BIT(16)
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* Controller internal state. */
84*4882a593Smuzhiyun #define CTRL_STATUS				0x0118
85*4882a593Smuzhiyun #define		CTRL_STATUS_INIT_COMP		BIT(9)
86*4882a593Smuzhiyun #define		CTRL_STATUS_CTRL_BUSY		BIT(8)
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun /* Command Engine threads state. */
89*4882a593Smuzhiyun #define TRD_STATUS				0x0120
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun /* Command Engine interrupt thread error status. */
92*4882a593Smuzhiyun #define TRD_ERR_INT_STATUS			0x0128
93*4882a593Smuzhiyun /* Command Engine interrupt thread error enable. */
94*4882a593Smuzhiyun #define TRD_ERR_INT_STATUS_EN			0x0130
95*4882a593Smuzhiyun /* Command Engine interrupt thread complete status. */
96*4882a593Smuzhiyun #define TRD_COMP_INT_STATUS			0x0138
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun /*
99*4882a593Smuzhiyun  * Transfer config 0 register.
100*4882a593Smuzhiyun  * Configures data transfer parameters.
101*4882a593Smuzhiyun  */
102*4882a593Smuzhiyun #define TRAN_CFG_0				0x0400
103*4882a593Smuzhiyun /* Offset value from the beginning of the page. */
104*4882a593Smuzhiyun #define		TRAN_CFG_0_OFFSET		GENMASK(31, 16)
105*4882a593Smuzhiyun /* Numbers of sectors to transfer within singlNF device's page. */
106*4882a593Smuzhiyun #define		TRAN_CFG_0_SEC_CNT		GENMASK(7, 0)
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /*
109*4882a593Smuzhiyun  * Transfer config 1 register.
110*4882a593Smuzhiyun  * Configures data transfer parameters.
111*4882a593Smuzhiyun  */
112*4882a593Smuzhiyun #define TRAN_CFG_1				0x0404
113*4882a593Smuzhiyun /* Size of last data sector. */
114*4882a593Smuzhiyun #define		TRAN_CFG_1_LAST_SEC_SIZE	GENMASK(31, 16)
115*4882a593Smuzhiyun /* Size of not-last data sector. */
116*4882a593Smuzhiyun #define		TRAN_CFG_1_SECTOR_SIZE		GENMASK(15, 0)
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /* ECC engine configuration register 0. */
119*4882a593Smuzhiyun #define ECC_CONFIG_0				0x0428
120*4882a593Smuzhiyun /* Correction strength. */
121*4882a593Smuzhiyun #define		ECC_CONFIG_0_CORR_STR		GENMASK(10, 8)
122*4882a593Smuzhiyun /* Enable erased pages detection mechanism. */
123*4882a593Smuzhiyun #define		ECC_CONFIG_0_ERASE_DET_EN	BIT(1)
124*4882a593Smuzhiyun /* Enable controller ECC check bits generation and correction. */
125*4882a593Smuzhiyun #define		ECC_CONFIG_0_ECC_EN		BIT(0)
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun /* ECC engine configuration register 1. */
128*4882a593Smuzhiyun #define ECC_CONFIG_1				0x042C
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /* Multiplane settings register. */
131*4882a593Smuzhiyun #define MULTIPLANE_CFG				0x0434
132*4882a593Smuzhiyun /* Cache operation settings. */
133*4882a593Smuzhiyun #define CACHE_CFG				0x0438
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun /* DMA settings register. */
136*4882a593Smuzhiyun #define DMA_SETINGS				0x043C
137*4882a593Smuzhiyun /* Enable SDMA error report on access unprepared slave DMA interface. */
138*4882a593Smuzhiyun #define		DMA_SETINGS_SDMA_ERR_RSP	BIT(17)
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun /* Transferred data block size for the slave DMA module. */
141*4882a593Smuzhiyun #define SDMA_SIZE				0x0440
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun /* Thread number associated with transferred data block
144*4882a593Smuzhiyun  * for the slave DMA module.
145*4882a593Smuzhiyun  */
146*4882a593Smuzhiyun #define SDMA_TRD_NUM				0x0444
147*4882a593Smuzhiyun /* Thread number mask. */
148*4882a593Smuzhiyun #define		SDMA_TRD_NUM_SDMA_TRD		GENMASK(2, 0)
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun #define CONTROL_DATA_CTRL			0x0494
151*4882a593Smuzhiyun /* Thread number mask. */
152*4882a593Smuzhiyun #define		CONTROL_DATA_CTRL_SIZE		GENMASK(15, 0)
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun #define CTRL_VERSION				0x800
155*4882a593Smuzhiyun #define		CTRL_VERSION_REV		GENMASK(7, 0)
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun /* Available hardware features of the controller. */
158*4882a593Smuzhiyun #define CTRL_FEATURES				0x804
159*4882a593Smuzhiyun /* Support for NV-DDR2/3 work mode. */
160*4882a593Smuzhiyun #define		CTRL_FEATURES_NVDDR_2_3		BIT(28)
161*4882a593Smuzhiyun /* Support for NV-DDR work mode. */
162*4882a593Smuzhiyun #define		CTRL_FEATURES_NVDDR		BIT(27)
163*4882a593Smuzhiyun /* Support for asynchronous work mode. */
164*4882a593Smuzhiyun #define		CTRL_FEATURES_ASYNC		BIT(26)
165*4882a593Smuzhiyun /* Support for asynchronous work mode. */
166*4882a593Smuzhiyun #define		CTRL_FEATURES_N_BANKS		GENMASK(25, 24)
167*4882a593Smuzhiyun /* Slave and Master DMA data width. */
168*4882a593Smuzhiyun #define		CTRL_FEATURES_DMA_DWITH64	BIT(21)
169*4882a593Smuzhiyun /* Availability of Control Data feature.*/
170*4882a593Smuzhiyun #define		CTRL_FEATURES_CONTROL_DATA	BIT(10)
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun /* BCH Engine identification register 0 - correction strengths. */
173*4882a593Smuzhiyun #define BCH_CFG_0				0x838
174*4882a593Smuzhiyun #define		BCH_CFG_0_CORR_CAP_0		GENMASK(7, 0)
175*4882a593Smuzhiyun #define		BCH_CFG_0_CORR_CAP_1		GENMASK(15, 8)
176*4882a593Smuzhiyun #define		BCH_CFG_0_CORR_CAP_2		GENMASK(23, 16)
177*4882a593Smuzhiyun #define		BCH_CFG_0_CORR_CAP_3		GENMASK(31, 24)
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun /* BCH Engine identification register 1 - correction strengths. */
180*4882a593Smuzhiyun #define BCH_CFG_1				0x83C
181*4882a593Smuzhiyun #define		BCH_CFG_1_CORR_CAP_4		GENMASK(7, 0)
182*4882a593Smuzhiyun #define		BCH_CFG_1_CORR_CAP_5		GENMASK(15, 8)
183*4882a593Smuzhiyun #define		BCH_CFG_1_CORR_CAP_6		GENMASK(23, 16)
184*4882a593Smuzhiyun #define		BCH_CFG_1_CORR_CAP_7		GENMASK(31, 24)
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun /* BCH Engine identification register 2 - sector sizes. */
187*4882a593Smuzhiyun #define BCH_CFG_2				0x840
188*4882a593Smuzhiyun #define		BCH_CFG_2_SECT_0		GENMASK(15, 0)
189*4882a593Smuzhiyun #define		BCH_CFG_2_SECT_1		GENMASK(31, 16)
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /* BCH Engine identification register 3. */
192*4882a593Smuzhiyun #define BCH_CFG_3				0x844
193*4882a593Smuzhiyun #define		BCH_CFG_3_METADATA_SIZE		GENMASK(23, 16)
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun /* Ready/Busy# line status. */
196*4882a593Smuzhiyun #define RBN_SETINGS				0x1004
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun /* Common settings. */
199*4882a593Smuzhiyun #define COMMON_SET				0x1008
200*4882a593Smuzhiyun /* 16 bit device connected to the NAND Flash interface. */
201*4882a593Smuzhiyun #define		COMMON_SET_DEVICE_16BIT		BIT(8)
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun /* Skip_bytes registers. */
204*4882a593Smuzhiyun #define SKIP_BYTES_CONF				0x100C
205*4882a593Smuzhiyun #define		SKIP_BYTES_MARKER_VALUE		GENMASK(31, 16)
206*4882a593Smuzhiyun #define		SKIP_BYTES_NUM_OF_BYTES		GENMASK(7, 0)
207*4882a593Smuzhiyun 
208*4882a593Smuzhiyun #define SKIP_BYTES_OFFSET			0x1010
209*4882a593Smuzhiyun #define		 SKIP_BYTES_OFFSET_VALUE	GENMASK(23, 0)
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun /* Timings configuration. */
212*4882a593Smuzhiyun #define ASYNC_TOGGLE_TIMINGS			0x101c
213*4882a593Smuzhiyun #define		ASYNC_TOGGLE_TIMINGS_TRH	GENMASK(28, 24)
214*4882a593Smuzhiyun #define		ASYNC_TOGGLE_TIMINGS_TRP	GENMASK(20, 16)
215*4882a593Smuzhiyun #define		ASYNC_TOGGLE_TIMINGS_TWH	GENMASK(12, 8)
216*4882a593Smuzhiyun #define		ASYNC_TOGGLE_TIMINGS_TWP	GENMASK(4, 0)
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun #define	TIMINGS0				0x1024
219*4882a593Smuzhiyun #define		TIMINGS0_TADL			GENMASK(31, 24)
220*4882a593Smuzhiyun #define		TIMINGS0_TCCS			GENMASK(23, 16)
221*4882a593Smuzhiyun #define		TIMINGS0_TWHR			GENMASK(15, 8)
222*4882a593Smuzhiyun #define		TIMINGS0_TRHW			GENMASK(7, 0)
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun #define	TIMINGS1				0x1028
225*4882a593Smuzhiyun #define		TIMINGS1_TRHZ			GENMASK(31, 24)
226*4882a593Smuzhiyun #define		TIMINGS1_TWB			GENMASK(23, 16)
227*4882a593Smuzhiyun #define		TIMINGS1_TVDLY			GENMASK(7, 0)
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun #define	TIMINGS2				0x102c
230*4882a593Smuzhiyun #define		TIMINGS2_TFEAT			GENMASK(25, 16)
231*4882a593Smuzhiyun #define		TIMINGS2_CS_HOLD_TIME		GENMASK(13, 8)
232*4882a593Smuzhiyun #define		TIMINGS2_CS_SETUP_TIME		GENMASK(5, 0)
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun /* Configuration of the resynchronization of slave DLL of PHY. */
235*4882a593Smuzhiyun #define DLL_PHY_CTRL				0x1034
236*4882a593Smuzhiyun #define		DLL_PHY_CTRL_DLL_RST_N		BIT(24)
237*4882a593Smuzhiyun #define		DLL_PHY_CTRL_EXTENDED_WR_MODE	BIT(17)
238*4882a593Smuzhiyun #define		DLL_PHY_CTRL_EXTENDED_RD_MODE	BIT(16)
239*4882a593Smuzhiyun #define		DLL_PHY_CTRL_RS_HIGH_WAIT_CNT	GENMASK(11, 8)
240*4882a593Smuzhiyun #define		DLL_PHY_CTRL_RS_IDLE_CNT	GENMASK(7, 0)
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun /* Register controlling DQ related timing. */
243*4882a593Smuzhiyun #define PHY_DQ_TIMING				0x2000
244*4882a593Smuzhiyun /* Register controlling DSQ related timing.  */
245*4882a593Smuzhiyun #define PHY_DQS_TIMING				0x2004
246*4882a593Smuzhiyun #define		PHY_DQS_TIMING_DQS_SEL_OE_END	GENMASK(3, 0)
247*4882a593Smuzhiyun #define		PHY_DQS_TIMING_PHONY_DQS_SEL	BIT(16)
248*4882a593Smuzhiyun #define		PHY_DQS_TIMING_USE_PHONY_DQS	BIT(20)
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun /* Register controlling the gate and loopback control related timing. */
251*4882a593Smuzhiyun #define PHY_GATE_LPBK_CTRL			0x2008
252*4882a593Smuzhiyun #define		PHY_GATE_LPBK_CTRL_RDS		GENMASK(24, 19)
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun /* Register holds the control for the master DLL logic. */
255*4882a593Smuzhiyun #define PHY_DLL_MASTER_CTRL			0x200C
256*4882a593Smuzhiyun #define		PHY_DLL_MASTER_CTRL_BYPASS_MODE	BIT(23)
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun /* Register holds the control for the slave DLL logic. */
259*4882a593Smuzhiyun #define PHY_DLL_SLAVE_CTRL			0x2010
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun /* This register handles the global control settings for the PHY. */
262*4882a593Smuzhiyun #define PHY_CTRL				0x2080
263*4882a593Smuzhiyun #define		PHY_CTRL_SDR_DQS		BIT(14)
264*4882a593Smuzhiyun #define		PHY_CTRL_PHONY_DQS		GENMASK(9, 4)
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun /*
267*4882a593Smuzhiyun  * This register handles the global control settings
268*4882a593Smuzhiyun  * for the termination selects for reads.
269*4882a593Smuzhiyun  */
270*4882a593Smuzhiyun #define PHY_TSEL				0x2084
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun /* Generic command layout. */
273*4882a593Smuzhiyun #define GCMD_LAY_CS			GENMASK_ULL(11, 8)
274*4882a593Smuzhiyun /*
275*4882a593Smuzhiyun  * This bit informs the minicotroller if it has to wait for tWB
276*4882a593Smuzhiyun  * after sending the last CMD/ADDR/DATA in the sequence.
277*4882a593Smuzhiyun  */
278*4882a593Smuzhiyun #define GCMD_LAY_TWB			BIT_ULL(6)
279*4882a593Smuzhiyun /* Type of generic instruction. */
280*4882a593Smuzhiyun #define GCMD_LAY_INSTR			GENMASK_ULL(5, 0)
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun /* Generic CMD sequence type. */
283*4882a593Smuzhiyun #define		GCMD_LAY_INSTR_CMD	0
284*4882a593Smuzhiyun /* Generic ADDR sequence type. */
285*4882a593Smuzhiyun #define		GCMD_LAY_INSTR_ADDR	1
286*4882a593Smuzhiyun /* Generic data transfer sequence type. */
287*4882a593Smuzhiyun #define		GCMD_LAY_INSTR_DATA	2
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun /* Input part of generic command type of input is command. */
290*4882a593Smuzhiyun #define GCMD_LAY_INPUT_CMD		GENMASK_ULL(23, 16)
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun /* Generic command address sequence - address fields. */
293*4882a593Smuzhiyun #define GCMD_LAY_INPUT_ADDR		GENMASK_ULL(63, 16)
294*4882a593Smuzhiyun /* Generic command address sequence - address size. */
295*4882a593Smuzhiyun #define GCMD_LAY_INPUT_ADDR_SIZE	GENMASK_ULL(13, 11)
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun /* Transfer direction field of generic command data sequence. */
298*4882a593Smuzhiyun #define GCMD_DIR			BIT_ULL(11)
299*4882a593Smuzhiyun /* Read transfer direction of generic command data sequence. */
300*4882a593Smuzhiyun #define		GCMD_DIR_READ		0
301*4882a593Smuzhiyun /* Write transfer direction of generic command data sequence. */
302*4882a593Smuzhiyun #define		GCMD_DIR_WRITE		1
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun /* ECC enabled flag of generic command data sequence - ECC enabled. */
305*4882a593Smuzhiyun #define GCMD_ECC_EN			BIT_ULL(12)
306*4882a593Smuzhiyun /* Generic command data sequence - sector size. */
307*4882a593Smuzhiyun #define GCMD_SECT_SIZE			GENMASK_ULL(31, 16)
308*4882a593Smuzhiyun /* Generic command data sequence - sector count. */
309*4882a593Smuzhiyun #define GCMD_SECT_CNT			GENMASK_ULL(39, 32)
310*4882a593Smuzhiyun /* Generic command data sequence - last sector size. */
311*4882a593Smuzhiyun #define GCMD_LAST_SIZE			GENMASK_ULL(55, 40)
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun /* CDMA descriptor fields. */
314*4882a593Smuzhiyun /* Erase command type of CDMA descriptor. */
315*4882a593Smuzhiyun #define CDMA_CT_ERASE		0x1000
316*4882a593Smuzhiyun /* Program page command type of CDMA descriptor. */
317*4882a593Smuzhiyun #define CDMA_CT_WR		0x2100
318*4882a593Smuzhiyun /* Read page command type of CDMA descriptor. */
319*4882a593Smuzhiyun #define CDMA_CT_RD		0x2200
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun /* Flash pointer memory shift. */
322*4882a593Smuzhiyun #define CDMA_CFPTR_MEM_SHIFT	24
323*4882a593Smuzhiyun /* Flash pointer memory mask. */
324*4882a593Smuzhiyun #define CDMA_CFPTR_MEM		GENMASK(26, 24)
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun /*
327*4882a593Smuzhiyun  * Command DMA descriptor flags. If set causes issue interrupt after
328*4882a593Smuzhiyun  * the completion of descriptor processing.
329*4882a593Smuzhiyun  */
330*4882a593Smuzhiyun #define CDMA_CF_INT		BIT(8)
331*4882a593Smuzhiyun /*
332*4882a593Smuzhiyun  * Command DMA descriptor flags - the next descriptor
333*4882a593Smuzhiyun  * address field is valid and descriptor processing should continue.
334*4882a593Smuzhiyun  */
335*4882a593Smuzhiyun #define CDMA_CF_CONT		BIT(9)
336*4882a593Smuzhiyun /* DMA master flag of command DMA descriptor. */
337*4882a593Smuzhiyun #define CDMA_CF_DMA_MASTER	BIT(10)
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun /* Operation complete status of command descriptor. */
340*4882a593Smuzhiyun #define CDMA_CS_COMP		BIT(15)
341*4882a593Smuzhiyun /* Operation complete status of command descriptor. */
342*4882a593Smuzhiyun /* Command descriptor status - operation fail. */
343*4882a593Smuzhiyun #define CDMA_CS_FAIL		BIT(14)
344*4882a593Smuzhiyun /* Command descriptor status - page erased. */
345*4882a593Smuzhiyun #define CDMA_CS_ERP		BIT(11)
346*4882a593Smuzhiyun /* Command descriptor status - timeout occurred. */
347*4882a593Smuzhiyun #define CDMA_CS_TOUT		BIT(10)
348*4882a593Smuzhiyun /*
349*4882a593Smuzhiyun  * Maximum amount of correction applied to one ECC sector.
350*4882a593Smuzhiyun  * It is part of command descriptor status.
351*4882a593Smuzhiyun  */
352*4882a593Smuzhiyun #define CDMA_CS_MAXERR		GENMASK(9, 2)
353*4882a593Smuzhiyun /* Command descriptor status - uncorrectable ECC error. */
354*4882a593Smuzhiyun #define CDMA_CS_UNCE		BIT(1)
355*4882a593Smuzhiyun /* Command descriptor status - descriptor error. */
356*4882a593Smuzhiyun #define CDMA_CS_ERR		BIT(0)
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun /* Status of operation - OK. */
359*4882a593Smuzhiyun #define STAT_OK			0
360*4882a593Smuzhiyun /* Status of operation - FAIL. */
361*4882a593Smuzhiyun #define STAT_FAIL		2
362*4882a593Smuzhiyun /* Status of operation - uncorrectable ECC error. */
363*4882a593Smuzhiyun #define STAT_ECC_UNCORR		3
364*4882a593Smuzhiyun /* Status of operation - page erased. */
365*4882a593Smuzhiyun #define STAT_ERASED		5
366*4882a593Smuzhiyun /* Status of operation - correctable ECC error. */
367*4882a593Smuzhiyun #define STAT_ECC_CORR		6
368*4882a593Smuzhiyun /* Status of operation - unsuspected state. */
369*4882a593Smuzhiyun #define STAT_UNKNOWN		7
370*4882a593Smuzhiyun /* Status of operation - operation is not completed yet. */
371*4882a593Smuzhiyun #define STAT_BUSY		0xFF
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun #define BCH_MAX_NUM_CORR_CAPS		8
374*4882a593Smuzhiyun #define BCH_MAX_NUM_SECTOR_SIZES	2
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun struct cadence_nand_timings {
377*4882a593Smuzhiyun 	u32 async_toggle_timings;
378*4882a593Smuzhiyun 	u32 timings0;
379*4882a593Smuzhiyun 	u32 timings1;
380*4882a593Smuzhiyun 	u32 timings2;
381*4882a593Smuzhiyun 	u32 dll_phy_ctrl;
382*4882a593Smuzhiyun 	u32 phy_ctrl;
383*4882a593Smuzhiyun 	u32 phy_dqs_timing;
384*4882a593Smuzhiyun 	u32 phy_gate_lpbk_ctrl;
385*4882a593Smuzhiyun };
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun /* Command DMA descriptor. */
388*4882a593Smuzhiyun struct cadence_nand_cdma_desc {
389*4882a593Smuzhiyun 	/* Next descriptor address. */
390*4882a593Smuzhiyun 	u64 next_pointer;
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	/* Flash address is a 32-bit address comprising of BANK and ROW ADDR. */
393*4882a593Smuzhiyun 	u32 flash_pointer;
394*4882a593Smuzhiyun 	/*field appears in HPNFC version 13*/
395*4882a593Smuzhiyun 	u16 bank;
396*4882a593Smuzhiyun 	u16 rsvd0;
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	/* Operation the controller needs to perform. */
399*4882a593Smuzhiyun 	u16 command_type;
400*4882a593Smuzhiyun 	u16 rsvd1;
401*4882a593Smuzhiyun 	/* Flags for operation of this command. */
402*4882a593Smuzhiyun 	u16 command_flags;
403*4882a593Smuzhiyun 	u16 rsvd2;
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	/* System/host memory address required for data DMA commands. */
406*4882a593Smuzhiyun 	u64 memory_pointer;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	/* Status of operation. */
409*4882a593Smuzhiyun 	u32 status;
410*4882a593Smuzhiyun 	u32 rsvd3;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	/* Address pointer to sync buffer location. */
413*4882a593Smuzhiyun 	u64 sync_flag_pointer;
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	/* Controls the buffer sync mechanism. */
416*4882a593Smuzhiyun 	u32 sync_arguments;
417*4882a593Smuzhiyun 	u32 rsvd4;
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	/* Control data pointer. */
420*4882a593Smuzhiyun 	u64 ctrl_data_ptr;
421*4882a593Smuzhiyun };
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun /* Interrupt status. */
424*4882a593Smuzhiyun struct cadence_nand_irq_status {
425*4882a593Smuzhiyun 	/* Thread operation complete status. */
426*4882a593Smuzhiyun 	u32 trd_status;
427*4882a593Smuzhiyun 	/* Thread operation error. */
428*4882a593Smuzhiyun 	u32 trd_error;
429*4882a593Smuzhiyun 	/* Controller status. */
430*4882a593Smuzhiyun 	u32 status;
431*4882a593Smuzhiyun };
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun /* Cadence NAND flash controller capabilities get from driver data. */
434*4882a593Smuzhiyun struct cadence_nand_dt_devdata {
435*4882a593Smuzhiyun 	/* Skew value of the output signals of the NAND Flash interface. */
436*4882a593Smuzhiyun 	u32 if_skew;
437*4882a593Smuzhiyun 	/* It informs if slave DMA interface is connected to DMA engine. */
438*4882a593Smuzhiyun 	unsigned int has_dma:1;
439*4882a593Smuzhiyun };
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun /* Cadence NAND flash controller capabilities read from registers. */
442*4882a593Smuzhiyun struct cdns_nand_caps {
443*4882a593Smuzhiyun 	/* Maximum number of banks supported by hardware. */
444*4882a593Smuzhiyun 	u8 max_banks;
445*4882a593Smuzhiyun 	/* Slave and Master DMA data width in bytes (4 or 8). */
446*4882a593Smuzhiyun 	u8 data_dma_width;
447*4882a593Smuzhiyun 	/* Control Data feature supported. */
448*4882a593Smuzhiyun 	bool data_control_supp;
449*4882a593Smuzhiyun 	/* Is PHY type DLL. */
450*4882a593Smuzhiyun 	bool is_phy_type_dll;
451*4882a593Smuzhiyun };
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun struct cdns_nand_ctrl {
454*4882a593Smuzhiyun 	struct device *dev;
455*4882a593Smuzhiyun 	struct nand_controller controller;
456*4882a593Smuzhiyun 	struct cadence_nand_cdma_desc *cdma_desc;
457*4882a593Smuzhiyun 	/* IP capability. */
458*4882a593Smuzhiyun 	const struct cadence_nand_dt_devdata *caps1;
459*4882a593Smuzhiyun 	struct cdns_nand_caps caps2;
460*4882a593Smuzhiyun 	u8 ctrl_rev;
461*4882a593Smuzhiyun 	dma_addr_t dma_cdma_desc;
462*4882a593Smuzhiyun 	u8 *buf;
463*4882a593Smuzhiyun 	u32 buf_size;
464*4882a593Smuzhiyun 	u8 curr_corr_str_idx;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	/* Register interface. */
467*4882a593Smuzhiyun 	void __iomem *reg;
468*4882a593Smuzhiyun 
469*4882a593Smuzhiyun 	struct {
470*4882a593Smuzhiyun 		void __iomem *virt;
471*4882a593Smuzhiyun 		dma_addr_t dma;
472*4882a593Smuzhiyun 	} io;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	int irq;
475*4882a593Smuzhiyun 	/* Interrupts that have happened. */
476*4882a593Smuzhiyun 	struct cadence_nand_irq_status irq_status;
477*4882a593Smuzhiyun 	/* Interrupts we are waiting for. */
478*4882a593Smuzhiyun 	struct cadence_nand_irq_status irq_mask;
479*4882a593Smuzhiyun 	struct completion complete;
480*4882a593Smuzhiyun 	/* Protect irq_mask and irq_status. */
481*4882a593Smuzhiyun 	spinlock_t irq_lock;
482*4882a593Smuzhiyun 
483*4882a593Smuzhiyun 	int ecc_strengths[BCH_MAX_NUM_CORR_CAPS];
484*4882a593Smuzhiyun 	struct nand_ecc_step_info ecc_stepinfos[BCH_MAX_NUM_SECTOR_SIZES];
485*4882a593Smuzhiyun 	struct nand_ecc_caps ecc_caps;
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	int curr_trans_type;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	struct dma_chan *dmac;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	u32 nf_clk_rate;
492*4882a593Smuzhiyun 	/*
493*4882a593Smuzhiyun 	 * Estimated Board delay. The value includes the total
494*4882a593Smuzhiyun 	 * round trip delay for the signals and is used for deciding on values
495*4882a593Smuzhiyun 	 * associated with data read capture.
496*4882a593Smuzhiyun 	 */
497*4882a593Smuzhiyun 	u32 board_delay;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	struct nand_chip *selected_chip;
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	unsigned long assigned_cs;
502*4882a593Smuzhiyun 	struct list_head chips;
503*4882a593Smuzhiyun 	u8 bch_metadata_size;
504*4882a593Smuzhiyun };
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun struct cdns_nand_chip {
507*4882a593Smuzhiyun 	struct cadence_nand_timings timings;
508*4882a593Smuzhiyun 	struct nand_chip chip;
509*4882a593Smuzhiyun 	u8 nsels;
510*4882a593Smuzhiyun 	struct list_head node;
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 	/*
513*4882a593Smuzhiyun 	 * part of oob area of NAND flash memory page.
514*4882a593Smuzhiyun 	 * This part is available for user to read or write.
515*4882a593Smuzhiyun 	 */
516*4882a593Smuzhiyun 	u32 avail_oob_size;
517*4882a593Smuzhiyun 
518*4882a593Smuzhiyun 	/* Sector size. There are few sectors per mtd->writesize */
519*4882a593Smuzhiyun 	u32 sector_size;
520*4882a593Smuzhiyun 	u32 sector_count;
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	/* Offset of BBM. */
523*4882a593Smuzhiyun 	u8 bbm_offs;
524*4882a593Smuzhiyun 	/* Number of bytes reserved for BBM. */
525*4882a593Smuzhiyun 	u8 bbm_len;
526*4882a593Smuzhiyun 	/* ECC strength index. */
527*4882a593Smuzhiyun 	u8 corr_str_idx;
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun 	u8 cs[];
530*4882a593Smuzhiyun };
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun struct ecc_info {
533*4882a593Smuzhiyun 	int (*calc_ecc_bytes)(int step_size, int strength);
534*4882a593Smuzhiyun 	int max_step_size;
535*4882a593Smuzhiyun };
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun static inline struct
to_cdns_nand_chip(struct nand_chip * chip)538*4882a593Smuzhiyun cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
539*4882a593Smuzhiyun {
540*4882a593Smuzhiyun 	return container_of(chip, struct cdns_nand_chip, chip);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun static inline struct
to_cdns_nand_ctrl(struct nand_controller * controller)544*4882a593Smuzhiyun cdns_nand_ctrl *to_cdns_nand_ctrl(struct nand_controller *controller)
545*4882a593Smuzhiyun {
546*4882a593Smuzhiyun 	return container_of(controller, struct cdns_nand_ctrl, controller);
547*4882a593Smuzhiyun }
548*4882a593Smuzhiyun 
549*4882a593Smuzhiyun static bool
cadence_nand_dma_buf_ok(struct cdns_nand_ctrl * cdns_ctrl,const void * buf,u32 buf_len)550*4882a593Smuzhiyun cadence_nand_dma_buf_ok(struct cdns_nand_ctrl *cdns_ctrl, const void *buf,
551*4882a593Smuzhiyun 			u32 buf_len)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun 	u8 data_dma_width = cdns_ctrl->caps2.data_dma_width;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	return buf && virt_addr_valid(buf) &&
556*4882a593Smuzhiyun 		likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
557*4882a593Smuzhiyun 		likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
558*4882a593Smuzhiyun }
559*4882a593Smuzhiyun 
cadence_nand_wait_for_value(struct cdns_nand_ctrl * cdns_ctrl,u32 reg_offset,u32 timeout_us,u32 mask,bool is_clear)560*4882a593Smuzhiyun static int cadence_nand_wait_for_value(struct cdns_nand_ctrl *cdns_ctrl,
561*4882a593Smuzhiyun 				       u32 reg_offset, u32 timeout_us,
562*4882a593Smuzhiyun 				       u32 mask, bool is_clear)
563*4882a593Smuzhiyun {
564*4882a593Smuzhiyun 	u32 val;
565*4882a593Smuzhiyun 	int ret;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	ret = readl_relaxed_poll_timeout(cdns_ctrl->reg + reg_offset,
568*4882a593Smuzhiyun 					 val, !(val & mask) == is_clear,
569*4882a593Smuzhiyun 					 10, timeout_us);
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	if (ret < 0) {
572*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
573*4882a593Smuzhiyun 			"Timeout while waiting for reg %x with mask %x is clear %d\n",
574*4882a593Smuzhiyun 			reg_offset, mask, is_clear);
575*4882a593Smuzhiyun 	}
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	return ret;
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun 
cadence_nand_set_ecc_enable(struct cdns_nand_ctrl * cdns_ctrl,bool enable)580*4882a593Smuzhiyun static int cadence_nand_set_ecc_enable(struct cdns_nand_ctrl *cdns_ctrl,
581*4882a593Smuzhiyun 				       bool enable)
582*4882a593Smuzhiyun {
583*4882a593Smuzhiyun 	u32 reg;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
586*4882a593Smuzhiyun 					1000000,
587*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
588*4882a593Smuzhiyun 		return -ETIMEDOUT;
589*4882a593Smuzhiyun 
590*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	if (enable)
593*4882a593Smuzhiyun 		reg |= ECC_CONFIG_0_ECC_EN;
594*4882a593Smuzhiyun 	else
595*4882a593Smuzhiyun 		reg &= ~ECC_CONFIG_0_ECC_EN;
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	return 0;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
cadence_nand_set_ecc_strength(struct cdns_nand_ctrl * cdns_ctrl,u8 corr_str_idx)602*4882a593Smuzhiyun static void cadence_nand_set_ecc_strength(struct cdns_nand_ctrl *cdns_ctrl,
603*4882a593Smuzhiyun 					  u8 corr_str_idx)
604*4882a593Smuzhiyun {
605*4882a593Smuzhiyun 	u32 reg;
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	if (cdns_ctrl->curr_corr_str_idx == corr_str_idx)
608*4882a593Smuzhiyun 		return;
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
611*4882a593Smuzhiyun 	reg &= ~ECC_CONFIG_0_CORR_STR;
612*4882a593Smuzhiyun 	reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
613*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	cdns_ctrl->curr_corr_str_idx = corr_str_idx;
616*4882a593Smuzhiyun }
617*4882a593Smuzhiyun 
cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl * cdns_ctrl,u8 strength)618*4882a593Smuzhiyun static int cadence_nand_get_ecc_strength_idx(struct cdns_nand_ctrl *cdns_ctrl,
619*4882a593Smuzhiyun 					     u8 strength)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun 	int i, corr_str_idx = -1;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
624*4882a593Smuzhiyun 		if (cdns_ctrl->ecc_strengths[i] == strength) {
625*4882a593Smuzhiyun 			corr_str_idx = i;
626*4882a593Smuzhiyun 			break;
627*4882a593Smuzhiyun 		}
628*4882a593Smuzhiyun 	}
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 	return corr_str_idx;
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun 
cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl * cdns_ctrl,u16 marker_value)633*4882a593Smuzhiyun static int cadence_nand_set_skip_marker_val(struct cdns_nand_ctrl *cdns_ctrl,
634*4882a593Smuzhiyun 					    u16 marker_value)
635*4882a593Smuzhiyun {
636*4882a593Smuzhiyun 	u32 reg;
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
639*4882a593Smuzhiyun 					1000000,
640*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
641*4882a593Smuzhiyun 		return -ETIMEDOUT;
642*4882a593Smuzhiyun 
643*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
644*4882a593Smuzhiyun 	reg &= ~SKIP_BYTES_MARKER_VALUE;
645*4882a593Smuzhiyun 	reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
646*4882a593Smuzhiyun 			  marker_value);
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	return 0;
651*4882a593Smuzhiyun }
652*4882a593Smuzhiyun 
cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl * cdns_ctrl,u8 num_of_bytes,u32 offset_value,int enable)653*4882a593Smuzhiyun static int cadence_nand_set_skip_bytes_conf(struct cdns_nand_ctrl *cdns_ctrl,
654*4882a593Smuzhiyun 					    u8 num_of_bytes,
655*4882a593Smuzhiyun 					    u32 offset_value,
656*4882a593Smuzhiyun 					    int enable)
657*4882a593Smuzhiyun {
658*4882a593Smuzhiyun 	u32 reg, skip_bytes_offset;
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
661*4882a593Smuzhiyun 					1000000,
662*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
663*4882a593Smuzhiyun 		return -ETIMEDOUT;
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 	if (!enable) {
666*4882a593Smuzhiyun 		num_of_bytes = 0;
667*4882a593Smuzhiyun 		offset_value = 0;
668*4882a593Smuzhiyun 	}
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + SKIP_BYTES_CONF);
671*4882a593Smuzhiyun 	reg &= ~SKIP_BYTES_NUM_OF_BYTES;
672*4882a593Smuzhiyun 	reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
673*4882a593Smuzhiyun 			  num_of_bytes);
674*4882a593Smuzhiyun 	skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
675*4882a593Smuzhiyun 				       offset_value);
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + SKIP_BYTES_CONF);
678*4882a593Smuzhiyun 	writel_relaxed(skip_bytes_offset, cdns_ctrl->reg + SKIP_BYTES_OFFSET);
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	return 0;
681*4882a593Smuzhiyun }
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun /* Functions enables/disables hardware detection of erased data */
cadence_nand_set_erase_detection(struct cdns_nand_ctrl * cdns_ctrl,bool enable,u8 bitflips_threshold)684*4882a593Smuzhiyun static void cadence_nand_set_erase_detection(struct cdns_nand_ctrl *cdns_ctrl,
685*4882a593Smuzhiyun 					     bool enable,
686*4882a593Smuzhiyun 					     u8 bitflips_threshold)
687*4882a593Smuzhiyun {
688*4882a593Smuzhiyun 	u32 reg;
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + ECC_CONFIG_0);
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	if (enable)
693*4882a593Smuzhiyun 		reg |= ECC_CONFIG_0_ERASE_DET_EN;
694*4882a593Smuzhiyun 	else
695*4882a593Smuzhiyun 		reg &= ~ECC_CONFIG_0_ERASE_DET_EN;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + ECC_CONFIG_0);
698*4882a593Smuzhiyun 	writel_relaxed(bitflips_threshold, cdns_ctrl->reg + ECC_CONFIG_1);
699*4882a593Smuzhiyun }
700*4882a593Smuzhiyun 
cadence_nand_set_access_width16(struct cdns_nand_ctrl * cdns_ctrl,bool bit_bus16)701*4882a593Smuzhiyun static int cadence_nand_set_access_width16(struct cdns_nand_ctrl *cdns_ctrl,
702*4882a593Smuzhiyun 					   bool bit_bus16)
703*4882a593Smuzhiyun {
704*4882a593Smuzhiyun 	u32 reg;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
707*4882a593Smuzhiyun 					1000000,
708*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
709*4882a593Smuzhiyun 		return -ETIMEDOUT;
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + COMMON_SET);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	if (!bit_bus16)
714*4882a593Smuzhiyun 		reg &= ~COMMON_SET_DEVICE_16BIT;
715*4882a593Smuzhiyun 	else
716*4882a593Smuzhiyun 		reg |= COMMON_SET_DEVICE_16BIT;
717*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + COMMON_SET);
718*4882a593Smuzhiyun 
719*4882a593Smuzhiyun 	return 0;
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun static void
cadence_nand_clear_interrupt(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_irq_status * irq_status)723*4882a593Smuzhiyun cadence_nand_clear_interrupt(struct cdns_nand_ctrl *cdns_ctrl,
724*4882a593Smuzhiyun 			     struct cadence_nand_irq_status *irq_status)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun 	writel_relaxed(irq_status->status, cdns_ctrl->reg + INTR_STATUS);
727*4882a593Smuzhiyun 	writel_relaxed(irq_status->trd_status,
728*4882a593Smuzhiyun 		       cdns_ctrl->reg + TRD_COMP_INT_STATUS);
729*4882a593Smuzhiyun 	writel_relaxed(irq_status->trd_error,
730*4882a593Smuzhiyun 		       cdns_ctrl->reg + TRD_ERR_INT_STATUS);
731*4882a593Smuzhiyun }
732*4882a593Smuzhiyun 
733*4882a593Smuzhiyun static void
cadence_nand_read_int_status(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_irq_status * irq_status)734*4882a593Smuzhiyun cadence_nand_read_int_status(struct cdns_nand_ctrl *cdns_ctrl,
735*4882a593Smuzhiyun 			     struct cadence_nand_irq_status *irq_status)
736*4882a593Smuzhiyun {
737*4882a593Smuzhiyun 	irq_status->status = readl_relaxed(cdns_ctrl->reg + INTR_STATUS);
738*4882a593Smuzhiyun 	irq_status->trd_status = readl_relaxed(cdns_ctrl->reg
739*4882a593Smuzhiyun 					       + TRD_COMP_INT_STATUS);
740*4882a593Smuzhiyun 	irq_status->trd_error = readl_relaxed(cdns_ctrl->reg
741*4882a593Smuzhiyun 					      + TRD_ERR_INT_STATUS);
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun 
irq_detected(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_irq_status * irq_status)744*4882a593Smuzhiyun static u32 irq_detected(struct cdns_nand_ctrl *cdns_ctrl,
745*4882a593Smuzhiyun 			struct cadence_nand_irq_status *irq_status)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun 	cadence_nand_read_int_status(cdns_ctrl, irq_status);
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	return irq_status->status || irq_status->trd_status ||
750*4882a593Smuzhiyun 		irq_status->trd_error;
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun 
cadence_nand_reset_irq(struct cdns_nand_ctrl * cdns_ctrl)753*4882a593Smuzhiyun static void cadence_nand_reset_irq(struct cdns_nand_ctrl *cdns_ctrl)
754*4882a593Smuzhiyun {
755*4882a593Smuzhiyun 	unsigned long flags;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	spin_lock_irqsave(&cdns_ctrl->irq_lock, flags);
758*4882a593Smuzhiyun 	memset(&cdns_ctrl->irq_status, 0, sizeof(cdns_ctrl->irq_status));
759*4882a593Smuzhiyun 	memset(&cdns_ctrl->irq_mask, 0, sizeof(cdns_ctrl->irq_mask));
760*4882a593Smuzhiyun 	spin_unlock_irqrestore(&cdns_ctrl->irq_lock, flags);
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun /*
764*4882a593Smuzhiyun  * This is the interrupt service routine. It handles all interrupts
765*4882a593Smuzhiyun  * sent to this device.
766*4882a593Smuzhiyun  */
cadence_nand_isr(int irq,void * dev_id)767*4882a593Smuzhiyun static irqreturn_t cadence_nand_isr(int irq, void *dev_id)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = dev_id;
770*4882a593Smuzhiyun 	struct cadence_nand_irq_status irq_status;
771*4882a593Smuzhiyun 	irqreturn_t result = IRQ_NONE;
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	spin_lock(&cdns_ctrl->irq_lock);
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	if (irq_detected(cdns_ctrl, &irq_status)) {
776*4882a593Smuzhiyun 		/* Handle interrupt. */
777*4882a593Smuzhiyun 		/* First acknowledge it. */
778*4882a593Smuzhiyun 		cadence_nand_clear_interrupt(cdns_ctrl, &irq_status);
779*4882a593Smuzhiyun 		/* Status in the device context for someone to read. */
780*4882a593Smuzhiyun 		cdns_ctrl->irq_status.status |= irq_status.status;
781*4882a593Smuzhiyun 		cdns_ctrl->irq_status.trd_status |= irq_status.trd_status;
782*4882a593Smuzhiyun 		cdns_ctrl->irq_status.trd_error |= irq_status.trd_error;
783*4882a593Smuzhiyun 		/* Notify anyone who cares that it happened. */
784*4882a593Smuzhiyun 		complete(&cdns_ctrl->complete);
785*4882a593Smuzhiyun 		/* Tell the OS that we've handled this. */
786*4882a593Smuzhiyun 		result = IRQ_HANDLED;
787*4882a593Smuzhiyun 	}
788*4882a593Smuzhiyun 	spin_unlock(&cdns_ctrl->irq_lock);
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	return result;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun 
cadence_nand_set_irq_mask(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_irq_status * irq_mask)793*4882a593Smuzhiyun static void cadence_nand_set_irq_mask(struct cdns_nand_ctrl *cdns_ctrl,
794*4882a593Smuzhiyun 				      struct cadence_nand_irq_status *irq_mask)
795*4882a593Smuzhiyun {
796*4882a593Smuzhiyun 	writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
797*4882a593Smuzhiyun 		       cdns_ctrl->reg + INTR_ENABLE);
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	writel_relaxed(irq_mask->trd_error,
800*4882a593Smuzhiyun 		       cdns_ctrl->reg + TRD_ERR_INT_STATUS_EN);
801*4882a593Smuzhiyun }
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun static void
cadence_nand_wait_for_irq(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_irq_status * irq_mask,struct cadence_nand_irq_status * irq_status)804*4882a593Smuzhiyun cadence_nand_wait_for_irq(struct cdns_nand_ctrl *cdns_ctrl,
805*4882a593Smuzhiyun 			  struct cadence_nand_irq_status *irq_mask,
806*4882a593Smuzhiyun 			  struct cadence_nand_irq_status *irq_status)
807*4882a593Smuzhiyun {
808*4882a593Smuzhiyun 	unsigned long timeout = msecs_to_jiffies(10000);
809*4882a593Smuzhiyun 	unsigned long time_left;
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	time_left = wait_for_completion_timeout(&cdns_ctrl->complete,
812*4882a593Smuzhiyun 						timeout);
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 	*irq_status = cdns_ctrl->irq_status;
815*4882a593Smuzhiyun 	if (time_left == 0) {
816*4882a593Smuzhiyun 		/* Timeout error. */
817*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "timeout occurred:\n");
818*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "\tstatus = 0x%x, mask = 0x%x\n",
819*4882a593Smuzhiyun 			irq_status->status, irq_mask->status);
820*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
821*4882a593Smuzhiyun 			"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
822*4882a593Smuzhiyun 			irq_status->trd_status, irq_mask->trd_status);
823*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
824*4882a593Smuzhiyun 			"\t trd_error = 0x%x, trd_error mask = 0x%x\n",
825*4882a593Smuzhiyun 			irq_status->trd_error, irq_mask->trd_error);
826*4882a593Smuzhiyun 	}
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun /* Execute generic command on NAND controller. */
cadence_nand_generic_cmd_send(struct cdns_nand_ctrl * cdns_ctrl,u8 chip_nr,u64 mini_ctrl_cmd)830*4882a593Smuzhiyun static int cadence_nand_generic_cmd_send(struct cdns_nand_ctrl *cdns_ctrl,
831*4882a593Smuzhiyun 					 u8 chip_nr,
832*4882a593Smuzhiyun 					 u64 mini_ctrl_cmd)
833*4882a593Smuzhiyun {
834*4882a593Smuzhiyun 	u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
837*4882a593Smuzhiyun 	mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
838*4882a593Smuzhiyun 	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
841*4882a593Smuzhiyun 					1000000,
842*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
843*4882a593Smuzhiyun 		return -ETIMEDOUT;
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 	cadence_nand_reset_irq(cdns_ctrl);
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	writel_relaxed(mini_ctrl_cmd_l, cdns_ctrl->reg + CMD_REG2);
848*4882a593Smuzhiyun 	writel_relaxed(mini_ctrl_cmd_h, cdns_ctrl->reg + CMD_REG3);
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	/* Select generic command. */
851*4882a593Smuzhiyun 	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
852*4882a593Smuzhiyun 	/* Thread number. */
853*4882a593Smuzhiyun 	reg |= FIELD_PREP(CMD_REG0_TN, 0);
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	/* Issue command. */
856*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	return 0;
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun /* Wait for data on slave DMA interface. */
cadence_nand_wait_on_sdma(struct cdns_nand_ctrl * cdns_ctrl,u8 * out_sdma_trd,u32 * out_sdma_size)862*4882a593Smuzhiyun static int cadence_nand_wait_on_sdma(struct cdns_nand_ctrl *cdns_ctrl,
863*4882a593Smuzhiyun 				     u8 *out_sdma_trd,
864*4882a593Smuzhiyun 				     u32 *out_sdma_size)
865*4882a593Smuzhiyun {
866*4882a593Smuzhiyun 	struct cadence_nand_irq_status irq_mask, irq_status;
867*4882a593Smuzhiyun 
868*4882a593Smuzhiyun 	irq_mask.trd_status = 0;
869*4882a593Smuzhiyun 	irq_mask.trd_error = 0;
870*4882a593Smuzhiyun 	irq_mask.status = INTR_STATUS_SDMA_TRIGG
871*4882a593Smuzhiyun 		| INTR_STATUS_SDMA_ERR
872*4882a593Smuzhiyun 		| INTR_STATUS_UNSUPP_CMD;
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
875*4882a593Smuzhiyun 	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
876*4882a593Smuzhiyun 	if (irq_status.status == 0) {
877*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Timeout while waiting for SDMA\n");
878*4882a593Smuzhiyun 		return -ETIMEDOUT;
879*4882a593Smuzhiyun 	}
880*4882a593Smuzhiyun 
881*4882a593Smuzhiyun 	if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
882*4882a593Smuzhiyun 		*out_sdma_size = readl_relaxed(cdns_ctrl->reg + SDMA_SIZE);
883*4882a593Smuzhiyun 		*out_sdma_trd  = readl_relaxed(cdns_ctrl->reg + SDMA_TRD_NUM);
884*4882a593Smuzhiyun 		*out_sdma_trd =
885*4882a593Smuzhiyun 			FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
886*4882a593Smuzhiyun 	} else {
887*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "SDMA error - irq_status %x\n",
888*4882a593Smuzhiyun 			irq_status.status);
889*4882a593Smuzhiyun 		return -EIO;
890*4882a593Smuzhiyun 	}
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun 	return 0;
893*4882a593Smuzhiyun }
894*4882a593Smuzhiyun 
cadence_nand_get_caps(struct cdns_nand_ctrl * cdns_ctrl)895*4882a593Smuzhiyun static void cadence_nand_get_caps(struct cdns_nand_ctrl *cdns_ctrl)
896*4882a593Smuzhiyun {
897*4882a593Smuzhiyun 	u32  reg;
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + CTRL_FEATURES);
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 	cdns_ctrl->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
904*4882a593Smuzhiyun 		cdns_ctrl->caps2.data_dma_width = 8;
905*4882a593Smuzhiyun 	else
906*4882a593Smuzhiyun 		cdns_ctrl->caps2.data_dma_width = 4;
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	if (reg & CTRL_FEATURES_CONTROL_DATA)
909*4882a593Smuzhiyun 		cdns_ctrl->caps2.data_control_supp = true;
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 	if (reg & (CTRL_FEATURES_NVDDR_2_3
912*4882a593Smuzhiyun 		   | CTRL_FEATURES_NVDDR))
913*4882a593Smuzhiyun 		cdns_ctrl->caps2.is_phy_type_dll = true;
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun /* Prepare CDMA descriptor. */
917*4882a593Smuzhiyun static void
cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl * cdns_ctrl,char nf_mem,u32 flash_ptr,dma_addr_t mem_ptr,dma_addr_t ctrl_data_ptr,u16 ctype)918*4882a593Smuzhiyun cadence_nand_cdma_desc_prepare(struct cdns_nand_ctrl *cdns_ctrl,
919*4882a593Smuzhiyun 			       char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
920*4882a593Smuzhiyun 				   dma_addr_t ctrl_data_ptr, u16 ctype)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun 	struct cadence_nand_cdma_desc *cdma_desc = cdns_ctrl->cdma_desc;
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* Set fields for one descriptor. */
927*4882a593Smuzhiyun 	cdma_desc->flash_pointer = flash_ptr;
928*4882a593Smuzhiyun 	if (cdns_ctrl->ctrl_rev >= 13)
929*4882a593Smuzhiyun 		cdma_desc->bank = nf_mem;
930*4882a593Smuzhiyun 	else
931*4882a593Smuzhiyun 		cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 	cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
934*4882a593Smuzhiyun 	cdma_desc->command_flags  |= CDMA_CF_INT;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	cdma_desc->memory_pointer = mem_ptr;
937*4882a593Smuzhiyun 	cdma_desc->status = 0;
938*4882a593Smuzhiyun 	cdma_desc->sync_flag_pointer = 0;
939*4882a593Smuzhiyun 	cdma_desc->sync_arguments = 0;
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 	cdma_desc->command_type = ctype;
942*4882a593Smuzhiyun 	cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
943*4882a593Smuzhiyun }
944*4882a593Smuzhiyun 
cadence_nand_check_desc_error(struct cdns_nand_ctrl * cdns_ctrl,u32 desc_status)945*4882a593Smuzhiyun static u8 cadence_nand_check_desc_error(struct cdns_nand_ctrl *cdns_ctrl,
946*4882a593Smuzhiyun 					u32 desc_status)
947*4882a593Smuzhiyun {
948*4882a593Smuzhiyun 	if (desc_status & CDMA_CS_ERP)
949*4882a593Smuzhiyun 		return STAT_ERASED;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	if (desc_status & CDMA_CS_UNCE)
952*4882a593Smuzhiyun 		return STAT_ECC_UNCORR;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	if (desc_status & CDMA_CS_ERR) {
955*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, ":CDMA desc error flag detected.\n");
956*4882a593Smuzhiyun 		return STAT_FAIL;
957*4882a593Smuzhiyun 	}
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
960*4882a593Smuzhiyun 		return STAT_ECC_CORR;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	return STAT_FAIL;
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
cadence_nand_cdma_finish(struct cdns_nand_ctrl * cdns_ctrl)965*4882a593Smuzhiyun static int cadence_nand_cdma_finish(struct cdns_nand_ctrl *cdns_ctrl)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun 	struct cadence_nand_cdma_desc *desc_ptr = cdns_ctrl->cdma_desc;
968*4882a593Smuzhiyun 	u8 status = STAT_BUSY;
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun 	if (desc_ptr->status & CDMA_CS_FAIL) {
971*4882a593Smuzhiyun 		status = cadence_nand_check_desc_error(cdns_ctrl,
972*4882a593Smuzhiyun 						       desc_ptr->status);
973*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, ":CDMA error %x\n", desc_ptr->status);
974*4882a593Smuzhiyun 	} else if (desc_ptr->status & CDMA_CS_COMP) {
975*4882a593Smuzhiyun 		/* Descriptor finished with no errors. */
976*4882a593Smuzhiyun 		if (desc_ptr->command_flags & CDMA_CF_CONT) {
977*4882a593Smuzhiyun 			dev_info(cdns_ctrl->dev, "DMA unsupported flag is set");
978*4882a593Smuzhiyun 			status = STAT_UNKNOWN;
979*4882a593Smuzhiyun 		} else {
980*4882a593Smuzhiyun 			/* Last descriptor.  */
981*4882a593Smuzhiyun 			status = STAT_OK;
982*4882a593Smuzhiyun 		}
983*4882a593Smuzhiyun 	}
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	return status;
986*4882a593Smuzhiyun }
987*4882a593Smuzhiyun 
cadence_nand_cdma_send(struct cdns_nand_ctrl * cdns_ctrl,u8 thread)988*4882a593Smuzhiyun static int cadence_nand_cdma_send(struct cdns_nand_ctrl *cdns_ctrl,
989*4882a593Smuzhiyun 				  u8 thread)
990*4882a593Smuzhiyun {
991*4882a593Smuzhiyun 	u32 reg;
992*4882a593Smuzhiyun 	int status;
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	/* Wait for thread ready. */
995*4882a593Smuzhiyun 	status = cadence_nand_wait_for_value(cdns_ctrl, TRD_STATUS,
996*4882a593Smuzhiyun 					     1000000,
997*4882a593Smuzhiyun 					     BIT(thread), true);
998*4882a593Smuzhiyun 	if (status)
999*4882a593Smuzhiyun 		return status;
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	cadence_nand_reset_irq(cdns_ctrl);
1002*4882a593Smuzhiyun 	reinit_completion(&cdns_ctrl->complete);
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	writel_relaxed((u32)cdns_ctrl->dma_cdma_desc,
1005*4882a593Smuzhiyun 		       cdns_ctrl->reg + CMD_REG2);
1006*4882a593Smuzhiyun 	writel_relaxed(0, cdns_ctrl->reg + CMD_REG3);
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	/* Select CDMA mode. */
1009*4882a593Smuzhiyun 	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
1010*4882a593Smuzhiyun 	/* Thread number. */
1011*4882a593Smuzhiyun 	reg |= FIELD_PREP(CMD_REG0_TN, thread);
1012*4882a593Smuzhiyun 	/* Issue command. */
1013*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + CMD_REG0);
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	return 0;
1016*4882a593Smuzhiyun }
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun /* Send SDMA command and wait for finish. */
1019*4882a593Smuzhiyun static u32
cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl * cdns_ctrl,u8 thread)1020*4882a593Smuzhiyun cadence_nand_cdma_send_and_wait(struct cdns_nand_ctrl *cdns_ctrl,
1021*4882a593Smuzhiyun 				u8 thread)
1022*4882a593Smuzhiyun {
1023*4882a593Smuzhiyun 	struct cadence_nand_irq_status irq_mask, irq_status = {0};
1024*4882a593Smuzhiyun 	int status;
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	irq_mask.trd_status = BIT(thread);
1027*4882a593Smuzhiyun 	irq_mask.trd_error = BIT(thread);
1028*4882a593Smuzhiyun 	irq_mask.status = INTR_STATUS_CDMA_TERR;
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	cadence_nand_set_irq_mask(cdns_ctrl, &irq_mask);
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	status = cadence_nand_cdma_send(cdns_ctrl, thread);
1033*4882a593Smuzhiyun 	if (status)
1034*4882a593Smuzhiyun 		return status;
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	cadence_nand_wait_for_irq(cdns_ctrl, &irq_mask, &irq_status);
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	if (irq_status.status == 0 && irq_status.trd_status == 0 &&
1039*4882a593Smuzhiyun 	    irq_status.trd_error == 0) {
1040*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "CDMA command timeout\n");
1041*4882a593Smuzhiyun 		return -ETIMEDOUT;
1042*4882a593Smuzhiyun 	}
1043*4882a593Smuzhiyun 	if (irq_status.status & irq_mask.status) {
1044*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "CDMA command failed\n");
1045*4882a593Smuzhiyun 		return -EIO;
1046*4882a593Smuzhiyun 	}
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	return 0;
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun /*
1052*4882a593Smuzhiyun  * ECC size depends on configured ECC strength and on maximum supported
1053*4882a593Smuzhiyun  * ECC step size.
1054*4882a593Smuzhiyun  */
cadence_nand_calc_ecc_bytes(int max_step_size,int strength)1055*4882a593Smuzhiyun static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun 	int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	return ALIGN(nbytes, 2);
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun #define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
1063*4882a593Smuzhiyun 	static int \
1064*4882a593Smuzhiyun 	cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
1065*4882a593Smuzhiyun 						    int strength)\
1066*4882a593Smuzhiyun 	{\
1067*4882a593Smuzhiyun 		return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
1068*4882a593Smuzhiyun 	}
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun CADENCE_NAND_CALC_ECC_BYTES(256)
1071*4882a593Smuzhiyun CADENCE_NAND_CALC_ECC_BYTES(512)
1072*4882a593Smuzhiyun CADENCE_NAND_CALC_ECC_BYTES(1024)
1073*4882a593Smuzhiyun CADENCE_NAND_CALC_ECC_BYTES(2048)
1074*4882a593Smuzhiyun CADENCE_NAND_CALC_ECC_BYTES(4096)
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun /* Function reads BCH capabilities. */
cadence_nand_read_bch_caps(struct cdns_nand_ctrl * cdns_ctrl)1077*4882a593Smuzhiyun static int cadence_nand_read_bch_caps(struct cdns_nand_ctrl *cdns_ctrl)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun 	struct nand_ecc_caps *ecc_caps = &cdns_ctrl->ecc_caps;
1080*4882a593Smuzhiyun 	int max_step_size = 0, nstrengths, i;
1081*4882a593Smuzhiyun 	u32 reg;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_3);
1084*4882a593Smuzhiyun 	cdns_ctrl->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
1085*4882a593Smuzhiyun 	if (cdns_ctrl->bch_metadata_size < 4) {
1086*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
1087*4882a593Smuzhiyun 			"Driver needs at least 4 bytes of BCH meta data\n");
1088*4882a593Smuzhiyun 		return -EIO;
1089*4882a593Smuzhiyun 	}
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_0);
1092*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
1093*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
1094*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
1095*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_1);
1098*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
1099*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
1100*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
1101*4882a593Smuzhiyun 	cdns_ctrl->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + BCH_CFG_2);
1104*4882a593Smuzhiyun 	cdns_ctrl->ecc_stepinfos[0].stepsize =
1105*4882a593Smuzhiyun 		FIELD_GET(BCH_CFG_2_SECT_0, reg);
1106*4882a593Smuzhiyun 
1107*4882a593Smuzhiyun 	cdns_ctrl->ecc_stepinfos[1].stepsize =
1108*4882a593Smuzhiyun 		FIELD_GET(BCH_CFG_2_SECT_1, reg);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	nstrengths = 0;
1111*4882a593Smuzhiyun 	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
1112*4882a593Smuzhiyun 		if (cdns_ctrl->ecc_strengths[i] != 0)
1113*4882a593Smuzhiyun 			nstrengths++;
1114*4882a593Smuzhiyun 	}
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	ecc_caps->nstepinfos = 0;
1117*4882a593Smuzhiyun 	for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
1118*4882a593Smuzhiyun 		/* ECC strengths are common for all step infos. */
1119*4882a593Smuzhiyun 		cdns_ctrl->ecc_stepinfos[i].nstrengths = nstrengths;
1120*4882a593Smuzhiyun 		cdns_ctrl->ecc_stepinfos[i].strengths =
1121*4882a593Smuzhiyun 			cdns_ctrl->ecc_strengths;
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun 		if (cdns_ctrl->ecc_stepinfos[i].stepsize != 0)
1124*4882a593Smuzhiyun 			ecc_caps->nstepinfos++;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 		if (cdns_ctrl->ecc_stepinfos[i].stepsize > max_step_size)
1127*4882a593Smuzhiyun 			max_step_size = cdns_ctrl->ecc_stepinfos[i].stepsize;
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun 	ecc_caps->stepinfos = &cdns_ctrl->ecc_stepinfos[0];
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun 	switch (max_step_size) {
1132*4882a593Smuzhiyun 	case 256:
1133*4882a593Smuzhiyun 		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
1134*4882a593Smuzhiyun 		break;
1135*4882a593Smuzhiyun 	case 512:
1136*4882a593Smuzhiyun 		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
1137*4882a593Smuzhiyun 		break;
1138*4882a593Smuzhiyun 	case 1024:
1139*4882a593Smuzhiyun 		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
1140*4882a593Smuzhiyun 		break;
1141*4882a593Smuzhiyun 	case 2048:
1142*4882a593Smuzhiyun 		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
1143*4882a593Smuzhiyun 		break;
1144*4882a593Smuzhiyun 	case 4096:
1145*4882a593Smuzhiyun 		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
1146*4882a593Smuzhiyun 		break;
1147*4882a593Smuzhiyun 	default:
1148*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
1149*4882a593Smuzhiyun 			"Unsupported sector size(ecc step size) %d\n",
1150*4882a593Smuzhiyun 			max_step_size);
1151*4882a593Smuzhiyun 		return -EIO;
1152*4882a593Smuzhiyun 	}
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	return 0;
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun /* Hardware initialization. */
cadence_nand_hw_init(struct cdns_nand_ctrl * cdns_ctrl)1158*4882a593Smuzhiyun static int cadence_nand_hw_init(struct cdns_nand_ctrl *cdns_ctrl)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	int status;
1161*4882a593Smuzhiyun 	u32 reg;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	status = cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1164*4882a593Smuzhiyun 					     1000000,
1165*4882a593Smuzhiyun 					     CTRL_STATUS_INIT_COMP, false);
1166*4882a593Smuzhiyun 	if (status)
1167*4882a593Smuzhiyun 		return status;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	reg = readl_relaxed(cdns_ctrl->reg + CTRL_VERSION);
1170*4882a593Smuzhiyun 	cdns_ctrl->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	dev_info(cdns_ctrl->dev,
1173*4882a593Smuzhiyun 		 "%s: cadence nand controller version reg %x\n",
1174*4882a593Smuzhiyun 		 __func__, reg);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	/* Disable cache and multiplane. */
1177*4882a593Smuzhiyun 	writel_relaxed(0, cdns_ctrl->reg + MULTIPLANE_CFG);
1178*4882a593Smuzhiyun 	writel_relaxed(0, cdns_ctrl->reg + CACHE_CFG);
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	/* Clear all interrupts. */
1181*4882a593Smuzhiyun 	writel_relaxed(0xFFFFFFFF, cdns_ctrl->reg + INTR_STATUS);
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	cadence_nand_get_caps(cdns_ctrl);
1184*4882a593Smuzhiyun 	if (cadence_nand_read_bch_caps(cdns_ctrl))
1185*4882a593Smuzhiyun 		return -EIO;
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	/*
1188*4882a593Smuzhiyun 	 * Set IO width access to 8.
1189*4882a593Smuzhiyun 	 * It is because during SW device discovering width access
1190*4882a593Smuzhiyun 	 * is expected to be 8.
1191*4882a593Smuzhiyun 	 */
1192*4882a593Smuzhiyun 	status = cadence_nand_set_access_width16(cdns_ctrl, false);
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	return status;
1195*4882a593Smuzhiyun }
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun #define TT_MAIN_OOB_AREAS	2
1198*4882a593Smuzhiyun #define TT_RAW_PAGE		3
1199*4882a593Smuzhiyun #define TT_BBM			4
1200*4882a593Smuzhiyun #define TT_MAIN_OOB_AREA_EXT	5
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun /* Prepare size of data to transfer. */
1203*4882a593Smuzhiyun static void
cadence_nand_prepare_data_size(struct nand_chip * chip,int transfer_type)1204*4882a593Smuzhiyun cadence_nand_prepare_data_size(struct nand_chip *chip,
1205*4882a593Smuzhiyun 			       int transfer_type)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1208*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1209*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1210*4882a593Smuzhiyun 	u32 sec_size = 0, offset = 0, sec_cnt = 1;
1211*4882a593Smuzhiyun 	u32 last_sec_size = cdns_chip->sector_size;
1212*4882a593Smuzhiyun 	u32 data_ctrl_size = 0;
1213*4882a593Smuzhiyun 	u32 reg = 0;
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	if (cdns_ctrl->curr_trans_type == transfer_type)
1216*4882a593Smuzhiyun 		return;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 	switch (transfer_type) {
1219*4882a593Smuzhiyun 	case TT_MAIN_OOB_AREA_EXT:
1220*4882a593Smuzhiyun 		sec_cnt = cdns_chip->sector_count;
1221*4882a593Smuzhiyun 		sec_size = cdns_chip->sector_size;
1222*4882a593Smuzhiyun 		data_ctrl_size = cdns_chip->avail_oob_size;
1223*4882a593Smuzhiyun 		break;
1224*4882a593Smuzhiyun 	case TT_MAIN_OOB_AREAS:
1225*4882a593Smuzhiyun 		sec_cnt = cdns_chip->sector_count;
1226*4882a593Smuzhiyun 		last_sec_size = cdns_chip->sector_size
1227*4882a593Smuzhiyun 			+ cdns_chip->avail_oob_size;
1228*4882a593Smuzhiyun 		sec_size = cdns_chip->sector_size;
1229*4882a593Smuzhiyun 		break;
1230*4882a593Smuzhiyun 	case TT_RAW_PAGE:
1231*4882a593Smuzhiyun 		last_sec_size = mtd->writesize + mtd->oobsize;
1232*4882a593Smuzhiyun 		break;
1233*4882a593Smuzhiyun 	case TT_BBM:
1234*4882a593Smuzhiyun 		offset = mtd->writesize + cdns_chip->bbm_offs;
1235*4882a593Smuzhiyun 		last_sec_size = 8;
1236*4882a593Smuzhiyun 		break;
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	reg = 0;
1240*4882a593Smuzhiyun 	reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
1241*4882a593Smuzhiyun 	reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
1242*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_0);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	reg = 0;
1245*4882a593Smuzhiyun 	reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
1246*4882a593Smuzhiyun 	reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
1247*4882a593Smuzhiyun 	writel_relaxed(reg, cdns_ctrl->reg + TRAN_CFG_1);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.data_control_supp) {
1250*4882a593Smuzhiyun 		reg = readl_relaxed(cdns_ctrl->reg + CONTROL_DATA_CTRL);
1251*4882a593Smuzhiyun 		reg &= ~CONTROL_DATA_CTRL_SIZE;
1252*4882a593Smuzhiyun 		reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
1253*4882a593Smuzhiyun 		writel_relaxed(reg, cdns_ctrl->reg + CONTROL_DATA_CTRL);
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	cdns_ctrl->curr_trans_type = transfer_type;
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun static int
cadence_nand_cdma_transfer(struct cdns_nand_ctrl * cdns_ctrl,u8 chip_nr,int page,void * buf,void * ctrl_dat,u32 buf_size,u32 ctrl_dat_size,enum dma_data_direction dir,bool with_ecc)1260*4882a593Smuzhiyun cadence_nand_cdma_transfer(struct cdns_nand_ctrl *cdns_ctrl, u8 chip_nr,
1261*4882a593Smuzhiyun 			   int page, void *buf, void *ctrl_dat, u32 buf_size,
1262*4882a593Smuzhiyun 			   u32 ctrl_dat_size, enum dma_data_direction dir,
1263*4882a593Smuzhiyun 			   bool with_ecc)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun 	dma_addr_t dma_buf, dma_ctrl_dat = 0;
1266*4882a593Smuzhiyun 	u8 thread_nr = chip_nr;
1267*4882a593Smuzhiyun 	int status;
1268*4882a593Smuzhiyun 	u16 ctype;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	if (dir == DMA_FROM_DEVICE)
1271*4882a593Smuzhiyun 		ctype = CDMA_CT_RD;
1272*4882a593Smuzhiyun 	else
1273*4882a593Smuzhiyun 		ctype = CDMA_CT_WR;
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	cadence_nand_set_ecc_enable(cdns_ctrl, with_ecc);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	dma_buf = dma_map_single(cdns_ctrl->dev, buf, buf_size, dir);
1278*4882a593Smuzhiyun 	if (dma_mapping_error(cdns_ctrl->dev, dma_buf)) {
1279*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1280*4882a593Smuzhiyun 		return -EIO;
1281*4882a593Smuzhiyun 	}
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	if (ctrl_dat && ctrl_dat_size) {
1284*4882a593Smuzhiyun 		dma_ctrl_dat = dma_map_single(cdns_ctrl->dev, ctrl_dat,
1285*4882a593Smuzhiyun 					      ctrl_dat_size, dir);
1286*4882a593Smuzhiyun 		if (dma_mapping_error(cdns_ctrl->dev, dma_ctrl_dat)) {
1287*4882a593Smuzhiyun 			dma_unmap_single(cdns_ctrl->dev, dma_buf,
1288*4882a593Smuzhiyun 					 buf_size, dir);
1289*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1290*4882a593Smuzhiyun 			return -EIO;
1291*4882a593Smuzhiyun 		}
1292*4882a593Smuzhiyun 	}
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	cadence_nand_cdma_desc_prepare(cdns_ctrl, chip_nr, page,
1295*4882a593Smuzhiyun 				       dma_buf, dma_ctrl_dat, ctype);
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	dma_unmap_single(cdns_ctrl->dev, dma_buf,
1300*4882a593Smuzhiyun 			 buf_size, dir);
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	if (ctrl_dat && ctrl_dat_size)
1303*4882a593Smuzhiyun 		dma_unmap_single(cdns_ctrl->dev, dma_ctrl_dat,
1304*4882a593Smuzhiyun 				 ctrl_dat_size, dir);
1305*4882a593Smuzhiyun 	if (status)
1306*4882a593Smuzhiyun 		return status;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	return cadence_nand_cdma_finish(cdns_ctrl);
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun 
cadence_nand_set_timings(struct cdns_nand_ctrl * cdns_ctrl,struct cadence_nand_timings * t)1311*4882a593Smuzhiyun static void cadence_nand_set_timings(struct cdns_nand_ctrl *cdns_ctrl,
1312*4882a593Smuzhiyun 				     struct cadence_nand_timings *t)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	writel_relaxed(t->async_toggle_timings,
1315*4882a593Smuzhiyun 		       cdns_ctrl->reg + ASYNC_TOGGLE_TIMINGS);
1316*4882a593Smuzhiyun 	writel_relaxed(t->timings0, cdns_ctrl->reg + TIMINGS0);
1317*4882a593Smuzhiyun 	writel_relaxed(t->timings1, cdns_ctrl->reg + TIMINGS1);
1318*4882a593Smuzhiyun 	writel_relaxed(t->timings2, cdns_ctrl->reg + TIMINGS2);
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll)
1321*4882a593Smuzhiyun 		writel_relaxed(t->dll_phy_ctrl, cdns_ctrl->reg + DLL_PHY_CTRL);
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 	writel_relaxed(t->phy_ctrl, cdns_ctrl->reg + PHY_CTRL);
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll) {
1326*4882a593Smuzhiyun 		writel_relaxed(0, cdns_ctrl->reg + PHY_TSEL);
1327*4882a593Smuzhiyun 		writel_relaxed(2, cdns_ctrl->reg + PHY_DQ_TIMING);
1328*4882a593Smuzhiyun 		writel_relaxed(t->phy_dqs_timing,
1329*4882a593Smuzhiyun 			       cdns_ctrl->reg + PHY_DQS_TIMING);
1330*4882a593Smuzhiyun 		writel_relaxed(t->phy_gate_lpbk_ctrl,
1331*4882a593Smuzhiyun 			       cdns_ctrl->reg + PHY_GATE_LPBK_CTRL);
1332*4882a593Smuzhiyun 		writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
1333*4882a593Smuzhiyun 			       cdns_ctrl->reg + PHY_DLL_MASTER_CTRL);
1334*4882a593Smuzhiyun 		writel_relaxed(0, cdns_ctrl->reg + PHY_DLL_SLAVE_CTRL);
1335*4882a593Smuzhiyun 	}
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun 
cadence_nand_select_target(struct nand_chip * chip)1338*4882a593Smuzhiyun static int cadence_nand_select_target(struct nand_chip *chip)
1339*4882a593Smuzhiyun {
1340*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1341*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	if (chip == cdns_ctrl->selected_chip)
1344*4882a593Smuzhiyun 		return 0;
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
1347*4882a593Smuzhiyun 					1000000,
1348*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
1349*4882a593Smuzhiyun 		return -ETIMEDOUT;
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	cadence_nand_set_timings(cdns_ctrl, &cdns_chip->timings);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	cadence_nand_set_ecc_strength(cdns_ctrl,
1354*4882a593Smuzhiyun 				      cdns_chip->corr_str_idx);
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	cadence_nand_set_erase_detection(cdns_ctrl, true,
1357*4882a593Smuzhiyun 					 chip->ecc.strength);
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 	cdns_ctrl->curr_trans_type = -1;
1360*4882a593Smuzhiyun 	cdns_ctrl->selected_chip = chip;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	return 0;
1363*4882a593Smuzhiyun }
1364*4882a593Smuzhiyun 
cadence_nand_erase(struct nand_chip * chip,u32 page)1365*4882a593Smuzhiyun static int cadence_nand_erase(struct nand_chip *chip, u32 page)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1368*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1369*4882a593Smuzhiyun 	int status;
1370*4882a593Smuzhiyun 	u8 thread_nr = cdns_chip->cs[chip->cur_cs];
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	cadence_nand_cdma_desc_prepare(cdns_ctrl,
1373*4882a593Smuzhiyun 				       cdns_chip->cs[chip->cur_cs],
1374*4882a593Smuzhiyun 				       page, 0, 0,
1375*4882a593Smuzhiyun 				       CDMA_CT_ERASE);
1376*4882a593Smuzhiyun 	status = cadence_nand_cdma_send_and_wait(cdns_ctrl, thread_nr);
1377*4882a593Smuzhiyun 	if (status) {
1378*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "erase operation failed\n");
1379*4882a593Smuzhiyun 		return -EIO;
1380*4882a593Smuzhiyun 	}
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	status = cadence_nand_cdma_finish(cdns_ctrl);
1383*4882a593Smuzhiyun 	if (status)
1384*4882a593Smuzhiyun 		return status;
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun 	return 0;
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun 
cadence_nand_read_bbm(struct nand_chip * chip,int page,u8 * buf)1389*4882a593Smuzhiyun static int cadence_nand_read_bbm(struct nand_chip *chip, int page, u8 *buf)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	int status;
1392*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1393*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1394*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	cadence_nand_prepare_data_size(chip, TT_BBM);
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	/*
1401*4882a593Smuzhiyun 	 * Read only bad block marker from offset
1402*4882a593Smuzhiyun 	 * defined by a memory manufacturer.
1403*4882a593Smuzhiyun 	 */
1404*4882a593Smuzhiyun 	status = cadence_nand_cdma_transfer(cdns_ctrl,
1405*4882a593Smuzhiyun 					    cdns_chip->cs[chip->cur_cs],
1406*4882a593Smuzhiyun 					    page, cdns_ctrl->buf, NULL,
1407*4882a593Smuzhiyun 					    mtd->oobsize,
1408*4882a593Smuzhiyun 					    0, DMA_FROM_DEVICE, false);
1409*4882a593Smuzhiyun 	if (status) {
1410*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "read BBM failed\n");
1411*4882a593Smuzhiyun 		return -EIO;
1412*4882a593Smuzhiyun 	}
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	memcpy(buf + cdns_chip->bbm_offs, cdns_ctrl->buf, cdns_chip->bbm_len);
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	return 0;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun 
cadence_nand_write_page(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1419*4882a593Smuzhiyun static int cadence_nand_write_page(struct nand_chip *chip,
1420*4882a593Smuzhiyun 				   const u8 *buf, int oob_required,
1421*4882a593Smuzhiyun 				   int page)
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1424*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1425*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1426*4882a593Smuzhiyun 	int status;
1427*4882a593Smuzhiyun 	u16 marker_val = 0xFFFF;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	status = cadence_nand_select_target(chip);
1430*4882a593Smuzhiyun 	if (status)
1431*4882a593Smuzhiyun 		return status;
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1434*4882a593Smuzhiyun 					 mtd->writesize
1435*4882a593Smuzhiyun 					 + cdns_chip->bbm_offs,
1436*4882a593Smuzhiyun 					 1);
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun 	if (oob_required) {
1439*4882a593Smuzhiyun 		marker_val = *(u16 *)(chip->oob_poi
1440*4882a593Smuzhiyun 				      + cdns_chip->bbm_offs);
1441*4882a593Smuzhiyun 	} else {
1442*4882a593Smuzhiyun 		/* Set oob data to 0xFF. */
1443*4882a593Smuzhiyun 		memset(cdns_ctrl->buf + mtd->writesize, 0xFF,
1444*4882a593Smuzhiyun 		       cdns_chip->avail_oob_size);
1445*4882a593Smuzhiyun 	}
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	cadence_nand_set_skip_marker_val(cdns_ctrl, marker_val);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1452*4882a593Smuzhiyun 	    cdns_ctrl->caps2.data_control_supp) {
1453*4882a593Smuzhiyun 		u8 *oob;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 		if (oob_required)
1456*4882a593Smuzhiyun 			oob = chip->oob_poi;
1457*4882a593Smuzhiyun 		else
1458*4882a593Smuzhiyun 			oob = cdns_ctrl->buf + mtd->writesize;
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 		status = cadence_nand_cdma_transfer(cdns_ctrl,
1461*4882a593Smuzhiyun 						    cdns_chip->cs[chip->cur_cs],
1462*4882a593Smuzhiyun 						    page, (void *)buf, oob,
1463*4882a593Smuzhiyun 						    mtd->writesize,
1464*4882a593Smuzhiyun 						    cdns_chip->avail_oob_size,
1465*4882a593Smuzhiyun 						    DMA_TO_DEVICE, true);
1466*4882a593Smuzhiyun 		if (status) {
1467*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev, "write page failed\n");
1468*4882a593Smuzhiyun 			return -EIO;
1469*4882a593Smuzhiyun 		}
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 		return 0;
1472*4882a593Smuzhiyun 	}
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	if (oob_required) {
1475*4882a593Smuzhiyun 		/* Transfer the data to the oob area. */
1476*4882a593Smuzhiyun 		memcpy(cdns_ctrl->buf + mtd->writesize, chip->oob_poi,
1477*4882a593Smuzhiyun 		       cdns_chip->avail_oob_size);
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	memcpy(cdns_ctrl->buf, buf, mtd->writesize);
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	return cadence_nand_cdma_transfer(cdns_ctrl,
1485*4882a593Smuzhiyun 					  cdns_chip->cs[chip->cur_cs],
1486*4882a593Smuzhiyun 					  page, cdns_ctrl->buf, NULL,
1487*4882a593Smuzhiyun 					  mtd->writesize
1488*4882a593Smuzhiyun 					  + cdns_chip->avail_oob_size,
1489*4882a593Smuzhiyun 					  0, DMA_TO_DEVICE, true);
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
cadence_nand_write_oob(struct nand_chip * chip,int page)1492*4882a593Smuzhiyun static int cadence_nand_write_oob(struct nand_chip *chip, int page)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1495*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	memset(cdns_ctrl->buf, 0xFF, mtd->writesize);
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	return cadence_nand_write_page(chip, cdns_ctrl->buf, 1, page);
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
cadence_nand_write_page_raw(struct nand_chip * chip,const u8 * buf,int oob_required,int page)1502*4882a593Smuzhiyun static int cadence_nand_write_page_raw(struct nand_chip *chip,
1503*4882a593Smuzhiyun 				       const u8 *buf, int oob_required,
1504*4882a593Smuzhiyun 				       int page)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1507*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1508*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1509*4882a593Smuzhiyun 	int writesize = mtd->writesize;
1510*4882a593Smuzhiyun 	int oobsize = mtd->oobsize;
1511*4882a593Smuzhiyun 	int ecc_steps = chip->ecc.steps;
1512*4882a593Smuzhiyun 	int ecc_size = chip->ecc.size;
1513*4882a593Smuzhiyun 	int ecc_bytes = chip->ecc.bytes;
1514*4882a593Smuzhiyun 	void *tmp_buf = cdns_ctrl->buf;
1515*4882a593Smuzhiyun 	int oob_skip = cdns_chip->bbm_len;
1516*4882a593Smuzhiyun 	size_t size = writesize + oobsize;
1517*4882a593Smuzhiyun 	int i, pos, len;
1518*4882a593Smuzhiyun 	int status = 0;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	status = cadence_nand_select_target(chip);
1521*4882a593Smuzhiyun 	if (status)
1522*4882a593Smuzhiyun 		return status;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	/*
1525*4882a593Smuzhiyun 	 * Fill the buffer with 0xff first except the full page transfer.
1526*4882a593Smuzhiyun 	 * This simplifies the logic.
1527*4882a593Smuzhiyun 	 */
1528*4882a593Smuzhiyun 	if (!buf || !oob_required)
1529*4882a593Smuzhiyun 		memset(tmp_buf, 0xff, size);
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	/* Arrange the buffer for syndrome payload/ecc layout. */
1534*4882a593Smuzhiyun 	if (buf) {
1535*4882a593Smuzhiyun 		for (i = 0; i < ecc_steps; i++) {
1536*4882a593Smuzhiyun 			pos = i * (ecc_size + ecc_bytes);
1537*4882a593Smuzhiyun 			len = ecc_size;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 			if (pos >= writesize)
1540*4882a593Smuzhiyun 				pos += oob_skip;
1541*4882a593Smuzhiyun 			else if (pos + len > writesize)
1542*4882a593Smuzhiyun 				len = writesize - pos;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 			memcpy(tmp_buf + pos, buf, len);
1545*4882a593Smuzhiyun 			buf += len;
1546*4882a593Smuzhiyun 			if (len < ecc_size) {
1547*4882a593Smuzhiyun 				len = ecc_size - len;
1548*4882a593Smuzhiyun 				memcpy(tmp_buf + writesize + oob_skip, buf,
1549*4882a593Smuzhiyun 				       len);
1550*4882a593Smuzhiyun 				buf += len;
1551*4882a593Smuzhiyun 			}
1552*4882a593Smuzhiyun 		}
1553*4882a593Smuzhiyun 	}
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	if (oob_required) {
1556*4882a593Smuzhiyun 		const u8 *oob = chip->oob_poi;
1557*4882a593Smuzhiyun 		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1558*4882a593Smuzhiyun 			(cdns_chip->sector_size + chip->ecc.bytes)
1559*4882a593Smuzhiyun 			+ cdns_chip->sector_size + oob_skip;
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 		/* BBM at the beginning of the OOB area. */
1562*4882a593Smuzhiyun 		memcpy(tmp_buf + writesize, oob, oob_skip);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 		/* OOB free. */
1565*4882a593Smuzhiyun 		memcpy(tmp_buf + oob_data_offset, oob,
1566*4882a593Smuzhiyun 		       cdns_chip->avail_oob_size);
1567*4882a593Smuzhiyun 		oob += cdns_chip->avail_oob_size;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 		/* OOB ECC. */
1570*4882a593Smuzhiyun 		for (i = 0; i < ecc_steps; i++) {
1571*4882a593Smuzhiyun 			pos = ecc_size + i * (ecc_size + ecc_bytes);
1572*4882a593Smuzhiyun 			if (i == (ecc_steps - 1))
1573*4882a593Smuzhiyun 				pos += cdns_chip->avail_oob_size;
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 			len = ecc_bytes;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 			if (pos >= writesize)
1578*4882a593Smuzhiyun 				pos += oob_skip;
1579*4882a593Smuzhiyun 			else if (pos + len > writesize)
1580*4882a593Smuzhiyun 				len = writesize - pos;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 			memcpy(tmp_buf + pos, oob, len);
1583*4882a593Smuzhiyun 			oob += len;
1584*4882a593Smuzhiyun 			if (len < ecc_bytes) {
1585*4882a593Smuzhiyun 				len = ecc_bytes - len;
1586*4882a593Smuzhiyun 				memcpy(tmp_buf + writesize + oob_skip, oob,
1587*4882a593Smuzhiyun 				       len);
1588*4882a593Smuzhiyun 				oob += len;
1589*4882a593Smuzhiyun 			}
1590*4882a593Smuzhiyun 		}
1591*4882a593Smuzhiyun 	}
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	return cadence_nand_cdma_transfer(cdns_ctrl,
1596*4882a593Smuzhiyun 					  cdns_chip->cs[chip->cur_cs],
1597*4882a593Smuzhiyun 					  page, cdns_ctrl->buf, NULL,
1598*4882a593Smuzhiyun 					  mtd->writesize +
1599*4882a593Smuzhiyun 					  mtd->oobsize,
1600*4882a593Smuzhiyun 					  0, DMA_TO_DEVICE, false);
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun 
cadence_nand_write_oob_raw(struct nand_chip * chip,int page)1603*4882a593Smuzhiyun static int cadence_nand_write_oob_raw(struct nand_chip *chip,
1604*4882a593Smuzhiyun 				      int page)
1605*4882a593Smuzhiyun {
1606*4882a593Smuzhiyun 	return cadence_nand_write_page_raw(chip, NULL, true, page);
1607*4882a593Smuzhiyun }
1608*4882a593Smuzhiyun 
cadence_nand_read_page(struct nand_chip * chip,u8 * buf,int oob_required,int page)1609*4882a593Smuzhiyun static int cadence_nand_read_page(struct nand_chip *chip,
1610*4882a593Smuzhiyun 				  u8 *buf, int oob_required, int page)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1613*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1614*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1615*4882a593Smuzhiyun 	int status = 0;
1616*4882a593Smuzhiyun 	int ecc_err_count = 0;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	status = cadence_nand_select_target(chip);
1619*4882a593Smuzhiyun 	if (status)
1620*4882a593Smuzhiyun 		return status;
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	cadence_nand_set_skip_bytes_conf(cdns_ctrl, cdns_chip->bbm_len,
1623*4882a593Smuzhiyun 					 mtd->writesize
1624*4882a593Smuzhiyun 					 + cdns_chip->bbm_offs, 1);
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	/*
1627*4882a593Smuzhiyun 	 * If data buffer can be accessed by DMA and data_control feature
1628*4882a593Smuzhiyun 	 * is supported then transfer data and oob directly.
1629*4882a593Smuzhiyun 	 */
1630*4882a593Smuzhiyun 	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, mtd->writesize) &&
1631*4882a593Smuzhiyun 	    cdns_ctrl->caps2.data_control_supp) {
1632*4882a593Smuzhiyun 		u8 *oob;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 		if (oob_required)
1635*4882a593Smuzhiyun 			oob = chip->oob_poi;
1636*4882a593Smuzhiyun 		else
1637*4882a593Smuzhiyun 			oob = cdns_ctrl->buf + mtd->writesize;
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 		cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREA_EXT);
1640*4882a593Smuzhiyun 		status = cadence_nand_cdma_transfer(cdns_ctrl,
1641*4882a593Smuzhiyun 						    cdns_chip->cs[chip->cur_cs],
1642*4882a593Smuzhiyun 						    page, buf, oob,
1643*4882a593Smuzhiyun 						    mtd->writesize,
1644*4882a593Smuzhiyun 						    cdns_chip->avail_oob_size,
1645*4882a593Smuzhiyun 						    DMA_FROM_DEVICE, true);
1646*4882a593Smuzhiyun 	/* Otherwise use bounce buffer. */
1647*4882a593Smuzhiyun 	} else {
1648*4882a593Smuzhiyun 		cadence_nand_prepare_data_size(chip, TT_MAIN_OOB_AREAS);
1649*4882a593Smuzhiyun 		status = cadence_nand_cdma_transfer(cdns_ctrl,
1650*4882a593Smuzhiyun 						    cdns_chip->cs[chip->cur_cs],
1651*4882a593Smuzhiyun 						    page, cdns_ctrl->buf,
1652*4882a593Smuzhiyun 						    NULL, mtd->writesize
1653*4882a593Smuzhiyun 						    + cdns_chip->avail_oob_size,
1654*4882a593Smuzhiyun 						    0, DMA_FROM_DEVICE, true);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 		memcpy(buf, cdns_ctrl->buf, mtd->writesize);
1657*4882a593Smuzhiyun 		if (oob_required)
1658*4882a593Smuzhiyun 			memcpy(chip->oob_poi,
1659*4882a593Smuzhiyun 			       cdns_ctrl->buf + mtd->writesize,
1660*4882a593Smuzhiyun 			       mtd->oobsize);
1661*4882a593Smuzhiyun 	}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	switch (status) {
1664*4882a593Smuzhiyun 	case STAT_ECC_UNCORR:
1665*4882a593Smuzhiyun 		mtd->ecc_stats.failed++;
1666*4882a593Smuzhiyun 		ecc_err_count++;
1667*4882a593Smuzhiyun 		break;
1668*4882a593Smuzhiyun 	case STAT_ECC_CORR:
1669*4882a593Smuzhiyun 		ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
1670*4882a593Smuzhiyun 					  cdns_ctrl->cdma_desc->status);
1671*4882a593Smuzhiyun 		mtd->ecc_stats.corrected += ecc_err_count;
1672*4882a593Smuzhiyun 		break;
1673*4882a593Smuzhiyun 	case STAT_ERASED:
1674*4882a593Smuzhiyun 	case STAT_OK:
1675*4882a593Smuzhiyun 		break;
1676*4882a593Smuzhiyun 	default:
1677*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "read page failed\n");
1678*4882a593Smuzhiyun 		return -EIO;
1679*4882a593Smuzhiyun 	}
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	if (oob_required)
1682*4882a593Smuzhiyun 		if (cadence_nand_read_bbm(chip, page, chip->oob_poi))
1683*4882a593Smuzhiyun 			return -EIO;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	return ecc_err_count;
1686*4882a593Smuzhiyun }
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun /* Reads OOB data from the device. */
cadence_nand_read_oob(struct nand_chip * chip,int page)1689*4882a593Smuzhiyun static int cadence_nand_read_oob(struct nand_chip *chip, int page)
1690*4882a593Smuzhiyun {
1691*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	return cadence_nand_read_page(chip, cdns_ctrl->buf, 1, page);
1694*4882a593Smuzhiyun }
1695*4882a593Smuzhiyun 
cadence_nand_read_page_raw(struct nand_chip * chip,u8 * buf,int oob_required,int page)1696*4882a593Smuzhiyun static int cadence_nand_read_page_raw(struct nand_chip *chip,
1697*4882a593Smuzhiyun 				      u8 *buf, int oob_required, int page)
1698*4882a593Smuzhiyun {
1699*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1700*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1701*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1702*4882a593Smuzhiyun 	int oob_skip = cdns_chip->bbm_len;
1703*4882a593Smuzhiyun 	int writesize = mtd->writesize;
1704*4882a593Smuzhiyun 	int ecc_steps = chip->ecc.steps;
1705*4882a593Smuzhiyun 	int ecc_size = chip->ecc.size;
1706*4882a593Smuzhiyun 	int ecc_bytes = chip->ecc.bytes;
1707*4882a593Smuzhiyun 	void *tmp_buf = cdns_ctrl->buf;
1708*4882a593Smuzhiyun 	int i, pos, len;
1709*4882a593Smuzhiyun 	int status = 0;
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	status = cadence_nand_select_target(chip);
1712*4882a593Smuzhiyun 	if (status)
1713*4882a593Smuzhiyun 		return status;
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	cadence_nand_set_skip_bytes_conf(cdns_ctrl, 0, 0, 0);
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	cadence_nand_prepare_data_size(chip, TT_RAW_PAGE);
1718*4882a593Smuzhiyun 	status = cadence_nand_cdma_transfer(cdns_ctrl,
1719*4882a593Smuzhiyun 					    cdns_chip->cs[chip->cur_cs],
1720*4882a593Smuzhiyun 					    page, cdns_ctrl->buf, NULL,
1721*4882a593Smuzhiyun 					    mtd->writesize
1722*4882a593Smuzhiyun 					    + mtd->oobsize,
1723*4882a593Smuzhiyun 					    0, DMA_FROM_DEVICE, false);
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	switch (status) {
1726*4882a593Smuzhiyun 	case STAT_ERASED:
1727*4882a593Smuzhiyun 	case STAT_OK:
1728*4882a593Smuzhiyun 		break;
1729*4882a593Smuzhiyun 	default:
1730*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "read raw page failed\n");
1731*4882a593Smuzhiyun 		return -EIO;
1732*4882a593Smuzhiyun 	}
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/* Arrange the buffer for syndrome payload/ecc layout. */
1735*4882a593Smuzhiyun 	if (buf) {
1736*4882a593Smuzhiyun 		for (i = 0; i < ecc_steps; i++) {
1737*4882a593Smuzhiyun 			pos = i * (ecc_size + ecc_bytes);
1738*4882a593Smuzhiyun 			len = ecc_size;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 			if (pos >= writesize)
1741*4882a593Smuzhiyun 				pos += oob_skip;
1742*4882a593Smuzhiyun 			else if (pos + len > writesize)
1743*4882a593Smuzhiyun 				len = writesize - pos;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 			memcpy(buf, tmp_buf + pos, len);
1746*4882a593Smuzhiyun 			buf += len;
1747*4882a593Smuzhiyun 			if (len < ecc_size) {
1748*4882a593Smuzhiyun 				len = ecc_size - len;
1749*4882a593Smuzhiyun 				memcpy(buf, tmp_buf + writesize + oob_skip,
1750*4882a593Smuzhiyun 				       len);
1751*4882a593Smuzhiyun 				buf += len;
1752*4882a593Smuzhiyun 			}
1753*4882a593Smuzhiyun 		}
1754*4882a593Smuzhiyun 	}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 	if (oob_required) {
1757*4882a593Smuzhiyun 		u8 *oob = chip->oob_poi;
1758*4882a593Smuzhiyun 		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
1759*4882a593Smuzhiyun 			(cdns_chip->sector_size + chip->ecc.bytes)
1760*4882a593Smuzhiyun 			+ cdns_chip->sector_size + oob_skip;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 		/* OOB free. */
1763*4882a593Smuzhiyun 		memcpy(oob, tmp_buf + oob_data_offset,
1764*4882a593Smuzhiyun 		       cdns_chip->avail_oob_size);
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 		/* BBM at the beginning of the OOB area. */
1767*4882a593Smuzhiyun 		memcpy(oob, tmp_buf + writesize, oob_skip);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 		oob += cdns_chip->avail_oob_size;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 		/* OOB ECC */
1772*4882a593Smuzhiyun 		for (i = 0; i < ecc_steps; i++) {
1773*4882a593Smuzhiyun 			pos = ecc_size + i * (ecc_size + ecc_bytes);
1774*4882a593Smuzhiyun 			len = ecc_bytes;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 			if (i == (ecc_steps - 1))
1777*4882a593Smuzhiyun 				pos += cdns_chip->avail_oob_size;
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun 			if (pos >= writesize)
1780*4882a593Smuzhiyun 				pos += oob_skip;
1781*4882a593Smuzhiyun 			else if (pos + len > writesize)
1782*4882a593Smuzhiyun 				len = writesize - pos;
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 			memcpy(oob, tmp_buf + pos, len);
1785*4882a593Smuzhiyun 			oob += len;
1786*4882a593Smuzhiyun 			if (len < ecc_bytes) {
1787*4882a593Smuzhiyun 				len = ecc_bytes - len;
1788*4882a593Smuzhiyun 				memcpy(oob, tmp_buf + writesize + oob_skip,
1789*4882a593Smuzhiyun 				       len);
1790*4882a593Smuzhiyun 				oob += len;
1791*4882a593Smuzhiyun 			}
1792*4882a593Smuzhiyun 		}
1793*4882a593Smuzhiyun 	}
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	return 0;
1796*4882a593Smuzhiyun }
1797*4882a593Smuzhiyun 
cadence_nand_read_oob_raw(struct nand_chip * chip,int page)1798*4882a593Smuzhiyun static int cadence_nand_read_oob_raw(struct nand_chip *chip,
1799*4882a593Smuzhiyun 				     int page)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun 	return cadence_nand_read_page_raw(chip, NULL, true, page);
1802*4882a593Smuzhiyun }
1803*4882a593Smuzhiyun 
cadence_nand_slave_dma_transfer_finished(void * data)1804*4882a593Smuzhiyun static void cadence_nand_slave_dma_transfer_finished(void *data)
1805*4882a593Smuzhiyun {
1806*4882a593Smuzhiyun 	struct completion *finished = data;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	complete(finished);
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun 
cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl * cdns_ctrl,void * buf,dma_addr_t dev_dma,size_t len,enum dma_data_direction dir)1811*4882a593Smuzhiyun static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
1812*4882a593Smuzhiyun 					   void *buf,
1813*4882a593Smuzhiyun 					   dma_addr_t dev_dma, size_t len,
1814*4882a593Smuzhiyun 					   enum dma_data_direction dir)
1815*4882a593Smuzhiyun {
1816*4882a593Smuzhiyun 	DECLARE_COMPLETION_ONSTACK(finished);
1817*4882a593Smuzhiyun 	struct dma_chan *chan;
1818*4882a593Smuzhiyun 	struct dma_device *dma_dev;
1819*4882a593Smuzhiyun 	dma_addr_t src_dma, dst_dma, buf_dma;
1820*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *tx;
1821*4882a593Smuzhiyun 	dma_cookie_t cookie;
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	chan = cdns_ctrl->dmac;
1824*4882a593Smuzhiyun 	dma_dev = chan->device;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	buf_dma = dma_map_single(dma_dev->dev, buf, len, dir);
1827*4882a593Smuzhiyun 	if (dma_mapping_error(dma_dev->dev, buf_dma)) {
1828*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Failed to map DMA buffer\n");
1829*4882a593Smuzhiyun 		goto err;
1830*4882a593Smuzhiyun 	}
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun 	if (dir == DMA_FROM_DEVICE) {
1833*4882a593Smuzhiyun 		src_dma = cdns_ctrl->io.dma;
1834*4882a593Smuzhiyun 		dst_dma = buf_dma;
1835*4882a593Smuzhiyun 	} else {
1836*4882a593Smuzhiyun 		src_dma = buf_dma;
1837*4882a593Smuzhiyun 		dst_dma = cdns_ctrl->io.dma;
1838*4882a593Smuzhiyun 	}
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
1841*4882a593Smuzhiyun 				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
1842*4882a593Smuzhiyun 	if (!tx) {
1843*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Failed to prepare DMA memcpy\n");
1844*4882a593Smuzhiyun 		goto err_unmap;
1845*4882a593Smuzhiyun 	}
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	tx->callback = cadence_nand_slave_dma_transfer_finished;
1848*4882a593Smuzhiyun 	tx->callback_param = &finished;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	cookie = dmaengine_submit(tx);
1851*4882a593Smuzhiyun 	if (dma_submit_error(cookie)) {
1852*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Failed to do DMA tx_submit\n");
1853*4882a593Smuzhiyun 		goto err_unmap;
1854*4882a593Smuzhiyun 	}
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	dma_async_issue_pending(cdns_ctrl->dmac);
1857*4882a593Smuzhiyun 	wait_for_completion(&finished);
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	return 0;
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun err_unmap:
1864*4882a593Smuzhiyun 	dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun err:
1867*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	return -EIO;
1870*4882a593Smuzhiyun }
1871*4882a593Smuzhiyun 
cadence_nand_read_buf(struct cdns_nand_ctrl * cdns_ctrl,u8 * buf,int len)1872*4882a593Smuzhiyun static int cadence_nand_read_buf(struct cdns_nand_ctrl *cdns_ctrl,
1873*4882a593Smuzhiyun 				 u8 *buf, int len)
1874*4882a593Smuzhiyun {
1875*4882a593Smuzhiyun 	u8 thread_nr = 0;
1876*4882a593Smuzhiyun 	u32 sdma_size;
1877*4882a593Smuzhiyun 	int status;
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 	/* Wait until slave DMA interface is ready to data transfer. */
1880*4882a593Smuzhiyun 	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1881*4882a593Smuzhiyun 	if (status)
1882*4882a593Smuzhiyun 		return status;
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	if (!cdns_ctrl->caps1->has_dma) {
1885*4882a593Smuzhiyun 		int len_in_words = len >> 2;
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 		/* read alingment data */
1888*4882a593Smuzhiyun 		ioread32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1889*4882a593Smuzhiyun 		if (sdma_size > len) {
1890*4882a593Smuzhiyun 			/* read rest data from slave DMA interface if any */
1891*4882a593Smuzhiyun 			ioread32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1892*4882a593Smuzhiyun 				     sdma_size / 4 - len_in_words);
1893*4882a593Smuzhiyun 			/* copy rest of data */
1894*4882a593Smuzhiyun 			memcpy(buf + (len_in_words << 2), cdns_ctrl->buf,
1895*4882a593Smuzhiyun 			       len - (len_in_words << 2));
1896*4882a593Smuzhiyun 		}
1897*4882a593Smuzhiyun 		return 0;
1898*4882a593Smuzhiyun 	}
1899*4882a593Smuzhiyun 
1900*4882a593Smuzhiyun 	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1901*4882a593Smuzhiyun 		status = cadence_nand_slave_dma_transfer(cdns_ctrl, buf,
1902*4882a593Smuzhiyun 							 cdns_ctrl->io.dma,
1903*4882a593Smuzhiyun 							 len, DMA_FROM_DEVICE);
1904*4882a593Smuzhiyun 		if (status == 0)
1905*4882a593Smuzhiyun 			return 0;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 		dev_warn(cdns_ctrl->dev,
1908*4882a593Smuzhiyun 			 "Slave DMA transfer failed. Try again using bounce buffer.");
1909*4882a593Smuzhiyun 	}
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun 	/* If DMA transfer is not possible or failed then use bounce buffer. */
1912*4882a593Smuzhiyun 	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1913*4882a593Smuzhiyun 						 cdns_ctrl->io.dma,
1914*4882a593Smuzhiyun 						 sdma_size, DMA_FROM_DEVICE);
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	if (status) {
1917*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
1918*4882a593Smuzhiyun 		return status;
1919*4882a593Smuzhiyun 	}
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	memcpy(buf, cdns_ctrl->buf, len);
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	return 0;
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun 
cadence_nand_write_buf(struct cdns_nand_ctrl * cdns_ctrl,const u8 * buf,int len)1926*4882a593Smuzhiyun static int cadence_nand_write_buf(struct cdns_nand_ctrl *cdns_ctrl,
1927*4882a593Smuzhiyun 				  const u8 *buf, int len)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun 	u8 thread_nr = 0;
1930*4882a593Smuzhiyun 	u32 sdma_size;
1931*4882a593Smuzhiyun 	int status;
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 	/* Wait until slave DMA interface is ready to data transfer. */
1934*4882a593Smuzhiyun 	status = cadence_nand_wait_on_sdma(cdns_ctrl, &thread_nr, &sdma_size);
1935*4882a593Smuzhiyun 	if (status)
1936*4882a593Smuzhiyun 		return status;
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	if (!cdns_ctrl->caps1->has_dma) {
1939*4882a593Smuzhiyun 		int len_in_words = len >> 2;
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun 		iowrite32_rep(cdns_ctrl->io.virt, buf, len_in_words);
1942*4882a593Smuzhiyun 		if (sdma_size > len) {
1943*4882a593Smuzhiyun 			/* copy rest of data */
1944*4882a593Smuzhiyun 			memcpy(cdns_ctrl->buf, buf + (len_in_words << 2),
1945*4882a593Smuzhiyun 			       len - (len_in_words << 2));
1946*4882a593Smuzhiyun 			/* write all expected by nand controller data */
1947*4882a593Smuzhiyun 			iowrite32_rep(cdns_ctrl->io.virt, cdns_ctrl->buf,
1948*4882a593Smuzhiyun 				      sdma_size / 4 - len_in_words);
1949*4882a593Smuzhiyun 		}
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 		return 0;
1952*4882a593Smuzhiyun 	}
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	if (cadence_nand_dma_buf_ok(cdns_ctrl, buf, len)) {
1955*4882a593Smuzhiyun 		status = cadence_nand_slave_dma_transfer(cdns_ctrl, (void *)buf,
1956*4882a593Smuzhiyun 							 cdns_ctrl->io.dma,
1957*4882a593Smuzhiyun 							 len, DMA_TO_DEVICE);
1958*4882a593Smuzhiyun 		if (status == 0)
1959*4882a593Smuzhiyun 			return 0;
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 		dev_warn(cdns_ctrl->dev,
1962*4882a593Smuzhiyun 			 "Slave DMA transfer failed. Try again using bounce buffer.");
1963*4882a593Smuzhiyun 	}
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	/* If DMA transfer is not possible or failed then use bounce buffer. */
1966*4882a593Smuzhiyun 	memcpy(cdns_ctrl->buf, buf, len);
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	status = cadence_nand_slave_dma_transfer(cdns_ctrl, cdns_ctrl->buf,
1969*4882a593Smuzhiyun 						 cdns_ctrl->io.dma,
1970*4882a593Smuzhiyun 						 sdma_size, DMA_TO_DEVICE);
1971*4882a593Smuzhiyun 
1972*4882a593Smuzhiyun 	if (status)
1973*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Slave DMA transfer failed");
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	return status;
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun 
cadence_nand_force_byte_access(struct nand_chip * chip,bool force_8bit)1978*4882a593Smuzhiyun static int cadence_nand_force_byte_access(struct nand_chip *chip,
1979*4882a593Smuzhiyun 					  bool force_8bit)
1980*4882a593Smuzhiyun {
1981*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
1982*4882a593Smuzhiyun 	int status;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	/*
1985*4882a593Smuzhiyun 	 * Callers of this function do not verify if the NAND is using a 16-bit
1986*4882a593Smuzhiyun 	 * an 8-bit bus for normal operations, so we need to take care of that
1987*4882a593Smuzhiyun 	 * here by leaving the configuration unchanged if the NAND does not have
1988*4882a593Smuzhiyun 	 * the NAND_BUSWIDTH_16 flag set.
1989*4882a593Smuzhiyun 	 */
1990*4882a593Smuzhiyun 	if (!(chip->options & NAND_BUSWIDTH_16))
1991*4882a593Smuzhiyun 		return 0;
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	status = cadence_nand_set_access_width16(cdns_ctrl, !force_8bit);
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	return status;
1996*4882a593Smuzhiyun }
1997*4882a593Smuzhiyun 
cadence_nand_cmd_opcode(struct nand_chip * chip,const struct nand_subop * subop)1998*4882a593Smuzhiyun static int cadence_nand_cmd_opcode(struct nand_chip *chip,
1999*4882a593Smuzhiyun 				   const struct nand_subop *subop)
2000*4882a593Smuzhiyun {
2001*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2002*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2003*4882a593Smuzhiyun 	const struct nand_op_instr *instr;
2004*4882a593Smuzhiyun 	unsigned int op_id = 0;
2005*4882a593Smuzhiyun 	u64 mini_ctrl_cmd = 0;
2006*4882a593Smuzhiyun 	int ret;
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	instr = &subop->instrs[op_id];
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun 	if (instr->delay_ns > 0)
2011*4882a593Smuzhiyun 		mini_ctrl_cmd |= GCMD_LAY_TWB;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2014*4882a593Smuzhiyun 				    GCMD_LAY_INSTR_CMD);
2015*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD,
2016*4882a593Smuzhiyun 				    instr->ctx.cmd.opcode);
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2019*4882a593Smuzhiyun 					    cdns_chip->cs[chip->cur_cs],
2020*4882a593Smuzhiyun 					    mini_ctrl_cmd);
2021*4882a593Smuzhiyun 	if (ret)
2022*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "send cmd %x failed\n",
2023*4882a593Smuzhiyun 			instr->ctx.cmd.opcode);
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun 	return ret;
2026*4882a593Smuzhiyun }
2027*4882a593Smuzhiyun 
cadence_nand_cmd_address(struct nand_chip * chip,const struct nand_subop * subop)2028*4882a593Smuzhiyun static int cadence_nand_cmd_address(struct nand_chip *chip,
2029*4882a593Smuzhiyun 				    const struct nand_subop *subop)
2030*4882a593Smuzhiyun {
2031*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2032*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2033*4882a593Smuzhiyun 	const struct nand_op_instr *instr;
2034*4882a593Smuzhiyun 	unsigned int op_id = 0;
2035*4882a593Smuzhiyun 	u64 mini_ctrl_cmd = 0;
2036*4882a593Smuzhiyun 	unsigned int offset, naddrs;
2037*4882a593Smuzhiyun 	u64 address = 0;
2038*4882a593Smuzhiyun 	const u8 *addrs;
2039*4882a593Smuzhiyun 	int ret;
2040*4882a593Smuzhiyun 	int i;
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 	instr = &subop->instrs[op_id];
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	if (instr->delay_ns > 0)
2045*4882a593Smuzhiyun 		mini_ctrl_cmd |= GCMD_LAY_TWB;
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2048*4882a593Smuzhiyun 				    GCMD_LAY_INSTR_ADDR);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	offset = nand_subop_get_addr_start_off(subop, op_id);
2051*4882a593Smuzhiyun 	naddrs = nand_subop_get_num_addr_cyc(subop, op_id);
2052*4882a593Smuzhiyun 	addrs = &instr->ctx.addr.addrs[offset];
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	for (i = 0; i < naddrs; i++)
2055*4882a593Smuzhiyun 		address |= (u64)addrs[i] << (8 * i);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
2058*4882a593Smuzhiyun 				    address);
2059*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
2060*4882a593Smuzhiyun 				    naddrs - 1);
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2063*4882a593Smuzhiyun 					    cdns_chip->cs[chip->cur_cs],
2064*4882a593Smuzhiyun 					    mini_ctrl_cmd);
2065*4882a593Smuzhiyun 	if (ret)
2066*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "send address %llx failed\n", address);
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	return ret;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun 
cadence_nand_cmd_erase(struct nand_chip * chip,const struct nand_subop * subop)2071*4882a593Smuzhiyun static int cadence_nand_cmd_erase(struct nand_chip *chip,
2072*4882a593Smuzhiyun 				  const struct nand_subop *subop)
2073*4882a593Smuzhiyun {
2074*4882a593Smuzhiyun 	unsigned int op_id;
2075*4882a593Smuzhiyun 
2076*4882a593Smuzhiyun 	if (subop->instrs[0].ctx.cmd.opcode == NAND_CMD_ERASE1) {
2077*4882a593Smuzhiyun 		int i;
2078*4882a593Smuzhiyun 		const struct nand_op_instr *instr = NULL;
2079*4882a593Smuzhiyun 		unsigned int offset, naddrs;
2080*4882a593Smuzhiyun 		const u8 *addrs;
2081*4882a593Smuzhiyun 		u32 page = 0;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 		instr = &subop->instrs[1];
2084*4882a593Smuzhiyun 		offset = nand_subop_get_addr_start_off(subop, 1);
2085*4882a593Smuzhiyun 		naddrs = nand_subop_get_num_addr_cyc(subop, 1);
2086*4882a593Smuzhiyun 		addrs = &instr->ctx.addr.addrs[offset];
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 		for (i = 0; i < naddrs; i++)
2089*4882a593Smuzhiyun 			page |= (u32)addrs[i] << (8 * i);
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 		return cadence_nand_erase(chip, page);
2092*4882a593Smuzhiyun 	}
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	/*
2095*4882a593Smuzhiyun 	 * If it is not an erase operation then handle operation
2096*4882a593Smuzhiyun 	 * by calling exec_op function.
2097*4882a593Smuzhiyun 	 */
2098*4882a593Smuzhiyun 	for (op_id = 0; op_id < subop->ninstrs; op_id++) {
2099*4882a593Smuzhiyun 		int ret;
2100*4882a593Smuzhiyun 		const struct nand_operation nand_op = {
2101*4882a593Smuzhiyun 			.cs = chip->cur_cs,
2102*4882a593Smuzhiyun 			.instrs =  &subop->instrs[op_id],
2103*4882a593Smuzhiyun 			.ninstrs = 1};
2104*4882a593Smuzhiyun 		ret = chip->controller->ops->exec_op(chip, &nand_op, false);
2105*4882a593Smuzhiyun 		if (ret)
2106*4882a593Smuzhiyun 			return ret;
2107*4882a593Smuzhiyun 	}
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	return 0;
2110*4882a593Smuzhiyun }
2111*4882a593Smuzhiyun 
cadence_nand_cmd_data(struct nand_chip * chip,const struct nand_subop * subop)2112*4882a593Smuzhiyun static int cadence_nand_cmd_data(struct nand_chip *chip,
2113*4882a593Smuzhiyun 				 const struct nand_subop *subop)
2114*4882a593Smuzhiyun {
2115*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2116*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2117*4882a593Smuzhiyun 	const struct nand_op_instr *instr;
2118*4882a593Smuzhiyun 	unsigned int offset, op_id = 0;
2119*4882a593Smuzhiyun 	u64 mini_ctrl_cmd = 0;
2120*4882a593Smuzhiyun 	int len = 0;
2121*4882a593Smuzhiyun 	int ret;
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	instr = &subop->instrs[op_id];
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	if (instr->delay_ns > 0)
2126*4882a593Smuzhiyun 		mini_ctrl_cmd |= GCMD_LAY_TWB;
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
2129*4882a593Smuzhiyun 				    GCMD_LAY_INSTR_DATA);
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	if (instr->type == NAND_OP_DATA_OUT_INSTR)
2132*4882a593Smuzhiyun 		mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR,
2133*4882a593Smuzhiyun 					    GCMD_DIR_WRITE);
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	len = nand_subop_get_data_len(subop, op_id);
2136*4882a593Smuzhiyun 	offset = nand_subop_get_data_start_off(subop, op_id);
2137*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
2138*4882a593Smuzhiyun 	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);
2139*4882a593Smuzhiyun 	if (instr->ctx.data.force_8bit) {
2140*4882a593Smuzhiyun 		ret = cadence_nand_force_byte_access(chip, true);
2141*4882a593Smuzhiyun 		if (ret) {
2142*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2143*4882a593Smuzhiyun 				"cannot change byte access generic data cmd failed\n");
2144*4882a593Smuzhiyun 			return ret;
2145*4882a593Smuzhiyun 		}
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	ret = cadence_nand_generic_cmd_send(cdns_ctrl,
2149*4882a593Smuzhiyun 					    cdns_chip->cs[chip->cur_cs],
2150*4882a593Smuzhiyun 					    mini_ctrl_cmd);
2151*4882a593Smuzhiyun 	if (ret) {
2152*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "send generic data cmd failed\n");
2153*4882a593Smuzhiyun 		return ret;
2154*4882a593Smuzhiyun 	}
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	if (instr->type == NAND_OP_DATA_IN_INSTR) {
2157*4882a593Smuzhiyun 		void *buf = instr->ctx.data.buf.in + offset;
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 		ret = cadence_nand_read_buf(cdns_ctrl, buf, len);
2160*4882a593Smuzhiyun 	} else {
2161*4882a593Smuzhiyun 		const void *buf = instr->ctx.data.buf.out + offset;
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 		ret = cadence_nand_write_buf(cdns_ctrl, buf, len);
2164*4882a593Smuzhiyun 	}
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	if (ret) {
2167*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "data transfer failed for generic command\n");
2168*4882a593Smuzhiyun 		return ret;
2169*4882a593Smuzhiyun 	}
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 	if (instr->ctx.data.force_8bit) {
2172*4882a593Smuzhiyun 		ret = cadence_nand_force_byte_access(chip, false);
2173*4882a593Smuzhiyun 		if (ret) {
2174*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2175*4882a593Smuzhiyun 				"cannot change byte access generic data cmd failed\n");
2176*4882a593Smuzhiyun 		}
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	return ret;
2180*4882a593Smuzhiyun }
2181*4882a593Smuzhiyun 
cadence_nand_cmd_waitrdy(struct nand_chip * chip,const struct nand_subop * subop)2182*4882a593Smuzhiyun static int cadence_nand_cmd_waitrdy(struct nand_chip *chip,
2183*4882a593Smuzhiyun 				    const struct nand_subop *subop)
2184*4882a593Smuzhiyun {
2185*4882a593Smuzhiyun 	int status;
2186*4882a593Smuzhiyun 	unsigned int op_id = 0;
2187*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2188*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2189*4882a593Smuzhiyun 	const struct nand_op_instr *instr = &subop->instrs[op_id];
2190*4882a593Smuzhiyun 	u32 timeout_us = instr->ctx.waitrdy.timeout_ms * 1000;
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	status = cadence_nand_wait_for_value(cdns_ctrl, RBN_SETINGS,
2193*4882a593Smuzhiyun 					     timeout_us,
2194*4882a593Smuzhiyun 					     BIT(cdns_chip->cs[chip->cur_cs]),
2195*4882a593Smuzhiyun 					     false);
2196*4882a593Smuzhiyun 	return status;
2197*4882a593Smuzhiyun }
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun static const struct nand_op_parser cadence_nand_op_parser = NAND_OP_PARSER(
2200*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2201*4882a593Smuzhiyun 		cadence_nand_cmd_erase,
2202*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_CMD_ELEM(false),
2203*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ERASE_ADDRESS_CYC),
2204*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_CMD_ELEM(false),
2205*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
2206*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2207*4882a593Smuzhiyun 		cadence_nand_cmd_opcode,
2208*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_CMD_ELEM(false)),
2209*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2210*4882a593Smuzhiyun 		cadence_nand_cmd_address,
2211*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_ADDR_ELEM(false, MAX_ADDRESS_CYC)),
2212*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2213*4882a593Smuzhiyun 		cadence_nand_cmd_data,
2214*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, MAX_DATA_SIZE)),
2215*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2216*4882a593Smuzhiyun 		cadence_nand_cmd_data,
2217*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, MAX_DATA_SIZE)),
2218*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(
2219*4882a593Smuzhiyun 		cadence_nand_cmd_waitrdy,
2220*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false))
2221*4882a593Smuzhiyun 	);
2222*4882a593Smuzhiyun 
cadence_nand_exec_op(struct nand_chip * chip,const struct nand_operation * op,bool check_only)2223*4882a593Smuzhiyun static int cadence_nand_exec_op(struct nand_chip *chip,
2224*4882a593Smuzhiyun 				const struct nand_operation *op,
2225*4882a593Smuzhiyun 				bool check_only)
2226*4882a593Smuzhiyun {
2227*4882a593Smuzhiyun 	if (!check_only) {
2228*4882a593Smuzhiyun 		int status = cadence_nand_select_target(chip);
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 		if (status)
2231*4882a593Smuzhiyun 			return status;
2232*4882a593Smuzhiyun 	}
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun 	return nand_op_parser_exec_op(chip, &cadence_nand_op_parser, op,
2235*4882a593Smuzhiyun 				      check_only);
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun 
cadence_nand_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)2238*4882a593Smuzhiyun static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
2239*4882a593Smuzhiyun 				       struct mtd_oob_region *oobregion)
2240*4882a593Smuzhiyun {
2241*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
2242*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	if (section)
2245*4882a593Smuzhiyun 		return -ERANGE;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	oobregion->offset = cdns_chip->bbm_len;
2248*4882a593Smuzhiyun 	oobregion->length = cdns_chip->avail_oob_size
2249*4882a593Smuzhiyun 		- cdns_chip->bbm_len;
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun 	return 0;
2252*4882a593Smuzhiyun }
2253*4882a593Smuzhiyun 
cadence_nand_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)2254*4882a593Smuzhiyun static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
2255*4882a593Smuzhiyun 				      struct mtd_oob_region *oobregion)
2256*4882a593Smuzhiyun {
2257*4882a593Smuzhiyun 	struct nand_chip *chip = mtd_to_nand(mtd);
2258*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	if (section)
2261*4882a593Smuzhiyun 		return -ERANGE;
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	oobregion->offset = cdns_chip->avail_oob_size;
2264*4882a593Smuzhiyun 	oobregion->length = chip->ecc.total;
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	return 0;
2267*4882a593Smuzhiyun }
2268*4882a593Smuzhiyun 
2269*4882a593Smuzhiyun static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
2270*4882a593Smuzhiyun 	.free = cadence_nand_ooblayout_free,
2271*4882a593Smuzhiyun 	.ecc = cadence_nand_ooblayout_ecc,
2272*4882a593Smuzhiyun };
2273*4882a593Smuzhiyun 
calc_cycl(u32 timing,u32 clock)2274*4882a593Smuzhiyun static int calc_cycl(u32 timing, u32 clock)
2275*4882a593Smuzhiyun {
2276*4882a593Smuzhiyun 	if (timing == 0 || clock == 0)
2277*4882a593Smuzhiyun 		return 0;
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 	if ((timing % clock) > 0)
2280*4882a593Smuzhiyun 		return timing / clock;
2281*4882a593Smuzhiyun 	else
2282*4882a593Smuzhiyun 		return timing / clock - 1;
2283*4882a593Smuzhiyun }
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun /* Calculate max data valid window. */
calc_tdvw_max(u32 trp_cnt,u32 clk_period,u32 trhoh_min,u32 board_delay_skew_min,u32 ext_mode)2286*4882a593Smuzhiyun static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2287*4882a593Smuzhiyun 				u32 board_delay_skew_min, u32 ext_mode)
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun 	if (ext_mode == 0)
2290*4882a593Smuzhiyun 		clk_period /= 2;
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 	return (trp_cnt + 1) * clk_period + trhoh_min +
2293*4882a593Smuzhiyun 		board_delay_skew_min;
2294*4882a593Smuzhiyun }
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun /* Calculate data valid window. */
calc_tdvw(u32 trp_cnt,u32 clk_period,u32 trhoh_min,u32 trea_max,u32 ext_mode)2297*4882a593Smuzhiyun static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
2298*4882a593Smuzhiyun 			    u32 trea_max, u32 ext_mode)
2299*4882a593Smuzhiyun {
2300*4882a593Smuzhiyun 	if (ext_mode == 0)
2301*4882a593Smuzhiyun 		clk_period /= 2;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun static int
cadence_nand_setup_interface(struct nand_chip * chip,int chipnr,const struct nand_interface_config * conf)2307*4882a593Smuzhiyun cadence_nand_setup_interface(struct nand_chip *chip, int chipnr,
2308*4882a593Smuzhiyun 			     const struct nand_interface_config *conf)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun 	const struct nand_sdr_timings *sdr;
2311*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2312*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2313*4882a593Smuzhiyun 	struct cadence_nand_timings *t = &cdns_chip->timings;
2314*4882a593Smuzhiyun 	u32 reg;
2315*4882a593Smuzhiyun 	u32 board_delay = cdns_ctrl->board_delay;
2316*4882a593Smuzhiyun 	u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
2317*4882a593Smuzhiyun 					    cdns_ctrl->nf_clk_rate);
2318*4882a593Smuzhiyun 	u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
2319*4882a593Smuzhiyun 	u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
2320*4882a593Smuzhiyun 	u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
2321*4882a593Smuzhiyun 	u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
2322*4882a593Smuzhiyun 	u32 if_skew = cdns_ctrl->caps1->if_skew;
2323*4882a593Smuzhiyun 	u32 board_delay_skew_min = board_delay - if_skew;
2324*4882a593Smuzhiyun 	u32 board_delay_skew_max = board_delay + if_skew;
2325*4882a593Smuzhiyun 	u32 dqs_sampl_res, phony_dqs_mod;
2326*4882a593Smuzhiyun 	u32 tdvw, tdvw_min, tdvw_max;
2327*4882a593Smuzhiyun 	u32 ext_rd_mode, ext_wr_mode;
2328*4882a593Smuzhiyun 	u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
2329*4882a593Smuzhiyun 	u32 sampling_point;
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 	sdr = nand_get_sdr_timings(conf);
2332*4882a593Smuzhiyun 	if (IS_ERR(sdr))
2333*4882a593Smuzhiyun 		return PTR_ERR(sdr);
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 	memset(t, 0, sizeof(*t));
2336*4882a593Smuzhiyun 	/* Sampling point calculation. */
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll)
2339*4882a593Smuzhiyun 		phony_dqs_mod = 2;
2340*4882a593Smuzhiyun 	else
2341*4882a593Smuzhiyun 		phony_dqs_mod = 1;
2342*4882a593Smuzhiyun 
2343*4882a593Smuzhiyun 	dqs_sampl_res = clk_period / phony_dqs_mod;
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 	tdvw_min = sdr->tREA_max + board_delay_skew_max;
2346*4882a593Smuzhiyun 	/*
2347*4882a593Smuzhiyun 	 * The idea of those calculation is to get the optimum value
2348*4882a593Smuzhiyun 	 * for tRP and tRH timings. If it is NOT possible to sample data
2349*4882a593Smuzhiyun 	 * with optimal tRP/tRH settings, the parameters will be extended.
2350*4882a593Smuzhiyun 	 * If clk_period is 50ns (the lowest value) this condition is met
2351*4882a593Smuzhiyun 	 * for asynchronous timing modes 1, 2, 3, 4 and 5.
2352*4882a593Smuzhiyun 	 * If clk_period is 20ns the condition is met only
2353*4882a593Smuzhiyun 	 * for asynchronous timing mode 5.
2354*4882a593Smuzhiyun 	 */
2355*4882a593Smuzhiyun 	if (sdr->tRC_min <= clk_period &&
2356*4882a593Smuzhiyun 	    sdr->tRP_min <= (clk_period / 2) &&
2357*4882a593Smuzhiyun 	    sdr->tREH_min <= (clk_period / 2)) {
2358*4882a593Smuzhiyun 		/* Performance mode. */
2359*4882a593Smuzhiyun 		ext_rd_mode = 0;
2360*4882a593Smuzhiyun 		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2361*4882a593Smuzhiyun 				 sdr->tREA_max, ext_rd_mode);
2362*4882a593Smuzhiyun 		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
2363*4882a593Smuzhiyun 					 board_delay_skew_min,
2364*4882a593Smuzhiyun 					 ext_rd_mode);
2365*4882a593Smuzhiyun 		/*
2366*4882a593Smuzhiyun 		 * Check if data valid window and sampling point can be found
2367*4882a593Smuzhiyun 		 * and is not on the edge (ie. we have hold margin).
2368*4882a593Smuzhiyun 		 * If not extend the tRP timings.
2369*4882a593Smuzhiyun 		 */
2370*4882a593Smuzhiyun 		if (tdvw > 0) {
2371*4882a593Smuzhiyun 			if (tdvw_max <= tdvw_min ||
2372*4882a593Smuzhiyun 			    (tdvw_max % dqs_sampl_res) == 0) {
2373*4882a593Smuzhiyun 				/*
2374*4882a593Smuzhiyun 				 * No valid sampling point so the RE pulse need
2375*4882a593Smuzhiyun 				 * to be widen widening by half clock cycle.
2376*4882a593Smuzhiyun 				 */
2377*4882a593Smuzhiyun 				ext_rd_mode = 1;
2378*4882a593Smuzhiyun 			}
2379*4882a593Smuzhiyun 		} else {
2380*4882a593Smuzhiyun 			/*
2381*4882a593Smuzhiyun 			 * There is no valid window
2382*4882a593Smuzhiyun 			 * to be able to sample data the tRP need to be widen.
2383*4882a593Smuzhiyun 			 * Very safe calculations are performed here.
2384*4882a593Smuzhiyun 			 */
2385*4882a593Smuzhiyun 			trp_cnt = (sdr->tREA_max + board_delay_skew_max
2386*4882a593Smuzhiyun 				   + dqs_sampl_res) / clk_period;
2387*4882a593Smuzhiyun 			ext_rd_mode = 1;
2388*4882a593Smuzhiyun 		}
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 	} else {
2391*4882a593Smuzhiyun 		/* Extended read mode. */
2392*4882a593Smuzhiyun 		u32 trh;
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 		ext_rd_mode = 1;
2395*4882a593Smuzhiyun 		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
2396*4882a593Smuzhiyun 		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
2397*4882a593Smuzhiyun 		if (sdr->tREH_min >= trh)
2398*4882a593Smuzhiyun 			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
2399*4882a593Smuzhiyun 		else
2400*4882a593Smuzhiyun 			trh_cnt = calc_cycl(trh, clk_period);
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
2403*4882a593Smuzhiyun 				 sdr->tREA_max, ext_rd_mode);
2404*4882a593Smuzhiyun 		/*
2405*4882a593Smuzhiyun 		 * Check if data valid window and sampling point can be found
2406*4882a593Smuzhiyun 		 * or if it is at the edge check if previous is valid
2407*4882a593Smuzhiyun 		 * - if not extend the tRP timings.
2408*4882a593Smuzhiyun 		 */
2409*4882a593Smuzhiyun 		if (tdvw > 0) {
2410*4882a593Smuzhiyun 			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2411*4882a593Smuzhiyun 						 sdr->tRHOH_min,
2412*4882a593Smuzhiyun 						 board_delay_skew_min,
2413*4882a593Smuzhiyun 						 ext_rd_mode);
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 			if ((((tdvw_max / dqs_sampl_res)
2416*4882a593Smuzhiyun 			      * dqs_sampl_res) <= tdvw_min) ||
2417*4882a593Smuzhiyun 			    (((tdvw_max % dqs_sampl_res) == 0) &&
2418*4882a593Smuzhiyun 			     (((tdvw_max / dqs_sampl_res - 1)
2419*4882a593Smuzhiyun 			       * dqs_sampl_res) <= tdvw_min))) {
2420*4882a593Smuzhiyun 				/*
2421*4882a593Smuzhiyun 				 * Data valid window width is lower than
2422*4882a593Smuzhiyun 				 * sampling resolution and do not hit any
2423*4882a593Smuzhiyun 				 * sampling point to be sure the sampling point
2424*4882a593Smuzhiyun 				 * will be found the RE low pulse width will be
2425*4882a593Smuzhiyun 				 *  extended by one clock cycle.
2426*4882a593Smuzhiyun 				 */
2427*4882a593Smuzhiyun 				trp_cnt = trp_cnt + 1;
2428*4882a593Smuzhiyun 			}
2429*4882a593Smuzhiyun 		} else {
2430*4882a593Smuzhiyun 			/*
2431*4882a593Smuzhiyun 			 * There is no valid window to be able to sample data.
2432*4882a593Smuzhiyun 			 * The tRP need to be widen.
2433*4882a593Smuzhiyun 			 * Very safe calculations are performed here.
2434*4882a593Smuzhiyun 			 */
2435*4882a593Smuzhiyun 			trp_cnt = (sdr->tREA_max + board_delay_skew_max
2436*4882a593Smuzhiyun 				   + dqs_sampl_res) / clk_period;
2437*4882a593Smuzhiyun 		}
2438*4882a593Smuzhiyun 	}
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
2441*4882a593Smuzhiyun 				 sdr->tRHOH_min,
2442*4882a593Smuzhiyun 				 board_delay_skew_min, ext_rd_mode);
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	if (sdr->tWC_min <= clk_period &&
2445*4882a593Smuzhiyun 	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
2446*4882a593Smuzhiyun 	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
2447*4882a593Smuzhiyun 		ext_wr_mode = 0;
2448*4882a593Smuzhiyun 	} else {
2449*4882a593Smuzhiyun 		u32 twh;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 		ext_wr_mode = 1;
2452*4882a593Smuzhiyun 		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
2453*4882a593Smuzhiyun 		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
2454*4882a593Smuzhiyun 			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
2455*4882a593Smuzhiyun 					    clk_period);
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun 		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
2458*4882a593Smuzhiyun 		if (sdr->tWH_min >= twh)
2459*4882a593Smuzhiyun 			twh = sdr->tWH_min;
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 		twh_cnt = calc_cycl(twh + if_skew, clk_period);
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
2465*4882a593Smuzhiyun 	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
2466*4882a593Smuzhiyun 	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
2467*4882a593Smuzhiyun 	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
2468*4882a593Smuzhiyun 	t->async_toggle_timings = reg;
2469*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
2472*4882a593Smuzhiyun 	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
2473*4882a593Smuzhiyun 	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
2474*4882a593Smuzhiyun 	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
2475*4882a593Smuzhiyun 	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	/*
2478*4882a593Smuzhiyun 	 * If timing exceeds delay field in timing register
2479*4882a593Smuzhiyun 	 * then use maximum value.
2480*4882a593Smuzhiyun 	 */
2481*4882a593Smuzhiyun 	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
2482*4882a593Smuzhiyun 		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
2483*4882a593Smuzhiyun 	else
2484*4882a593Smuzhiyun 		reg |= TIMINGS0_TCCS;
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
2487*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
2488*4882a593Smuzhiyun 	t->timings0 = reg;
2489*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "TIMINGS0_SDR\t%x\n", reg);
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	/* The following is related to single signal so skew is not needed. */
2492*4882a593Smuzhiyun 	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
2493*4882a593Smuzhiyun 	trhz_cnt = trhz_cnt + 1;
2494*4882a593Smuzhiyun 	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
2495*4882a593Smuzhiyun 	/*
2496*4882a593Smuzhiyun 	 * Because of the two stage syncflop the value must be increased by 3
2497*4882a593Smuzhiyun 	 * first value is related with sync, second value is related
2498*4882a593Smuzhiyun 	 * with output if delay.
2499*4882a593Smuzhiyun 	 */
2500*4882a593Smuzhiyun 	twb_cnt = twb_cnt + 3 + 5;
2501*4882a593Smuzhiyun 	/*
2502*4882a593Smuzhiyun 	 * The following is related to the we edge of the random data input
2503*4882a593Smuzhiyun 	 * sequence so skew is not needed.
2504*4882a593Smuzhiyun 	 */
2505*4882a593Smuzhiyun 	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
2506*4882a593Smuzhiyun 	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
2507*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
2508*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
2509*4882a593Smuzhiyun 	t->timings1 = reg;
2510*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "TIMINGS1_SDR\t%x\n", reg);
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
2513*4882a593Smuzhiyun 	if (tfeat_cnt < twb_cnt)
2514*4882a593Smuzhiyun 		tfeat_cnt = twb_cnt;
2515*4882a593Smuzhiyun 
2516*4882a593Smuzhiyun 	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
2517*4882a593Smuzhiyun 	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
2520*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
2521*4882a593Smuzhiyun 	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
2522*4882a593Smuzhiyun 	t->timings2 = reg;
2523*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "TIMINGS2_SDR\t%x\n", reg);
2524*4882a593Smuzhiyun 
2525*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll) {
2526*4882a593Smuzhiyun 		reg = DLL_PHY_CTRL_DLL_RST_N;
2527*4882a593Smuzhiyun 		if (ext_wr_mode)
2528*4882a593Smuzhiyun 			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
2529*4882a593Smuzhiyun 		if (ext_rd_mode)
2530*4882a593Smuzhiyun 			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
2533*4882a593Smuzhiyun 		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
2534*4882a593Smuzhiyun 		t->dll_phy_ctrl = reg;
2535*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
2536*4882a593Smuzhiyun 	}
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 	/* Sampling point calculation. */
2539*4882a593Smuzhiyun 	if ((tdvw_max % dqs_sampl_res) > 0)
2540*4882a593Smuzhiyun 		sampling_point = tdvw_max / dqs_sampl_res;
2541*4882a593Smuzhiyun 	else
2542*4882a593Smuzhiyun 		sampling_point = (tdvw_max / dqs_sampl_res - 1);
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 	if (sampling_point * dqs_sampl_res > tdvw_min) {
2545*4882a593Smuzhiyun 		dll_phy_dqs_timing =
2546*4882a593Smuzhiyun 			FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
2547*4882a593Smuzhiyun 		dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
2548*4882a593Smuzhiyun 		phony_dqs_timing = sampling_point / phony_dqs_mod;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 		if ((sampling_point % 2) > 0) {
2551*4882a593Smuzhiyun 			dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
2552*4882a593Smuzhiyun 			if ((tdvw_max % dqs_sampl_res) == 0)
2553*4882a593Smuzhiyun 				/*
2554*4882a593Smuzhiyun 				 * Calculation for sampling point at the edge
2555*4882a593Smuzhiyun 				 * of data and being odd number.
2556*4882a593Smuzhiyun 				 */
2557*4882a593Smuzhiyun 				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
2558*4882a593Smuzhiyun 					/ phony_dqs_mod - 1;
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 			if (!cdns_ctrl->caps2.is_phy_type_dll)
2561*4882a593Smuzhiyun 				phony_dqs_timing--;
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 		} else {
2564*4882a593Smuzhiyun 			phony_dqs_timing--;
2565*4882a593Smuzhiyun 		}
2566*4882a593Smuzhiyun 		rd_del_sel = phony_dqs_timing + 3;
2567*4882a593Smuzhiyun 	} else {
2568*4882a593Smuzhiyun 		dev_warn(cdns_ctrl->dev,
2569*4882a593Smuzhiyun 			 "ERROR : cannot find valid sampling point\n");
2570*4882a593Smuzhiyun 	}
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun 	reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
2573*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll)
2574*4882a593Smuzhiyun 		reg  |= PHY_CTRL_SDR_DQS;
2575*4882a593Smuzhiyun 	t->phy_ctrl = reg;
2576*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun 	if (cdns_ctrl->caps2.is_phy_type_dll) {
2579*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
2580*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
2581*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
2582*4882a593Smuzhiyun 			dll_phy_dqs_timing);
2583*4882a593Smuzhiyun 		t->phy_dqs_timing = dll_phy_dqs_timing;
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 		reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
2586*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
2587*4882a593Smuzhiyun 			reg);
2588*4882a593Smuzhiyun 		t->phy_gate_lpbk_ctrl = reg;
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
2591*4882a593Smuzhiyun 			PHY_DLL_MASTER_CTRL_BYPASS_MODE);
2592*4882a593Smuzhiyun 		dev_dbg(cdns_ctrl->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
2593*4882a593Smuzhiyun 	}
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	return 0;
2596*4882a593Smuzhiyun }
2597*4882a593Smuzhiyun 
cadence_nand_attach_chip(struct nand_chip * chip)2598*4882a593Smuzhiyun static int cadence_nand_attach_chip(struct nand_chip *chip)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller);
2601*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
2602*4882a593Smuzhiyun 	u32 ecc_size;
2603*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
2604*4882a593Smuzhiyun 	int ret;
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun 	if (chip->options & NAND_BUSWIDTH_16) {
2607*4882a593Smuzhiyun 		ret = cadence_nand_set_access_width16(cdns_ctrl, true);
2608*4882a593Smuzhiyun 		if (ret)
2609*4882a593Smuzhiyun 			return ret;
2610*4882a593Smuzhiyun 	}
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	chip->bbt_options |= NAND_BBT_USE_FLASH;
2613*4882a593Smuzhiyun 	chip->bbt_options |= NAND_BBT_NO_OOB;
2614*4882a593Smuzhiyun 	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	chip->options |= NAND_NO_SUBPAGE_WRITE;
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	cdns_chip->bbm_offs = chip->badblockpos;
2619*4882a593Smuzhiyun 	cdns_chip->bbm_offs &= ~0x01;
2620*4882a593Smuzhiyun 	/* this value should be even number */
2621*4882a593Smuzhiyun 	cdns_chip->bbm_len = 2;
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	ret = nand_ecc_choose_conf(chip,
2624*4882a593Smuzhiyun 				   &cdns_ctrl->ecc_caps,
2625*4882a593Smuzhiyun 				   mtd->oobsize - cdns_chip->bbm_len);
2626*4882a593Smuzhiyun 	if (ret) {
2627*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "ECC configuration failed\n");
2628*4882a593Smuzhiyun 		return ret;
2629*4882a593Smuzhiyun 	}
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	dev_dbg(cdns_ctrl->dev,
2632*4882a593Smuzhiyun 		"chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
2633*4882a593Smuzhiyun 		chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun 	/* Error correction configuration. */
2636*4882a593Smuzhiyun 	cdns_chip->sector_size = chip->ecc.size;
2637*4882a593Smuzhiyun 	cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
2638*4882a593Smuzhiyun 	ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	if (cdns_chip->avail_oob_size > cdns_ctrl->bch_metadata_size)
2643*4882a593Smuzhiyun 		cdns_chip->avail_oob_size = cdns_ctrl->bch_metadata_size;
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 	if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
2646*4882a593Smuzhiyun 	    > mtd->oobsize)
2647*4882a593Smuzhiyun 		cdns_chip->avail_oob_size -= 4;
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	ret = cadence_nand_get_ecc_strength_idx(cdns_ctrl, chip->ecc.strength);
2650*4882a593Smuzhiyun 	if (ret < 0)
2651*4882a593Smuzhiyun 		return -EINVAL;
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 	cdns_chip->corr_str_idx = (u8)ret;
2654*4882a593Smuzhiyun 
2655*4882a593Smuzhiyun 	if (cadence_nand_wait_for_value(cdns_ctrl, CTRL_STATUS,
2656*4882a593Smuzhiyun 					1000000,
2657*4882a593Smuzhiyun 					CTRL_STATUS_CTRL_BUSY, true))
2658*4882a593Smuzhiyun 		return -ETIMEDOUT;
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 	cadence_nand_set_ecc_strength(cdns_ctrl,
2661*4882a593Smuzhiyun 				      cdns_chip->corr_str_idx);
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun 	cadence_nand_set_erase_detection(cdns_ctrl, true,
2664*4882a593Smuzhiyun 					 chip->ecc.strength);
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	/* Override the default read operations. */
2667*4882a593Smuzhiyun 	chip->ecc.read_page = cadence_nand_read_page;
2668*4882a593Smuzhiyun 	chip->ecc.read_page_raw = cadence_nand_read_page_raw;
2669*4882a593Smuzhiyun 	chip->ecc.write_page = cadence_nand_write_page;
2670*4882a593Smuzhiyun 	chip->ecc.write_page_raw = cadence_nand_write_page_raw;
2671*4882a593Smuzhiyun 	chip->ecc.read_oob = cadence_nand_read_oob;
2672*4882a593Smuzhiyun 	chip->ecc.write_oob = cadence_nand_write_oob;
2673*4882a593Smuzhiyun 	chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
2674*4882a593Smuzhiyun 	chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	if ((mtd->writesize + mtd->oobsize) > cdns_ctrl->buf_size)
2677*4882a593Smuzhiyun 		cdns_ctrl->buf_size = mtd->writesize + mtd->oobsize;
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun 	/* Is 32-bit DMA supported? */
2680*4882a593Smuzhiyun 	ret = dma_set_mask(cdns_ctrl->dev, DMA_BIT_MASK(32));
2681*4882a593Smuzhiyun 	if (ret) {
2682*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "no usable DMA configuration\n");
2683*4882a593Smuzhiyun 		return ret;
2684*4882a593Smuzhiyun 	}
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 	mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	return 0;
2689*4882a593Smuzhiyun }
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun static const struct nand_controller_ops cadence_nand_controller_ops = {
2692*4882a593Smuzhiyun 	.attach_chip = cadence_nand_attach_chip,
2693*4882a593Smuzhiyun 	.exec_op = cadence_nand_exec_op,
2694*4882a593Smuzhiyun 	.setup_interface = cadence_nand_setup_interface,
2695*4882a593Smuzhiyun };
2696*4882a593Smuzhiyun 
cadence_nand_chip_init(struct cdns_nand_ctrl * cdns_ctrl,struct device_node * np)2697*4882a593Smuzhiyun static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl,
2698*4882a593Smuzhiyun 				  struct device_node *np)
2699*4882a593Smuzhiyun {
2700*4882a593Smuzhiyun 	struct cdns_nand_chip *cdns_chip;
2701*4882a593Smuzhiyun 	struct mtd_info *mtd;
2702*4882a593Smuzhiyun 	struct nand_chip *chip;
2703*4882a593Smuzhiyun 	int nsels, ret, i;
2704*4882a593Smuzhiyun 	u32 cs;
2705*4882a593Smuzhiyun 
2706*4882a593Smuzhiyun 	nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
2707*4882a593Smuzhiyun 	if (nsels <= 0) {
2708*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "missing/invalid reg property\n");
2709*4882a593Smuzhiyun 		return -EINVAL;
2710*4882a593Smuzhiyun 	}
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 	/* Allocate the nand chip structure. */
2713*4882a593Smuzhiyun 	cdns_chip = devm_kzalloc(cdns_ctrl->dev, sizeof(*cdns_chip) +
2714*4882a593Smuzhiyun 				 (nsels * sizeof(u8)),
2715*4882a593Smuzhiyun 				 GFP_KERNEL);
2716*4882a593Smuzhiyun 	if (!cdns_chip) {
2717*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "could not allocate chip structure\n");
2718*4882a593Smuzhiyun 		return -ENOMEM;
2719*4882a593Smuzhiyun 	}
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 	cdns_chip->nsels = nsels;
2722*4882a593Smuzhiyun 
2723*4882a593Smuzhiyun 	for (i = 0; i < nsels; i++) {
2724*4882a593Smuzhiyun 		/* Retrieve CS id. */
2725*4882a593Smuzhiyun 		ret = of_property_read_u32_index(np, "reg", i, &cs);
2726*4882a593Smuzhiyun 		if (ret) {
2727*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2728*4882a593Smuzhiyun 				"could not retrieve reg property: %d\n",
2729*4882a593Smuzhiyun 				ret);
2730*4882a593Smuzhiyun 			return ret;
2731*4882a593Smuzhiyun 		}
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 		if (cs >= cdns_ctrl->caps2.max_banks) {
2734*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2735*4882a593Smuzhiyun 				"invalid reg value: %u (max CS = %d)\n",
2736*4882a593Smuzhiyun 				cs, cdns_ctrl->caps2.max_banks);
2737*4882a593Smuzhiyun 			return -EINVAL;
2738*4882a593Smuzhiyun 		}
2739*4882a593Smuzhiyun 
2740*4882a593Smuzhiyun 		if (test_and_set_bit(cs, &cdns_ctrl->assigned_cs)) {
2741*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2742*4882a593Smuzhiyun 				"CS %d already assigned\n", cs);
2743*4882a593Smuzhiyun 			return -EINVAL;
2744*4882a593Smuzhiyun 		}
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 		cdns_chip->cs[i] = cs;
2747*4882a593Smuzhiyun 	}
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	chip = &cdns_chip->chip;
2750*4882a593Smuzhiyun 	chip->controller = &cdns_ctrl->controller;
2751*4882a593Smuzhiyun 	nand_set_flash_node(chip, np);
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun 	mtd = nand_to_mtd(chip);
2754*4882a593Smuzhiyun 	mtd->dev.parent = cdns_ctrl->dev;
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 	/*
2757*4882a593Smuzhiyun 	 * Default to HW ECC engine mode. If the nand-ecc-mode property is given
2758*4882a593Smuzhiyun 	 * in the DT node, this entry will be overwritten in nand_scan_ident().
2759*4882a593Smuzhiyun 	 */
2760*4882a593Smuzhiyun 	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun 	ret = nand_scan(chip, cdns_chip->nsels);
2763*4882a593Smuzhiyun 	if (ret) {
2764*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "could not scan the nand chip\n");
2765*4882a593Smuzhiyun 		return ret;
2766*4882a593Smuzhiyun 	}
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun 	ret = mtd_device_register(mtd, NULL, 0);
2769*4882a593Smuzhiyun 	if (ret) {
2770*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
2771*4882a593Smuzhiyun 			"failed to register mtd device: %d\n", ret);
2772*4882a593Smuzhiyun 		nand_cleanup(chip);
2773*4882a593Smuzhiyun 		return ret;
2774*4882a593Smuzhiyun 	}
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 	list_add_tail(&cdns_chip->node, &cdns_ctrl->chips);
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	return 0;
2779*4882a593Smuzhiyun }
2780*4882a593Smuzhiyun 
cadence_nand_chips_cleanup(struct cdns_nand_ctrl * cdns_ctrl)2781*4882a593Smuzhiyun static void cadence_nand_chips_cleanup(struct cdns_nand_ctrl *cdns_ctrl)
2782*4882a593Smuzhiyun {
2783*4882a593Smuzhiyun 	struct cdns_nand_chip *entry, *temp;
2784*4882a593Smuzhiyun 	struct nand_chip *chip;
2785*4882a593Smuzhiyun 	int ret;
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	list_for_each_entry_safe(entry, temp, &cdns_ctrl->chips, node) {
2788*4882a593Smuzhiyun 		chip = &entry->chip;
2789*4882a593Smuzhiyun 		ret = mtd_device_unregister(nand_to_mtd(chip));
2790*4882a593Smuzhiyun 		WARN_ON(ret);
2791*4882a593Smuzhiyun 		nand_cleanup(chip);
2792*4882a593Smuzhiyun 		list_del(&entry->node);
2793*4882a593Smuzhiyun 	}
2794*4882a593Smuzhiyun }
2795*4882a593Smuzhiyun 
cadence_nand_chips_init(struct cdns_nand_ctrl * cdns_ctrl)2796*4882a593Smuzhiyun static int cadence_nand_chips_init(struct cdns_nand_ctrl *cdns_ctrl)
2797*4882a593Smuzhiyun {
2798*4882a593Smuzhiyun 	struct device_node *np = cdns_ctrl->dev->of_node;
2799*4882a593Smuzhiyun 	struct device_node *nand_np;
2800*4882a593Smuzhiyun 	int max_cs = cdns_ctrl->caps2.max_banks;
2801*4882a593Smuzhiyun 	int nchips, ret;
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 	nchips = of_get_child_count(np);
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun 	if (nchips > max_cs) {
2806*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev,
2807*4882a593Smuzhiyun 			"too many NAND chips: %d (max = %d CS)\n",
2808*4882a593Smuzhiyun 			nchips, max_cs);
2809*4882a593Smuzhiyun 		return -EINVAL;
2810*4882a593Smuzhiyun 	}
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun 	for_each_child_of_node(np, nand_np) {
2813*4882a593Smuzhiyun 		ret = cadence_nand_chip_init(cdns_ctrl, nand_np);
2814*4882a593Smuzhiyun 		if (ret) {
2815*4882a593Smuzhiyun 			of_node_put(nand_np);
2816*4882a593Smuzhiyun 			cadence_nand_chips_cleanup(cdns_ctrl);
2817*4882a593Smuzhiyun 			return ret;
2818*4882a593Smuzhiyun 		}
2819*4882a593Smuzhiyun 	}
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	return 0;
2822*4882a593Smuzhiyun }
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun static void
cadence_nand_irq_cleanup(int irqnum,struct cdns_nand_ctrl * cdns_ctrl)2825*4882a593Smuzhiyun cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
2826*4882a593Smuzhiyun {
2827*4882a593Smuzhiyun 	/* Disable interrupts. */
2828*4882a593Smuzhiyun 	writel_relaxed(INTR_ENABLE_INTR_EN, cdns_ctrl->reg + INTR_ENABLE);
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun 
cadence_nand_init(struct cdns_nand_ctrl * cdns_ctrl)2831*4882a593Smuzhiyun static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
2832*4882a593Smuzhiyun {
2833*4882a593Smuzhiyun 	dma_cap_mask_t mask;
2834*4882a593Smuzhiyun 	int ret;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
2837*4882a593Smuzhiyun 						  sizeof(*cdns_ctrl->cdma_desc),
2838*4882a593Smuzhiyun 						  &cdns_ctrl->dma_cdma_desc,
2839*4882a593Smuzhiyun 						  GFP_KERNEL);
2840*4882a593Smuzhiyun 	if (!cdns_ctrl->dma_cdma_desc)
2841*4882a593Smuzhiyun 		return -ENOMEM;
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun 	cdns_ctrl->buf_size = SZ_16K;
2844*4882a593Smuzhiyun 	cdns_ctrl->buf = kmalloc(cdns_ctrl->buf_size, GFP_KERNEL);
2845*4882a593Smuzhiyun 	if (!cdns_ctrl->buf) {
2846*4882a593Smuzhiyun 		ret = -ENOMEM;
2847*4882a593Smuzhiyun 		goto free_buf_desc;
2848*4882a593Smuzhiyun 	}
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 	if (devm_request_irq(cdns_ctrl->dev, cdns_ctrl->irq, cadence_nand_isr,
2851*4882a593Smuzhiyun 			     IRQF_SHARED, "cadence-nand-controller",
2852*4882a593Smuzhiyun 			     cdns_ctrl)) {
2853*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Unable to allocate IRQ\n");
2854*4882a593Smuzhiyun 		ret = -ENODEV;
2855*4882a593Smuzhiyun 		goto free_buf;
2856*4882a593Smuzhiyun 	}
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun 	spin_lock_init(&cdns_ctrl->irq_lock);
2859*4882a593Smuzhiyun 	init_completion(&cdns_ctrl->complete);
2860*4882a593Smuzhiyun 
2861*4882a593Smuzhiyun 	ret = cadence_nand_hw_init(cdns_ctrl);
2862*4882a593Smuzhiyun 	if (ret)
2863*4882a593Smuzhiyun 		goto disable_irq;
2864*4882a593Smuzhiyun 
2865*4882a593Smuzhiyun 	dma_cap_zero(mask);
2866*4882a593Smuzhiyun 	dma_cap_set(DMA_MEMCPY, mask);
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 	if (cdns_ctrl->caps1->has_dma) {
2869*4882a593Smuzhiyun 		cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
2870*4882a593Smuzhiyun 		if (!cdns_ctrl->dmac) {
2871*4882a593Smuzhiyun 			dev_err(cdns_ctrl->dev,
2872*4882a593Smuzhiyun 				"Unable to get a DMA channel\n");
2873*4882a593Smuzhiyun 			ret = -EBUSY;
2874*4882a593Smuzhiyun 			goto disable_irq;
2875*4882a593Smuzhiyun 		}
2876*4882a593Smuzhiyun 	}
2877*4882a593Smuzhiyun 
2878*4882a593Smuzhiyun 	nand_controller_init(&cdns_ctrl->controller);
2879*4882a593Smuzhiyun 	INIT_LIST_HEAD(&cdns_ctrl->chips);
2880*4882a593Smuzhiyun 
2881*4882a593Smuzhiyun 	cdns_ctrl->controller.ops = &cadence_nand_controller_ops;
2882*4882a593Smuzhiyun 	cdns_ctrl->curr_corr_str_idx = 0xFF;
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 	ret = cadence_nand_chips_init(cdns_ctrl);
2885*4882a593Smuzhiyun 	if (ret) {
2886*4882a593Smuzhiyun 		dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
2887*4882a593Smuzhiyun 			ret);
2888*4882a593Smuzhiyun 		goto dma_release_chnl;
2889*4882a593Smuzhiyun 	}
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	kfree(cdns_ctrl->buf);
2892*4882a593Smuzhiyun 	cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
2893*4882a593Smuzhiyun 	if (!cdns_ctrl->buf) {
2894*4882a593Smuzhiyun 		ret = -ENOMEM;
2895*4882a593Smuzhiyun 		goto dma_release_chnl;
2896*4882a593Smuzhiyun 	}
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	return 0;
2899*4882a593Smuzhiyun 
2900*4882a593Smuzhiyun dma_release_chnl:
2901*4882a593Smuzhiyun 	if (cdns_ctrl->dmac)
2902*4882a593Smuzhiyun 		dma_release_channel(cdns_ctrl->dmac);
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun disable_irq:
2905*4882a593Smuzhiyun 	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun free_buf:
2908*4882a593Smuzhiyun 	kfree(cdns_ctrl->buf);
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun free_buf_desc:
2911*4882a593Smuzhiyun 	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
2912*4882a593Smuzhiyun 			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	return ret;
2915*4882a593Smuzhiyun }
2916*4882a593Smuzhiyun 
2917*4882a593Smuzhiyun /* Driver exit point. */
cadence_nand_remove(struct cdns_nand_ctrl * cdns_ctrl)2918*4882a593Smuzhiyun static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
2919*4882a593Smuzhiyun {
2920*4882a593Smuzhiyun 	cadence_nand_chips_cleanup(cdns_ctrl);
2921*4882a593Smuzhiyun 	cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
2922*4882a593Smuzhiyun 	kfree(cdns_ctrl->buf);
2923*4882a593Smuzhiyun 	dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
2924*4882a593Smuzhiyun 			  cdns_ctrl->cdma_desc, cdns_ctrl->dma_cdma_desc);
2925*4882a593Smuzhiyun 
2926*4882a593Smuzhiyun 	if (cdns_ctrl->dmac)
2927*4882a593Smuzhiyun 		dma_release_channel(cdns_ctrl->dmac);
2928*4882a593Smuzhiyun }
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun struct cadence_nand_dt {
2931*4882a593Smuzhiyun 	struct cdns_nand_ctrl cdns_ctrl;
2932*4882a593Smuzhiyun 	struct clk *clk;
2933*4882a593Smuzhiyun };
2934*4882a593Smuzhiyun 
2935*4882a593Smuzhiyun static const struct cadence_nand_dt_devdata cadence_nand_default = {
2936*4882a593Smuzhiyun 	.if_skew = 0,
2937*4882a593Smuzhiyun 	.has_dma = 1,
2938*4882a593Smuzhiyun };
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun static const struct of_device_id cadence_nand_dt_ids[] = {
2941*4882a593Smuzhiyun 	{
2942*4882a593Smuzhiyun 		.compatible = "cdns,hp-nfc",
2943*4882a593Smuzhiyun 		.data = &cadence_nand_default
2944*4882a593Smuzhiyun 	}, {}
2945*4882a593Smuzhiyun };
2946*4882a593Smuzhiyun 
2947*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, cadence_nand_dt_ids);
2948*4882a593Smuzhiyun 
cadence_nand_dt_probe(struct platform_device * ofdev)2949*4882a593Smuzhiyun static int cadence_nand_dt_probe(struct platform_device *ofdev)
2950*4882a593Smuzhiyun {
2951*4882a593Smuzhiyun 	struct resource *res;
2952*4882a593Smuzhiyun 	struct cadence_nand_dt *dt;
2953*4882a593Smuzhiyun 	struct cdns_nand_ctrl *cdns_ctrl;
2954*4882a593Smuzhiyun 	int ret;
2955*4882a593Smuzhiyun 	const struct of_device_id *of_id;
2956*4882a593Smuzhiyun 	const struct cadence_nand_dt_devdata *devdata;
2957*4882a593Smuzhiyun 	u32 val;
2958*4882a593Smuzhiyun 
2959*4882a593Smuzhiyun 	of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
2960*4882a593Smuzhiyun 	if (of_id) {
2961*4882a593Smuzhiyun 		ofdev->id_entry = of_id->data;
2962*4882a593Smuzhiyun 		devdata = of_id->data;
2963*4882a593Smuzhiyun 	} else {
2964*4882a593Smuzhiyun 		pr_err("Failed to find the right device id.\n");
2965*4882a593Smuzhiyun 		return -ENOMEM;
2966*4882a593Smuzhiyun 	}
2967*4882a593Smuzhiyun 
2968*4882a593Smuzhiyun 	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
2969*4882a593Smuzhiyun 	if (!dt)
2970*4882a593Smuzhiyun 		return -ENOMEM;
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	cdns_ctrl = &dt->cdns_ctrl;
2973*4882a593Smuzhiyun 	cdns_ctrl->caps1 = devdata;
2974*4882a593Smuzhiyun 
2975*4882a593Smuzhiyun 	cdns_ctrl->dev = &ofdev->dev;
2976*4882a593Smuzhiyun 	cdns_ctrl->irq = platform_get_irq(ofdev, 0);
2977*4882a593Smuzhiyun 	if (cdns_ctrl->irq < 0)
2978*4882a593Smuzhiyun 		return cdns_ctrl->irq;
2979*4882a593Smuzhiyun 
2980*4882a593Smuzhiyun 	dev_info(cdns_ctrl->dev, "IRQ: nr %d\n", cdns_ctrl->irq);
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun 	cdns_ctrl->reg = devm_platform_ioremap_resource(ofdev, 0);
2983*4882a593Smuzhiyun 	if (IS_ERR(cdns_ctrl->reg))
2984*4882a593Smuzhiyun 		return PTR_ERR(cdns_ctrl->reg);
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 	cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
2987*4882a593Smuzhiyun 	if (IS_ERR(cdns_ctrl->io.virt))
2988*4882a593Smuzhiyun 		return PTR_ERR(cdns_ctrl->io.virt);
2989*4882a593Smuzhiyun 	cdns_ctrl->io.dma = res->start;
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun 	dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
2992*4882a593Smuzhiyun 	if (IS_ERR(dt->clk))
2993*4882a593Smuzhiyun 		return PTR_ERR(dt->clk);
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	cdns_ctrl->nf_clk_rate = clk_get_rate(dt->clk);
2996*4882a593Smuzhiyun 
2997*4882a593Smuzhiyun 	ret = of_property_read_u32(ofdev->dev.of_node,
2998*4882a593Smuzhiyun 				   "cdns,board-delay-ps", &val);
2999*4882a593Smuzhiyun 	if (ret) {
3000*4882a593Smuzhiyun 		val = 4830;
3001*4882a593Smuzhiyun 		dev_info(cdns_ctrl->dev,
3002*4882a593Smuzhiyun 			 "missing cdns,board-delay-ps property, %d was set\n",
3003*4882a593Smuzhiyun 			 val);
3004*4882a593Smuzhiyun 	}
3005*4882a593Smuzhiyun 	cdns_ctrl->board_delay = val;
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun 	ret = cadence_nand_init(cdns_ctrl);
3008*4882a593Smuzhiyun 	if (ret)
3009*4882a593Smuzhiyun 		return ret;
3010*4882a593Smuzhiyun 
3011*4882a593Smuzhiyun 	platform_set_drvdata(ofdev, dt);
3012*4882a593Smuzhiyun 	return 0;
3013*4882a593Smuzhiyun }
3014*4882a593Smuzhiyun 
cadence_nand_dt_remove(struct platform_device * ofdev)3015*4882a593Smuzhiyun static int cadence_nand_dt_remove(struct platform_device *ofdev)
3016*4882a593Smuzhiyun {
3017*4882a593Smuzhiyun 	struct cadence_nand_dt *dt = platform_get_drvdata(ofdev);
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	cadence_nand_remove(&dt->cdns_ctrl);
3020*4882a593Smuzhiyun 
3021*4882a593Smuzhiyun 	return 0;
3022*4882a593Smuzhiyun }
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun static struct platform_driver cadence_nand_dt_driver = {
3025*4882a593Smuzhiyun 	.probe		= cadence_nand_dt_probe,
3026*4882a593Smuzhiyun 	.remove		= cadence_nand_dt_remove,
3027*4882a593Smuzhiyun 	.driver		= {
3028*4882a593Smuzhiyun 		.name	= "cadence-nand-controller",
3029*4882a593Smuzhiyun 		.of_match_table = cadence_nand_dt_ids,
3030*4882a593Smuzhiyun 	},
3031*4882a593Smuzhiyun };
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun module_platform_driver(cadence_nand_dt_driver);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun MODULE_AUTHOR("Piotr Sroka <piotrs@cadence.com>");
3036*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
3037*4882a593Smuzhiyun MODULE_DESCRIPTION("Driver for Cadence NAND flash controller");
3038*4882a593Smuzhiyun 
3039