xref: /OK3568_Linux_fs/kernel/drivers/mtd/nand/raw/atmel/nand-controller.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2017 ATMEL
4*4882a593Smuzhiyun  * Copyright 2017 Free Electrons
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Derived from the atmel_nand.c driver which contained the following
9*4882a593Smuzhiyun  * copyrights:
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *   Copyright 2003 Rick Bronson
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  *   Derived from drivers/mtd/nand/autcpu12.c (removed in v3.8)
14*4882a593Smuzhiyun  *	Copyright 2001 Thomas Gleixner (gleixner@autronix.de)
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  *   Derived from drivers/mtd/spia.c (removed in v3.8)
17*4882a593Smuzhiyun  *	Copyright 2000 Steven J. Hill (sjhill@cotw.com)
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  *   Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
21*4882a593Smuzhiyun  *	Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright 2007
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *   Derived from Das U-Boot source code
24*4882a593Smuzhiyun  *	(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
25*4882a593Smuzhiyun  *	Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  *   Add Programmable Multibit ECC support for various AT91 SoC
28*4882a593Smuzhiyun  *	Copyright 2012 ATMEL, Hong Xu
29*4882a593Smuzhiyun  *
30*4882a593Smuzhiyun  *   Add Nand Flash Controller support for SAMA5 SoC
31*4882a593Smuzhiyun  *	Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
32*4882a593Smuzhiyun  *
33*4882a593Smuzhiyun  * A few words about the naming convention in this file. This convention
34*4882a593Smuzhiyun  * applies to structure and function names.
35*4882a593Smuzhiyun  *
36*4882a593Smuzhiyun  * Prefixes:
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * - atmel_nand_: all generic structures/functions
39*4882a593Smuzhiyun  * - atmel_smc_nand_: all structures/functions specific to the SMC interface
40*4882a593Smuzhiyun  *		      (at91sam9 and avr32 SoCs)
41*4882a593Smuzhiyun  * - atmel_hsmc_nand_: all structures/functions specific to the HSMC interface
42*4882a593Smuzhiyun  *		       (sama5 SoCs and later)
43*4882a593Smuzhiyun  * - atmel_nfc_: all structures/functions used to manipulate the NFC sub-block
44*4882a593Smuzhiyun  *		 that is available in the HSMC block
45*4882a593Smuzhiyun  * - <soc>_nand_: all SoC specific structures/functions
46*4882a593Smuzhiyun  */
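/*
 * As a concrete illustration of the scheme above: atmel_smc_nand_exec_op()
 * below implements raw command/address cycles for SMC-based SoCs,
 * atmel_hsmc_nand_exec_op() is its HSMC counterpart, and
 * atmel_nfc_exec_op() programs the NFC sub-block, which only exists on the
 * HSMC path.
 */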
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #include <linux/clk.h>
49*4882a593Smuzhiyun #include <linux/dma-mapping.h>
50*4882a593Smuzhiyun #include <linux/dmaengine.h>
51*4882a593Smuzhiyun #include <linux/genalloc.h>
52*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
53*4882a593Smuzhiyun #include <linux/interrupt.h>
54*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
55*4882a593Smuzhiyun #include <linux/mfd/syscon/atmel-matrix.h>
56*4882a593Smuzhiyun #include <linux/mfd/syscon/atmel-smc.h>
57*4882a593Smuzhiyun #include <linux/module.h>
58*4882a593Smuzhiyun #include <linux/mtd/rawnand.h>
59*4882a593Smuzhiyun #include <linux/of_address.h>
60*4882a593Smuzhiyun #include <linux/of_irq.h>
61*4882a593Smuzhiyun #include <linux/of_platform.h>
62*4882a593Smuzhiyun #include <linux/iopoll.h>
63*4882a593Smuzhiyun #include <linux/platform_device.h>
64*4882a593Smuzhiyun #include <linux/regmap.h>
65*4882a593Smuzhiyun #include <soc/at91/atmel-sfr.h>
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun #include "pmecc.h"
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG			0x0
70*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_SPARESIZE(x)		(((x) / 4) << 24)
71*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK	GENMASK(30, 24)
72*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_DTO(cyc, mul)	(((cyc) << 16) | ((mul) << 20))
73*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_DTO_MAX		GENMASK(22, 16)
74*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_RBEDGE		BIT(13)
75*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_FALLING_EDGE		BIT(12)
76*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_RSPARE		BIT(9)
77*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_WSPARE		BIT(8)
78*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK	GENMASK(2, 0)
79*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CFG_PAGESIZE(x)		(fls((x) / 512) - 1)
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CTRL			0x4
82*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CTRL_EN			BIT(0)
83*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_CTRL_DIS			BIT(1)
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR			0x8
86*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_IER			0xc
87*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_IDR			0x10
88*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_IMR			0x14
89*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_ENABLED		BIT(1)
90*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_RB_RISE		BIT(4)
91*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_RB_FALL		BIT(5)
92*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_BUSY			BIT(8)
93*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_WR			BIT(11)
94*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_CSID			GENMASK(14, 12)
95*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_XFRDONE		BIT(16)
96*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_CMDDONE		BIT(17)
97*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_DTOE			BIT(20)
98*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_UNDEF			BIT(21)
99*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_AWB			BIT(22)
100*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_NFCASE		BIT(23)
101*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_ERRORS		(ATMEL_HSMC_NFC_SR_DTOE | \
102*4882a593Smuzhiyun 						 ATMEL_HSMC_NFC_SR_UNDEF | \
103*4882a593Smuzhiyun 						 ATMEL_HSMC_NFC_SR_AWB | \
104*4882a593Smuzhiyun 						 ATMEL_HSMC_NFC_SR_NFCASE)
105*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_SR_RBEDGE(x)		BIT((x) + 24)
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_ADDR			0x18
108*4882a593Smuzhiyun #define ATMEL_HSMC_NFC_BANK			0x1c
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun #define ATMEL_NFC_MAX_RB_ID			7
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #define ATMEL_NFC_SRAM_SIZE			0x2400
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun #define ATMEL_NFC_CMD(pos, cmd)			((cmd) << (((pos) * 8) + 2))
115*4882a593Smuzhiyun #define ATMEL_NFC_VCMD2				BIT(18)
116*4882a593Smuzhiyun #define ATMEL_NFC_ACYCLE(naddrs)		((naddrs) << 19)
117*4882a593Smuzhiyun #define ATMEL_NFC_CSID(cs)			((cs) << 22)
118*4882a593Smuzhiyun #define ATMEL_NFC_DATAEN			BIT(25)
119*4882a593Smuzhiyun #define ATMEL_NFC_NFCWR				BIT(26)
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun #define ATMEL_NFC_MAX_ADDR_CYCLES		5
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun #define ATMEL_NAND_ALE_OFFSET			BIT(21)
124*4882a593Smuzhiyun #define ATMEL_NAND_CLE_OFFSET			BIT(22)
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun #define DEFAULT_TIMEOUT_MS			1000
127*4882a593Smuzhiyun #define MIN_DMA_LEN				128
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun static bool atmel_nand_avoid_dma __read_mostly;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun MODULE_PARM_DESC(avoiddma, "Avoid using DMA");
132*4882a593Smuzhiyun module_param_named(avoiddma, atmel_nand_avoid_dma, bool, 0400);
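/*
 * The parameter can be set at boot or module load time, e.g.
 * "atmel_nand_controller.avoiddma=1" on the kernel command line (module
 * name assumed here), to make the driver skip DMA and rely on CPU I/O.
 */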
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun enum atmel_nand_rb_type {
135*4882a593Smuzhiyun 	ATMEL_NAND_NO_RB,
136*4882a593Smuzhiyun 	ATMEL_NAND_NATIVE_RB,
137*4882a593Smuzhiyun 	ATMEL_NAND_GPIO_RB,
138*4882a593Smuzhiyun };
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun struct atmel_nand_rb {
141*4882a593Smuzhiyun 	enum atmel_nand_rb_type type;
142*4882a593Smuzhiyun 	union {
143*4882a593Smuzhiyun 		struct gpio_desc *gpio;
144*4882a593Smuzhiyun 		int id;
145*4882a593Smuzhiyun 	};
146*4882a593Smuzhiyun };
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun struct atmel_nand_cs {
149*4882a593Smuzhiyun 	int id;
150*4882a593Smuzhiyun 	struct atmel_nand_rb rb;
151*4882a593Smuzhiyun 	struct gpio_desc *csgpio;
152*4882a593Smuzhiyun 	struct {
153*4882a593Smuzhiyun 		void __iomem *virt;
154*4882a593Smuzhiyun 		dma_addr_t dma;
155*4882a593Smuzhiyun 	} io;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	struct atmel_smc_cs_conf smcconf;
158*4882a593Smuzhiyun };
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun struct atmel_nand {
161*4882a593Smuzhiyun 	struct list_head node;
162*4882a593Smuzhiyun 	struct device *dev;
163*4882a593Smuzhiyun 	struct nand_chip base;
164*4882a593Smuzhiyun 	struct atmel_nand_cs *activecs;
165*4882a593Smuzhiyun 	struct atmel_pmecc_user *pmecc;
166*4882a593Smuzhiyun 	struct gpio_desc *cdgpio;
167*4882a593Smuzhiyun 	int numcs;
168*4882a593Smuzhiyun 	struct atmel_nand_cs cs[];
169*4882a593Smuzhiyun };
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
172*4882a593Smuzhiyun {
173*4882a593Smuzhiyun 	return container_of(chip, struct atmel_nand, base);
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun enum atmel_nfc_data_xfer {
177*4882a593Smuzhiyun 	ATMEL_NFC_NO_DATA,
178*4882a593Smuzhiyun 	ATMEL_NFC_READ_DATA,
179*4882a593Smuzhiyun 	ATMEL_NFC_WRITE_DATA,
180*4882a593Smuzhiyun };
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun struct atmel_nfc_op {
183*4882a593Smuzhiyun 	u8 cs;
184*4882a593Smuzhiyun 	u8 ncmds;
185*4882a593Smuzhiyun 	u8 cmds[2];
186*4882a593Smuzhiyun 	u8 naddrs;
187*4882a593Smuzhiyun 	u8 addrs[5];
188*4882a593Smuzhiyun 	enum atmel_nfc_data_xfer data;
189*4882a593Smuzhiyun 	u32 wait;
190*4882a593Smuzhiyun 	u32 errors;
191*4882a593Smuzhiyun };
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun struct atmel_nand_controller;
194*4882a593Smuzhiyun struct atmel_nand_controller_caps;
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun struct atmel_nand_controller_ops {
197*4882a593Smuzhiyun 	int (*probe)(struct platform_device *pdev,
198*4882a593Smuzhiyun 		     const struct atmel_nand_controller_caps *caps);
199*4882a593Smuzhiyun 	int (*remove)(struct atmel_nand_controller *nc);
200*4882a593Smuzhiyun 	void (*nand_init)(struct atmel_nand_controller *nc,
201*4882a593Smuzhiyun 			  struct atmel_nand *nand);
202*4882a593Smuzhiyun 	int (*ecc_init)(struct nand_chip *chip);
203*4882a593Smuzhiyun 	int (*setup_interface)(struct atmel_nand *nand, int csline,
204*4882a593Smuzhiyun 			       const struct nand_interface_config *conf);
205*4882a593Smuzhiyun 	int (*exec_op)(struct atmel_nand *nand,
206*4882a593Smuzhiyun 		       const struct nand_operation *op, bool check_only);
207*4882a593Smuzhiyun };
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun struct atmel_nand_controller_caps {
210*4882a593Smuzhiyun 	bool has_dma;
211*4882a593Smuzhiyun 	bool legacy_of_bindings;
212*4882a593Smuzhiyun 	u32 ale_offs;
213*4882a593Smuzhiyun 	u32 cle_offs;
214*4882a593Smuzhiyun 	const char *ebi_csa_regmap_name;
215*4882a593Smuzhiyun 	const struct atmel_nand_controller_ops *ops;
216*4882a593Smuzhiyun };
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun struct atmel_nand_controller {
219*4882a593Smuzhiyun 	struct nand_controller base;
220*4882a593Smuzhiyun 	const struct atmel_nand_controller_caps *caps;
221*4882a593Smuzhiyun 	struct device *dev;
222*4882a593Smuzhiyun 	struct regmap *smc;
223*4882a593Smuzhiyun 	struct dma_chan *dmac;
224*4882a593Smuzhiyun 	struct atmel_pmecc *pmecc;
225*4882a593Smuzhiyun 	struct list_head chips;
226*4882a593Smuzhiyun 	struct clk *mck;
227*4882a593Smuzhiyun };
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun static inline struct atmel_nand_controller *
230*4882a593Smuzhiyun to_nand_controller(struct nand_controller *ctl)
231*4882a593Smuzhiyun {
232*4882a593Smuzhiyun 	return container_of(ctl, struct atmel_nand_controller, base);
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun struct atmel_smc_nand_ebi_csa_cfg {
236*4882a593Smuzhiyun 	u32 offs;
237*4882a593Smuzhiyun 	u32 nfd0_on_d16;
238*4882a593Smuzhiyun };
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun struct atmel_smc_nand_controller {
241*4882a593Smuzhiyun 	struct atmel_nand_controller base;
242*4882a593Smuzhiyun 	struct regmap *ebi_csa_regmap;
243*4882a593Smuzhiyun 	struct atmel_smc_nand_ebi_csa_cfg *ebi_csa;
244*4882a593Smuzhiyun };
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun static inline struct atmel_smc_nand_controller *
247*4882a593Smuzhiyun to_smc_nand_controller(struct nand_controller *ctl)
248*4882a593Smuzhiyun {
249*4882a593Smuzhiyun 	return container_of(to_nand_controller(ctl),
250*4882a593Smuzhiyun 			    struct atmel_smc_nand_controller, base);
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun struct atmel_hsmc_nand_controller {
254*4882a593Smuzhiyun 	struct atmel_nand_controller base;
255*4882a593Smuzhiyun 	struct {
256*4882a593Smuzhiyun 		struct gen_pool *pool;
257*4882a593Smuzhiyun 		void __iomem *virt;
258*4882a593Smuzhiyun 		dma_addr_t dma;
259*4882a593Smuzhiyun 	} sram;
260*4882a593Smuzhiyun 	const struct atmel_hsmc_reg_layout *hsmc_layout;
261*4882a593Smuzhiyun 	struct regmap *io;
262*4882a593Smuzhiyun 	struct atmel_nfc_op op;
263*4882a593Smuzhiyun 	struct completion complete;
264*4882a593Smuzhiyun 	u32 cfg;
265*4882a593Smuzhiyun 	int irq;
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	/* Only used when instantiating from legacy DT bindings. */
268*4882a593Smuzhiyun 	struct clk *clk;
269*4882a593Smuzhiyun };
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun static inline struct atmel_hsmc_nand_controller *
272*4882a593Smuzhiyun to_hsmc_nand_controller(struct nand_controller *ctl)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun 	return container_of(to_nand_controller(ctl),
275*4882a593Smuzhiyun 			    struct atmel_hsmc_nand_controller, base);
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun static bool atmel_nfc_op_done(struct atmel_nfc_op *op, u32 status)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	op->errors |= status & ATMEL_HSMC_NFC_SR_ERRORS;
281*4882a593Smuzhiyun 	op->wait ^= status & op->wait;
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	return !op->wait || op->errors;
284*4882a593Smuzhiyun }
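/*
 * Each call clears the bits of op->wait that are now set in the status word
 * and accumulates any error bits; the operation is done once every awaited
 * bit has been seen or an error occurred. For example, with
 * op->wait = CMDDONE | XFRDONE, an interrupt reporting only CMDDONE leaves
 * XFRDONE pending, and a later XFRDONE event completes the operation.
 */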
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun static irqreturn_t atmel_nfc_interrupt(int irq, void *data)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc = data;
289*4882a593Smuzhiyun 	u32 sr, rcvd;
290*4882a593Smuzhiyun 	bool done;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &sr);
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
295*4882a593Smuzhiyun 	done = atmel_nfc_op_done(&nc->op, sr);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	if (rcvd)
298*4882a593Smuzhiyun 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	if (done)
301*4882a593Smuzhiyun 		complete(&nc->complete);
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	return rcvd ? IRQ_HANDLED : IRQ_NONE;
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun static int atmel_nfc_wait(struct atmel_hsmc_nand_controller *nc, bool poll,
307*4882a593Smuzhiyun 			  unsigned int timeout_ms)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun 	int ret;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	if (!timeout_ms)
312*4882a593Smuzhiyun 		timeout_ms = DEFAULT_TIMEOUT_MS;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	if (poll) {
315*4882a593Smuzhiyun 		u32 status;
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 		ret = regmap_read_poll_timeout(nc->base.smc,
318*4882a593Smuzhiyun 					       ATMEL_HSMC_NFC_SR, status,
319*4882a593Smuzhiyun 					       atmel_nfc_op_done(&nc->op,
320*4882a593Smuzhiyun 								 status),
321*4882a593Smuzhiyun 					       0, timeout_ms * 1000);
322*4882a593Smuzhiyun 	} else {
323*4882a593Smuzhiyun 		init_completion(&nc->complete);
324*4882a593Smuzhiyun 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IER,
325*4882a593Smuzhiyun 			     nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
326*4882a593Smuzhiyun 		ret = wait_for_completion_timeout(&nc->complete,
327*4882a593Smuzhiyun 						msecs_to_jiffies(timeout_ms));
328*4882a593Smuzhiyun 		if (!ret)
329*4882a593Smuzhiyun 			ret = -ETIMEDOUT;
330*4882a593Smuzhiyun 		else
331*4882a593Smuzhiyun 			ret = 0;
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
334*4882a593Smuzhiyun 	}
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (nc->op.errors & ATMEL_HSMC_NFC_SR_DTOE) {
337*4882a593Smuzhiyun 		dev_err(nc->base.dev, "Waiting NAND R/B Timeout\n");
338*4882a593Smuzhiyun 		ret = -ETIMEDOUT;
339*4882a593Smuzhiyun 	}
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	if (nc->op.errors & ATMEL_HSMC_NFC_SR_UNDEF) {
342*4882a593Smuzhiyun 		dev_err(nc->base.dev, "Access to an undefined area\n");
343*4882a593Smuzhiyun 		ret = -EIO;
344*4882a593Smuzhiyun 	}
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	if (nc->op.errors & ATMEL_HSMC_NFC_SR_AWB) {
347*4882a593Smuzhiyun 		dev_err(nc->base.dev, "Access while busy\n");
348*4882a593Smuzhiyun 		ret = -EIO;
349*4882a593Smuzhiyun 	}
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	if (nc->op.errors & ATMEL_HSMC_NFC_SR_NFCASE) {
352*4882a593Smuzhiyun 		dev_err(nc->base.dev, "Wrong access size\n");
353*4882a593Smuzhiyun 		ret = -EIO;
354*4882a593Smuzhiyun 	}
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	return ret;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun static void atmel_nand_dma_transfer_finished(void *data)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun 	struct completion *finished = data;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	complete(finished);
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun static int atmel_nand_dma_transfer(struct atmel_nand_controller *nc,
367*4882a593Smuzhiyun 				   void *buf, dma_addr_t dev_dma, size_t len,
368*4882a593Smuzhiyun 				   enum dma_data_direction dir)
369*4882a593Smuzhiyun {
370*4882a593Smuzhiyun 	DECLARE_COMPLETION_ONSTACK(finished);
371*4882a593Smuzhiyun 	dma_addr_t src_dma, dst_dma, buf_dma;
372*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *tx;
373*4882a593Smuzhiyun 	dma_cookie_t cookie;
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	buf_dma = dma_map_single(nc->dev, buf, len, dir);
376*4882a593Smuzhiyun 	if (dma_mapping_error(nc->dev, buf_dma)) {
377*4882a593Smuzhiyun 		dev_err(nc->dev,
378*4882a593Smuzhiyun 			"Failed to prepare a buffer for DMA access\n");
379*4882a593Smuzhiyun 		goto err;
380*4882a593Smuzhiyun 	}
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	if (dir == DMA_FROM_DEVICE) {
383*4882a593Smuzhiyun 		src_dma = dev_dma;
384*4882a593Smuzhiyun 		dst_dma = buf_dma;
385*4882a593Smuzhiyun 	} else {
386*4882a593Smuzhiyun 		src_dma = buf_dma;
387*4882a593Smuzhiyun 		dst_dma = dev_dma;
388*4882a593Smuzhiyun 	}
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	tx = dmaengine_prep_dma_memcpy(nc->dmac, dst_dma, src_dma, len,
391*4882a593Smuzhiyun 				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
392*4882a593Smuzhiyun 	if (!tx) {
393*4882a593Smuzhiyun 		dev_err(nc->dev, "Failed to prepare DMA memcpy\n");
394*4882a593Smuzhiyun 		goto err_unmap;
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	tx->callback = atmel_nand_dma_transfer_finished;
398*4882a593Smuzhiyun 	tx->callback_param = &finished;
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	cookie = dmaengine_submit(tx);
401*4882a593Smuzhiyun 	if (dma_submit_error(cookie)) {
402*4882a593Smuzhiyun 		dev_err(nc->dev, "Failed to do DMA tx_submit\n");
403*4882a593Smuzhiyun 		goto err_unmap;
404*4882a593Smuzhiyun 	}
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	dma_async_issue_pending(nc->dmac);
407*4882a593Smuzhiyun 	wait_for_completion(&finished);
408*4882a593Smuzhiyun 	dma_unmap_single(nc->dev, buf_dma, len, dir);
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	return 0;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun err_unmap:
413*4882a593Smuzhiyun 	dma_unmap_single(nc->dev, buf_dma, len, dir);
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun err:
416*4882a593Smuzhiyun 	dev_dbg(nc->dev, "Fall back to CPU I/O\n");
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	return -EIO;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun static int atmel_nfc_exec_op(struct atmel_hsmc_nand_controller *nc, bool poll)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	u8 *addrs = nc->op.addrs;
424*4882a593Smuzhiyun 	unsigned int op = 0;
425*4882a593Smuzhiyun 	u32 addr, val;
426*4882a593Smuzhiyun 	int i, ret;
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 	nc->op.wait = ATMEL_HSMC_NFC_SR_CMDDONE;
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	for (i = 0; i < nc->op.ncmds; i++)
431*4882a593Smuzhiyun 		op |= ATMEL_NFC_CMD(i, nc->op.cmds[i]);
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	if (nc->op.naddrs == ATMEL_NFC_MAX_ADDR_CYCLES)
434*4882a593Smuzhiyun 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_ADDR, *addrs++);
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 	op |= ATMEL_NFC_CSID(nc->op.cs) |
437*4882a593Smuzhiyun 	      ATMEL_NFC_ACYCLE(nc->op.naddrs);
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	if (nc->op.ncmds > 1)
440*4882a593Smuzhiyun 		op |= ATMEL_NFC_VCMD2;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	addr = addrs[0] | (addrs[1] << 8) | (addrs[2] << 16) |
443*4882a593Smuzhiyun 	       (addrs[3] << 24);
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	if (nc->op.data != ATMEL_NFC_NO_DATA) {
446*4882a593Smuzhiyun 		op |= ATMEL_NFC_DATAEN;
447*4882a593Smuzhiyun 		nc->op.wait |= ATMEL_HSMC_NFC_SR_XFRDONE;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 		if (nc->op.data == ATMEL_NFC_WRITE_DATA)
450*4882a593Smuzhiyun 			op |= ATMEL_NFC_NFCWR;
451*4882a593Smuzhiyun 	}
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	/* Clear all flags. */
454*4882a593Smuzhiyun 	regmap_read(nc->base.smc, ATMEL_HSMC_NFC_SR, &val);
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	/* Send the command. */
457*4882a593Smuzhiyun 	regmap_write(nc->io, op, addr);
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	ret = atmel_nfc_wait(nc, poll, 0);
460*4882a593Smuzhiyun 	if (ret)
461*4882a593Smuzhiyun 		dev_err(nc->base.dev,
462*4882a593Smuzhiyun 			"Failed to send NAND command (err = %d)!",
463*4882a593Smuzhiyun 			ret);
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	/* Reset the op state. */
466*4882a593Smuzhiyun 	memset(&nc->op, 0, sizeof(nc->op));
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	return ret;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun static void atmel_nand_data_in(struct atmel_nand *nand, void *buf,
472*4882a593Smuzhiyun 			       unsigned int len, bool force_8bit)
473*4882a593Smuzhiyun {
474*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 	/*
479*4882a593Smuzhiyun 	 * If the controller supports DMA, the buffer address is DMA-able and
480*4882a593Smuzhiyun 	 * len is long enough to make DMA transfers profitable, let's trigger
481*4882a593Smuzhiyun 	 * a DMA transfer. If it fails, fall back to PIO mode.
482*4882a593Smuzhiyun 	 */
483*4882a593Smuzhiyun 	if (nc->dmac && virt_addr_valid(buf) &&
484*4882a593Smuzhiyun 	    len >= MIN_DMA_LEN && !force_8bit &&
485*4882a593Smuzhiyun 	    !atmel_nand_dma_transfer(nc, buf, nand->activecs->io.dma, len,
486*4882a593Smuzhiyun 				     DMA_FROM_DEVICE))
487*4882a593Smuzhiyun 		return;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
490*4882a593Smuzhiyun 		ioread16_rep(nand->activecs->io.virt, buf, len / 2);
491*4882a593Smuzhiyun 	else
492*4882a593Smuzhiyun 		ioread8_rep(nand->activecs->io.virt, buf, len);
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun static void atmel_nand_data_out(struct atmel_nand *nand, const void *buf,
496*4882a593Smuzhiyun 				unsigned int len, bool force_8bit)
497*4882a593Smuzhiyun {
498*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	/*
503*4882a593Smuzhiyun 	 * If the controller supports DMA, the buffer address is DMA-able and
504*4882a593Smuzhiyun 	 * len is long enough to make DMA transfers profitable, let's trigger
505*4882a593Smuzhiyun 	 * a DMA transfer. If it fails, fallback to PIO mode.
506*4882a593Smuzhiyun 	 */
507*4882a593Smuzhiyun 	if (nc->dmac && virt_addr_valid(buf) &&
508*4882a593Smuzhiyun 	    len >= MIN_DMA_LEN && !force_8bit &&
509*4882a593Smuzhiyun 	    !atmel_nand_dma_transfer(nc, (void *)buf, nand->activecs->io.dma,
510*4882a593Smuzhiyun 				     len, DMA_TO_DEVICE))
511*4882a593Smuzhiyun 		return;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	if ((nand->base.options & NAND_BUSWIDTH_16) && !force_8bit)
514*4882a593Smuzhiyun 		iowrite16_rep(nand->activecs->io.virt, buf, len / 2);
515*4882a593Smuzhiyun 	else
516*4882a593Smuzhiyun 		iowrite8_rep(nand->activecs->io.virt, buf, len);
517*4882a593Smuzhiyun }
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun static int atmel_nand_waitrdy(struct atmel_nand *nand, unsigned int timeout_ms)
520*4882a593Smuzhiyun {
521*4882a593Smuzhiyun 	if (nand->activecs->rb.type == ATMEL_NAND_NO_RB)
522*4882a593Smuzhiyun 		return nand_soft_waitrdy(&nand->base, timeout_ms);
523*4882a593Smuzhiyun 
524*4882a593Smuzhiyun 	return nand_gpio_waitrdy(&nand->base, nand->activecs->rb.gpio,
525*4882a593Smuzhiyun 				 timeout_ms);
526*4882a593Smuzhiyun }
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun static int atmel_hsmc_nand_waitrdy(struct atmel_nand *nand,
529*4882a593Smuzhiyun 				   unsigned int timeout_ms)
530*4882a593Smuzhiyun {
531*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
532*4882a593Smuzhiyun 	u32 status, mask;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
535*4882a593Smuzhiyun 		return atmel_nand_waitrdy(nand, timeout_ms);
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(nand->base.controller);
538*4882a593Smuzhiyun 	mask = ATMEL_HSMC_NFC_SR_RBEDGE(nand->activecs->rb.id);
539*4882a593Smuzhiyun 	return regmap_read_poll_timeout_atomic(nc->base.smc, ATMEL_HSMC_NFC_SR,
540*4882a593Smuzhiyun 					       status, status & mask,
541*4882a593Smuzhiyun 					       10, timeout_ms * 1000);
542*4882a593Smuzhiyun }
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun static void atmel_nand_select_target(struct atmel_nand *nand,
545*4882a593Smuzhiyun 				     unsigned int cs)
546*4882a593Smuzhiyun {
547*4882a593Smuzhiyun 	nand->activecs = &nand->cs[cs];
548*4882a593Smuzhiyun }
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun static void atmel_hsmc_nand_select_target(struct atmel_nand *nand,
551*4882a593Smuzhiyun 					  unsigned int cs)
552*4882a593Smuzhiyun {
553*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(&nand->base);
554*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
555*4882a593Smuzhiyun 	u32 cfg = ATMEL_HSMC_NFC_CFG_PAGESIZE(mtd->writesize) |
556*4882a593Smuzhiyun 		  ATMEL_HSMC_NFC_CFG_SPARESIZE(mtd->oobsize) |
557*4882a593Smuzhiyun 		  ATMEL_HSMC_NFC_CFG_RSPARE;
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	nand->activecs = &nand->cs[cs];
560*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(nand->base.controller);
561*4882a593Smuzhiyun 	if (nc->cfg == cfg)
562*4882a593Smuzhiyun 		return;
563*4882a593Smuzhiyun 
564*4882a593Smuzhiyun 	regmap_update_bits(nc->base.smc, ATMEL_HSMC_NFC_CFG,
565*4882a593Smuzhiyun 			   ATMEL_HSMC_NFC_CFG_PAGESIZE_MASK |
566*4882a593Smuzhiyun 			   ATMEL_HSMC_NFC_CFG_SPARESIZE_MASK |
567*4882a593Smuzhiyun 			   ATMEL_HSMC_NFC_CFG_RSPARE |
568*4882a593Smuzhiyun 			   ATMEL_HSMC_NFC_CFG_WSPARE,
569*4882a593Smuzhiyun 			   cfg);
570*4882a593Smuzhiyun 	nc->cfg = cfg;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun static int atmel_smc_nand_exec_instr(struct atmel_nand *nand,
574*4882a593Smuzhiyun 				     const struct nand_op_instr *instr)
575*4882a593Smuzhiyun {
576*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
577*4882a593Smuzhiyun 	unsigned int i;
578*4882a593Smuzhiyun 
579*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
580*4882a593Smuzhiyun 	switch (instr->type) {
581*4882a593Smuzhiyun 	case NAND_OP_CMD_INSTR:
582*4882a593Smuzhiyun 		writeb(instr->ctx.cmd.opcode,
583*4882a593Smuzhiyun 		       nand->activecs->io.virt + nc->caps->cle_offs);
584*4882a593Smuzhiyun 		return 0;
585*4882a593Smuzhiyun 	case NAND_OP_ADDR_INSTR:
586*4882a593Smuzhiyun 		for (i = 0; i < instr->ctx.addr.naddrs; i++)
587*4882a593Smuzhiyun 			writeb(instr->ctx.addr.addrs[i],
588*4882a593Smuzhiyun 			       nand->activecs->io.virt + nc->caps->ale_offs);
589*4882a593Smuzhiyun 		return 0;
590*4882a593Smuzhiyun 	case NAND_OP_DATA_IN_INSTR:
591*4882a593Smuzhiyun 		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
592*4882a593Smuzhiyun 				   instr->ctx.data.len,
593*4882a593Smuzhiyun 				   instr->ctx.data.force_8bit);
594*4882a593Smuzhiyun 		return 0;
595*4882a593Smuzhiyun 	case NAND_OP_DATA_OUT_INSTR:
596*4882a593Smuzhiyun 		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
597*4882a593Smuzhiyun 				    instr->ctx.data.len,
598*4882a593Smuzhiyun 				    instr->ctx.data.force_8bit);
599*4882a593Smuzhiyun 		return 0;
600*4882a593Smuzhiyun 	case NAND_OP_WAITRDY_INSTR:
601*4882a593Smuzhiyun 		return atmel_nand_waitrdy(nand,
602*4882a593Smuzhiyun 					  instr->ctx.waitrdy.timeout_ms);
603*4882a593Smuzhiyun 	default:
604*4882a593Smuzhiyun 		break;
605*4882a593Smuzhiyun 	}
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	return -EINVAL;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun static int atmel_smc_nand_exec_op(struct atmel_nand *nand,
611*4882a593Smuzhiyun 				  const struct nand_operation *op,
612*4882a593Smuzhiyun 				  bool check_only)
613*4882a593Smuzhiyun {
614*4882a593Smuzhiyun 	unsigned int i;
615*4882a593Smuzhiyun 	int ret = 0;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	if (check_only)
618*4882a593Smuzhiyun 		return 0;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 	atmel_nand_select_target(nand, op->cs);
621*4882a593Smuzhiyun 	gpiod_set_value(nand->activecs->csgpio, 0);
622*4882a593Smuzhiyun 	for (i = 0; i < op->ninstrs; i++) {
623*4882a593Smuzhiyun 		ret = atmel_smc_nand_exec_instr(nand, &op->instrs[i]);
624*4882a593Smuzhiyun 		if (ret)
625*4882a593Smuzhiyun 			break;
626*4882a593Smuzhiyun 	}
627*4882a593Smuzhiyun 	gpiod_set_value(nand->activecs->csgpio, 1);
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 	return ret;
630*4882a593Smuzhiyun }
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun static int atmel_hsmc_exec_cmd_addr(struct nand_chip *chip,
633*4882a593Smuzhiyun 				    const struct nand_subop *subop)
634*4882a593Smuzhiyun {
635*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
636*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
637*4882a593Smuzhiyun 	unsigned int i, j;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	nc->op.cs = nand->activecs->id;
642*4882a593Smuzhiyun 	for (i = 0; i < subop->ninstrs; i++) {
643*4882a593Smuzhiyun 		const struct nand_op_instr *instr = &subop->instrs[i];
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 		if (instr->type == NAND_OP_CMD_INSTR) {
646*4882a593Smuzhiyun 			nc->op.cmds[nc->op.ncmds++] = instr->ctx.cmd.opcode;
647*4882a593Smuzhiyun 			continue;
648*4882a593Smuzhiyun 		}
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 		for (j = nand_subop_get_addr_start_off(subop, i);
651*4882a593Smuzhiyun 		     j < nand_subop_get_num_addr_cyc(subop, i); j++) {
652*4882a593Smuzhiyun 			nc->op.addrs[nc->op.naddrs] = instr->ctx.addr.addrs[j];
653*4882a593Smuzhiyun 			nc->op.naddrs++;
654*4882a593Smuzhiyun 		}
655*4882a593Smuzhiyun 	}
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 	return atmel_nfc_exec_op(nc, true);
658*4882a593Smuzhiyun }
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun static int atmel_hsmc_exec_rw(struct nand_chip *chip,
661*4882a593Smuzhiyun 			      const struct nand_subop *subop)
662*4882a593Smuzhiyun {
663*4882a593Smuzhiyun 	const struct nand_op_instr *instr = subop->instrs;
664*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	if (instr->type == NAND_OP_DATA_IN_INSTR)
667*4882a593Smuzhiyun 		atmel_nand_data_in(nand, instr->ctx.data.buf.in,
668*4882a593Smuzhiyun 				   instr->ctx.data.len,
669*4882a593Smuzhiyun 				   instr->ctx.data.force_8bit);
670*4882a593Smuzhiyun 	else
671*4882a593Smuzhiyun 		atmel_nand_data_out(nand, instr->ctx.data.buf.out,
672*4882a593Smuzhiyun 				    instr->ctx.data.len,
673*4882a593Smuzhiyun 				    instr->ctx.data.force_8bit);
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 	return 0;
676*4882a593Smuzhiyun }
677*4882a593Smuzhiyun 
678*4882a593Smuzhiyun static int atmel_hsmc_exec_waitrdy(struct nand_chip *chip,
679*4882a593Smuzhiyun 				   const struct nand_subop *subop)
680*4882a593Smuzhiyun {
681*4882a593Smuzhiyun 	const struct nand_op_instr *instr = subop->instrs;
682*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	return atmel_hsmc_nand_waitrdy(nand, instr->ctx.waitrdy.timeout_ms);
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun static const struct nand_op_parser atmel_hsmc_op_parser = NAND_OP_PARSER(
688*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_cmd_addr,
689*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_CMD_ELEM(true),
690*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
691*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_CMD_ELEM(true)),
692*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
693*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_DATA_IN_ELEM(false, 0)),
694*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_rw,
695*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_DATA_OUT_ELEM(false, 0)),
696*4882a593Smuzhiyun 	NAND_OP_PARSER_PATTERN(atmel_hsmc_exec_waitrdy,
697*4882a593Smuzhiyun 		NAND_OP_PARSER_PAT_WAITRDY_ELEM(false)),
698*4882a593Smuzhiyun );
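/*
 * A page read on a large-page chip, for instance, is split by this parser
 * into: the READ0 + address cycles + READSTART sequence handled by
 * atmel_hsmc_exec_cmd_addr(), the R/B wait handled by
 * atmel_hsmc_exec_waitrdy(), and any data-in phase handled by
 * atmel_hsmc_exec_rw().
 */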
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun static int atmel_hsmc_nand_exec_op(struct atmel_nand *nand,
701*4882a593Smuzhiyun 				   const struct nand_operation *op,
702*4882a593Smuzhiyun 				   bool check_only)
703*4882a593Smuzhiyun {
704*4882a593Smuzhiyun 	int ret;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	if (check_only)
707*4882a593Smuzhiyun 		return nand_op_parser_exec_op(&nand->base,
708*4882a593Smuzhiyun 					      &atmel_hsmc_op_parser, op, true);
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	atmel_hsmc_nand_select_target(nand, op->cs);
711*4882a593Smuzhiyun 	ret = nand_op_parser_exec_op(&nand->base, &atmel_hsmc_op_parser, op,
712*4882a593Smuzhiyun 				     false);
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	return ret;
715*4882a593Smuzhiyun }
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun static void atmel_nfc_copy_to_sram(struct nand_chip *chip, const u8 *buf,
718*4882a593Smuzhiyun 				   bool oob_required)
719*4882a593Smuzhiyun {
720*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
721*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
722*4882a593Smuzhiyun 	int ret = -EIO;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
725*4882a593Smuzhiyun 
726*4882a593Smuzhiyun 	if (nc->base.dmac)
727*4882a593Smuzhiyun 		ret = atmel_nand_dma_transfer(&nc->base, (void *)buf,
728*4882a593Smuzhiyun 					      nc->sram.dma, mtd->writesize,
729*4882a593Smuzhiyun 					      DMA_TO_DEVICE);
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	/* Falling back to CPU copy. */
732*4882a593Smuzhiyun 	if (ret)
733*4882a593Smuzhiyun 		memcpy_toio(nc->sram.virt, buf, mtd->writesize);
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	if (oob_required)
736*4882a593Smuzhiyun 		memcpy_toio(nc->sram.virt + mtd->writesize, chip->oob_poi,
737*4882a593Smuzhiyun 			    mtd->oobsize);
738*4882a593Smuzhiyun }
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun static void atmel_nfc_copy_from_sram(struct nand_chip *chip, u8 *buf,
741*4882a593Smuzhiyun 				     bool oob_required)
742*4882a593Smuzhiyun {
743*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
744*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
745*4882a593Smuzhiyun 	int ret = -EIO;
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 	if (nc->base.dmac)
750*4882a593Smuzhiyun 		ret = atmel_nand_dma_transfer(&nc->base, buf, nc->sram.dma,
751*4882a593Smuzhiyun 					      mtd->writesize, DMA_FROM_DEVICE);
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun 	/* Falling back to CPU copy. */
754*4882a593Smuzhiyun 	if (ret)
755*4882a593Smuzhiyun 		memcpy_fromio(buf, nc->sram.virt, mtd->writesize);
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	if (oob_required)
758*4882a593Smuzhiyun 		memcpy_fromio(chip->oob_poi, nc->sram.virt + mtd->writesize,
759*4882a593Smuzhiyun 			      mtd->oobsize);
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column)
763*4882a593Smuzhiyun {
764*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
765*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	if (column >= 0) {
770*4882a593Smuzhiyun 		nc->op.addrs[nc->op.naddrs++] = column;
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 		/*
773*4882a593Smuzhiyun 		 * 2 address cycles for the column offset on large page NANDs.
774*4882a593Smuzhiyun 		 */
775*4882a593Smuzhiyun 		if (mtd->writesize > 512)
776*4882a593Smuzhiyun 			nc->op.addrs[nc->op.naddrs++] = column >> 8;
777*4882a593Smuzhiyun 	}
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	if (page >= 0) {
780*4882a593Smuzhiyun 		nc->op.addrs[nc->op.naddrs++] = page;
781*4882a593Smuzhiyun 		nc->op.addrs[nc->op.naddrs++] = page >> 8;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 		if (chip->options & NAND_ROW_ADDR_3)
784*4882a593Smuzhiyun 			nc->op.addrs[nc->op.naddrs++] = page >> 16;
785*4882a593Smuzhiyun 	}
786*4882a593Smuzhiyun }
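/*
 * For instance (illustrative values), addressing column 0 of page 0x12345
 * on a large-page chip with NAND_ROW_ADDR_3 set fills op.addrs[] with
 * { 0x00, 0x00, 0x45, 0x23, 0x01 } and leaves op.naddrs = 5.
 */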
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun static int atmel_nand_pmecc_enable(struct nand_chip *chip, int op, bool raw)
789*4882a593Smuzhiyun {
790*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
791*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
792*4882a593Smuzhiyun 	int ret;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 	nc = to_nand_controller(chip->controller);
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	if (raw)
797*4882a593Smuzhiyun 		return 0;
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	ret = atmel_pmecc_enable(nand->pmecc, op);
800*4882a593Smuzhiyun 	if (ret)
801*4882a593Smuzhiyun 		dev_err(nc->dev,
802*4882a593Smuzhiyun 			"Failed to enable ECC engine (err = %d)\n", ret);
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	return ret;
805*4882a593Smuzhiyun }
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun static void atmel_nand_pmecc_disable(struct nand_chip *chip, bool raw)
808*4882a593Smuzhiyun {
809*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun 	if (!raw)
812*4882a593Smuzhiyun 		atmel_pmecc_disable(nand->pmecc);
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun 
815*4882a593Smuzhiyun static int atmel_nand_pmecc_generate_eccbytes(struct nand_chip *chip, bool raw)
816*4882a593Smuzhiyun {
817*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
818*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
819*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
820*4882a593Smuzhiyun 	struct mtd_oob_region oobregion;
821*4882a593Smuzhiyun 	void *eccbuf;
822*4882a593Smuzhiyun 	int ret, i;
823*4882a593Smuzhiyun 
824*4882a593Smuzhiyun 	nc = to_nand_controller(chip->controller);
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun 	if (raw)
827*4882a593Smuzhiyun 		return 0;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	ret = atmel_pmecc_wait_rdy(nand->pmecc);
830*4882a593Smuzhiyun 	if (ret) {
831*4882a593Smuzhiyun 		dev_err(nc->dev,
832*4882a593Smuzhiyun 			"Failed to transfer NAND page data (err = %d)\n",
833*4882a593Smuzhiyun 			ret);
834*4882a593Smuzhiyun 		return ret;
835*4882a593Smuzhiyun 	}
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	mtd_ooblayout_ecc(mtd, 0, &oobregion);
838*4882a593Smuzhiyun 	eccbuf = chip->oob_poi + oobregion.offset;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.steps; i++) {
841*4882a593Smuzhiyun 		atmel_pmecc_get_generated_eccbytes(nand->pmecc, i,
842*4882a593Smuzhiyun 						   eccbuf);
843*4882a593Smuzhiyun 		eccbuf += chip->ecc.bytes;
844*4882a593Smuzhiyun 	}
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun 	return 0;
847*4882a593Smuzhiyun }
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun static int atmel_nand_pmecc_correct_data(struct nand_chip *chip, void *buf,
850*4882a593Smuzhiyun 					 bool raw)
851*4882a593Smuzhiyun {
852*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
853*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
854*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
855*4882a593Smuzhiyun 	struct mtd_oob_region oobregion;
856*4882a593Smuzhiyun 	int ret, i, max_bitflips = 0;
857*4882a593Smuzhiyun 	void *databuf, *eccbuf;
858*4882a593Smuzhiyun 
859*4882a593Smuzhiyun 	nc = to_nand_controller(chip->controller);
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	if (raw)
862*4882a593Smuzhiyun 		return 0;
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	ret = atmel_pmecc_wait_rdy(nand->pmecc);
865*4882a593Smuzhiyun 	if (ret) {
866*4882a593Smuzhiyun 		dev_err(nc->dev,
867*4882a593Smuzhiyun 			"Failed to read NAND page data (err = %d)\n",
868*4882a593Smuzhiyun 			ret);
869*4882a593Smuzhiyun 		return ret;
870*4882a593Smuzhiyun 	}
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	mtd_ooblayout_ecc(mtd, 0, &oobregion);
873*4882a593Smuzhiyun 	eccbuf = chip->oob_poi + oobregion.offset;
874*4882a593Smuzhiyun 	databuf = buf;
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	for (i = 0; i < chip->ecc.steps; i++) {
877*4882a593Smuzhiyun 		ret = atmel_pmecc_correct_sector(nand->pmecc, i, databuf,
878*4882a593Smuzhiyun 						 eccbuf);
879*4882a593Smuzhiyun 		if (ret < 0 && !atmel_pmecc_correct_erased_chunks(nand->pmecc))
880*4882a593Smuzhiyun 			ret = nand_check_erased_ecc_chunk(databuf,
881*4882a593Smuzhiyun 							  chip->ecc.size,
882*4882a593Smuzhiyun 							  eccbuf,
883*4882a593Smuzhiyun 							  chip->ecc.bytes,
884*4882a593Smuzhiyun 							  NULL, 0,
885*4882a593Smuzhiyun 							  chip->ecc.strength);
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 		if (ret >= 0) {
888*4882a593Smuzhiyun 			mtd->ecc_stats.corrected += ret;
889*4882a593Smuzhiyun 			max_bitflips = max(ret, max_bitflips);
890*4882a593Smuzhiyun 		} else {
891*4882a593Smuzhiyun 			mtd->ecc_stats.failed++;
892*4882a593Smuzhiyun 		}
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 		databuf += chip->ecc.size;
895*4882a593Smuzhiyun 		eccbuf += chip->ecc.bytes;
896*4882a593Smuzhiyun 	}
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	return max_bitflips;
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun static int atmel_nand_pmecc_write_pg(struct nand_chip *chip, const u8 *buf,
902*4882a593Smuzhiyun 				     bool oob_required, int page, bool raw)
903*4882a593Smuzhiyun {
904*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
905*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
906*4882a593Smuzhiyun 	int ret;
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	nand_prog_page_begin_op(chip, page, 0, NULL, 0);
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
911*4882a593Smuzhiyun 	if (ret)
912*4882a593Smuzhiyun 		return ret;
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 	nand_write_data_op(chip, buf, mtd->writesize, false);
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
917*4882a593Smuzhiyun 	if (ret) {
918*4882a593Smuzhiyun 		atmel_pmecc_disable(nand->pmecc);
919*4882a593Smuzhiyun 		return ret;
920*4882a593Smuzhiyun 	}
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	atmel_nand_pmecc_disable(chip, raw);
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 	nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
927*4882a593Smuzhiyun }
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const u8 *buf,
930*4882a593Smuzhiyun 				       int oob_required, int page)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, false);
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun static int atmel_nand_pmecc_write_page_raw(struct nand_chip *chip,
936*4882a593Smuzhiyun 					   const u8 *buf, int oob_required,
937*4882a593Smuzhiyun 					   int page)
938*4882a593Smuzhiyun {
939*4882a593Smuzhiyun 	return atmel_nand_pmecc_write_pg(chip, buf, oob_required, page, true);
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun static int atmel_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
943*4882a593Smuzhiyun 				    bool oob_required, int page, bool raw)
944*4882a593Smuzhiyun {
945*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
946*4882a593Smuzhiyun 	int ret;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	nand_read_page_op(chip, page, 0, NULL, 0);
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
951*4882a593Smuzhiyun 	if (ret)
952*4882a593Smuzhiyun 		return ret;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	ret = nand_read_data_op(chip, buf, mtd->writesize, false, false);
955*4882a593Smuzhiyun 	if (ret)
956*4882a593Smuzhiyun 		goto out_disable;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false, false);
959*4882a593Smuzhiyun 	if (ret)
960*4882a593Smuzhiyun 		goto out_disable;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
963*4882a593Smuzhiyun 
964*4882a593Smuzhiyun out_disable:
965*4882a593Smuzhiyun 	atmel_nand_pmecc_disable(chip, raw);
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	return ret;
968*4882a593Smuzhiyun }
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun static int atmel_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
971*4882a593Smuzhiyun 				      int oob_required, int page)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, false);
974*4882a593Smuzhiyun }
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun static int atmel_nand_pmecc_read_page_raw(struct nand_chip *chip, u8 *buf,
977*4882a593Smuzhiyun 					  int oob_required, int page)
978*4882a593Smuzhiyun {
979*4882a593Smuzhiyun 	return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page, true);
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip,
983*4882a593Smuzhiyun 					  const u8 *buf, bool oob_required,
984*4882a593Smuzhiyun 					  int page, bool raw)
985*4882a593Smuzhiyun {
986*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
987*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
988*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
989*4882a593Smuzhiyun 	int ret;
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
992*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	atmel_nfc_copy_to_sram(chip, buf, false);
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	nc->op.cmds[0] = NAND_CMD_SEQIN;
997*4882a593Smuzhiyun 	nc->op.ncmds = 1;
998*4882a593Smuzhiyun 	atmel_nfc_set_op_addr(chip, page, 0x0);
999*4882a593Smuzhiyun 	nc->op.cs = nand->activecs->id;
1000*4882a593Smuzhiyun 	nc->op.data = ATMEL_NFC_WRITE_DATA;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_WRITE, raw);
1003*4882a593Smuzhiyun 	if (ret)
1004*4882a593Smuzhiyun 		return ret;
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	ret = atmel_nfc_exec_op(nc, false);
1007*4882a593Smuzhiyun 	if (ret) {
1008*4882a593Smuzhiyun 		atmel_nand_pmecc_disable(chip, raw);
1009*4882a593Smuzhiyun 		dev_err(nc->base.dev,
1010*4882a593Smuzhiyun 			"Failed to transfer NAND page data (err = %d)\n",
1011*4882a593Smuzhiyun 			ret);
1012*4882a593Smuzhiyun 		return ret;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_generate_eccbytes(chip, raw);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	atmel_nand_pmecc_disable(chip, raw);
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	if (ret)
1020*4882a593Smuzhiyun 		return ret;
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 	nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
1023*4882a593Smuzhiyun 
1024*4882a593Smuzhiyun 	return nand_prog_page_end_op(chip);
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_write_page(struct nand_chip *chip,
1028*4882a593Smuzhiyun 					    const u8 *buf, int oob_required,
1029*4882a593Smuzhiyun 					    int page)
1030*4882a593Smuzhiyun {
1031*4882a593Smuzhiyun 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1032*4882a593Smuzhiyun 					      false);
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_write_page_raw(struct nand_chip *chip,
1036*4882a593Smuzhiyun 						const u8 *buf,
1037*4882a593Smuzhiyun 						int oob_required, int page)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun 	return atmel_hsmc_nand_pmecc_write_pg(chip, buf, oob_required, page,
1040*4882a593Smuzhiyun 					      true);
1041*4882a593Smuzhiyun }
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_read_pg(struct nand_chip *chip, u8 *buf,
1044*4882a593Smuzhiyun 					 bool oob_required, int page,
1045*4882a593Smuzhiyun 					 bool raw)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1048*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
1049*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
1050*4882a593Smuzhiyun 	int ret;
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	atmel_hsmc_nand_select_target(nand, chip->cur_cs);
1053*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(chip->controller);
1054*4882a593Smuzhiyun 
1055*4882a593Smuzhiyun 	/*
1056*4882a593Smuzhiyun 	 * Optimized read page accessors only work when the NAND R/B pin is
1057*4882a593Smuzhiyun 	 * connected to a native SoC R/B pin. If that's not the case, fallback
1058*4882a593Smuzhiyun 	 * to the non-optimized one.
1059*4882a593Smuzhiyun 	 */
1060*4882a593Smuzhiyun 	if (nand->activecs->rb.type != ATMEL_NAND_NATIVE_RB)
1061*4882a593Smuzhiyun 		return atmel_nand_pmecc_read_pg(chip, buf, oob_required, page,
1062*4882a593Smuzhiyun 						raw);
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READ0;
1065*4882a593Smuzhiyun 
1066*4882a593Smuzhiyun 	if (mtd->writesize > 512)
1067*4882a593Smuzhiyun 		nc->op.cmds[nc->op.ncmds++] = NAND_CMD_READSTART;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	atmel_nfc_set_op_addr(chip, page, 0x0);
1070*4882a593Smuzhiyun 	nc->op.cs = nand->activecs->id;
1071*4882a593Smuzhiyun 	nc->op.data = ATMEL_NFC_READ_DATA;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_enable(chip, NAND_ECC_READ, raw);
1074*4882a593Smuzhiyun 	if (ret)
1075*4882a593Smuzhiyun 		return ret;
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 	ret = atmel_nfc_exec_op(nc, false);
1078*4882a593Smuzhiyun 	if (ret) {
1079*4882a593Smuzhiyun 		atmel_nand_pmecc_disable(chip, raw);
1080*4882a593Smuzhiyun 		dev_err(nc->base.dev,
1081*4882a593Smuzhiyun 			"Failed to load NAND page data (err = %d)\n",
1082*4882a593Smuzhiyun 			ret);
1083*4882a593Smuzhiyun 		return ret;
1084*4882a593Smuzhiyun 	}
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	atmel_nfc_copy_from_sram(chip, buf, true);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	ret = atmel_nand_pmecc_correct_data(chip, buf, raw);
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	atmel_nand_pmecc_disable(chip, raw);
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	return ret;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_read_page(struct nand_chip *chip, u8 *buf,
1096*4882a593Smuzhiyun 					   int oob_required, int page)
1097*4882a593Smuzhiyun {
1098*4882a593Smuzhiyun 	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1099*4882a593Smuzhiyun 					     false);
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun static int atmel_hsmc_nand_pmecc_read_page_raw(struct nand_chip *chip,
1103*4882a593Smuzhiyun 					       u8 *buf, int oob_required,
1104*4882a593Smuzhiyun 					       int page)
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun 	return atmel_hsmc_nand_pmecc_read_pg(chip, buf, oob_required, page,
1107*4882a593Smuzhiyun 					     true);
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun static int atmel_nand_pmecc_init(struct nand_chip *chip)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	const struct nand_ecc_props *requirements =
1113*4882a593Smuzhiyun 		nanddev_get_ecc_requirements(&chip->base);
1114*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1115*4882a593Smuzhiyun 	struct nand_device *nanddev = mtd_to_nanddev(mtd);
1116*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
1117*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1118*4882a593Smuzhiyun 	struct atmel_pmecc_user_req req;
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun 	nc = to_nand_controller(chip->controller);
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	if (!nc->pmecc) {
1123*4882a593Smuzhiyun 		dev_err(nc->dev, "HW ECC not supported\n");
1124*4882a593Smuzhiyun 		return -ENOTSUPP;
1125*4882a593Smuzhiyun 	}
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	if (nc->caps->legacy_of_bindings) {
1128*4882a593Smuzhiyun 		u32 val;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 		if (!of_property_read_u32(nc->dev->of_node, "atmel,pmecc-cap",
1131*4882a593Smuzhiyun 					  &val))
1132*4882a593Smuzhiyun 			chip->ecc.strength = val;
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 		if (!of_property_read_u32(nc->dev->of_node,
1135*4882a593Smuzhiyun 					  "atmel,pmecc-sector-size",
1136*4882a593Smuzhiyun 					  &val))
1137*4882a593Smuzhiyun 			chip->ecc.size = val;
1138*4882a593Smuzhiyun 	}
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
1141*4882a593Smuzhiyun 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1142*4882a593Smuzhiyun 	else if (chip->ecc.strength)
1143*4882a593Smuzhiyun 		req.ecc.strength = chip->ecc.strength;
1144*4882a593Smuzhiyun 	else if (requirements->strength)
1145*4882a593Smuzhiyun 		req.ecc.strength = requirements->strength;
1146*4882a593Smuzhiyun 	else
1147*4882a593Smuzhiyun 		req.ecc.strength = ATMEL_PMECC_MAXIMIZE_ECC_STRENGTH;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (chip->ecc.size)
1150*4882a593Smuzhiyun 		req.ecc.sectorsize = chip->ecc.size;
1151*4882a593Smuzhiyun 	else if (requirements->step_size)
1152*4882a593Smuzhiyun 		req.ecc.sectorsize = requirements->step_size;
1153*4882a593Smuzhiyun 	else
1154*4882a593Smuzhiyun 		req.ecc.sectorsize = ATMEL_PMECC_SECTOR_SIZE_AUTO;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	req.pagesize = mtd->writesize;
1157*4882a593Smuzhiyun 	req.oobsize = mtd->oobsize;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 	if (mtd->writesize <= 512) {
1160*4882a593Smuzhiyun 		req.ecc.bytes = 4;
1161*4882a593Smuzhiyun 		req.ecc.ooboffset = 0;
1162*4882a593Smuzhiyun 	} else {
1163*4882a593Smuzhiyun 		req.ecc.bytes = mtd->oobsize - 2;
1164*4882a593Smuzhiyun 		req.ecc.ooboffset = ATMEL_PMECC_OOBOFFSET_AUTO;
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 
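	/*
	 * Illustrative example (editor's addition, values assumed, not taken
	 * from this driver or any datasheet): for a hypothetical chip with a
	 * 2048-byte page, 64 bytes of OOB, advertised requirements of 4 bits
	 * per 512-byte step, and no DT override or "maximize strength"
	 * request, the code above would build:
	 *
	 *   req.ecc.strength   = 4    (from requirements->strength)
	 *   req.ecc.sectorsize = 512  (from requirements->step_size)
	 *   req.pagesize       = 2048, req.oobsize = 64
	 *   req.ecc.bytes      = 62   (oobsize - 2), ooboffset = AUTO
	 */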
1167*4882a593Smuzhiyun 	nand->pmecc = atmel_pmecc_create_user(nc->pmecc, &req);
1168*4882a593Smuzhiyun 	if (IS_ERR(nand->pmecc))
1169*4882a593Smuzhiyun 		return PTR_ERR(nand->pmecc);
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	chip->ecc.algo = NAND_ECC_ALGO_BCH;
1172*4882a593Smuzhiyun 	chip->ecc.size = req.ecc.sectorsize;
1173*4882a593Smuzhiyun 	chip->ecc.bytes = req.ecc.bytes / req.ecc.nsectors;
1174*4882a593Smuzhiyun 	chip->ecc.strength = req.ecc.strength;
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	chip->options |= NAND_NO_SUBPAGE_WRITE;
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	return 0;
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun static int atmel_nand_ecc_init(struct nand_chip *chip)
1184*4882a593Smuzhiyun {
1185*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1186*4882a593Smuzhiyun 	int ret;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	nc = to_nand_controller(chip->controller);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	switch (chip->ecc.engine_type) {
1191*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_NONE:
1192*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_SOFT:
1193*4882a593Smuzhiyun 		/*
1194*4882a593Smuzhiyun 		 * Nothing to do, the core will initialize everything for us.
1195*4882a593Smuzhiyun 		 */
1196*4882a593Smuzhiyun 		break;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	case NAND_ECC_ENGINE_TYPE_ON_HOST:
1199*4882a593Smuzhiyun 		ret = atmel_nand_pmecc_init(chip);
1200*4882a593Smuzhiyun 		if (ret)
1201*4882a593Smuzhiyun 			return ret;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 		chip->ecc.read_page = atmel_nand_pmecc_read_page;
1204*4882a593Smuzhiyun 		chip->ecc.write_page = atmel_nand_pmecc_write_page;
1205*4882a593Smuzhiyun 		chip->ecc.read_page_raw = atmel_nand_pmecc_read_page_raw;
1206*4882a593Smuzhiyun 		chip->ecc.write_page_raw = atmel_nand_pmecc_write_page_raw;
1207*4882a593Smuzhiyun 		break;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	default:
1210*4882a593Smuzhiyun 		/* Other modes are not supported. */
1211*4882a593Smuzhiyun 		dev_err(nc->dev, "Unsupported ECC mode: %d\n",
1212*4882a593Smuzhiyun 			chip->ecc.engine_type);
1213*4882a593Smuzhiyun 		return -ENOTSUPP;
1214*4882a593Smuzhiyun 	}
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	return 0;
1217*4882a593Smuzhiyun }
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun 	int ret;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	ret = atmel_nand_ecc_init(chip);
1224*4882a593Smuzhiyun 	if (ret)
1225*4882a593Smuzhiyun 		return ret;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	if (chip->ecc.engine_type != NAND_ECC_ENGINE_TYPE_ON_HOST)
1228*4882a593Smuzhiyun 		return 0;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	/* Adjust the ECC operations for the HSMC IP. */
1231*4882a593Smuzhiyun 	chip->ecc.read_page = atmel_hsmc_nand_pmecc_read_page;
1232*4882a593Smuzhiyun 	chip->ecc.write_page = atmel_hsmc_nand_pmecc_write_page;
1233*4882a593Smuzhiyun 	chip->ecc.read_page_raw = atmel_hsmc_nand_pmecc_read_page_raw;
1234*4882a593Smuzhiyun 	chip->ecc.write_page_raw = atmel_hsmc_nand_pmecc_write_page_raw;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	return 0;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand,
1240*4882a593Smuzhiyun 					const struct nand_interface_config *conf,
1241*4882a593Smuzhiyun 					struct atmel_smc_cs_conf *smcconf)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	u32 ncycles, totalcycles, timeps, mckperiodps;
1244*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1245*4882a593Smuzhiyun 	int ret;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	/* DDR interface not supported. */
1250*4882a593Smuzhiyun 	if (!nand_interface_is_sdr(conf))
1251*4882a593Smuzhiyun 		return -ENOTSUPP;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	/*
1254*4882a593Smuzhiyun 	 * tRC < 30ns implies EDO mode. This controller does not support this
1255*4882a593Smuzhiyun 	 * mode.
1256*4882a593Smuzhiyun 	 */
1257*4882a593Smuzhiyun 	if (conf->timings.sdr.tRC_min < 30000)
1258*4882a593Smuzhiyun 		return -ENOTSUPP;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	atmel_smc_cs_conf_init(smcconf);
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck);
1263*4882a593Smuzhiyun 	mckperiodps *= 1000;
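	/*
	 * Illustrative example (editor's addition, clock rate assumed): with a
	 * hypothetical MCK of 133 MHz, NSEC_PER_SEC / rate truncates to 7 ns,
	 * so mckperiodps becomes 7000 ps instead of the exact ~7519 ps.
	 * Working with the slightly smaller period only makes the
	 * DIV_ROUND_UP() calls below yield more cycles, so the programmed
	 * timings stay on the safe (longer) side.
	 */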
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	/*
1266*4882a593Smuzhiyun 	 * Set write pulse timing. This one is easy to extract:
1267*4882a593Smuzhiyun 	 *
1268*4882a593Smuzhiyun 	 * NWE_PULSE = tWP
1269*4882a593Smuzhiyun 	 */
1270*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps);
1271*4882a593Smuzhiyun 	totalcycles = ncycles;
1272*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT,
1273*4882a593Smuzhiyun 					  ncycles);
1274*4882a593Smuzhiyun 	if (ret)
1275*4882a593Smuzhiyun 		return ret;
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	/*
1278*4882a593Smuzhiyun 	 * The write setup timing depends on the operation done on the NAND.
1279*4882a593Smuzhiyun 	 * All operations go through the same data bus, but the operation
1280*4882a593Smuzhiyun 	 * type depends on the address we are writing to (ALE/CLE address
1281*4882a593Smuzhiyun 	 * lines).
1282*4882a593Smuzhiyun 	 * Since we have no way to differentiate the different operations at
1283*4882a593Smuzhiyun 	 * the SMC level, we must consider the worst case (the biggest setup
1284*4882a593Smuzhiyun 	 * time among all operation types):
1285*4882a593Smuzhiyun 	 *
1286*4882a593Smuzhiyun 	 * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE
1287*4882a593Smuzhiyun 	 */
1288*4882a593Smuzhiyun 	timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min,
1289*4882a593Smuzhiyun 		      conf->timings.sdr.tALS_min);
1290*4882a593Smuzhiyun 	timeps = max(timeps, conf->timings.sdr.tDS_min);
1291*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1292*4882a593Smuzhiyun 	ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0;
1293*4882a593Smuzhiyun 	totalcycles += ncycles;
1294*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT,
1295*4882a593Smuzhiyun 					  ncycles);
1296*4882a593Smuzhiyun 	if (ret)
1297*4882a593Smuzhiyun 		return ret;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	/*
1300*4882a593Smuzhiyun 	 * As with the write setup timing, the write hold timing depends on the
1301*4882a593Smuzhiyun 	 * operation done on the NAND:
1302*4882a593Smuzhiyun 	 *
1303*4882a593Smuzhiyun 	 * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH)
1304*4882a593Smuzhiyun 	 */
1305*4882a593Smuzhiyun 	timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min,
1306*4882a593Smuzhiyun 		      conf->timings.sdr.tALH_min);
1307*4882a593Smuzhiyun 	timeps = max3(timeps, conf->timings.sdr.tDH_min,
1308*4882a593Smuzhiyun 		      conf->timings.sdr.tWH_min);
1309*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1310*4882a593Smuzhiyun 	totalcycles += ncycles;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	/*
1313*4882a593Smuzhiyun 	 * The write cycle timing is directly matching tWC, but is also
1314*4882a593Smuzhiyun 	 * dependent on the setup, pulse and hold timings we calculated
1315*4882a593Smuzhiyun 	 * earlier, which gives:
1316*4882a593Smuzhiyun 	 *
1317*4882a593Smuzhiyun 	 * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD)
1318*4882a593Smuzhiyun 	 */
1319*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps);
1320*4882a593Smuzhiyun 	ncycles = max(totalcycles, ncycles);
1321*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT,
1322*4882a593Smuzhiyun 					  ncycles);
1323*4882a593Smuzhiyun 	if (ret)
1324*4882a593Smuzhiyun 		return ret;
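	/*
	 * Illustrative example (editor's addition, timing values assumed, not
	 * taken from any datasheet): with the ~7000 ps cycle assumed above,
	 * tWP = 50 ns, max(tCLS, tCS, tALS, tDS) = 70 ns,
	 * max(tCLH, tCH, tALH, tDH, tWH) = 30 ns and tWC = 100 ns:
	 *
	 *   NWE_PULSE = ceil(50000 / 7000)              = 8 cycles
	 *   NWE_SETUP = ceil(70000 / 7000) - 8          = 2 cycles
	 *   NWE_HOLD  = ceil(30000 / 7000)              = 5 cycles
	 *   NWE_CYCLE = max(ceil(100000 / 7000), 8+2+5) = 15 cycles
	 */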
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	/*
1327*4882a593Smuzhiyun 	 * We don't want the CS line to be toggled between each byte/word
1328*4882a593Smuzhiyun 	 * transfer to the NAND. The only way to guarantee that is to have the
1329*4882a593Smuzhiyun 	 * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1330*4882a593Smuzhiyun 	 *
1331*4882a593Smuzhiyun 	 * NCS_WR_PULSE = NWE_CYCLE
1332*4882a593Smuzhiyun 	 */
1333*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT,
1334*4882a593Smuzhiyun 					  ncycles);
1335*4882a593Smuzhiyun 	if (ret)
1336*4882a593Smuzhiyun 		return ret;
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	/*
1339*4882a593Smuzhiyun 	 * As with the write setup timing, the read hold timing depends on the
1340*4882a593Smuzhiyun 	 * operation done on the NAND:
1341*4882a593Smuzhiyun 	 *
1342*4882a593Smuzhiyun 	 * NRD_HOLD = max(tREH, tRHOH)
1343*4882a593Smuzhiyun 	 */
1344*4882a593Smuzhiyun 	timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min);
1345*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(timeps, mckperiodps);
1346*4882a593Smuzhiyun 	totalcycles = ncycles;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	/*
1349*4882a593Smuzhiyun 	 * TDF = tRHZ - NRD_HOLD
1350*4882a593Smuzhiyun 	 */
1351*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps);
1352*4882a593Smuzhiyun 	ncycles -= totalcycles;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	/*
1355*4882a593Smuzhiyun 	 * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and
1356*4882a593Smuzhiyun 	 * we might end up with a config that does not fit in the TDF field.
1357*4882a593Smuzhiyun 	 * Just take the max value in this case and hope that the NAND is more
1358*4882a593Smuzhiyun 	 * tolerant than advertised.
1359*4882a593Smuzhiyun 	 */
1360*4882a593Smuzhiyun 	if (ncycles > ATMEL_SMC_MODE_TDF_MAX)
1361*4882a593Smuzhiyun 		ncycles = ATMEL_SMC_MODE_TDF_MAX;
1362*4882a593Smuzhiyun 	else if (ncycles < ATMEL_SMC_MODE_TDF_MIN)
1363*4882a593Smuzhiyun 		ncycles = ATMEL_SMC_MODE_TDF_MIN;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) |
1366*4882a593Smuzhiyun 			 ATMEL_SMC_MODE_TDFMODE_OPTIMIZED;
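	/*
	 * Illustrative example (editor's addition, values assumed): with
	 * tREH = 30 ns and tRHOH = 0, NRD_HOLD above is ceil(30000 / 7000) =
	 * 5 cycles; with tRHZ = 100 ns, TDF = ceil(100000 / 7000) - 5 = 10
	 * cycles. The clamping above only kicks in when that result falls
	 * outside the [ATMEL_SMC_MODE_TDF_MIN, ATMEL_SMC_MODE_TDF_MAX] range.
	 */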
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	/*
1369*4882a593Smuzhiyun 	 * Read pulse timing directly matches tRP:
1370*4882a593Smuzhiyun 	 *
1371*4882a593Smuzhiyun 	 * NRD_PULSE = tRP
1372*4882a593Smuzhiyun 	 */
1373*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps);
1374*4882a593Smuzhiyun 	totalcycles += ncycles;
1375*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT,
1376*4882a593Smuzhiyun 					  ncycles);
1377*4882a593Smuzhiyun 	if (ret)
1378*4882a593Smuzhiyun 		return ret;
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	/*
1381*4882a593Smuzhiyun 	 * The read cycle timing is directly matching tRC, but is also
1382*4882a593Smuzhiyun 	 * dependent on the setup and hold timings we calculated earlier,
1383*4882a593Smuzhiyun 	 * which gives:
1384*4882a593Smuzhiyun 	 *
1385*4882a593Smuzhiyun 	 * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD)
1386*4882a593Smuzhiyun 	 *
1387*4882a593Smuzhiyun 	 * NRD_SETUP is always 0.
1388*4882a593Smuzhiyun 	 */
1389*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps);
1390*4882a593Smuzhiyun 	ncycles = max(totalcycles, ncycles);
1391*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT,
1392*4882a593Smuzhiyun 					  ncycles);
1393*4882a593Smuzhiyun 	if (ret)
1394*4882a593Smuzhiyun 		return ret;
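	/*
	 * Illustrative example (editor's addition, values assumed): keeping
	 * the numbers used above, tRP = 50 ns gives NRD_PULSE =
	 * ceil(50000 / 7000) = 8 cycles, and with tRC = 100 ns:
	 *
	 *   NRD_CYCLE = max(ceil(100000 / 7000), 8 + 5) = 15 cycles
	 *
	 * NCS_RD_PULSE is then set to the same value right below.
	 */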
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	/*
1397*4882a593Smuzhiyun 	 * We don't want the CS line to be toggled between each byte/word
1398*4882a593Smuzhiyun 	 * transfer from the NAND. The only way to guarantee that is to have
1399*4882a593Smuzhiyun 	 * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means:
1400*4882a593Smuzhiyun 	 *
1401*4882a593Smuzhiyun 	 * NCS_RD_PULSE = NRD_CYCLE
1402*4882a593Smuzhiyun 	 */
1403*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT,
1404*4882a593Smuzhiyun 					  ncycles);
1405*4882a593Smuzhiyun 	if (ret)
1406*4882a593Smuzhiyun 		return ret;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/* Txxx timings are directly matching tXXX ones. */
1409*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps);
1410*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_timing(smcconf,
1411*4882a593Smuzhiyun 					   ATMEL_HSMC_TIMINGS_TCLR_SHIFT,
1412*4882a593Smuzhiyun 					   ncycles);
1413*4882a593Smuzhiyun 	if (ret)
1414*4882a593Smuzhiyun 		return ret;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps);
1417*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_timing(smcconf,
1418*4882a593Smuzhiyun 					   ATMEL_HSMC_TIMINGS_TADL_SHIFT,
1419*4882a593Smuzhiyun 					   ncycles);
1420*4882a593Smuzhiyun 	/*
1421*4882a593Smuzhiyun 	 * Version 4 of the ONFI spec mandates that tADL be at least 400
1422*4882a593Smuzhiyun 	 * nanoseconds, but, depending on the master clock rate, 400 ns may not
1423*4882a593Smuzhiyun 	 * fit in the tADL field of the SMC reg. We need to relax the check and
1424*4882a593Smuzhiyun 	 * accept the -ERANGE return code.
1425*4882a593Smuzhiyun 	 *
1426*4882a593Smuzhiyun 	 * Note that previous versions of the ONFI spec had a lower tADL_min
1427*4882a593Smuzhiyun 	 * (100 or 200 ns). It's not clear why this timing constraint got
1428*4882a593Smuzhiyun 	 * increased but it seems most NANDs are fine with values lower than
1429*4882a593Smuzhiyun 	 * 400ns, so we should be safe.
1430*4882a593Smuzhiyun 	 */
1431*4882a593Smuzhiyun 	if (ret && ret != -ERANGE)
1432*4882a593Smuzhiyun 		return ret;
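	/*
	 * Illustrative example (editor's addition): with the hypothetical
	 * ~7000 ps cycle assumed earlier, the 400 ns tADL mandated by ONFI 4.0
	 * needs ceil(400000 / 7000) = 58 cycles, which may exceed what the
	 * TADL field can encode; that is exactly the -ERANGE case tolerated
	 * here.
	 */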
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps);
1435*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_timing(smcconf,
1436*4882a593Smuzhiyun 					   ATMEL_HSMC_TIMINGS_TAR_SHIFT,
1437*4882a593Smuzhiyun 					   ncycles);
1438*4882a593Smuzhiyun 	if (ret)
1439*4882a593Smuzhiyun 		return ret;
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps);
1442*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_timing(smcconf,
1443*4882a593Smuzhiyun 					   ATMEL_HSMC_TIMINGS_TRR_SHIFT,
1444*4882a593Smuzhiyun 					   ncycles);
1445*4882a593Smuzhiyun 	if (ret)
1446*4882a593Smuzhiyun 		return ret;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps);
1449*4882a593Smuzhiyun 	ret = atmel_smc_cs_conf_set_timing(smcconf,
1450*4882a593Smuzhiyun 					   ATMEL_HSMC_TIMINGS_TWB_SHIFT,
1451*4882a593Smuzhiyun 					   ncycles);
1452*4882a593Smuzhiyun 	if (ret)
1453*4882a593Smuzhiyun 		return ret;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	/* Attach the CS line to the NFC logic. */
1456*4882a593Smuzhiyun 	smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL;
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	/* Set the appropriate data bus width. */
1459*4882a593Smuzhiyun 	if (nand->base.options & NAND_BUSWIDTH_16)
1460*4882a593Smuzhiyun 		smcconf->mode |= ATMEL_SMC_MODE_DBW_16;
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun 	/* Operate in NRD/NWE READ/WRITEMODE. */
1463*4882a593Smuzhiyun 	smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD |
1464*4882a593Smuzhiyun 			 ATMEL_SMC_MODE_WRITEMODE_NWE;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	return 0;
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun static int atmel_smc_nand_setup_interface(struct atmel_nand *nand,
1470*4882a593Smuzhiyun 					int csline,
1471*4882a593Smuzhiyun 					const struct nand_interface_config *conf)
1472*4882a593Smuzhiyun {
1473*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1474*4882a593Smuzhiyun 	struct atmel_smc_cs_conf smcconf;
1475*4882a593Smuzhiyun 	struct atmel_nand_cs *cs;
1476*4882a593Smuzhiyun 	int ret;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1481*4882a593Smuzhiyun 	if (ret)
1482*4882a593Smuzhiyun 		return ret;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1485*4882a593Smuzhiyun 		return 0;
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	cs = &nand->cs[csline];
1488*4882a593Smuzhiyun 	cs->smcconf = smcconf;
1489*4882a593Smuzhiyun 	atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf);
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	return 0;
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand,
1495*4882a593Smuzhiyun 					int csline,
1496*4882a593Smuzhiyun 					const struct nand_interface_config *conf)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
1499*4882a593Smuzhiyun 	struct atmel_smc_cs_conf smcconf;
1500*4882a593Smuzhiyun 	struct atmel_nand_cs *cs;
1501*4882a593Smuzhiyun 	int ret;
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun 	nc = to_hsmc_nand_controller(nand->base.controller);
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf);
1506*4882a593Smuzhiyun 	if (ret)
1507*4882a593Smuzhiyun 		return ret;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	if (csline == NAND_DATA_IFACE_CHECK_ONLY)
1510*4882a593Smuzhiyun 		return 0;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	cs = &nand->cs[csline];
1513*4882a593Smuzhiyun 	cs->smcconf = smcconf;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	if (cs->rb.type == ATMEL_NAND_NATIVE_RB)
1516*4882a593Smuzhiyun 		cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id);
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun 	atmel_hsmc_cs_conf_apply(nc->base.smc, nc->hsmc_layout, cs->id,
1519*4882a593Smuzhiyun 				 &cs->smcconf);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	return 0;
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun static int atmel_nand_setup_interface(struct nand_chip *chip, int csline,
1525*4882a593Smuzhiyun 				      const struct nand_interface_config *conf)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
1528*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	if (csline >= nand->numcs ||
1533*4882a593Smuzhiyun 	    (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY))
1534*4882a593Smuzhiyun 		return -EINVAL;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	return nc->caps->ops->setup_interface(nand, csline, conf);
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun static int atmel_nand_exec_op(struct nand_chip *chip,
1540*4882a593Smuzhiyun 			      const struct nand_operation *op,
1541*4882a593Smuzhiyun 			      bool check_only)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
1544*4882a593Smuzhiyun 	struct atmel_nand_controller *nc;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	nc = to_nand_controller(nand->base.controller);
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	return nc->caps->ops->exec_op(nand, op, check_only);
1549*4882a593Smuzhiyun }
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun static void atmel_nand_init(struct atmel_nand_controller *nc,
1552*4882a593Smuzhiyun 			    struct atmel_nand *nand)
1553*4882a593Smuzhiyun {
1554*4882a593Smuzhiyun 	struct nand_chip *chip = &nand->base;
1555*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	mtd->dev.parent = nc->dev;
1558*4882a593Smuzhiyun 	nand->base.controller = &nc->base;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	if (!nc->mck || !nc->caps->ops->setup_interface)
1561*4882a593Smuzhiyun 		chip->options |= NAND_KEEP_TIMINGS;
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	/*
1564*4882a593Smuzhiyun 	 * Use a bounce buffer when the buffer passed by the MTD user is not
1565*4882a593Smuzhiyun 	 * suitable for DMA.
1566*4882a593Smuzhiyun 	 */
1567*4882a593Smuzhiyun 	if (nc->dmac)
1568*4882a593Smuzhiyun 		chip->options |= NAND_USES_DMA;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	/* Default to HW ECC if pmecc is available. */
1571*4882a593Smuzhiyun 	if (nc->pmecc)
1572*4882a593Smuzhiyun 		chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun static void atmel_smc_nand_init(struct atmel_nand_controller *nc,
1576*4882a593Smuzhiyun 				struct atmel_nand *nand)
1577*4882a593Smuzhiyun {
1578*4882a593Smuzhiyun 	struct nand_chip *chip = &nand->base;
1579*4882a593Smuzhiyun 	struct atmel_smc_nand_controller *smc_nc;
1580*4882a593Smuzhiyun 	int i;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	atmel_nand_init(nc, nand);
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	smc_nc = to_smc_nand_controller(chip->controller);
1585*4882a593Smuzhiyun 	if (!smc_nc->ebi_csa_regmap)
1586*4882a593Smuzhiyun 		return;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	/* Attach the CS to the NAND Flash logic. */
1589*4882a593Smuzhiyun 	for (i = 0; i < nand->numcs; i++)
1590*4882a593Smuzhiyun 		regmap_update_bits(smc_nc->ebi_csa_regmap,
1591*4882a593Smuzhiyun 				   smc_nc->ebi_csa->offs,
1592*4882a593Smuzhiyun 				   BIT(nand->cs[i].id), BIT(nand->cs[i].id));
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	if (smc_nc->ebi_csa->nfd0_on_d16)
1595*4882a593Smuzhiyun 		regmap_update_bits(smc_nc->ebi_csa_regmap,
1596*4882a593Smuzhiyun 				   smc_nc->ebi_csa->offs,
1597*4882a593Smuzhiyun 				   smc_nc->ebi_csa->nfd0_on_d16,
1598*4882a593Smuzhiyun 				   smc_nc->ebi_csa->nfd0_on_d16);
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun static int atmel_nand_controller_remove_nand(struct atmel_nand *nand)
1602*4882a593Smuzhiyun {
1603*4882a593Smuzhiyun 	struct nand_chip *chip = &nand->base;
1604*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1605*4882a593Smuzhiyun 	int ret;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	ret = mtd_device_unregister(mtd);
1608*4882a593Smuzhiyun 	if (ret)
1609*4882a593Smuzhiyun 		return ret;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 	nand_cleanup(chip);
1612*4882a593Smuzhiyun 	list_del(&nand->node);
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	return 0;
1615*4882a593Smuzhiyun }
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun static struct atmel_nand *atmel_nand_create(struct atmel_nand_controller *nc,
1618*4882a593Smuzhiyun 					    struct device_node *np,
1619*4882a593Smuzhiyun 					    int reg_cells)
1620*4882a593Smuzhiyun {
1621*4882a593Smuzhiyun 	struct atmel_nand *nand;
1622*4882a593Smuzhiyun 	struct gpio_desc *gpio;
1623*4882a593Smuzhiyun 	int numcs, ret, i;
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	numcs = of_property_count_elems_of_size(np, "reg",
1626*4882a593Smuzhiyun 						reg_cells * sizeof(u32));
1627*4882a593Smuzhiyun 	if (numcs < 1) {
1628*4882a593Smuzhiyun 		dev_err(nc->dev, "Missing or invalid reg property\n");
1629*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1630*4882a593Smuzhiyun 	}
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun 	nand = devm_kzalloc(nc->dev, struct_size(nand, cs, numcs), GFP_KERNEL);
1633*4882a593Smuzhiyun 	if (!nand) {
1634*4882a593Smuzhiyun 		dev_err(nc->dev, "Failed to allocate NAND object\n");
1635*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1636*4882a593Smuzhiyun 	}
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	nand->numcs = numcs;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	gpio = devm_fwnode_gpiod_get(nc->dev, of_fwnode_handle(np),
1641*4882a593Smuzhiyun 				     "det", GPIOD_IN, "nand-det");
1642*4882a593Smuzhiyun 	if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1643*4882a593Smuzhiyun 		dev_err(nc->dev,
1644*4882a593Smuzhiyun 			"Failed to get detect gpio (err = %ld)\n",
1645*4882a593Smuzhiyun 			PTR_ERR(gpio));
1646*4882a593Smuzhiyun 		return ERR_CAST(gpio);
1647*4882a593Smuzhiyun 	}
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	if (!IS_ERR(gpio))
1650*4882a593Smuzhiyun 		nand->cdgpio = gpio;
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	for (i = 0; i < numcs; i++) {
1653*4882a593Smuzhiyun 		struct resource res;
1654*4882a593Smuzhiyun 		u32 val;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 		ret = of_address_to_resource(np, 0, &res);
1657*4882a593Smuzhiyun 		if (ret) {
1658*4882a593Smuzhiyun 			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1659*4882a593Smuzhiyun 				ret);
1660*4882a593Smuzhiyun 			return ERR_PTR(ret);
1661*4882a593Smuzhiyun 		}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 		ret = of_property_read_u32_index(np, "reg", i * reg_cells,
1664*4882a593Smuzhiyun 						 &val);
1665*4882a593Smuzhiyun 		if (ret) {
1666*4882a593Smuzhiyun 			dev_err(nc->dev, "Invalid reg property (err = %d)\n",
1667*4882a593Smuzhiyun 				ret);
1668*4882a593Smuzhiyun 			return ERR_PTR(ret);
1669*4882a593Smuzhiyun 		}
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 		nand->cs[i].id = val;
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 		nand->cs[i].io.dma = res.start;
1674*4882a593Smuzhiyun 		nand->cs[i].io.virt = devm_ioremap_resource(nc->dev, &res);
1675*4882a593Smuzhiyun 		if (IS_ERR(nand->cs[i].io.virt))
1676*4882a593Smuzhiyun 			return ERR_CAST(nand->cs[i].io.virt);
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 		if (!of_property_read_u32(np, "atmel,rb", &val)) {
1679*4882a593Smuzhiyun 			if (val > ATMEL_NFC_MAX_RB_ID)
1680*4882a593Smuzhiyun 				return ERR_PTR(-EINVAL);
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 			nand->cs[i].rb.type = ATMEL_NAND_NATIVE_RB;
1683*4882a593Smuzhiyun 			nand->cs[i].rb.id = val;
1684*4882a593Smuzhiyun 		} else {
1685*4882a593Smuzhiyun 			gpio = devm_fwnode_gpiod_get_index(nc->dev,
1686*4882a593Smuzhiyun 							   of_fwnode_handle(np),
1687*4882a593Smuzhiyun 							   "rb", i, GPIOD_IN,
1688*4882a593Smuzhiyun 							   "nand-rb");
1689*4882a593Smuzhiyun 			if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1690*4882a593Smuzhiyun 				dev_err(nc->dev,
1691*4882a593Smuzhiyun 					"Failed to get R/B gpio (err = %ld)\n",
1692*4882a593Smuzhiyun 					PTR_ERR(gpio));
1693*4882a593Smuzhiyun 				return ERR_CAST(gpio);
1694*4882a593Smuzhiyun 			}
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 			if (!IS_ERR(gpio)) {
1697*4882a593Smuzhiyun 				nand->cs[i].rb.type = ATMEL_NAND_GPIO_RB;
1698*4882a593Smuzhiyun 				nand->cs[i].rb.gpio = gpio;
1699*4882a593Smuzhiyun 			}
1700*4882a593Smuzhiyun 		}
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 		gpio = devm_fwnode_gpiod_get_index(nc->dev,
1703*4882a593Smuzhiyun 						   of_fwnode_handle(np),
1704*4882a593Smuzhiyun 						   "cs", i, GPIOD_OUT_HIGH,
1705*4882a593Smuzhiyun 						   "nand-cs");
1706*4882a593Smuzhiyun 		if (IS_ERR(gpio) && PTR_ERR(gpio) != -ENOENT) {
1707*4882a593Smuzhiyun 			dev_err(nc->dev,
1708*4882a593Smuzhiyun 				"Failed to get CS gpio (err = %ld)\n",
1709*4882a593Smuzhiyun 				PTR_ERR(gpio));
1710*4882a593Smuzhiyun 			return ERR_CAST(gpio);
1711*4882a593Smuzhiyun 		}
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 		if (!IS_ERR(gpio))
1714*4882a593Smuzhiyun 			nand->cs[i].csgpio = gpio;
1715*4882a593Smuzhiyun 	}
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun 	nand_set_flash_node(&nand->base, np);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	return nand;
1720*4882a593Smuzhiyun }
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun static int
1723*4882a593Smuzhiyun atmel_nand_controller_add_nand(struct atmel_nand_controller *nc,
1724*4882a593Smuzhiyun 			       struct atmel_nand *nand)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	struct nand_chip *chip = &nand->base;
1727*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1728*4882a593Smuzhiyun 	int ret;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 	/* No card inserted, skip this NAND. */
1731*4882a593Smuzhiyun 	if (nand->cdgpio && gpiod_get_value(nand->cdgpio)) {
1732*4882a593Smuzhiyun 		dev_info(nc->dev, "No SmartMedia card inserted.\n");
1733*4882a593Smuzhiyun 		return 0;
1734*4882a593Smuzhiyun 	}
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	nc->caps->ops->nand_init(nc, nand);
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	ret = nand_scan(chip, nand->numcs);
1739*4882a593Smuzhiyun 	if (ret) {
1740*4882a593Smuzhiyun 		dev_err(nc->dev, "NAND scan failed: %d\n", ret);
1741*4882a593Smuzhiyun 		return ret;
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	ret = mtd_device_register(mtd, NULL, 0);
1745*4882a593Smuzhiyun 	if (ret) {
1746*4882a593Smuzhiyun 		dev_err(nc->dev, "Failed to register mtd device: %d\n", ret);
1747*4882a593Smuzhiyun 		nand_cleanup(chip);
1748*4882a593Smuzhiyun 		return ret;
1749*4882a593Smuzhiyun 	}
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	list_add_tail(&nand->node, &nc->chips);
1752*4882a593Smuzhiyun 
1753*4882a593Smuzhiyun 	return 0;
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun static int
1757*4882a593Smuzhiyun atmel_nand_controller_remove_nands(struct atmel_nand_controller *nc)
1758*4882a593Smuzhiyun {
1759*4882a593Smuzhiyun 	struct atmel_nand *nand, *tmp;
1760*4882a593Smuzhiyun 	int ret;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	list_for_each_entry_safe(nand, tmp, &nc->chips, node) {
1763*4882a593Smuzhiyun 		ret = atmel_nand_controller_remove_nand(nand);
1764*4882a593Smuzhiyun 		if (ret)
1765*4882a593Smuzhiyun 			return ret;
1766*4882a593Smuzhiyun 	}
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	return 0;
1769*4882a593Smuzhiyun }
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun static int
1772*4882a593Smuzhiyun atmel_nand_controller_legacy_add_nands(struct atmel_nand_controller *nc)
1773*4882a593Smuzhiyun {
1774*4882a593Smuzhiyun 	struct device *dev = nc->dev;
1775*4882a593Smuzhiyun 	struct platform_device *pdev = to_platform_device(dev);
1776*4882a593Smuzhiyun 	struct atmel_nand *nand;
1777*4882a593Smuzhiyun 	struct gpio_desc *gpio;
1778*4882a593Smuzhiyun 	struct resource *res;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	/*
1781*4882a593Smuzhiyun 	 * Legacy bindings only allow connecting a single NAND with a unique CS
1782*4882a593Smuzhiyun 	 * line to the controller.
1783*4882a593Smuzhiyun 	 */
1784*4882a593Smuzhiyun 	nand = devm_kzalloc(nc->dev, sizeof(*nand) + sizeof(*nand->cs),
1785*4882a593Smuzhiyun 			    GFP_KERNEL);
1786*4882a593Smuzhiyun 	if (!nand)
1787*4882a593Smuzhiyun 		return -ENOMEM;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	nand->numcs = 1;
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1792*4882a593Smuzhiyun 	nand->cs[0].io.virt = devm_ioremap_resource(dev, res);
1793*4882a593Smuzhiyun 	if (IS_ERR(nand->cs[0].io.virt))
1794*4882a593Smuzhiyun 		return PTR_ERR(nand->cs[0].io.virt);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	nand->cs[0].io.dma = res->start;
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	/*
1799*4882a593Smuzhiyun 	 * The old driver was hardcoding the CS id to 3 for all sama5
1800*4882a593Smuzhiyun 	 * controllers. Since this id is only meaningful for the sama5
1801*4882a593Smuzhiyun 	 * controller, we can safely set it to 3 regardless of the
1802*4882a593Smuzhiyun 	 * controller.
1803*4882a593Smuzhiyun 	 * Anyone who wants to connect a NAND to a different CS line will
1804*4882a593Smuzhiyun 	 * have to use the new bindings.
1805*4882a593Smuzhiyun 	 */
1806*4882a593Smuzhiyun 	nand->cs[0].id = 3;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	/* R/B GPIO. */
1809*4882a593Smuzhiyun 	gpio = devm_gpiod_get_index_optional(dev, NULL, 0, GPIOD_IN);
1810*4882a593Smuzhiyun 	if (IS_ERR(gpio)) {
1811*4882a593Smuzhiyun 		dev_err(dev, "Failed to get R/B gpio (err = %ld)\n",
1812*4882a593Smuzhiyun 			PTR_ERR(gpio));
1813*4882a593Smuzhiyun 		return PTR_ERR(gpio);
1814*4882a593Smuzhiyun 	}
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	if (gpio) {
1817*4882a593Smuzhiyun 		nand->cs[0].rb.type = ATMEL_NAND_GPIO_RB;
1818*4882a593Smuzhiyun 		nand->cs[0].rb.gpio = gpio;
1819*4882a593Smuzhiyun 	}
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	/* CS GPIO. */
1822*4882a593Smuzhiyun 	gpio = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_HIGH);
1823*4882a593Smuzhiyun 	if (IS_ERR(gpio)) {
1824*4882a593Smuzhiyun 		dev_err(dev, "Failed to get CS gpio (err = %ld)\n",
1825*4882a593Smuzhiyun 			PTR_ERR(gpio));
1826*4882a593Smuzhiyun 		return PTR_ERR(gpio);
1827*4882a593Smuzhiyun 	}
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	nand->cs[0].csgpio = gpio;
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	/* Card detect GPIO. */
1832*4882a593Smuzhiyun 	gpio = devm_gpiod_get_index_optional(nc->dev, NULL, 2, GPIOD_IN);
1833*4882a593Smuzhiyun 	if (IS_ERR(gpio)) {
1834*4882a593Smuzhiyun 		dev_err(dev,
1835*4882a593Smuzhiyun 			"Failed to get detect gpio (err = %ld)\n",
1836*4882a593Smuzhiyun 			PTR_ERR(gpio));
1837*4882a593Smuzhiyun 		return PTR_ERR(gpio);
1838*4882a593Smuzhiyun 	}
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	nand->cdgpio = gpio;
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun 	nand_set_flash_node(&nand->base, nc->dev->of_node);
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	return atmel_nand_controller_add_nand(nc, nand);
1845*4882a593Smuzhiyun }
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun static int atmel_nand_controller_add_nands(struct atmel_nand_controller *nc)
1848*4882a593Smuzhiyun {
1849*4882a593Smuzhiyun 	struct device_node *np, *nand_np;
1850*4882a593Smuzhiyun 	struct device *dev = nc->dev;
1851*4882a593Smuzhiyun 	int ret, reg_cells;
1852*4882a593Smuzhiyun 	u32 val;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	/* We do not retrieve the SMC syscon when parsing old DTs. */
1855*4882a593Smuzhiyun 	if (nc->caps->legacy_of_bindings)
1856*4882a593Smuzhiyun 		return atmel_nand_controller_legacy_add_nands(nc);
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	np = dev->of_node;
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "#address-cells", &val);
1861*4882a593Smuzhiyun 	if (ret) {
1862*4882a593Smuzhiyun 		dev_err(dev, "missing #address-cells property\n");
1863*4882a593Smuzhiyun 		return ret;
1864*4882a593Smuzhiyun 	}
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	reg_cells = val;
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "#size-cells", &val);
1869*4882a593Smuzhiyun 	if (ret) {
1870*4882a593Smuzhiyun 		dev_err(dev, "missing #size-cells property\n");
1871*4882a593Smuzhiyun 		return ret;
1872*4882a593Smuzhiyun 	}
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	reg_cells += val;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	for_each_child_of_node(np, nand_np) {
1877*4882a593Smuzhiyun 		struct atmel_nand *nand;
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun 		nand = atmel_nand_create(nc, nand_np, reg_cells);
1880*4882a593Smuzhiyun 		if (IS_ERR(nand)) {
1881*4882a593Smuzhiyun 			ret = PTR_ERR(nand);
1882*4882a593Smuzhiyun 			goto err;
1883*4882a593Smuzhiyun 		}
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 		ret = atmel_nand_controller_add_nand(nc, nand);
1886*4882a593Smuzhiyun 		if (ret)
1887*4882a593Smuzhiyun 			goto err;
1888*4882a593Smuzhiyun 	}
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	return 0;
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun err:
1893*4882a593Smuzhiyun 	atmel_nand_controller_remove_nands(nc);
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	return ret;
1896*4882a593Smuzhiyun }
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun static void atmel_nand_controller_cleanup(struct atmel_nand_controller *nc)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	if (nc->dmac)
1901*4882a593Smuzhiyun 		dma_release_channel(nc->dmac);
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	clk_put(nc->mck);
1904*4882a593Smuzhiyun }
1905*4882a593Smuzhiyun 
1906*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9260_ebi_csa = {
1907*4882a593Smuzhiyun 	.offs = AT91SAM9260_MATRIX_EBICSA,
1908*4882a593Smuzhiyun };
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9261_ebi_csa = {
1911*4882a593Smuzhiyun 	.offs = AT91SAM9261_MATRIX_EBICSA,
1912*4882a593Smuzhiyun };
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9263_ebi_csa = {
1915*4882a593Smuzhiyun 	.offs = AT91SAM9263_MATRIX_EBI0CSA,
1916*4882a593Smuzhiyun };
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9rl_ebi_csa = {
1919*4882a593Smuzhiyun 	.offs = AT91SAM9RL_MATRIX_EBICSA,
1920*4882a593Smuzhiyun };
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9g45_ebi_csa = {
1923*4882a593Smuzhiyun 	.offs = AT91SAM9G45_MATRIX_EBICSA,
1924*4882a593Smuzhiyun };
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9n12_ebi_csa = {
1927*4882a593Smuzhiyun 	.offs = AT91SAM9N12_MATRIX_EBICSA,
1928*4882a593Smuzhiyun };
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg at91sam9x5_ebi_csa = {
1931*4882a593Smuzhiyun 	.offs = AT91SAM9X5_MATRIX_EBICSA,
1932*4882a593Smuzhiyun };
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun static const struct atmel_smc_nand_ebi_csa_cfg sam9x60_ebi_csa = {
1935*4882a593Smuzhiyun 	.offs = AT91_SFR_CCFG_EBICSA,
1936*4882a593Smuzhiyun 	.nfd0_on_d16 = AT91_SFR_CCFG_NFD0_ON_D16,
1937*4882a593Smuzhiyun };
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun static const struct of_device_id atmel_ebi_csa_regmap_of_ids[] = {
1940*4882a593Smuzhiyun 	{
1941*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9260-matrix",
1942*4882a593Smuzhiyun 		.data = &at91sam9260_ebi_csa,
1943*4882a593Smuzhiyun 	},
1944*4882a593Smuzhiyun 	{
1945*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9261-matrix",
1946*4882a593Smuzhiyun 		.data = &at91sam9261_ebi_csa,
1947*4882a593Smuzhiyun 	},
1948*4882a593Smuzhiyun 	{
1949*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9263-matrix",
1950*4882a593Smuzhiyun 		.data = &at91sam9263_ebi_csa,
1951*4882a593Smuzhiyun 	},
1952*4882a593Smuzhiyun 	{
1953*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9rl-matrix",
1954*4882a593Smuzhiyun 		.data = &at91sam9rl_ebi_csa,
1955*4882a593Smuzhiyun 	},
1956*4882a593Smuzhiyun 	{
1957*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9g45-matrix",
1958*4882a593Smuzhiyun 		.data = &at91sam9g45_ebi_csa,
1959*4882a593Smuzhiyun 	},
1960*4882a593Smuzhiyun 	{
1961*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9n12-matrix",
1962*4882a593Smuzhiyun 		.data = &at91sam9n12_ebi_csa,
1963*4882a593Smuzhiyun 	},
1964*4882a593Smuzhiyun 	{
1965*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9x5-matrix",
1966*4882a593Smuzhiyun 		.data = &at91sam9x5_ebi_csa,
1967*4882a593Smuzhiyun 	},
1968*4882a593Smuzhiyun 	{
1969*4882a593Smuzhiyun 		.compatible = "microchip,sam9x60-sfr",
1970*4882a593Smuzhiyun 		.data = &sam9x60_ebi_csa,
1971*4882a593Smuzhiyun 	},
1972*4882a593Smuzhiyun 	{ /* sentinel */ },
1973*4882a593Smuzhiyun };
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun static int atmel_nand_attach_chip(struct nand_chip *chip)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	struct atmel_nand_controller *nc = to_nand_controller(chip->controller);
1978*4882a593Smuzhiyun 	struct atmel_nand *nand = to_atmel_nand(chip);
1979*4882a593Smuzhiyun 	struct mtd_info *mtd = nand_to_mtd(chip);
1980*4882a593Smuzhiyun 	int ret;
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	ret = nc->caps->ops->ecc_init(chip);
1983*4882a593Smuzhiyun 	if (ret)
1984*4882a593Smuzhiyun 		return ret;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	if (nc->caps->legacy_of_bindings || !nc->dev->of_node) {
1987*4882a593Smuzhiyun 		/*
1988*4882a593Smuzhiyun 		 * We keep the MTD name unchanged to avoid breaking platforms
1989*4882a593Smuzhiyun 		 * where the MTD cmdline parser is used and the bootloader
1990*4882a593Smuzhiyun 		 * has not been updated to use the new naming scheme.
1991*4882a593Smuzhiyun 		 */
1992*4882a593Smuzhiyun 		mtd->name = "atmel_nand";
1993*4882a593Smuzhiyun 	} else if (!mtd->name) {
1994*4882a593Smuzhiyun 		/*
1995*4882a593Smuzhiyun 		 * If the new bindings are used and the bootloader has not been
1996*4882a593Smuzhiyun 		 * updated to pass a new mtdparts parameter on the cmdline, you
1997*4882a593Smuzhiyun 		 * should define the following property in your nand node:
1998*4882a593Smuzhiyun 		 *
1999*4882a593Smuzhiyun 		 *	label = "atmel_nand";
2000*4882a593Smuzhiyun 		 *
2001*4882a593Smuzhiyun 		 * This way, mtd->name will be set by the core when
2002*4882a593Smuzhiyun 		 * nand_set_flash_node() is called.
2003*4882a593Smuzhiyun 		 */
2004*4882a593Smuzhiyun 		mtd->name = devm_kasprintf(nc->dev, GFP_KERNEL,
2005*4882a593Smuzhiyun 					   "%s:nand.%d", dev_name(nc->dev),
2006*4882a593Smuzhiyun 					   nand->cs[0].id);
2007*4882a593Smuzhiyun 		if (!mtd->name) {
2008*4882a593Smuzhiyun 			dev_err(nc->dev, "Failed to allocate mtd->name\n");
2009*4882a593Smuzhiyun 			return -ENOMEM;
2010*4882a593Smuzhiyun 		}
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	return 0;
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun static const struct nand_controller_ops atmel_nand_controller_ops = {
2017*4882a593Smuzhiyun 	.attach_chip = atmel_nand_attach_chip,
2018*4882a593Smuzhiyun 	.setup_interface = atmel_nand_setup_interface,
2019*4882a593Smuzhiyun 	.exec_op = atmel_nand_exec_op,
2020*4882a593Smuzhiyun };
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun static int atmel_nand_controller_init(struct atmel_nand_controller *nc,
2023*4882a593Smuzhiyun 				struct platform_device *pdev,
2024*4882a593Smuzhiyun 				const struct atmel_nand_controller_caps *caps)
2025*4882a593Smuzhiyun {
2026*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2027*4882a593Smuzhiyun 	struct device_node *np = dev->of_node;
2028*4882a593Smuzhiyun 	int ret;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	nand_controller_init(&nc->base);
2031*4882a593Smuzhiyun 	nc->base.ops = &atmel_nand_controller_ops;
2032*4882a593Smuzhiyun 	INIT_LIST_HEAD(&nc->chips);
2033*4882a593Smuzhiyun 	nc->dev = dev;
2034*4882a593Smuzhiyun 	nc->caps = caps;
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	platform_set_drvdata(pdev, nc);
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	nc->pmecc = devm_atmel_pmecc_get(dev);
2039*4882a593Smuzhiyun 	if (IS_ERR(nc->pmecc))
2040*4882a593Smuzhiyun 		return dev_err_probe(dev, PTR_ERR(nc->pmecc),
2041*4882a593Smuzhiyun 				     "Could not get PMECC object\n");
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	if (nc->caps->has_dma && !atmel_nand_avoid_dma) {
2044*4882a593Smuzhiyun 		dma_cap_mask_t mask;
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 		dma_cap_zero(mask);
2047*4882a593Smuzhiyun 		dma_cap_set(DMA_MEMCPY, mask);
2048*4882a593Smuzhiyun 
2049*4882a593Smuzhiyun 		nc->dmac = dma_request_channel(mask, NULL, NULL);
2050*4882a593Smuzhiyun 		if (!nc->dmac)
2051*4882a593Smuzhiyun 			dev_err(nc->dev, "Failed to request DMA channel\n");
2052*4882a593Smuzhiyun 	}
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	/* We do not retrieve the SMC syscon when parsing old DTs. */
2055*4882a593Smuzhiyun 	if (nc->caps->legacy_of_bindings)
2056*4882a593Smuzhiyun 		return 0;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	nc->mck = of_clk_get(dev->parent->of_node, 0);
2059*4882a593Smuzhiyun 	if (IS_ERR(nc->mck)) {
2060*4882a593Smuzhiyun 		dev_err(dev, "Failed to retrieve MCK clk\n");
2061*4882a593Smuzhiyun 		ret = PTR_ERR(nc->mck);
2062*4882a593Smuzhiyun 		goto out_release_dma;
2063*4882a593Smuzhiyun 	}
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun 	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2066*4882a593Smuzhiyun 	if (!np) {
2067*4882a593Smuzhiyun 		dev_err(dev, "Missing or invalid atmel,smc property\n");
2068*4882a593Smuzhiyun 		ret = -EINVAL;
2069*4882a593Smuzhiyun 		goto out_release_dma;
2070*4882a593Smuzhiyun 	}
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	nc->smc = syscon_node_to_regmap(np);
2073*4882a593Smuzhiyun 	of_node_put(np);
2074*4882a593Smuzhiyun 	if (IS_ERR(nc->smc)) {
2075*4882a593Smuzhiyun 		ret = PTR_ERR(nc->smc);
2076*4882a593Smuzhiyun 		dev_err(dev, "Could not get SMC regmap (err = %d)\n", ret);
2077*4882a593Smuzhiyun 		goto out_release_dma;
2078*4882a593Smuzhiyun 	}
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	return 0;
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun out_release_dma:
2083*4882a593Smuzhiyun 	if (nc->dmac)
2084*4882a593Smuzhiyun 		dma_release_channel(nc->dmac);
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun 	return ret;
2087*4882a593Smuzhiyun }
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun static int
2090*4882a593Smuzhiyun atmel_smc_nand_controller_init(struct atmel_smc_nand_controller *nc)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	struct device *dev = nc->base.dev;
2093*4882a593Smuzhiyun 	const struct of_device_id *match;
2094*4882a593Smuzhiyun 	struct device_node *np;
2095*4882a593Smuzhiyun 	int ret;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	/* We do not retrieve the EBICSA regmap when parsing old DTs. */
2098*4882a593Smuzhiyun 	if (nc->base.caps->legacy_of_bindings)
2099*4882a593Smuzhiyun 		return 0;
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun 	np = of_parse_phandle(dev->parent->of_node,
2102*4882a593Smuzhiyun 			      nc->base.caps->ebi_csa_regmap_name, 0);
2103*4882a593Smuzhiyun 	if (!np)
2104*4882a593Smuzhiyun 		return 0;
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	match = of_match_node(atmel_ebi_csa_regmap_of_ids, np);
2107*4882a593Smuzhiyun 	if (!match) {
2108*4882a593Smuzhiyun 		of_node_put(np);
2109*4882a593Smuzhiyun 		return 0;
2110*4882a593Smuzhiyun 	}
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	nc->ebi_csa_regmap = syscon_node_to_regmap(np);
2113*4882a593Smuzhiyun 	of_node_put(np);
2114*4882a593Smuzhiyun 	if (IS_ERR(nc->ebi_csa_regmap)) {
2115*4882a593Smuzhiyun 		ret = PTR_ERR(nc->ebi_csa_regmap);
2116*4882a593Smuzhiyun 		dev_err(dev, "Could not get EBICSA regmap (err = %d)\n", ret);
2117*4882a593Smuzhiyun 		return ret;
2118*4882a593Smuzhiyun 	}
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	nc->ebi_csa = (struct atmel_smc_nand_ebi_csa_cfg *)match->data;
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun 	/*
2123*4882a593Smuzhiyun 	 * The at91sam9263 has 2 EBIs, if the NAND controller is under EBI1
2124*4882a593Smuzhiyun 	 * add 4 to ->ebi_csa->offs.
2125*4882a593Smuzhiyun 	 */
2126*4882a593Smuzhiyun 	if (of_device_is_compatible(dev->parent->of_node,
2127*4882a593Smuzhiyun 				    "atmel,at91sam9263-ebi1"))
2128*4882a593Smuzhiyun 		nc->ebi_csa->offs += 4;
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	return 0;
2131*4882a593Smuzhiyun }
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun static int
2134*4882a593Smuzhiyun atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc)
2135*4882a593Smuzhiyun {
2136*4882a593Smuzhiyun 	struct regmap_config regmap_conf = {
2137*4882a593Smuzhiyun 		.reg_bits = 32,
2138*4882a593Smuzhiyun 		.val_bits = 32,
2139*4882a593Smuzhiyun 		.reg_stride = 4,
2140*4882a593Smuzhiyun 	};
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	struct device *dev = nc->base.dev;
2143*4882a593Smuzhiyun 	struct device_node *nand_np, *nfc_np;
2144*4882a593Smuzhiyun 	void __iomem *iomem;
2145*4882a593Smuzhiyun 	struct resource res;
2146*4882a593Smuzhiyun 	int ret;
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	nand_np = dev->of_node;
2149*4882a593Smuzhiyun 	nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc");
2150*4882a593Smuzhiyun 	if (!nfc_np) {
2151*4882a593Smuzhiyun 		dev_err(dev, "Could not find device node for sama5d3-nfc\n");
2152*4882a593Smuzhiyun 		return -ENODEV;
2153*4882a593Smuzhiyun 	}
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	nc->clk = of_clk_get(nfc_np, 0);
2156*4882a593Smuzhiyun 	if (IS_ERR(nc->clk)) {
2157*4882a593Smuzhiyun 		ret = PTR_ERR(nc->clk);
2158*4882a593Smuzhiyun 		dev_err(dev, "Failed to retrieve HSMC clock (err = %d)\n",
2159*4882a593Smuzhiyun 			ret);
2160*4882a593Smuzhiyun 		goto out;
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	ret = clk_prepare_enable(nc->clk);
2164*4882a593Smuzhiyun 	if (ret) {
2165*4882a593Smuzhiyun 		dev_err(dev, "Failed to enable the HSMC clock (err = %d)\n",
2166*4882a593Smuzhiyun 			ret);
2167*4882a593Smuzhiyun 		goto out;
2168*4882a593Smuzhiyun 	}
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	nc->irq = of_irq_get(nand_np, 0);
2171*4882a593Smuzhiyun 	if (nc->irq <= 0) {
2172*4882a593Smuzhiyun 		ret = nc->irq ?: -ENXIO;
2173*4882a593Smuzhiyun 		if (ret != -EPROBE_DEFER)
2174*4882a593Smuzhiyun 			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2175*4882a593Smuzhiyun 				ret);
2176*4882a593Smuzhiyun 		goto out;
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	ret = of_address_to_resource(nfc_np, 0, &res);
2180*4882a593Smuzhiyun 	if (ret) {
2181*4882a593Smuzhiyun 		dev_err(dev, "Invalid or missing NFC IO resource (err = %d)\n",
2182*4882a593Smuzhiyun 			ret);
2183*4882a593Smuzhiyun 		goto out;
2184*4882a593Smuzhiyun 	}
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	iomem = devm_ioremap_resource(dev, &res);
2187*4882a593Smuzhiyun 	if (IS_ERR(iomem)) {
2188*4882a593Smuzhiyun 		ret = PTR_ERR(iomem);
2189*4882a593Smuzhiyun 		goto out;
2190*4882a593Smuzhiyun 	}
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	regmap_conf.name = "nfc-io";
2193*4882a593Smuzhiyun 	regmap_conf.max_register = resource_size(&res) - 4;
2194*4882a593Smuzhiyun 	nc->io = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2195*4882a593Smuzhiyun 	if (IS_ERR(nc->io)) {
2196*4882a593Smuzhiyun 		ret = PTR_ERR(nc->io);
2197*4882a593Smuzhiyun 		dev_err(dev, "Could not create NFC IO regmap (err = %d)\n",
2198*4882a593Smuzhiyun 			ret);
2199*4882a593Smuzhiyun 		goto out;
2200*4882a593Smuzhiyun 	}
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	ret = of_address_to_resource(nfc_np, 1, &res);
2203*4882a593Smuzhiyun 	if (ret) {
2204*4882a593Smuzhiyun 		dev_err(dev, "Invalid or missing HSMC resource (err = %d)\n",
2205*4882a593Smuzhiyun 			ret);
2206*4882a593Smuzhiyun 		goto out;
2207*4882a593Smuzhiyun 	}
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	iomem = devm_ioremap_resource(dev, &res);
2210*4882a593Smuzhiyun 	if (IS_ERR(iomem)) {
2211*4882a593Smuzhiyun 		ret = PTR_ERR(iomem);
2212*4882a593Smuzhiyun 		goto out;
2213*4882a593Smuzhiyun 	}
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	regmap_conf.name = "smc";
2216*4882a593Smuzhiyun 	regmap_conf.max_register = resource_size(&res) - 4;
2217*4882a593Smuzhiyun 	nc->base.smc = devm_regmap_init_mmio(dev, iomem, &regmap_conf);
2218*4882a593Smuzhiyun 	if (IS_ERR(nc->base.smc)) {
2219*4882a593Smuzhiyun 		ret = PTR_ERR(nc->base.smc);
2220*4882a593Smuzhiyun 		dev_err(dev, "Could not create SMC regmap (err = %d)\n",
2221*4882a593Smuzhiyun 			ret);
2222*4882a593Smuzhiyun 		goto out;
2223*4882a593Smuzhiyun 	}
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	ret = of_address_to_resource(nfc_np, 2, &res);
2226*4882a593Smuzhiyun 	if (ret) {
2227*4882a593Smuzhiyun 		dev_err(dev, "Invalid or missing SRAM resource (err = %d)\n",
2228*4882a593Smuzhiyun 			ret);
2229*4882a593Smuzhiyun 		goto out;
2230*4882a593Smuzhiyun 	}
2231*4882a593Smuzhiyun 
2232*4882a593Smuzhiyun 	nc->sram.virt = devm_ioremap_resource(dev, &res);
2233*4882a593Smuzhiyun 	if (IS_ERR(nc->sram.virt)) {
2234*4882a593Smuzhiyun 		ret = PTR_ERR(nc->sram.virt);
2235*4882a593Smuzhiyun 		goto out;
2236*4882a593Smuzhiyun 	}
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	nc->sram.dma = res.start;
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun out:
2241*4882a593Smuzhiyun 	of_node_put(nfc_np);
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	return ret;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun static int
2247*4882a593Smuzhiyun atmel_hsmc_nand_controller_init(struct atmel_hsmc_nand_controller *nc)
2248*4882a593Smuzhiyun {
2249*4882a593Smuzhiyun 	struct device *dev = nc->base.dev;
2250*4882a593Smuzhiyun 	struct device_node *np;
2251*4882a593Smuzhiyun 	int ret;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0);
2254*4882a593Smuzhiyun 	if (!np) {
2255*4882a593Smuzhiyun 		dev_err(dev, "Missing or invalid atmel,smc property\n");
2256*4882a593Smuzhiyun 		return -EINVAL;
2257*4882a593Smuzhiyun 	}
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	nc->hsmc_layout = atmel_hsmc_get_reg_layout(np);
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	nc->irq = of_irq_get(np, 0);
2262*4882a593Smuzhiyun 	of_node_put(np);
2263*4882a593Smuzhiyun 	if (nc->irq <= 0) {
2264*4882a593Smuzhiyun 		ret = nc->irq ?: -ENXIO;
2265*4882a593Smuzhiyun 		if (ret != -EPROBE_DEFER)
2266*4882a593Smuzhiyun 			dev_err(dev, "Failed to get IRQ number (err = %d)\n",
2267*4882a593Smuzhiyun 				ret);
2268*4882a593Smuzhiyun 		return ret;
2269*4882a593Smuzhiyun 	}
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	np = of_parse_phandle(dev->of_node, "atmel,nfc-io", 0);
2272*4882a593Smuzhiyun 	if (!np) {
2273*4882a593Smuzhiyun 		dev_err(dev, "Missing or invalid atmel,nfc-io property\n");
2274*4882a593Smuzhiyun 		return -EINVAL;
2275*4882a593Smuzhiyun 	}
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	nc->io = syscon_node_to_regmap(np);
2278*4882a593Smuzhiyun 	of_node_put(np);
2279*4882a593Smuzhiyun 	if (IS_ERR(nc->io)) {
2280*4882a593Smuzhiyun 		ret = PTR_ERR(nc->io);
2281*4882a593Smuzhiyun 		dev_err(dev, "Could not get NFC IO regmap (err = %d)\n", ret);
2282*4882a593Smuzhiyun 		return ret;
2283*4882a593Smuzhiyun 	}
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	nc->sram.pool = of_gen_pool_get(nc->base.dev->of_node,
2286*4882a593Smuzhiyun 					 "atmel,nfc-sram", 0);
2287*4882a593Smuzhiyun 	if (!nc->sram.pool) {
2288*4882a593Smuzhiyun 		dev_err(nc->base.dev, "Missing SRAM\n");
2289*4882a593Smuzhiyun 		return -ENOMEM;
2290*4882a593Smuzhiyun 	}
2291*4882a593Smuzhiyun 
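	/*
	 * Carve ATMEL_NFC_SRAM_SIZE bytes out of the NFC SRAM pool:
	 * sram.virt is used for CPU accesses, sram.dma for DMA transfers.
	 */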
2292*4882a593Smuzhiyun 	nc->sram.virt = (void __iomem *)gen_pool_dma_alloc(nc->sram.pool,
2293*4882a593Smuzhiyun 							   ATMEL_NFC_SRAM_SIZE,
2294*4882a593Smuzhiyun 							   &nc->sram.dma);
2295*4882a593Smuzhiyun 	if (!nc->sram.virt) {
2296*4882a593Smuzhiyun 		dev_err(nc->base.dev,
2297*4882a593Smuzhiyun 			"Could not allocate memory from the NFC SRAM pool\n");
2298*4882a593Smuzhiyun 		return -ENOMEM;
2299*4882a593Smuzhiyun 	}
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 	return 0;
2302*4882a593Smuzhiyun }
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun static int
2305*4882a593Smuzhiyun atmel_hsmc_nand_controller_remove(struct atmel_nand_controller *nc)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *hsmc_nc;
2308*4882a593Smuzhiyun 	int ret;
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun 	ret = atmel_nand_controller_remove_nands(nc);
2311*4882a593Smuzhiyun 	if (ret)
2312*4882a593Smuzhiyun 		return ret;
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 	hsmc_nc = container_of(nc, struct atmel_hsmc_nand_controller, base);
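	/* Disable the NFC block before releasing its SRAM and clock. */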
2315*4882a593Smuzhiyun 	regmap_write(hsmc_nc->base.smc, ATMEL_HSMC_NFC_CTRL,
2316*4882a593Smuzhiyun 		     ATMEL_HSMC_NFC_CTRL_DIS);
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	if (hsmc_nc->sram.pool)
2319*4882a593Smuzhiyun 		gen_pool_free(hsmc_nc->sram.pool,
2320*4882a593Smuzhiyun 			      (unsigned long)hsmc_nc->sram.virt,
2321*4882a593Smuzhiyun 			      ATMEL_NFC_SRAM_SIZE);
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	if (hsmc_nc->clk) {
2324*4882a593Smuzhiyun 		clk_disable_unprepare(hsmc_nc->clk);
2325*4882a593Smuzhiyun 		clk_put(hsmc_nc->clk);
2326*4882a593Smuzhiyun 	}
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	atmel_nand_controller_cleanup(nc);
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	return 0;
2331*4882a593Smuzhiyun }
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun static int atmel_hsmc_nand_controller_probe(struct platform_device *pdev,
2334*4882a593Smuzhiyun 				const struct atmel_nand_controller_caps *caps)
2335*4882a593Smuzhiyun {
2336*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2337*4882a593Smuzhiyun 	struct atmel_hsmc_nand_controller *nc;
2338*4882a593Smuzhiyun 	int ret;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2341*4882a593Smuzhiyun 	if (!nc)
2342*4882a593Smuzhiyun 		return -ENOMEM;
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2345*4882a593Smuzhiyun 	if (ret)
2346*4882a593Smuzhiyun 		return ret;
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	if (caps->legacy_of_bindings)
2349*4882a593Smuzhiyun 		ret = atmel_hsmc_nand_controller_legacy_init(nc);
2350*4882a593Smuzhiyun 	else
2351*4882a593Smuzhiyun 		ret = atmel_hsmc_nand_controller_init(nc);
2352*4882a593Smuzhiyun 
2353*4882a593Smuzhiyun 	if (ret)
2354*4882a593Smuzhiyun 		return ret;
2355*4882a593Smuzhiyun 
2356*4882a593Smuzhiyun 	/* Make sure all irqs are masked before registering our IRQ handler. */
2357*4882a593Smuzhiyun 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, 0xffffffff);
2358*4882a593Smuzhiyun 	ret = devm_request_irq(dev, nc->irq, atmel_nfc_interrupt,
2359*4882a593Smuzhiyun 			       IRQF_SHARED, "nfc", nc);
2360*4882a593Smuzhiyun 	if (ret) {
2361*4882a593Smuzhiyun 		dev_err(dev,
2362*4882a593Smuzhiyun 			"Could not register NFC interrupt handler (err = %d)\n",
2363*4882a593Smuzhiyun 			ret);
2364*4882a593Smuzhiyun 		goto err;
2365*4882a593Smuzhiyun 	}
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	/* Initial NFC configuration. */
2368*4882a593Smuzhiyun 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CFG,
2369*4882a593Smuzhiyun 		     ATMEL_HSMC_NFC_CFG_DTO_MAX);
2370*4882a593Smuzhiyun 	regmap_write(nc->base.smc, ATMEL_HSMC_NFC_CTRL,
2371*4882a593Smuzhiyun 		     ATMEL_HSMC_NFC_CTRL_EN);
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 	ret = atmel_nand_controller_add_nands(&nc->base);
2374*4882a593Smuzhiyun 	if (ret)
2375*4882a593Smuzhiyun 		goto err;
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	return 0;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun err:
2380*4882a593Smuzhiyun 	atmel_hsmc_nand_controller_remove(&nc->base);
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 	return ret;
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = {
2386*4882a593Smuzhiyun 	.probe = atmel_hsmc_nand_controller_probe,
2387*4882a593Smuzhiyun 	.remove = atmel_hsmc_nand_controller_remove,
2388*4882a593Smuzhiyun 	.ecc_init = atmel_hsmc_nand_ecc_init,
2389*4882a593Smuzhiyun 	.nand_init = atmel_nand_init,
2390*4882a593Smuzhiyun 	.setup_interface = atmel_hsmc_nand_setup_interface,
2391*4882a593Smuzhiyun 	.exec_op = atmel_hsmc_nand_exec_op,
2392*4882a593Smuzhiyun };
2393*4882a593Smuzhiyun 
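/*
 * ALE and CLE are driven through address lines (A21/A22 on most SoCs, swapped
 * on the at91sam9261), hence the BIT(21)/BIT(22) offsets applied when issuing
 * address and command cycles.
 */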
2394*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = {
2395*4882a593Smuzhiyun 	.has_dma = true,
2396*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2397*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2398*4882a593Smuzhiyun 	.ops = &atmel_hsmc_nc_ops,
2399*4882a593Smuzhiyun };
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun /* Only used to parse old bindings. */
2402*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sama5_nand_caps = {
2403*4882a593Smuzhiyun 	.has_dma = true,
2404*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2405*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2406*4882a593Smuzhiyun 	.ops = &atmel_hsmc_nc_ops,
2407*4882a593Smuzhiyun 	.legacy_of_bindings = true,
2408*4882a593Smuzhiyun };
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun static int atmel_smc_nand_controller_probe(struct platform_device *pdev,
2411*4882a593Smuzhiyun 				const struct atmel_nand_controller_caps *caps)
2412*4882a593Smuzhiyun {
2413*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2414*4882a593Smuzhiyun 	struct atmel_smc_nand_controller *nc;
2415*4882a593Smuzhiyun 	int ret;
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	nc = devm_kzalloc(dev, sizeof(*nc), GFP_KERNEL);
2418*4882a593Smuzhiyun 	if (!nc)
2419*4882a593Smuzhiyun 		return -ENOMEM;
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun 	ret = atmel_nand_controller_init(&nc->base, pdev, caps);
2422*4882a593Smuzhiyun 	if (ret)
2423*4882a593Smuzhiyun 		return ret;
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	ret = atmel_smc_nand_controller_init(nc);
2426*4882a593Smuzhiyun 	if (ret)
2427*4882a593Smuzhiyun 		return ret;
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	return atmel_nand_controller_add_nands(&nc->base);
2430*4882a593Smuzhiyun }
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun static int
2433*4882a593Smuzhiyun atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc)
2434*4882a593Smuzhiyun {
2435*4882a593Smuzhiyun 	int ret;
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	ret = atmel_nand_controller_remove_nands(nc);
2438*4882a593Smuzhiyun 	if (ret)
2439*4882a593Smuzhiyun 		return ret;
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	atmel_nand_controller_cleanup(nc);
2442*4882a593Smuzhiyun 
2443*4882a593Smuzhiyun 	return 0;
2444*4882a593Smuzhiyun }
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun /*
2447*4882a593Smuzhiyun  * The SMC reg layout of the at91rm9200 is completely different, which
2448*4882a593Smuzhiyun  * prevents us from re-using atmel_smc_nand_setup_interface() for the
2449*4882a593Smuzhiyun  * ->setup_interface() hook.
2450*4882a593Smuzhiyun  * At this point, there's no support for the at91rm9200 SMC IP, so we leave
2451*4882a593Smuzhiyun  * ->setup_interface() unassigned.
2452*4882a593Smuzhiyun  */
2453*4882a593Smuzhiyun static const struct atmel_nand_controller_ops at91rm9200_nc_ops = {
2454*4882a593Smuzhiyun 	.probe = atmel_smc_nand_controller_probe,
2455*4882a593Smuzhiyun 	.remove = atmel_smc_nand_controller_remove,
2456*4882a593Smuzhiyun 	.ecc_init = atmel_nand_ecc_init,
2457*4882a593Smuzhiyun 	.nand_init = atmel_smc_nand_init,
2458*4882a593Smuzhiyun 	.exec_op = atmel_smc_nand_exec_op,
2459*4882a593Smuzhiyun };
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = {
2462*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2463*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2464*4882a593Smuzhiyun 	.ebi_csa_regmap_name = "atmel,matrix",
2465*4882a593Smuzhiyun 	.ops = &at91rm9200_nc_ops,
2466*4882a593Smuzhiyun };
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun static const struct atmel_nand_controller_ops atmel_smc_nc_ops = {
2469*4882a593Smuzhiyun 	.probe = atmel_smc_nand_controller_probe,
2470*4882a593Smuzhiyun 	.remove = atmel_smc_nand_controller_remove,
2471*4882a593Smuzhiyun 	.ecc_init = atmel_nand_ecc_init,
2472*4882a593Smuzhiyun 	.nand_init = atmel_smc_nand_init,
2473*4882a593Smuzhiyun 	.setup_interface = atmel_smc_nand_setup_interface,
2474*4882a593Smuzhiyun 	.exec_op = atmel_smc_nand_exec_op,
2475*4882a593Smuzhiyun };
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = {
2478*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2479*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2480*4882a593Smuzhiyun 	.ebi_csa_regmap_name = "atmel,matrix",
2481*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2482*4882a593Smuzhiyun };
2483*4882a593Smuzhiyun 
2484*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sam9261_nc_caps = {
2485*4882a593Smuzhiyun 	.ale_offs = BIT(22),
2486*4882a593Smuzhiyun 	.cle_offs = BIT(21),
2487*4882a593Smuzhiyun 	.ebi_csa_regmap_name = "atmel,matrix",
2488*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2489*4882a593Smuzhiyun };
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sam9g45_nc_caps = {
2492*4882a593Smuzhiyun 	.has_dma = true,
2493*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2494*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2495*4882a593Smuzhiyun 	.ebi_csa_regmap_name = "atmel,matrix",
2496*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2497*4882a593Smuzhiyun };
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun static const struct atmel_nand_controller_caps microchip_sam9x60_nc_caps = {
2500*4882a593Smuzhiyun 	.has_dma = true,
2501*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2502*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2503*4882a593Smuzhiyun 	.ebi_csa_regmap_name = "microchip,sfr",
2504*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2505*4882a593Smuzhiyun };
2506*4882a593Smuzhiyun 
2507*4882a593Smuzhiyun /* Only used to parse old bindings. */
2508*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_rm9200_nand_caps = {
2509*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2510*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2511*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2512*4882a593Smuzhiyun 	.legacy_of_bindings = true,
2513*4882a593Smuzhiyun };
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sam9261_nand_caps = {
2516*4882a593Smuzhiyun 	.ale_offs = BIT(22),
2517*4882a593Smuzhiyun 	.cle_offs = BIT(21),
2518*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2519*4882a593Smuzhiyun 	.legacy_of_bindings = true,
2520*4882a593Smuzhiyun };
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun static const struct atmel_nand_controller_caps atmel_sam9g45_nand_caps = {
2523*4882a593Smuzhiyun 	.has_dma = true,
2524*4882a593Smuzhiyun 	.ale_offs = BIT(21),
2525*4882a593Smuzhiyun 	.cle_offs = BIT(22),
2526*4882a593Smuzhiyun 	.ops = &atmel_smc_nc_ops,
2527*4882a593Smuzhiyun 	.legacy_of_bindings = true,
2528*4882a593Smuzhiyun };
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun static const struct of_device_id atmel_nand_controller_of_ids[] = {
2531*4882a593Smuzhiyun 	{
2532*4882a593Smuzhiyun 		.compatible = "atmel,at91rm9200-nand-controller",
2533*4882a593Smuzhiyun 		.data = &atmel_rm9200_nc_caps,
2534*4882a593Smuzhiyun 	},
2535*4882a593Smuzhiyun 	{
2536*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9260-nand-controller",
2537*4882a593Smuzhiyun 		.data = &atmel_sam9260_nc_caps,
2538*4882a593Smuzhiyun 	},
2539*4882a593Smuzhiyun 	{
2540*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9261-nand-controller",
2541*4882a593Smuzhiyun 		.data = &atmel_sam9261_nc_caps,
2542*4882a593Smuzhiyun 	},
2543*4882a593Smuzhiyun 	{
2544*4882a593Smuzhiyun 		.compatible = "atmel,at91sam9g45-nand-controller",
2545*4882a593Smuzhiyun 		.data = &atmel_sam9g45_nc_caps,
2546*4882a593Smuzhiyun 	},
2547*4882a593Smuzhiyun 	{
2548*4882a593Smuzhiyun 		.compatible = "atmel,sama5d3-nand-controller",
2549*4882a593Smuzhiyun 		.data = &atmel_sama5_nc_caps,
2550*4882a593Smuzhiyun 	},
2551*4882a593Smuzhiyun 	{
2552*4882a593Smuzhiyun 		.compatible = "microchip,sam9x60-nand-controller",
2553*4882a593Smuzhiyun 		.data = &microchip_sam9x60_nc_caps,
2554*4882a593Smuzhiyun 	},
2555*4882a593Smuzhiyun 	/* Support for old/deprecated bindings: */
2556*4882a593Smuzhiyun 	{
2557*4882a593Smuzhiyun 		.compatible = "atmel,at91rm9200-nand",
2558*4882a593Smuzhiyun 		.data = &atmel_rm9200_nand_caps,
2559*4882a593Smuzhiyun 	},
2560*4882a593Smuzhiyun 	{
2561*4882a593Smuzhiyun 		.compatible = "atmel,sama5d4-nand",
2562*4882a593Smuzhiyun 		.data = &atmel_rm9200_nand_caps,
2563*4882a593Smuzhiyun 	},
2564*4882a593Smuzhiyun 	{
2565*4882a593Smuzhiyun 		.compatible = "atmel,sama5d2-nand",
2566*4882a593Smuzhiyun 		.data = &atmel_rm9200_nand_caps,
2567*4882a593Smuzhiyun 	},
2568*4882a593Smuzhiyun 	{ /* sentinel */ },
2569*4882a593Smuzhiyun };
2570*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, atmel_nand_controller_of_ids);
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun static int atmel_nand_controller_probe(struct platform_device *pdev)
2573*4882a593Smuzhiyun {
2574*4882a593Smuzhiyun 	const struct atmel_nand_controller_caps *caps;
2575*4882a593Smuzhiyun 
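	/*
	 * Caps come from the platform_device_id table when probed without DT,
	 * and from the OF match data otherwise.
	 */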
2576*4882a593Smuzhiyun 	if (pdev->id_entry)
2577*4882a593Smuzhiyun 		caps = (void *)pdev->id_entry->driver_data;
2578*4882a593Smuzhiyun 	else
2579*4882a593Smuzhiyun 		caps = of_device_get_match_data(&pdev->dev);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	if (!caps) {
2582*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Could not retrieve NFC caps\n");
2583*4882a593Smuzhiyun 		return -EINVAL;
2584*4882a593Smuzhiyun 	}
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun 	if (caps->legacy_of_bindings) {
2587*4882a593Smuzhiyun 		struct device_node *nfc_node;
2588*4882a593Smuzhiyun 		u32 ale_offs = 21;
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 		/*
2591*4882a593Smuzhiyun 		 * If we are parsing legacy DT props and the DT contains a
2592*4882a593Smuzhiyun 		 * valid NFC node, forward the request to the sama5 logic.
2593*4882a593Smuzhiyun 		 */
2594*4882a593Smuzhiyun 		nfc_node = of_get_compatible_child(pdev->dev.of_node,
2595*4882a593Smuzhiyun 						   "atmel,sama5d3-nfc");
2596*4882a593Smuzhiyun 		if (nfc_node) {
2597*4882a593Smuzhiyun 			caps = &atmel_sama5_nand_caps;
2598*4882a593Smuzhiyun 			of_node_put(nfc_node);
2599*4882a593Smuzhiyun 		}
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 		/*
2602*4882a593Smuzhiyun 		 * Even if the compatible says we are dealing with an
2603*4882a593Smuzhiyun 		 * at91rm9200 controller, the atmel,nand-has-dma property says
2604*4882a593Smuzhiyun 		 * the controller supports DMA, which means we are in fact
2605*4882a593Smuzhiyun 		 * dealing with an at91sam9g45+ controller.
2606*4882a593Smuzhiyun 		 */
2607*4882a593Smuzhiyun 		if (!caps->has_dma &&
2608*4882a593Smuzhiyun 		    of_property_read_bool(pdev->dev.of_node,
2609*4882a593Smuzhiyun 					  "atmel,nand-has-dma"))
2610*4882a593Smuzhiyun 			caps = &atmel_sam9g45_nand_caps;
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 		/*
2613*4882a593Smuzhiyun 		 * All SoCs except the at91sam9261 assign ALE to A21 and CLE to
2614*4882a593Smuzhiyun 		 * A22. If atmel,nand-addr-offset != 21, we are actually dealing
2615*4882a593Smuzhiyun 		 * with an at91sam9261 controller.
2616*4882a593Smuzhiyun 		 */
2617*4882a593Smuzhiyun 		of_property_read_u32(pdev->dev.of_node,
2618*4882a593Smuzhiyun 				     "atmel,nand-addr-offset", &ale_offs);
2619*4882a593Smuzhiyun 		if (ale_offs != 21)
2620*4882a593Smuzhiyun 			caps = &atmel_sam9261_nand_caps;
2621*4882a593Smuzhiyun 	}
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	return caps->ops->probe(pdev, caps);
2624*4882a593Smuzhiyun }
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun static int atmel_nand_controller_remove(struct platform_device *pdev)
2627*4882a593Smuzhiyun {
2628*4882a593Smuzhiyun 	struct atmel_nand_controller *nc = platform_get_drvdata(pdev);
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	return nc->caps->ops->remove(nc);
2631*4882a593Smuzhiyun }
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun static __maybe_unused int atmel_nand_controller_resume(struct device *dev)
2634*4882a593Smuzhiyun {
2635*4882a593Smuzhiyun 	struct atmel_nand_controller *nc = dev_get_drvdata(dev);
2636*4882a593Smuzhiyun 	struct atmel_nand *nand;
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun 	if (nc->pmecc)
2639*4882a593Smuzhiyun 		atmel_pmecc_reset(nc->pmecc);
2640*4882a593Smuzhiyun 
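	/*
	 * The NAND chips may have been powered off during suspend, so issue
	 * a RESET on every chip select before handing control back.
	 */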
2641*4882a593Smuzhiyun 	list_for_each_entry(nand, &nc->chips, node) {
2642*4882a593Smuzhiyun 		int i;
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 		for (i = 0; i < nand->numcs; i++)
2645*4882a593Smuzhiyun 			nand_reset(&nand->base, i);
2646*4882a593Smuzhiyun 	}
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	return 0;
2649*4882a593Smuzhiyun }
2650*4882a593Smuzhiyun 
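/* Only a resume handler is registered; suspend needs no controller work. */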
2651*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL,
2652*4882a593Smuzhiyun 			 atmel_nand_controller_resume);
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun static struct platform_driver atmel_nand_controller_driver = {
2655*4882a593Smuzhiyun 	.driver = {
2656*4882a593Smuzhiyun 		.name = "atmel-nand-controller",
2657*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(atmel_nand_controller_of_ids),
2658*4882a593Smuzhiyun 		.pm = &atmel_nand_controller_pm_ops,
2659*4882a593Smuzhiyun 	},
2660*4882a593Smuzhiyun 	.probe = atmel_nand_controller_probe,
2661*4882a593Smuzhiyun 	.remove = atmel_nand_controller_remove,
2662*4882a593Smuzhiyun };
2663*4882a593Smuzhiyun module_platform_driver(atmel_nand_controller_driver);
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2666*4882a593Smuzhiyun MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
2667*4882a593Smuzhiyun MODULE_DESCRIPTION("NAND Flash Controller driver for Atmel SoCs");
2668*4882a593Smuzhiyun MODULE_ALIAS("platform:atmel-nand-controller");