// SPDX-License-Identifier: GPL-2.0-only
/*
 * Updated, and converted to generic GPIO based driver by Russell King.
 *
 * Written by Ben Dooks <ben@simtec.co.uk>
 *   Based on 2.4 version by Mark Whittaker
 *
 * © 2004 Simtec Electronics
 *
 * Device driver for NAND flash that uses a memory mapped interface to
 * read/write the NAND commands and data, and GPIO pins for control signals
 * (the DT binding refers to this as "GPIO assisted NAND flash")
 */
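
/*
 * A minimal, purely illustrative device tree fragment for this binding
 * (phandles, addresses and values below are made up; the binding document
 * for "gpio-control-nand" is authoritative):
 *
 *	nand@1,0 {
 *		compatible = "gpio-control-nand";
 *		reg = <1 0x0000 0x2>;
 *		bank-width = <2>;
 *		chip-delay = <25>;
 *		// rdy, nce, ale, cle and (optional) nwp lines, in the
 *		// order defined by the binding:
 *		gpios = <&gpio 1 0>, <&gpio 2 0>, <&gpio 3 0>,
 *			<&gpio 4 0>, <0>;
 *	};
 */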

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/delay.h>

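/*
 * Driver private data: @io is the memory-mapped window used for both
 * command/address and data cycles, @io_sync is an optional second region
 * read back to order GPIO toggles against posted bus writes (see
 * gpio_nand_dosync()), and @cle/@ale/@rdy are the usual NAND control and
 * ready/busy lines.
 */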
struct gpiomtd {
	struct nand_controller	base;
	void __iomem		*io;
	void __iomem		*io_sync;
	struct nand_chip	nand_chip;
	struct gpio_nand_platdata plat;
	struct gpio_desc *nce; /* Optional chip enable */
	struct gpio_desc *cle;
	struct gpio_desc *ale;
	struct gpio_desc *rdy;
	struct gpio_desc *nwp; /* Optional write protection */
};

static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
{
	return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
}


#ifdef CONFIG_ARM
/* gpio_nand_dosync()
 *
 * Make sure the GPIO state changes occur in-order with writes to the NAND
 * memory region.
 * Needed on PXA due to bus-reordering within the SoC itself (see the section
 * on I/O ordering in the PXA manual, section 2.3, p35).
 */
static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
{
	unsigned long tmp;

	if (gpiomtd->io_sync) {
		/*
		 * Linux memory barriers don't cater for what's required here.
		 * What's required is what's here - a read from a separate
		 * region with a dependency on that read.
		 */
		tmp = readl(gpiomtd->io_sync);
		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
	}
}
#else
static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
#endif

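/*
 * Execute a single instruction of a nand_operation: command and address
 * cycles raise CLE or ALE around byte writes to the data window, data
 * cycles use repeated 8- or 16-bit MMIO accesses, and wait instructions
 * either poll the optional rdy GPIO or fall back to status polling.
 */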
static int gpio_nand_exec_instr(struct nand_chip *chip,
				const struct nand_op_instr *instr)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;

	switch (instr->type) {
	case NAND_OP_CMD_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 1);
		gpio_nand_dosync(gpiomtd);
		writeb(instr->ctx.cmd.opcode, gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->cle, 0);
		return 0;

	case NAND_OP_ADDR_INSTR:
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 1);
		gpio_nand_dosync(gpiomtd);
		for (i = 0; i < instr->ctx.addr.naddrs; i++)
			writeb(instr->ctx.addr.addrs[i], gpiomtd->io);
		gpio_nand_dosync(gpiomtd);
		gpiod_set_value(gpiomtd->ale, 0);
		return 0;

	case NAND_OP_DATA_IN_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in,
				     instr->ctx.data.len / 2);
		else
			ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in,
				    instr->ctx.data.len);
		return 0;

	case NAND_OP_DATA_OUT_INSTR:
		gpio_nand_dosync(gpiomtd);
		if ((chip->options & NAND_BUSWIDTH_16) &&
		    !instr->ctx.data.force_8bit)
			iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out,
				      instr->ctx.data.len / 2);
		else
			iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out,
				     instr->ctx.data.len);
		return 0;

	case NAND_OP_WAITRDY_INSTR:
		if (!gpiomtd->rdy)
			return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);

		return nand_gpio_waitrdy(chip, gpiomtd->rdy,
					 instr->ctx.waitrdy.timeout_ms);

	default:
		return -EINVAL;
	}

	return 0;
}

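/*
 * Run a complete NAND operation: toggle the (optional) chip enable line
 * around the whole sequence, execute each instruction in order and honour
 * any post-instruction delay requested by the core.
 */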
static int gpio_nand_exec_op(struct nand_chip *chip,
			     const struct nand_operation *op,
			     bool check_only)
{
	struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip));
	unsigned int i;
	int ret = 0;

	if (check_only)
		return 0;

	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 0);
	for (i = 0; i < op->ninstrs; i++) {
		ret = gpio_nand_exec_instr(chip, &op->instrs[i]);
		if (ret)
			break;

		if (op->instrs[i].delay_ns)
			ndelay(op->instrs[i].delay_ns);
	}
	gpio_nand_dosync(gpiomtd);
	gpiod_set_value(gpiomtd->nce, 1);

	return ret;
}

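/*
 * If no particular software ECC algorithm was requested, default to
 * Hamming whenever the soft ECC engine is in use.
 */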
static int gpio_nand_attach_chip(struct nand_chip *chip)
{
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
	    chip->ecc.algo == NAND_ECC_ALGO_UNKNOWN)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}

static const struct nand_controller_ops gpio_nand_ops = {
	.exec_op = gpio_nand_exec_op,
	.attach_chip = gpio_nand_attach_chip,
};

#ifdef CONFIG_OF
static const struct of_device_id gpio_nand_id_table[] = {
	{ .compatible = "gpio-control-nand" },
	{}
};
MODULE_DEVICE_TABLE(of, gpio_nand_id_table);

static int gpio_nand_get_config_of(const struct device *dev,
				   struct gpio_nand_platdata *plat)
{
	u32 val;

	if (!dev->of_node)
		return -ENODEV;

	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
		if (val == 2) {
			plat->options |= NAND_BUSWIDTH_16;
		} else if (val != 1) {
			dev_err(dev, "invalid bank-width %u\n", val);
			return -EINVAL;
		}
	}

	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
		plat->chip_delay = val;

	return 0;
}

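/*
 * The optional "gpio-control-nand,io-sync-reg" property names a register
 * whose address is turned into a small MEM resource here; reading it in
 * gpio_nand_dosync() flushes posted bus writes before a GPIO is toggled.
 */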
static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	struct resource *r;
	u64 addr;

	if (of_property_read_u64(pdev->dev.of_node,
				       "gpio-control-nand,io-sync-reg", &addr))
		return NULL;

	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->start = addr;
	r->end = r->start + 0x3;
	r->flags = IORESOURCE_MEM;

	return r;
}
#else /* CONFIG_OF */
static inline int gpio_nand_get_config_of(const struct device *dev,
					  struct gpio_nand_platdata *plat)
{
	return -ENOSYS;
}

static inline struct resource *
gpio_nand_get_io_sync_of(struct platform_device *pdev)
{
	return NULL;
}
#endif /* CONFIG_OF */

static inline int gpio_nand_get_config(const struct device *dev,
				       struct gpio_nand_platdata *plat)
{
	int ret = gpio_nand_get_config_of(dev, plat);

	if (!ret)
		return ret;

	if (dev_get_platdata(dev)) {
		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
		return 0;
	}

	return -EINVAL;
}

static inline struct resource *
gpio_nand_get_io_sync(struct platform_device *pdev)
{
	struct resource *r = gpio_nand_get_io_sync_of(pdev);

	if (r)
		return r;

	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
}

static int gpio_nand_remove(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
	struct nand_chip *chip = &gpiomtd->nand_chip;
	int ret;

	ret = mtd_device_unregister(nand_to_mtd(chip));
	WARN_ON(ret);
	nand_cleanup(chip);

	/* Enable write protection and disable the chip */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return 0;
}

static int gpio_nand_probe(struct platform_device *pdev)
{
	struct gpiomtd *gpiomtd;
	struct nand_chip *chip;
	struct mtd_info *mtd;
	struct resource *res;
	struct device *dev = &pdev->dev;
	int ret = 0;

	if (!dev->of_node && !dev_get_platdata(dev))
		return -EINVAL;

	gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL);
	if (!gpiomtd)
		return -ENOMEM;

	chip = &gpiomtd->nand_chip;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpiomtd->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(gpiomtd->io))
		return PTR_ERR(gpiomtd->io);

	res = gpio_nand_get_io_sync(pdev);
	if (res) {
		gpiomtd->io_sync = devm_ioremap_resource(dev, res);
		if (IS_ERR(gpiomtd->io_sync))
			return PTR_ERR(gpiomtd->io_sync);
	}

	ret = gpio_nand_get_config(dev, &gpiomtd->plat);
	if (ret)
		return ret;

	/* Just enable the chip */
	gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH);
	if (IS_ERR(gpiomtd->nce))
		return PTR_ERR(gpiomtd->nce);

	/* We disable write protection once we know probe() will succeed */
	gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->nwp)) {
		ret = PTR_ERR(gpiomtd->nwp);
		goto out_ce;
	}

	gpiomtd->ale = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->ale)) {
		ret = PTR_ERR(gpiomtd->ale);
		goto out_ce;
	}

	gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW);
	if (IS_ERR(gpiomtd->cle)) {
		ret = PTR_ERR(gpiomtd->cle);
		goto out_ce;
	}

	gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN);
	if (IS_ERR(gpiomtd->rdy)) {
		ret = PTR_ERR(gpiomtd->rdy);
		goto out_ce;
	}

	nand_controller_init(&gpiomtd->base);
	gpiomtd->base.ops = &gpio_nand_ops;

	nand_set_flash_node(chip, pdev->dev.of_node);
	chip->options		= gpiomtd->plat.options;
	chip->controller	= &gpiomtd->base;

	mtd			= nand_to_mtd(chip);
	mtd->dev.parent		= dev;

	platform_set_drvdata(pdev, gpiomtd);

	/* Disable write protection, if wired up */
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_direction_output(gpiomtd->nwp, 1);

	/*
	 * This driver assumes that the default ECC engine should be TYPE_SOFT.
	 * Set ->engine_type before registering the NAND devices in order to
	 * provide a driver specific default value.
	 */
	chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;

	ret = nand_scan(chip, 1);
	if (ret)
		goto err_wp;

	if (gpiomtd->plat.adjust_parts)
		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);

	ret = mtd_device_register(mtd, gpiomtd->plat.parts,
				  gpiomtd->plat.num_parts);
	if (!ret)
		return 0;

err_wp:
	if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp))
		gpiod_set_value(gpiomtd->nwp, 0);
out_ce:
	if (gpiomtd->nce && !IS_ERR(gpiomtd->nce))
		gpiod_set_value(gpiomtd->nce, 0);

	return ret;
}

static struct platform_driver gpio_nand_driver = {
	.probe		= gpio_nand_probe,
	.remove		= gpio_nand_remove,
	.driver		= {
		.name	= "gpio-nand",
		.of_match_table = of_match_ptr(gpio_nand_id_table),
	},
};

module_platform_driver(gpio_nand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("GPIO NAND Driver");