1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
4*4882a593Smuzhiyun * Copyright © 2004 Micron Technology Inc.
5*4882a593Smuzhiyun * Copyright © 2004 David Brownell
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/platform_device.h>
9*4882a593Smuzhiyun #include <linux/dmaengine.h>
10*4882a593Smuzhiyun #include <linux/dma-mapping.h>
11*4882a593Smuzhiyun #include <linux/delay.h>
12*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/jiffies.h>
16*4882a593Smuzhiyun #include <linux/sched.h>
17*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
18*4882a593Smuzhiyun #include <linux/mtd/rawnand.h>
19*4882a593Smuzhiyun #include <linux/mtd/partitions.h>
20*4882a593Smuzhiyun #include <linux/omap-dma.h>
21*4882a593Smuzhiyun #include <linux/io.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/of.h>
24*4882a593Smuzhiyun #include <linux/of_device.h>
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #include <linux/mtd/nand_bch.h>
27*4882a593Smuzhiyun #include <linux/platform_data/elm.h>
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include <linux/omap-gpmc.h>
30*4882a593Smuzhiyun #include <linux/platform_data/mtd-nand-omap2.h>
31*4882a593Smuzhiyun
#define DRIVER_NAME "omap2-nand"
/* Timeout, in ms, when busy-waiting for the prefetch engine to drain */
#define OMAP_NAND_TIMEOUT_MS 5000

/*
 * Bit flags naming the individual parity terms of the Hamming ECC.
 * Even-group terms occupy bits 0..11, odd-group terms bits 16..27.
 */
#define NAND_Ecc_P1e (1 << 0)
#define NAND_Ecc_P2e (1 << 1)
#define NAND_Ecc_P4e (1 << 2)
#define NAND_Ecc_P8e (1 << 3)
#define NAND_Ecc_P16e (1 << 4)
#define NAND_Ecc_P32e (1 << 5)
#define NAND_Ecc_P64e (1 << 6)
#define NAND_Ecc_P128e (1 << 7)
#define NAND_Ecc_P256e (1 << 8)
#define NAND_Ecc_P512e (1 << 9)
#define NAND_Ecc_P1024e (1 << 10)
#define NAND_Ecc_P2048e (1 << 11)

#define NAND_Ecc_P1o (1 << 16)
#define NAND_Ecc_P2o (1 << 17)
#define NAND_Ecc_P4o (1 << 18)
#define NAND_Ecc_P8o (1 << 19)
#define NAND_Ecc_P16o (1 << 20)
#define NAND_Ecc_P32o (1 << 21)
#define NAND_Ecc_P64o (1 << 22)
#define NAND_Ecc_P128o (1 << 23)
#define NAND_Ecc_P256o (1 << 24)
#define NAND_Ecc_P512o (1 << 25)
#define NAND_Ecc_P1024o (1 << 26)
#define NAND_Ecc_P2048o (1 << 27)

/* Collapse any non-zero value to 1, zero to 0 */
#define TF(value) (value ? 1 : 0)

/*
 * The P*() helpers below extract one parity flag from bitmap 'a' and
 * place it at the bit position expected in the packed ECC result bytes.
 * Each group of eight helpers assembles one output byte.
 */
#define P2048e(a) (TF(a & NAND_Ecc_P2048e) << 0)
#define P2048o(a) (TF(a & NAND_Ecc_P2048o) << 1)
#define P1e(a) (TF(a & NAND_Ecc_P1e) << 2)
#define P1o(a) (TF(a & NAND_Ecc_P1o) << 3)
#define P2e(a) (TF(a & NAND_Ecc_P2e) << 4)
#define P2o(a) (TF(a & NAND_Ecc_P2o) << 5)
#define P4e(a) (TF(a & NAND_Ecc_P4e) << 6)
#define P4o(a) (TF(a & NAND_Ecc_P4o) << 7)

#define P8e(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P32e(a) (TF(a & NAND_Ecc_P32e) << 4)
#define P32o(a) (TF(a & NAND_Ecc_P32o) << 5)
#define P64e(a) (TF(a & NAND_Ecc_P64e) << 6)
#define P64o(a) (TF(a & NAND_Ecc_P64o) << 7)

#define P128e(a) (TF(a & NAND_Ecc_P128e) << 0)
#define P128o(a) (TF(a & NAND_Ecc_P128o) << 1)
#define P256e(a) (TF(a & NAND_Ecc_P256e) << 2)
#define P256o(a) (TF(a & NAND_Ecc_P256o) << 3)
#define P512e(a) (TF(a & NAND_Ecc_P512e) << 4)
#define P512o(a) (TF(a & NAND_Ecc_P512o) << 5)
#define P1024e(a) (TF(a & NAND_Ecc_P1024e) << 6)
#define P1024o(a) (TF(a & NAND_Ecc_P1024o) << 7)

/* Alternate ("_s") byte layout for the same parity terms */
#define P8e_s(a) (TF(a & NAND_Ecc_P8e) << 0)
#define P8o_s(a) (TF(a & NAND_Ecc_P8o) << 1)
#define P16e_s(a) (TF(a & NAND_Ecc_P16e) << 2)
#define P16o_s(a) (TF(a & NAND_Ecc_P16o) << 3)
#define P1e_s(a) (TF(a & NAND_Ecc_P1e) << 4)
#define P1o_s(a) (TF(a & NAND_Ecc_P1o) << 5)
#define P2e_s(a) (TF(a & NAND_Ecc_P2e) << 6)
#define P2o_s(a) (TF(a & NAND_Ecc_P2o) << 7)

#define P4e_s(a) (TF(a & NAND_Ecc_P4e) << 0)
#define P4o_s(a) (TF(a & NAND_Ecc_P4o) << 1)

/* GPMC prefetch/ECC register field positions and masks */
#define PREFETCH_CONFIG1_CS_SHIFT 24
#define ECC_CONFIG_CS_SHIFT 1
#define CS_MASK 0x7
#define ENABLE_PREFETCH (0x1 << 7)
#define DMA_MPU_MODE_SHIFT 2
#define ECCSIZE0_SHIFT 12
#define ECCSIZE1_SHIFT 22
#define ECC1RESULTSIZE 0x1
#define ECCCLEAR 0x100
#define ECC1 0x1
#define PREFETCH_FIFOTHRESHOLD_MAX 0x40
#define PREFETCH_FIFOTHRESHOLD(val) ((val) << 8)
#define PREFETCH_STATUS_COUNT(val) (val & 0x00003fff)
#define PREFETCH_STATUS_FIFO_CNT(val) ((val >> 24) & 0x7F)
#define STATUS_BUFF_EMPTY 0x00000001

#define SECTOR_BYTES 512
/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
#define BCH4_BIT_PAD 4

/* GPMC ecc engine settings for read */
#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */

/* GPMC ecc engine settings for write */
#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */

#define BADBLOCK_MARKER_LENGTH 2

/*
 * Reference BCH ECC byte sequences, presumably the values the engine
 * computes for an erased (all 0xFF) page, used to recognise erased
 * pages during correction -- TODO confirm against the ELM users.
 */
static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
				0x07, 0x0e};
static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
			       0xac, 0x6b, 0xff, 0x99, 0x7b};
static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
143*4882a593Smuzhiyun
/*
 * struct omap_nand_info - per-device state for one GPMC NAND chip select
 */
struct omap_nand_info {
	struct nand_chip nand;		/* embedded chip; mtd_to_omap() maps back via container_of */
	struct platform_device *pdev;

	int gpmc_cs;			/* GPMC chip-select number this device sits on */
	bool dev_ready;
	enum nand_io xfer_type;		/* selected transfer method (polled/prefetch/dma/irq) */
	int devsize;			/* bus width -- presumably 8/16 bit; confirm at setup */
	enum omap_ecc ecc_opt;		/* ECC scheme in use */
	struct device_node *elm_of_node;

	unsigned long phys_base;
	struct completion comp;		/* signalled by DMA callback / count irq */
	struct dma_chan *dma;
	int gpmc_irq_fifo;		/* FIFO-threshold interrupt */
	int gpmc_irq_count;		/* transfer-count (completion) interrupt */
	/* direction of the transfer currently serviced by omap_nand_irq() */
	enum {
		OMAP_NAND_IO_READ = 0,	/* read */
		OMAP_NAND_IO_WRITE,	/* write */
	} iomode;
	u_char *buf;			/* cursor into the caller's buffer (irq mode) */
	int buf_len;			/* bytes remaining (irq mode) */
	/* Interface to GPMC */
	struct gpmc_nand_regs reg;
	struct gpmc_nand_ops *ops;
	bool flash_bbt;			/* use on-flash bad block table */
	/* fields specific for BCHx_HW ECC scheme */
	struct device *elm_dev;
	/* NAND ready gpio */
	struct gpio_desc *ready_gpiod;
};
175*4882a593Smuzhiyun
mtd_to_omap(struct mtd_info * mtd)176*4882a593Smuzhiyun static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun /**
182*4882a593Smuzhiyun * omap_prefetch_enable - configures and starts prefetch transfer
183*4882a593Smuzhiyun * @cs: cs (chip select) number
184*4882a593Smuzhiyun * @fifo_th: fifo threshold to be used for read/ write
185*4882a593Smuzhiyun * @dma_mode: dma mode enable (1) or disable (0)
186*4882a593Smuzhiyun * @u32_count: number of bytes to be transferred
187*4882a593Smuzhiyun * @is_write: prefetch read(0) or write post(1) mode
188*4882a593Smuzhiyun */
omap_prefetch_enable(int cs,int fifo_th,int dma_mode,unsigned int u32_count,int is_write,struct omap_nand_info * info)189*4882a593Smuzhiyun static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
190*4882a593Smuzhiyun unsigned int u32_count, int is_write, struct omap_nand_info *info)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun u32 val;
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
195*4882a593Smuzhiyun return -1;
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun if (readl(info->reg.gpmc_prefetch_control))
198*4882a593Smuzhiyun return -EBUSY;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun /* Set the amount of bytes to be prefetched */
201*4882a593Smuzhiyun writel(u32_count, info->reg.gpmc_prefetch_config2);
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun /* Set dma/mpu mode, the prefetch read / post write and
204*4882a593Smuzhiyun * enable the engine. Set which cs is has requested for.
205*4882a593Smuzhiyun */
206*4882a593Smuzhiyun val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
207*4882a593Smuzhiyun PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
208*4882a593Smuzhiyun (dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
209*4882a593Smuzhiyun writel(val, info->reg.gpmc_prefetch_config1);
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /* Start the prefetch engine */
212*4882a593Smuzhiyun writel(0x1, info->reg.gpmc_prefetch_control);
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun return 0;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /**
218*4882a593Smuzhiyun * omap_prefetch_reset - disables and stops the prefetch engine
219*4882a593Smuzhiyun */
omap_prefetch_reset(int cs,struct omap_nand_info * info)220*4882a593Smuzhiyun static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun u32 config1;
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /* check if the same module/cs is trying to reset */
225*4882a593Smuzhiyun config1 = readl(info->reg.gpmc_prefetch_config1);
226*4882a593Smuzhiyun if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
227*4882a593Smuzhiyun return -EINVAL;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun /* Stop the PFPW engine */
230*4882a593Smuzhiyun writel(0x0, info->reg.gpmc_prefetch_control);
231*4882a593Smuzhiyun
232*4882a593Smuzhiyun /* Reset/disable the PFPW engine */
233*4882a593Smuzhiyun writel(0x0, info->reg.gpmc_prefetch_config1);
234*4882a593Smuzhiyun
235*4882a593Smuzhiyun return 0;
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /**
239*4882a593Smuzhiyun * omap_hwcontrol - hardware specific access to control-lines
240*4882a593Smuzhiyun * @chip: NAND chip object
241*4882a593Smuzhiyun * @cmd: command to device
242*4882a593Smuzhiyun * @ctrl:
243*4882a593Smuzhiyun * NAND_NCE: bit 0 -> don't care
244*4882a593Smuzhiyun * NAND_CLE: bit 1 -> Command Latch
245*4882a593Smuzhiyun * NAND_ALE: bit 2 -> Address Latch
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * NOTE: boards may use different bits for these!!
248*4882a593Smuzhiyun */
omap_hwcontrol(struct nand_chip * chip,int cmd,unsigned int ctrl)249*4882a593Smuzhiyun static void omap_hwcontrol(struct nand_chip *chip, int cmd, unsigned int ctrl)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun if (cmd != NAND_CMD_NONE) {
254*4882a593Smuzhiyun if (ctrl & NAND_CLE)
255*4882a593Smuzhiyun writeb(cmd, info->reg.gpmc_nand_command);
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun else if (ctrl & NAND_ALE)
258*4882a593Smuzhiyun writeb(cmd, info->reg.gpmc_nand_address);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun else /* NAND_NCE */
261*4882a593Smuzhiyun writeb(cmd, info->reg.gpmc_nand_data);
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun /**
266*4882a593Smuzhiyun * omap_read_buf8 - read data from NAND controller into buffer
267*4882a593Smuzhiyun * @mtd: MTD device structure
268*4882a593Smuzhiyun * @buf: buffer to store date
269*4882a593Smuzhiyun * @len: number of bytes to read
270*4882a593Smuzhiyun */
omap_read_buf8(struct mtd_info * mtd,u_char * buf,int len)271*4882a593Smuzhiyun static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun struct nand_chip *nand = mtd_to_nand(mtd);
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun ioread8_rep(nand->legacy.IO_ADDR_R, buf, len);
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun /**
279*4882a593Smuzhiyun * omap_write_buf8 - write buffer to NAND controller
280*4882a593Smuzhiyun * @mtd: MTD device structure
281*4882a593Smuzhiyun * @buf: data buffer
282*4882a593Smuzhiyun * @len: number of bytes to write
283*4882a593Smuzhiyun */
omap_write_buf8(struct mtd_info * mtd,const u_char * buf,int len)284*4882a593Smuzhiyun static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
287*4882a593Smuzhiyun u_char *p = (u_char *)buf;
288*4882a593Smuzhiyun bool status;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun while (len--) {
291*4882a593Smuzhiyun iowrite8(*p++, info->nand.legacy.IO_ADDR_W);
292*4882a593Smuzhiyun /* wait until buffer is available for write */
293*4882a593Smuzhiyun do {
294*4882a593Smuzhiyun status = info->ops->nand_writebuffer_empty();
295*4882a593Smuzhiyun } while (!status);
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /**
300*4882a593Smuzhiyun * omap_read_buf16 - read data from NAND controller into buffer
301*4882a593Smuzhiyun * @mtd: MTD device structure
302*4882a593Smuzhiyun * @buf: buffer to store date
303*4882a593Smuzhiyun * @len: number of bytes to read
304*4882a593Smuzhiyun */
omap_read_buf16(struct mtd_info * mtd,u_char * buf,int len)305*4882a593Smuzhiyun static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun struct nand_chip *nand = mtd_to_nand(mtd);
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun ioread16_rep(nand->legacy.IO_ADDR_R, buf, len / 2);
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun /**
313*4882a593Smuzhiyun * omap_write_buf16 - write buffer to NAND controller
314*4882a593Smuzhiyun * @mtd: MTD device structure
315*4882a593Smuzhiyun * @buf: data buffer
316*4882a593Smuzhiyun * @len: number of bytes to write
317*4882a593Smuzhiyun */
omap_write_buf16(struct mtd_info * mtd,const u_char * buf,int len)318*4882a593Smuzhiyun static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
319*4882a593Smuzhiyun {
320*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
321*4882a593Smuzhiyun u16 *p = (u16 *) buf;
322*4882a593Smuzhiyun bool status;
323*4882a593Smuzhiyun /* FIXME try bursts of writesw() or DMA ... */
324*4882a593Smuzhiyun len >>= 1;
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun while (len--) {
327*4882a593Smuzhiyun iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
328*4882a593Smuzhiyun /* wait until buffer is available for write */
329*4882a593Smuzhiyun do {
330*4882a593Smuzhiyun status = info->ops->nand_writebuffer_empty();
331*4882a593Smuzhiyun } while (!status);
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun }
334*4882a593Smuzhiyun
/**
 * omap_read_buf_pref - read data from NAND controller into buffer
 * @chip: NAND chip object
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Drains the GPMC prefetch FIFO in MPU (non-DMA) mode; falls back to
 * plain CPU PIO when the prefetch engine is already busy.
 */
static void omap_read_buf_pref(struct nand_chip *chip, u_char *buf, int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	uint32_t r_count = 0;
	int ret = 0;
	u32 *p = (u32 *)buf;

	/* take care of subpage reads: move the unaligned head (len % 4
	 * bytes) by PIO so the loop below can work in whole 32-bit words */
	if (len % 4) {
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, buf, len % 4);
		else
			omap_read_buf8(mtd, buf, len % 4);
		p = (u32 *) (buf + len % 4);
		len -= len % 4;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_read_buf16(mtd, (u_char *)p, len);
		else
			omap_read_buf8(mtd, (u_char *)p, len);
	} else {
		do {
			/* bytes currently in the FIFO, rounded to words */
			r_count = readl(info->reg.gpmc_prefetch_status);
			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
			r_count = r_count >> 2;
			ioread32_rep(info->nand.legacy.IO_ADDR_R, p, r_count);
			p += r_count;
			len -= r_count << 2;
		} while (len);
		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
381*4882a593Smuzhiyun
/**
 * omap_write_buf_pref - write buffer to NAND controller
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Feeds data through the GPMC write-posting (prefetch) engine in MPU
 * mode; falls back to plain CPU PIO when the engine is already busy.
 */
static void omap_write_buf_pref(struct nand_chip *chip, const u_char *buf,
				int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	uint32_t w_count = 0;
	int i = 0, ret = 0;
	u16 *p = (u16 *)buf;
	unsigned long tim, limit;
	u32 val;

	/* take care of subpage writes: push the odd leading byte so the
	 * main loop can work in whole 16-bit words */
	if (len % 2 != 0) {
		writeb(*buf, info->nand.legacy.IO_ADDR_W);
		p = (u16 *)(buf + 1);
		len--;
	}

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
	if (ret) {
		/* PFPW engine is busy, use cpu copy method */
		if (info->nand.options & NAND_BUSWIDTH_16)
			omap_write_buf16(mtd, (u_char *)p, len);
		else
			omap_write_buf8(mtd, (u_char *)p, len);
	} else {
		while (len) {
			/* FIFO count from status reg; >>1 for 16-bit words */
			w_count = readl(info->reg.gpmc_prefetch_status);
			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
			w_count = w_count >> 1;
			for (i = 0; (i < w_count) && len; i++, len -= 2)
				iowrite16(*p++, info->nand.legacy.IO_ADDR_W);
		}
		/* wait for data to flushed-out before reset the prefetch;
		 * poll the remaining-count field until zero or timeout */
		tim = 0;
		limit = (loops_per_jiffy *
					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
		do {
			cpu_relax();
			val = readl(info->reg.gpmc_prefetch_status);
			val = PREFETCH_STATUS_COUNT(val);
		} while (val && (tim++ < limit));

		/* disable and stop the PFPW engine */
		omap_prefetch_reset(info->gpmc_cs, info);
	}
}
437*4882a593Smuzhiyun
/*
 * omap_nand_dma_callback: callback on the completion of dma transfer
 * @data: pointer to completion data structure
 */
static void omap_nand_dma_callback(void *data)
{
	struct completion *comp = data;

	complete(comp);
}
446*4882a593Smuzhiyun
/*
 * omap_nand_dma_transfer: configure and start dma transfer
 * @mtd: MTD device structure
 * @addr: virtual address in RAM of source/destination
 * @len: number of data bytes to be transferred
 * @is_write: flag for read/write operation
 *
 * Pairs the GPMC prefetch engine (DMA mode) with a dmaengine slave
 * transfer.  Falls back to CPU PIO when @addr cannot be DMA-mapped or
 * descriptor/engine setup fails.  Always returns 0.
 */
static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
					unsigned int len, int is_write)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	struct dma_async_tx_descriptor *tx;
	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
							DMA_FROM_DEVICE;
	struct scatterlist sg;
	unsigned long tim, limit;
	unsigned n;
	int ret;
	u32 val;

	/* buffers that are not lowmem-linear can't be mapped; copy by CPU */
	if (!virt_addr_valid(addr))
		goto out_copy;

	sg_init_one(&sg, addr, len);
	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
	if (n == 0) {
		dev_err(&info->pdev->dev,
			"Couldn't DMA map a %d byte buffer\n", len);
		goto out_copy;
	}

	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		goto out_copy_unmap;

	tx->callback = omap_nand_dma_callback;
	tx->callback_param = &info->comp;
	dmaengine_submit(tx);

	/* NOTE(review): comp is (re)initialized after submit; appears safe
	 * only because nothing runs before issue_pending below -- confirm */
	init_completion(&info->comp);

	/* setup and start DMA using dma_addr */
	dma_async_issue_pending(info->dma);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy_unmap;

	/* DMA callback signals comp; then drain the residual byte count */
	wait_for_completion(&info->comp);
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));

	do {
		cpu_relax();
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);

	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
	return 0;

out_copy_unmap:
	/* NOTE(review): a descriptor may already be submitted/issued here
	 * and is not terminated before the CPU fallback -- verify */
	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
			: omap_write_buf16(mtd, (u_char *) addr, len);
	else
		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
			: omap_write_buf8(mtd, (u_char *) addr, len);
	return 0;
}
527*4882a593Smuzhiyun
528*4882a593Smuzhiyun /**
529*4882a593Smuzhiyun * omap_read_buf_dma_pref - read data from NAND controller into buffer
530*4882a593Smuzhiyun * @chip: NAND chip object
531*4882a593Smuzhiyun * @buf: buffer to store date
532*4882a593Smuzhiyun * @len: number of bytes to read
533*4882a593Smuzhiyun */
omap_read_buf_dma_pref(struct nand_chip * chip,u_char * buf,int len)534*4882a593Smuzhiyun static void omap_read_buf_dma_pref(struct nand_chip *chip, u_char *buf,
535*4882a593Smuzhiyun int len)
536*4882a593Smuzhiyun {
537*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun if (len <= mtd->oobsize)
540*4882a593Smuzhiyun omap_read_buf_pref(chip, buf, len);
541*4882a593Smuzhiyun else
542*4882a593Smuzhiyun /* start transfer in DMA mode */
543*4882a593Smuzhiyun omap_nand_dma_transfer(mtd, buf, len, 0x0);
544*4882a593Smuzhiyun }
545*4882a593Smuzhiyun
546*4882a593Smuzhiyun /**
547*4882a593Smuzhiyun * omap_write_buf_dma_pref - write buffer to NAND controller
548*4882a593Smuzhiyun * @chip: NAND chip object
549*4882a593Smuzhiyun * @buf: data buffer
550*4882a593Smuzhiyun * @len: number of bytes to write
551*4882a593Smuzhiyun */
omap_write_buf_dma_pref(struct nand_chip * chip,const u_char * buf,int len)552*4882a593Smuzhiyun static void omap_write_buf_dma_pref(struct nand_chip *chip, const u_char *buf,
553*4882a593Smuzhiyun int len)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun if (len <= mtd->oobsize)
558*4882a593Smuzhiyun omap_write_buf_pref(chip, buf, len);
559*4882a593Smuzhiyun else
560*4882a593Smuzhiyun /* start transfer in DMA mode */
561*4882a593Smuzhiyun omap_nand_dma_transfer(mtd, (u_char *)buf, len, 0x1);
562*4882a593Smuzhiyun }
563*4882a593Smuzhiyun
/*
 * omap_nand_irq - GPMC irq handler
 * @this_irq: gpmc irq number
 * @dev: omap_nand_info structure pointer is passed here
 *
 * Two interrupt lines share this handler: the FIFO-threshold irq
 * (gpmc_irq_fifo) keeps the transfer moving, the count irq
 * (gpmc_irq_count) signals completion of the whole transfer.
 */
static irqreturn_t omap_nand_irq(int this_irq, void *dev)
{
	struct omap_nand_info *info = (struct omap_nand_info *) dev;
	u32 bytes;

	bytes = readl(info->reg.gpmc_prefetch_status);
	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
	bytes = bytes & 0xFFFC; /* io in multiple of 4 bytes */
	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
		if (this_irq == info->gpmc_irq_count)
			goto done;

		/* clamp to the bytes still owed; zero once buffer is spent */
		if (info->buf_len && (info->buf_len < bytes))
			bytes = info->buf_len;
		else if (!info->buf_len)
			bytes = 0;
		iowrite32_rep(info->nand.legacy.IO_ADDR_W, (u32 *)info->buf,
			bytes >> 2);
		info->buf = info->buf + bytes;
		info->buf_len -= bytes;

	} else {
		/* read path: drain the FIFO first, then check completion */
		ioread32_rep(info->nand.legacy.IO_ADDR_R, (u32 *)info->buf,
			bytes >> 2);
		info->buf = info->buf + bytes;

		if (this_irq == info->gpmc_irq_count)
			goto done;
	}

	return IRQ_HANDLED;

done:
	complete(&info->comp);

	/* both irqs stay off until the next transfer re-enables them */
	disable_irq_nosync(info->gpmc_irq_fifo);
	disable_irq_nosync(info->gpmc_irq_count);

	return IRQ_HANDLED;
}
609*4882a593Smuzhiyun
/*
 * omap_read_buf_irq_pref - read data from NAND controller into buffer
 * @chip: NAND chip object
 * @buf: buffer to store date
 * @len: number of bytes to read
 *
 * Interrupt-driven variant: the prefetch engine fills its FIFO and
 * omap_nand_irq() drains it until the count irq completes info->comp.
 */
static void omap_read_buf_irq_pref(struct nand_chip *chip, u_char *buf,
				   int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int ret = 0;

	/* small (OOB-sized) transfers aren't worth the irq overhead */
	if (len <= mtd->oobsize) {
		omap_read_buf_pref(chip, buf, len);
		return;
	}

	/* state consumed by omap_nand_irq() */
	info->iomode = OMAP_NAND_IO_READ;
	info->buf = buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer */
	ret = omap_prefetch_enable(info->gpmc_cs,
			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	/* irq handler disables both again once the transfer completes */
	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for read to complete */
	wait_for_completion(&info->comp);

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_read_buf16(mtd, buf, len);
	else
		omap_read_buf8(mtd, buf, len);
}
657*4882a593Smuzhiyun
/*
 * omap_write_buf_irq_pref - write buffer to NAND controller
 * @chip: NAND chip object
 * @buf: data buffer
 * @len: number of bytes to write
 *
 * Writes @len bytes through the GPMC prefetch/write-post engine in IRQ
 * mode. Short (OOB-sized) transfers use the polled path; if the engine
 * is busy the whole transfer falls back to a CPU copy.
 */
static void omap_write_buf_irq_pref(struct nand_chip *chip, const u_char *buf,
				    int len)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int ret = 0;
	unsigned long tim, limit;
	u32 val;

	/* IRQ overhead is not worth it for OOB-sized transfers */
	if (len <= mtd->oobsize) {
		omap_write_buf_pref(chip, buf, len);
		return;
	}

	info->iomode = OMAP_NAND_IO_WRITE;
	info->buf = (u_char *) buf;
	init_completion(&info->comp);

	/* configure and start prefetch transfer : size=24 */
	ret = omap_prefetch_enable(info->gpmc_cs,
		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
	if (ret)
		/* PFPW engine is busy, use cpu copy method */
		goto out_copy;

	info->buf_len = len;

	/* the ISR feeds the FIFO and signals info->comp when done */
	enable_irq(info->gpmc_irq_count);
	enable_irq(info->gpmc_irq_fifo);

	/* waiting for write to complete */
	wait_for_completion(&info->comp);

	/* wait for data to flushed-out before reset the prefetch */
	tim = 0;
	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
	do {
		/* poll until the engine's byte counter drains (or timeout) */
		val = readl(info->reg.gpmc_prefetch_status);
		val = PREFETCH_STATUS_COUNT(val);
		cpu_relax();
	} while (val && (tim++ < limit));

	/* disable and stop the PFPW engine */
	omap_prefetch_reset(info->gpmc_cs, info);
	return;

out_copy:
	if (info->nand.options & NAND_BUSWIDTH_16)
		omap_write_buf16(mtd, buf, len);
	else
		omap_write_buf8(mtd, buf, len);
}
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun /**
718*4882a593Smuzhiyun * gen_true_ecc - This function will generate true ECC value
719*4882a593Smuzhiyun * @ecc_buf: buffer to store ecc code
720*4882a593Smuzhiyun *
721*4882a593Smuzhiyun * This generated true ECC value can be used when correcting
722*4882a593Smuzhiyun * data read from NAND flash memory core
723*4882a593Smuzhiyun */
gen_true_ecc(u8 * ecc_buf)724*4882a593Smuzhiyun static void gen_true_ecc(u8 *ecc_buf)
725*4882a593Smuzhiyun {
726*4882a593Smuzhiyun u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
727*4882a593Smuzhiyun ((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
730*4882a593Smuzhiyun P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
731*4882a593Smuzhiyun ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
732*4882a593Smuzhiyun P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
733*4882a593Smuzhiyun ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
734*4882a593Smuzhiyun P1e(tmp) | P2048o(tmp) | P2048e(tmp));
735*4882a593Smuzhiyun }
736*4882a593Smuzhiyun
737*4882a593Smuzhiyun /**
738*4882a593Smuzhiyun * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
739*4882a593Smuzhiyun * @ecc_data1: ecc code from nand spare area
740*4882a593Smuzhiyun * @ecc_data2: ecc code from hardware register obtained from hardware ecc
741*4882a593Smuzhiyun * @page_data: page data
742*4882a593Smuzhiyun *
743*4882a593Smuzhiyun * This function compares two ECC's and indicates if there is an error.
744*4882a593Smuzhiyun * If the error can be corrected it will be corrected to the buffer.
745*4882a593Smuzhiyun * If there is no error, %0 is returned. If there is an error but it
746*4882a593Smuzhiyun * was corrected, %1 is returned. Otherwise, %-1 is returned.
747*4882a593Smuzhiyun */
omap_compare_ecc(u8 * ecc_data1,u8 * ecc_data2,u8 * page_data)748*4882a593Smuzhiyun static int omap_compare_ecc(u8 *ecc_data1, /* read from NAND memory */
749*4882a593Smuzhiyun u8 *ecc_data2, /* read from register */
750*4882a593Smuzhiyun u8 *page_data)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun uint i;
753*4882a593Smuzhiyun u8 tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
754*4882a593Smuzhiyun u8 comp0_bit[8], comp1_bit[8], comp2_bit[8];
755*4882a593Smuzhiyun u8 ecc_bit[24];
756*4882a593Smuzhiyun u8 ecc_sum = 0;
757*4882a593Smuzhiyun u8 find_bit = 0;
758*4882a593Smuzhiyun uint find_byte = 0;
759*4882a593Smuzhiyun int isEccFF;
760*4882a593Smuzhiyun
761*4882a593Smuzhiyun isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
762*4882a593Smuzhiyun
763*4882a593Smuzhiyun gen_true_ecc(ecc_data1);
764*4882a593Smuzhiyun gen_true_ecc(ecc_data2);
765*4882a593Smuzhiyun
766*4882a593Smuzhiyun for (i = 0; i <= 2; i++) {
767*4882a593Smuzhiyun *(ecc_data1 + i) = ~(*(ecc_data1 + i));
768*4882a593Smuzhiyun *(ecc_data2 + i) = ~(*(ecc_data2 + i));
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun
771*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
772*4882a593Smuzhiyun tmp0_bit[i] = *ecc_data1 % 2;
773*4882a593Smuzhiyun *ecc_data1 = *ecc_data1 / 2;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun
776*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
777*4882a593Smuzhiyun tmp1_bit[i] = *(ecc_data1 + 1) % 2;
778*4882a593Smuzhiyun *(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
779*4882a593Smuzhiyun }
780*4882a593Smuzhiyun
781*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
782*4882a593Smuzhiyun tmp2_bit[i] = *(ecc_data1 + 2) % 2;
783*4882a593Smuzhiyun *(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
784*4882a593Smuzhiyun }
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
787*4882a593Smuzhiyun comp0_bit[i] = *ecc_data2 % 2;
788*4882a593Smuzhiyun *ecc_data2 = *ecc_data2 / 2;
789*4882a593Smuzhiyun }
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
792*4882a593Smuzhiyun comp1_bit[i] = *(ecc_data2 + 1) % 2;
793*4882a593Smuzhiyun *(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
794*4882a593Smuzhiyun }
795*4882a593Smuzhiyun
796*4882a593Smuzhiyun for (i = 0; i < 8; i++) {
797*4882a593Smuzhiyun comp2_bit[i] = *(ecc_data2 + 2) % 2;
798*4882a593Smuzhiyun *(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
799*4882a593Smuzhiyun }
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun for (i = 0; i < 6; i++)
802*4882a593Smuzhiyun ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
803*4882a593Smuzhiyun
804*4882a593Smuzhiyun for (i = 0; i < 8; i++)
805*4882a593Smuzhiyun ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun for (i = 0; i < 8; i++)
808*4882a593Smuzhiyun ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
809*4882a593Smuzhiyun
810*4882a593Smuzhiyun ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
811*4882a593Smuzhiyun ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun for (i = 0; i < 24; i++)
814*4882a593Smuzhiyun ecc_sum += ecc_bit[i];
815*4882a593Smuzhiyun
816*4882a593Smuzhiyun switch (ecc_sum) {
817*4882a593Smuzhiyun case 0:
818*4882a593Smuzhiyun /* Not reached because this function is not called if
819*4882a593Smuzhiyun * ECC values are equal
820*4882a593Smuzhiyun */
821*4882a593Smuzhiyun return 0;
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun case 1:
824*4882a593Smuzhiyun /* Uncorrectable error */
825*4882a593Smuzhiyun pr_debug("ECC UNCORRECTED_ERROR 1\n");
826*4882a593Smuzhiyun return -EBADMSG;
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun case 11:
829*4882a593Smuzhiyun /* UN-Correctable error */
830*4882a593Smuzhiyun pr_debug("ECC UNCORRECTED_ERROR B\n");
831*4882a593Smuzhiyun return -EBADMSG;
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun case 12:
834*4882a593Smuzhiyun /* Correctable error */
835*4882a593Smuzhiyun find_byte = (ecc_bit[23] << 8) +
836*4882a593Smuzhiyun (ecc_bit[21] << 7) +
837*4882a593Smuzhiyun (ecc_bit[19] << 6) +
838*4882a593Smuzhiyun (ecc_bit[17] << 5) +
839*4882a593Smuzhiyun (ecc_bit[15] << 4) +
840*4882a593Smuzhiyun (ecc_bit[13] << 3) +
841*4882a593Smuzhiyun (ecc_bit[11] << 2) +
842*4882a593Smuzhiyun (ecc_bit[9] << 1) +
843*4882a593Smuzhiyun ecc_bit[7];
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun pr_debug("Correcting single bit ECC error at offset: "
848*4882a593Smuzhiyun "%d, bit: %d\n", find_byte, find_bit);
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun page_data[find_byte] ^= (1 << find_bit);
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun return 1;
853*4882a593Smuzhiyun default:
854*4882a593Smuzhiyun if (isEccFF) {
855*4882a593Smuzhiyun if (ecc_data2[0] == 0 &&
856*4882a593Smuzhiyun ecc_data2[1] == 0 &&
857*4882a593Smuzhiyun ecc_data2[2] == 0)
858*4882a593Smuzhiyun return 0;
859*4882a593Smuzhiyun }
860*4882a593Smuzhiyun pr_debug("UNCORRECTED_ERROR default\n");
861*4882a593Smuzhiyun return -EBADMSG;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun }
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun /**
866*4882a593Smuzhiyun * omap_correct_data - Compares the ECC read with HW generated ECC
867*4882a593Smuzhiyun * @chip: NAND chip object
868*4882a593Smuzhiyun * @dat: page data
869*4882a593Smuzhiyun * @read_ecc: ecc read from nand flash
870*4882a593Smuzhiyun * @calc_ecc: ecc read from HW ECC registers
871*4882a593Smuzhiyun *
872*4882a593Smuzhiyun * Compares the ecc read from nand spare area with ECC registers values
873*4882a593Smuzhiyun * and if ECC's mismatched, it will call 'omap_compare_ecc' for error
874*4882a593Smuzhiyun * detection and correction. If there are no errors, %0 is returned. If
875*4882a593Smuzhiyun * there were errors and all of the errors were corrected, the number of
876*4882a593Smuzhiyun * corrected errors is returned. If uncorrectable errors exist, %-1 is
877*4882a593Smuzhiyun * returned.
878*4882a593Smuzhiyun */
omap_correct_data(struct nand_chip * chip,u_char * dat,u_char * read_ecc,u_char * calc_ecc)879*4882a593Smuzhiyun static int omap_correct_data(struct nand_chip *chip, u_char *dat,
880*4882a593Smuzhiyun u_char *read_ecc, u_char *calc_ecc)
881*4882a593Smuzhiyun {
882*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
883*4882a593Smuzhiyun int blockCnt = 0, i = 0, ret = 0;
884*4882a593Smuzhiyun int stat = 0;
885*4882a593Smuzhiyun
886*4882a593Smuzhiyun /* Ex NAND_ECC_HW12_2048 */
887*4882a593Smuzhiyun if (info->nand.ecc.engine_type == NAND_ECC_ENGINE_TYPE_ON_HOST &&
888*4882a593Smuzhiyun info->nand.ecc.size == 2048)
889*4882a593Smuzhiyun blockCnt = 4;
890*4882a593Smuzhiyun else
891*4882a593Smuzhiyun blockCnt = 1;
892*4882a593Smuzhiyun
893*4882a593Smuzhiyun for (i = 0; i < blockCnt; i++) {
894*4882a593Smuzhiyun if (memcmp(read_ecc, calc_ecc, 3) != 0) {
895*4882a593Smuzhiyun ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
896*4882a593Smuzhiyun if (ret < 0)
897*4882a593Smuzhiyun return ret;
898*4882a593Smuzhiyun /* keep track of the number of corrected errors */
899*4882a593Smuzhiyun stat += ret;
900*4882a593Smuzhiyun }
901*4882a593Smuzhiyun read_ecc += 3;
902*4882a593Smuzhiyun calc_ecc += 3;
903*4882a593Smuzhiyun dat += 512;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun return stat;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun /**
909*4882a593Smuzhiyun * omap_calcuate_ecc - Generate non-inverted ECC bytes.
910*4882a593Smuzhiyun * @chip: NAND chip object
911*4882a593Smuzhiyun * @dat: The pointer to data on which ecc is computed
912*4882a593Smuzhiyun * @ecc_code: The ecc_code buffer
913*4882a593Smuzhiyun *
914*4882a593Smuzhiyun * Using noninverted ECC can be considered ugly since writing a blank
915*4882a593Smuzhiyun * page ie. padding will clear the ECC bytes. This is no problem as long
916*4882a593Smuzhiyun * nobody is trying to write data on the seemingly unused page. Reading
917*4882a593Smuzhiyun * an erased page will produce an ECC mismatch between generated and read
918*4882a593Smuzhiyun * ECC bytes that has to be dealt with separately.
919*4882a593Smuzhiyun */
omap_calculate_ecc(struct nand_chip * chip,const u_char * dat,u_char * ecc_code)920*4882a593Smuzhiyun static int omap_calculate_ecc(struct nand_chip *chip, const u_char *dat,
921*4882a593Smuzhiyun u_char *ecc_code)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
924*4882a593Smuzhiyun u32 val;
925*4882a593Smuzhiyun
926*4882a593Smuzhiyun val = readl(info->reg.gpmc_ecc_config);
927*4882a593Smuzhiyun if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
928*4882a593Smuzhiyun return -EINVAL;
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun /* read ecc result */
931*4882a593Smuzhiyun val = readl(info->reg.gpmc_ecc1_result);
932*4882a593Smuzhiyun *ecc_code++ = val; /* P128e, ..., P1e */
933*4882a593Smuzhiyun *ecc_code++ = val >> 16; /* P128o, ..., P1o */
934*4882a593Smuzhiyun /* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
935*4882a593Smuzhiyun *ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
936*4882a593Smuzhiyun
937*4882a593Smuzhiyun return 0;
938*4882a593Smuzhiyun }
939*4882a593Smuzhiyun
/**
 * omap_enable_hwecc - This function enables the hardware ecc functionality
 * @chip: NAND chip object
 * @mode: Read/Write mode
 *
 * Programs the GPMC 1-bit Hamming ECC engine for this chip select:
 * clears the result registers, sets the ECC size, then enables the
 * engine with the bus width and chip select of this device.
 */
static void omap_enable_hwecc(struct nand_chip *chip, int mode)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
	u32 val;

	/* clear ecc and enable bits */
	val = ECCCLEAR | ECC1;
	writel(val, info->reg.gpmc_ecc_control);

	/* program ecc and result sizes */
	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
	       ECC1RESULTSIZE);
	writel(val, info->reg.gpmc_ecc_size_config);

	/* clear the result registers again for the requested mode */
	switch (mode) {
	case NAND_ECC_READ:
	case NAND_ECC_WRITE:
		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
		break;
	case NAND_ECC_READSYN:
		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
		break;
	default:
		dev_info(&info->pdev->dev,
			 "error: unrecognized Mode[%d]!\n", mode);
		break;
	}

	/* (ECC 16 or 8 bit col) | ( CS ) | ECC Enable */
	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
	writel(val, info->reg.gpmc_ecc_config);
}
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun /**
980*4882a593Smuzhiyun * omap_wait - wait until the command is done
981*4882a593Smuzhiyun * @this: NAND Chip structure
982*4882a593Smuzhiyun *
983*4882a593Smuzhiyun * Wait function is called during Program and erase operations and
984*4882a593Smuzhiyun * the way it is called from MTD layer, we should wait till the NAND
985*4882a593Smuzhiyun * chip is ready after the programming/erase operation has completed.
986*4882a593Smuzhiyun *
987*4882a593Smuzhiyun * Erase can take up to 400ms and program up to 20ms according to
988*4882a593Smuzhiyun * general NAND and SmartMedia specs
989*4882a593Smuzhiyun */
omap_wait(struct nand_chip * this)990*4882a593Smuzhiyun static int omap_wait(struct nand_chip *this)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(this));
993*4882a593Smuzhiyun unsigned long timeo = jiffies;
994*4882a593Smuzhiyun int status;
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun timeo += msecs_to_jiffies(400);
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
999*4882a593Smuzhiyun while (time_before(jiffies, timeo)) {
1000*4882a593Smuzhiyun status = readb(info->reg.gpmc_nand_data);
1001*4882a593Smuzhiyun if (status & NAND_STATUS_READY)
1002*4882a593Smuzhiyun break;
1003*4882a593Smuzhiyun cond_resched();
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun status = readb(info->reg.gpmc_nand_data);
1007*4882a593Smuzhiyun return status;
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun /**
1011*4882a593Smuzhiyun * omap_dev_ready - checks the NAND Ready GPIO line
1012*4882a593Smuzhiyun * @mtd: MTD device structure
1013*4882a593Smuzhiyun *
1014*4882a593Smuzhiyun * Returns true if ready and false if busy.
1015*4882a593Smuzhiyun */
omap_dev_ready(struct nand_chip * chip)1016*4882a593Smuzhiyun static int omap_dev_ready(struct nand_chip *chip)
1017*4882a593Smuzhiyun {
1018*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun return gpiod_get_value(info->ready_gpiod);
1021*4882a593Smuzhiyun }
1022*4882a593Smuzhiyun
/**
 * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
 * @chip: NAND chip object
 * @mode: Read/Write mode
 *
 * When using BCH with SW correction (i.e. no ELM), sector size is set
 * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
 * for both reading and writing with:
 * eccsize0 = 0 (no additional protected byte in spare area)
 * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
 */
static void __maybe_unused omap_enable_hwecc_bch(struct nand_chip *chip,
						 int mode)
{
	unsigned int bch_type;
	unsigned int dev_width, nsectors;
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	enum omap_ecc ecc_opt = info->ecc_opt;
	u32 val, wr_mode;
	unsigned int ecc_size1, ecc_size0;

	/* GPMC configurations for calculating ECC */
	switch (ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		/* BCH4 with SW correction: single sector, wrap mode 6 */
		bch_type = 0;
		nsectors = 1;
		wr_mode = BCH_WRAPMODE_6;
		ecc_size0 = BCH_ECC_SIZE0;
		ecc_size1 = BCH_ECC_SIZE1;
		break;
	case OMAP_ECC_BCH4_CODE_HW:
		/* BCH4 with HW (ELM) correction: read and write use
		 * different wrap modes and ECC sizes
		 */
		bch_type = 0;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = BCH_WRAPMODE_1;
			ecc_size0 = BCH4R_ECC_SIZE0;
			ecc_size1 = BCH4R_ECC_SIZE1;
		} else {
			wr_mode = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		/* BCH8 with SW correction: single sector, wrap mode 6 */
		bch_type = 1;
		nsectors = 1;
		wr_mode = BCH_WRAPMODE_6;
		ecc_size0 = BCH_ECC_SIZE0;
		ecc_size1 = BCH_ECC_SIZE1;
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		/* BCH8 with HW (ELM) correction */
		bch_type = 1;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = BCH_WRAPMODE_1;
			ecc_size0 = BCH8R_ECC_SIZE0;
			ecc_size1 = BCH8R_ECC_SIZE1;
		} else {
			wr_mode = BCH_WRAPMODE_6;
			ecc_size0 = BCH_ECC_SIZE0;
			ecc_size1 = BCH_ECC_SIZE1;
		}
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		/* BCH16: always wrap mode 1, sizes given in nibbles */
		bch_type = 0x2;
		nsectors = chip->ecc.steps;
		if (mode == NAND_ECC_READ) {
			wr_mode = 0x01;
			ecc_size0 = 52; /* ECC bits in nibbles per sector */
			ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */
		} else {
			wr_mode = 0x01;
			ecc_size0 = 0;  /* extra bits in nibbles per sector */
			ecc_size1 = 52; /* OOB bits in nibbles per sector */
		}
		break;
	default:
		/* other ECC schemes do not use this engine */
		return;
	}

	writel(ECC1, info->reg.gpmc_ecc_control);

	/* Configure ecc size for BCH */
	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
	writel(val, info->reg.gpmc_ecc_size_config);

	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;

	/* BCH configuration */
	val = ((1 << 16) |		/* enable BCH */
	       (bch_type << 12) |	/* BCH4/BCH8/BCH16 */
	       (wr_mode << 8) |		/* wrap mode */
	       (dev_width << 7) |	/* bus width */
	       (((nsectors-1) & 0x7) << 4) |	/* number of sectors */
	       (info->gpmc_cs << 1) |	/* ECC CS */
	       (0x1));			/* enable ECC */

	writel(val, info->reg.gpmc_ecc_config);

	/* Clear ecc and enable bits */
	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
1125*4882a593Smuzhiyun
/* Constant remainders XOR'ed into the BCH4/BCH8 syndromes by
 * _omap_calculate_ecc_bch() (the *_DETECTION_SW schemes) so that the
 * ECC of an erased page reads back as 0x0.
 */
static u8 bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
			       0x97, 0x79, 0xe5, 0x24, 0xb5};
1129*4882a593Smuzhiyun
/**
 * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
 * @mtd: MTD device structure
 * @dat: The pointer to data on which ecc is computed
 * @ecc_calc: Buffer receiving the ECC bytes of this sector
 * @i: The sector number (for a multi sector page)
 *
 * Support calculating of BCH4/8/16 ECC vectors for one sector
 * within a page. Sector number is in @i. The raw remainder is read
 * from the per-sector GPMC BCH result registers (most significant
 * byte first) and then post-processed according to the ECC scheme.
 */
static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
				   const u_char *dat, u_char *ecc_calc, int i)
{
	struct omap_nand_info *info = mtd_to_omap(mtd);
	int eccbytes = info->nand.ecc.bytes;
	struct gpmc_nand_regs *gpmc_regs = &info->reg;
	u8 *ecc_code;
	unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
	u32 val;
	int j;

	ecc_code = ecc_calc;
	switch (info->ecc_opt) {
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH8_CODE_HW:
		/* BCH8: 13 ECC bytes spread over four result registers */
		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
		bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
		bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
		*ecc_code++ = (bch_val4 & 0xFF);
		*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
		*ecc_code++ = (bch_val3 & 0xFF);
		*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
		*ecc_code++ = (bch_val2 & 0xFF);
		*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
		*ecc_code++ = (bch_val1 & 0xFF);
		break;
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
	case OMAP_ECC_BCH4_CODE_HW:
		/* BCH4: 52 remainder bits packed into 7 bytes, shifted
		 * left by one nibble
		 */
		bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
		bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
		*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
		*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
		*ecc_code++ = ((bch_val2 & 0xF) << 4) |
			((bch_val1 >> 28) & 0xF);
		*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
		*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
		*ecc_code++ = ((bch_val1 & 0xF) << 4);
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		/* BCH16: 26 ECC bytes spread over seven result registers */
		val = readl(gpmc_regs->gpmc_bch_result6[i]);
		ecc_code[0] = ((val >> 8) & 0xFF);
		ecc_code[1] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result5[i]);
		ecc_code[2] = ((val >> 24) & 0xFF);
		ecc_code[3] = ((val >> 16) & 0xFF);
		ecc_code[4] = ((val >> 8) & 0xFF);
		ecc_code[5] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result4[i]);
		ecc_code[6] = ((val >> 24) & 0xFF);
		ecc_code[7] = ((val >> 16) & 0xFF);
		ecc_code[8] = ((val >> 8) & 0xFF);
		ecc_code[9] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result3[i]);
		ecc_code[10] = ((val >> 24) & 0xFF);
		ecc_code[11] = ((val >> 16) & 0xFF);
		ecc_code[12] = ((val >> 8) & 0xFF);
		ecc_code[13] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result2[i]);
		ecc_code[14] = ((val >> 24) & 0xFF);
		ecc_code[15] = ((val >> 16) & 0xFF);
		ecc_code[16] = ((val >> 8) & 0xFF);
		ecc_code[17] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result1[i]);
		ecc_code[18] = ((val >> 24) & 0xFF);
		ecc_code[19] = ((val >> 16) & 0xFF);
		ecc_code[20] = ((val >> 8) & 0xFF);
		ecc_code[21] = ((val >> 0) & 0xFF);
		val = readl(gpmc_regs->gpmc_bch_result0[i]);
		ecc_code[22] = ((val >> 24) & 0xFF);
		ecc_code[23] = ((val >> 16) & 0xFF);
		ecc_code[24] = ((val >> 8) & 0xFF);
		ecc_code[25] = ((val >> 0) & 0xFF);
		break;
	default:
		return -EINVAL;
	}

	/* ECC scheme specific syndrome customizations */
	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
		/* Add constant polynomial to remainder, so that
		 * ECC of blank pages results in 0x0 on reading back
		 */
		for (j = 0; j < eccbytes; j++)
			ecc_calc[j] ^= bch4_polynomial[j];
		break;
	case OMAP_ECC_BCH4_CODE_HW:
		/* Set 8th ECC byte as 0x0 for ROM compatibility */
		ecc_calc[eccbytes - 1] = 0x0;
		break;
	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
		/* Add constant polynomial to remainder, so that
		 * ECC of blank pages results in 0x0 on reading back
		 */
		for (j = 0; j < eccbytes; j++)
			ecc_calc[j] ^= bch8_polynomial[j];
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		/* Set 14th ECC byte as 0x0 for ROM compatibility */
		ecc_calc[eccbytes - 1] = 0x0;
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		/* BCH16 syndromes are used as-is */
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun /**
1259*4882a593Smuzhiyun * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
1260*4882a593Smuzhiyun * @chip: NAND chip object
1261*4882a593Smuzhiyun * @dat: The pointer to data on which ecc is computed
1262*4882a593Smuzhiyun * @ecc_code: The ecc_code buffer
1263*4882a593Smuzhiyun *
1264*4882a593Smuzhiyun * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
1265*4882a593Smuzhiyun * when SW based correction is required as ECC is required for one sector
1266*4882a593Smuzhiyun * at a time.
1267*4882a593Smuzhiyun */
omap_calculate_ecc_bch_sw(struct nand_chip * chip,const u_char * dat,u_char * ecc_calc)1268*4882a593Smuzhiyun static int omap_calculate_ecc_bch_sw(struct nand_chip *chip,
1269*4882a593Smuzhiyun const u_char *dat, u_char *ecc_calc)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun return _omap_calculate_ecc_bch(nand_to_mtd(chip), dat, ecc_calc, 0);
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun /**
1275*4882a593Smuzhiyun * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1276*4882a593Smuzhiyun * @mtd: MTD device structure
1277*4882a593Smuzhiyun * @dat: The pointer to data on which ecc is computed
1278*4882a593Smuzhiyun * @ecc_code: The ecc_code buffer
1279*4882a593Smuzhiyun *
1280*4882a593Smuzhiyun * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
1281*4882a593Smuzhiyun */
omap_calculate_ecc_bch_multi(struct mtd_info * mtd,const u_char * dat,u_char * ecc_calc)1282*4882a593Smuzhiyun static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1283*4882a593Smuzhiyun const u_char *dat, u_char *ecc_calc)
1284*4882a593Smuzhiyun {
1285*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
1286*4882a593Smuzhiyun int eccbytes = info->nand.ecc.bytes;
1287*4882a593Smuzhiyun unsigned long nsectors;
1288*4882a593Smuzhiyun int i, ret;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1291*4882a593Smuzhiyun for (i = 0; i < nsectors; i++) {
1292*4882a593Smuzhiyun ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1293*4882a593Smuzhiyun if (ret)
1294*4882a593Smuzhiyun return ret;
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun ecc_calc += eccbytes;
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun return 0;
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun /**
1303*4882a593Smuzhiyun * erased_sector_bitflips - count bit flips
1304*4882a593Smuzhiyun * @data: data sector buffer
1305*4882a593Smuzhiyun * @oob: oob buffer
1306*4882a593Smuzhiyun * @info: omap_nand_info
1307*4882a593Smuzhiyun *
1308*4882a593Smuzhiyun * Check the bit flips in erased page falls below correctable level.
1309*4882a593Smuzhiyun * If falls below, report the page as erased with correctable bit
1310*4882a593Smuzhiyun * flip, else report as uncorrectable page.
1311*4882a593Smuzhiyun */
erased_sector_bitflips(u_char * data,u_char * oob,struct omap_nand_info * info)1312*4882a593Smuzhiyun static int erased_sector_bitflips(u_char *data, u_char *oob,
1313*4882a593Smuzhiyun struct omap_nand_info *info)
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun int flip_bits = 0, i;
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun for (i = 0; i < info->nand.ecc.size; i++) {
1318*4882a593Smuzhiyun flip_bits += hweight8(~data[i]);
1319*4882a593Smuzhiyun if (flip_bits > info->nand.ecc.strength)
1320*4882a593Smuzhiyun return 0;
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
1324*4882a593Smuzhiyun flip_bits += hweight8(~oob[i]);
1325*4882a593Smuzhiyun if (flip_bits > info->nand.ecc.strength)
1326*4882a593Smuzhiyun return 0;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun /*
1330*4882a593Smuzhiyun * Bit flips falls in correctable level.
1331*4882a593Smuzhiyun * Fill data area with 0xFF
1332*4882a593Smuzhiyun */
1333*4882a593Smuzhiyun if (flip_bits) {
1334*4882a593Smuzhiyun memset(data, 0xFF, info->nand.ecc.size);
1335*4882a593Smuzhiyun memset(oob, 0xFF, info->nand.ecc.bytes);
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun return flip_bits;
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun
/**
 * omap_elm_correct_data - corrects page data area in case error reported
 * @chip: NAND chip object
 * @data: page data
 * @read_ecc: ecc read from nand flash (OOB area)
 * @calc_ecc: ecc read from HW ECC registers
 *
 * Calculated ecc vector reported as zero in case of non-error pages.
 * In case of non-zero ecc vector, first filter out erased-pages, and
 * then process data via ELM to detect bit-flips.
 *
 * Returns a non-negative bit-flip count on success, -EBADMSG if any
 * sector is uncorrectable, or -EINVAL for an unsupported ECC scheme.
 */
static int omap_elm_correct_data(struct nand_chip *chip, u_char *data,
				 u_char *read_ecc, u_char *calc_ecc)
{
	struct omap_nand_info *info = mtd_to_omap(nand_to_mtd(chip));
	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
	int eccsteps = info->nand.ecc.steps;
	int i , j, stat = 0;
	int eccflag, actual_eccbytes;
	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
	u_char *ecc_vec = calc_ecc;	/* start of calc ECC, kept for ELM decode */
	u_char *spare_ecc = read_ecc;	/* start of OOB ECC, kept for bit fixups */
	u_char *erased_ecc_vec;
	u_char *buf;
	int bitflip_count;
	bool is_error_reported = false;
	u32 bit_pos, byte_pos, error_max, pos;
	int err;

	/* Select the usable ECC width and the "erased page" ECC signature */
	switch (info->ecc_opt) {
	case OMAP_ECC_BCH4_CODE_HW:
		/* omit 7th ECC byte reserved for ROM code compatibility */
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch4_vector;
		break;
	case OMAP_ECC_BCH8_CODE_HW:
		/* omit 14th ECC byte reserved for ROM code compatibility */
		actual_eccbytes = ecc->bytes - 1;
		erased_ecc_vec = bch8_vector;
		break;
	case OMAP_ECC_BCH16_CODE_HW:
		actual_eccbytes = ecc->bytes;
		erased_ecc_vec = bch16_vector;
		break;
	default:
		dev_err(&info->pdev->dev, "invalid driver configuration\n");
		return -EINVAL;
	}

	/* Initialize elm error vector to zero */
	memset(err_vec, 0, sizeof(err_vec));

	/* Pass 1: classify each sector as clean, erased, or needing ELM */
	for (i = 0; i < eccsteps ; i++) {
		eccflag = 0; /* initialize eccflag */

		/*
		 * Check any error reported,
		 * In case of error, non zero ecc reported.
		 */
		for (j = 0; j < actual_eccbytes; j++) {
			if (calc_ecc[j] != 0) {
				eccflag = 1; /* non zero ecc, error present */
				break;
			}
		}

		if (eccflag == 1) {
			if (memcmp(calc_ecc, erased_ecc_vec,
				   actual_eccbytes) == 0) {
				/*
				 * calc_ecc[] matches pattern for ECC(all 0xff)
				 * so this is definitely an erased-page
				 */
			} else {
				buf = &data[info->nand.ecc.size * i];
				/*
				 * count number of 0-bits in read_buf.
				 * This check can be removed once a similar
				 * check is introduced in generic NAND driver
				 */
				bitflip_count = erased_sector_bitflips(
						buf, read_ecc, info);
				if (bitflip_count) {
					/*
					 * number of 0-bits within ECC limits
					 * So this may be an erased-page
					 */
					stat += bitflip_count;
				} else {
					/*
					 * Too many 0-bits. It may be a
					 * - programmed-page, OR
					 * - erased-page with many bit-flips
					 * So this page requires check by ELM
					 */
					err_vec[i].error_reported = true;
					is_error_reported = true;
				}
			}
		}

		/* Update the ecc vector */
		calc_ecc += ecc->bytes;
		read_ecc += ecc->bytes;
	}

	/* Check if any error reported */
	if (!is_error_reported)
		return stat;

	/* Decode BCH error using ELM module */
	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);

	/* Pass 2: apply the bit-flip locations reported by the ELM */
	err = 0;
	for (i = 0; i < eccsteps; i++) {
		if (err_vec[i].error_uncorrectable) {
			dev_err(&info->pdev->dev,
				"uncorrectable bit-flips found\n");
			err = -EBADMSG;
		} else if (err_vec[i].error_reported) {
			for (j = 0; j < err_vec[i].error_count; j++) {
				switch (info->ecc_opt) {
				case OMAP_ECC_BCH4_CODE_HW:
					/* Add 4 bits to take care of padding */
					pos = err_vec[i].error_loc[j] +
						BCH4_BIT_PAD;
					break;
				case OMAP_ECC_BCH8_CODE_HW:
				case OMAP_ECC_BCH16_CODE_HW:
					pos = err_vec[i].error_loc[j];
					break;
				default:
					return -EINVAL;
				}
				error_max = (ecc->size + actual_eccbytes) * 8;
				/* Calculate bit position of error */
				bit_pos = pos % 8;

				/*
				 * Calculate byte position of error: the
				 * reported position counts from the opposite
				 * end of the (data + ECC) bit stream, hence
				 * the "error_max - pos - 1" reversal.
				 */
				byte_pos = (error_max - pos - 1) / 8;

				if (pos < error_max) {
					/*
					 * NOTE(review): the literal 512 here
					 * assumes a 512-byte ECC sector
					 * (ecc->size); offsets beyond it land
					 * in the spare (OOB) ECC area.
					 */
					if (byte_pos < 512) {
						pr_debug("bitflip@dat[%d]=%x\n",
							 byte_pos, data[byte_pos]);
						data[byte_pos] ^= 1 << bit_pos;
					} else {
						pr_debug("bitflip@oob[%d]=%x\n",
							 (byte_pos - 512),
							 spare_ecc[byte_pos - 512]);
						spare_ecc[byte_pos - 512] ^=
							1 << bit_pos;
					}
				} else {
					dev_err(&info->pdev->dev,
						"invalid bit-flip @ %d:%d\n",
						byte_pos, bit_pos);
					err = -EBADMSG;
				}
			}
		}

		/* Update number of correctable errors */
		stat = max_t(unsigned int, stat, err_vec[i].error_count);

		/* Update page data with sector size */
		data += ecc->size;
		spare_ecc += ecc->bytes;
	}

	return (err) ? err : stat;
}
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun /**
1515*4882a593Smuzhiyun * omap_write_page_bch - BCH ecc based write page function for entire page
1516*4882a593Smuzhiyun * @chip: nand chip info structure
1517*4882a593Smuzhiyun * @buf: data buffer
1518*4882a593Smuzhiyun * @oob_required: must write chip->oob_poi to OOB
1519*4882a593Smuzhiyun * @page: page
1520*4882a593Smuzhiyun *
1521*4882a593Smuzhiyun * Custom write page method evolved to support multi sector writing in one shot
1522*4882a593Smuzhiyun */
omap_write_page_bch(struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)1523*4882a593Smuzhiyun static int omap_write_page_bch(struct nand_chip *chip, const uint8_t *buf,
1524*4882a593Smuzhiyun int oob_required, int page)
1525*4882a593Smuzhiyun {
1526*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
1527*4882a593Smuzhiyun int ret;
1528*4882a593Smuzhiyun uint8_t *ecc_calc = chip->ecc.calc_buf;
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun /* Enable GPMC ecc engine */
1533*4882a593Smuzhiyun chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1534*4882a593Smuzhiyun
1535*4882a593Smuzhiyun /* Write data */
1536*4882a593Smuzhiyun chip->legacy.write_buf(chip, buf, mtd->writesize);
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun /* Update ecc vector from GPMC result registers */
1539*4882a593Smuzhiyun omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1542*4882a593Smuzhiyun chip->ecc.total);
1543*4882a593Smuzhiyun if (ret)
1544*4882a593Smuzhiyun return ret;
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun /* Write ecc vector to OOB area */
1547*4882a593Smuzhiyun chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
1548*4882a593Smuzhiyun
1549*4882a593Smuzhiyun return nand_prog_page_end_op(chip);
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun
1552*4882a593Smuzhiyun /**
1553*4882a593Smuzhiyun * omap_write_subpage_bch - BCH hardware ECC based subpage write
1554*4882a593Smuzhiyun * @chip: nand chip info structure
1555*4882a593Smuzhiyun * @offset: column address of subpage within the page
1556*4882a593Smuzhiyun * @data_len: data length
1557*4882a593Smuzhiyun * @buf: data buffer
1558*4882a593Smuzhiyun * @oob_required: must write chip->oob_poi to OOB
1559*4882a593Smuzhiyun * @page: page number to write
1560*4882a593Smuzhiyun *
1561*4882a593Smuzhiyun * OMAP optimized subpage write method.
1562*4882a593Smuzhiyun */
omap_write_subpage_bch(struct nand_chip * chip,u32 offset,u32 data_len,const u8 * buf,int oob_required,int page)1563*4882a593Smuzhiyun static int omap_write_subpage_bch(struct nand_chip *chip, u32 offset,
1564*4882a593Smuzhiyun u32 data_len, const u8 *buf,
1565*4882a593Smuzhiyun int oob_required, int page)
1566*4882a593Smuzhiyun {
1567*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
1568*4882a593Smuzhiyun u8 *ecc_calc = chip->ecc.calc_buf;
1569*4882a593Smuzhiyun int ecc_size = chip->ecc.size;
1570*4882a593Smuzhiyun int ecc_bytes = chip->ecc.bytes;
1571*4882a593Smuzhiyun int ecc_steps = chip->ecc.steps;
1572*4882a593Smuzhiyun u32 start_step = offset / ecc_size;
1573*4882a593Smuzhiyun u32 end_step = (offset + data_len - 1) / ecc_size;
1574*4882a593Smuzhiyun int step, ret = 0;
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun /*
1577*4882a593Smuzhiyun * Write entire page at one go as it would be optimal
1578*4882a593Smuzhiyun * as ECC is calculated by hardware.
1579*4882a593Smuzhiyun * ECC is calculated for all subpages but we choose
1580*4882a593Smuzhiyun * only what we want.
1581*4882a593Smuzhiyun */
1582*4882a593Smuzhiyun nand_prog_page_begin_op(chip, page, 0, NULL, 0);
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun /* Enable GPMC ECC engine */
1585*4882a593Smuzhiyun chip->ecc.hwctl(chip, NAND_ECC_WRITE);
1586*4882a593Smuzhiyun
1587*4882a593Smuzhiyun /* Write data */
1588*4882a593Smuzhiyun chip->legacy.write_buf(chip, buf, mtd->writesize);
1589*4882a593Smuzhiyun
1590*4882a593Smuzhiyun for (step = 0; step < ecc_steps; step++) {
1591*4882a593Smuzhiyun /* mask ECC of un-touched subpages by padding 0xFF */
1592*4882a593Smuzhiyun if (step < start_step || step > end_step)
1593*4882a593Smuzhiyun memset(ecc_calc, 0xff, ecc_bytes);
1594*4882a593Smuzhiyun else
1595*4882a593Smuzhiyun ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
1596*4882a593Smuzhiyun
1597*4882a593Smuzhiyun if (ret)
1598*4882a593Smuzhiyun return ret;
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun buf += ecc_size;
1601*4882a593Smuzhiyun ecc_calc += ecc_bytes;
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun /* copy calculated ECC for whole page to chip->buffer->oob */
1605*4882a593Smuzhiyun /* this include masked-value(0xFF) for unwritten subpages */
1606*4882a593Smuzhiyun ecc_calc = chip->ecc.calc_buf;
1607*4882a593Smuzhiyun ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1608*4882a593Smuzhiyun chip->ecc.total);
1609*4882a593Smuzhiyun if (ret)
1610*4882a593Smuzhiyun return ret;
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun /* write OOB buffer to NAND device */
1613*4882a593Smuzhiyun chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun return nand_prog_page_end_op(chip);
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun /**
1619*4882a593Smuzhiyun * omap_read_page_bch - BCH ecc based page read function for entire page
1620*4882a593Smuzhiyun * @chip: nand chip info structure
1621*4882a593Smuzhiyun * @buf: buffer to store read data
1622*4882a593Smuzhiyun * @oob_required: caller requires OOB data read to chip->oob_poi
1623*4882a593Smuzhiyun * @page: page number to read
1624*4882a593Smuzhiyun *
1625*4882a593Smuzhiyun * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module
1626*4882a593Smuzhiyun * used for error correction.
1627*4882a593Smuzhiyun * Custom method evolved to support ELM error correction & multi sector
1628*4882a593Smuzhiyun * reading. On reading page data area is read along with OOB data with
1629*4882a593Smuzhiyun * ecc engine enabled. ecc vector updated after read of OOB data.
1630*4882a593Smuzhiyun * For non error pages ecc vector reported as zero.
1631*4882a593Smuzhiyun */
omap_read_page_bch(struct nand_chip * chip,uint8_t * buf,int oob_required,int page)1632*4882a593Smuzhiyun static int omap_read_page_bch(struct nand_chip *chip, uint8_t *buf,
1633*4882a593Smuzhiyun int oob_required, int page)
1634*4882a593Smuzhiyun {
1635*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
1636*4882a593Smuzhiyun uint8_t *ecc_calc = chip->ecc.calc_buf;
1637*4882a593Smuzhiyun uint8_t *ecc_code = chip->ecc.code_buf;
1638*4882a593Smuzhiyun int stat, ret;
1639*4882a593Smuzhiyun unsigned int max_bitflips = 0;
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun nand_read_page_op(chip, page, 0, NULL, 0);
1642*4882a593Smuzhiyun
1643*4882a593Smuzhiyun /* Enable GPMC ecc engine */
1644*4882a593Smuzhiyun chip->ecc.hwctl(chip, NAND_ECC_READ);
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun /* Read data */
1647*4882a593Smuzhiyun chip->legacy.read_buf(chip, buf, mtd->writesize);
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun /* Read oob bytes */
1650*4882a593Smuzhiyun nand_change_read_column_op(chip,
1651*4882a593Smuzhiyun mtd->writesize + BADBLOCK_MARKER_LENGTH,
1652*4882a593Smuzhiyun chip->oob_poi + BADBLOCK_MARKER_LENGTH,
1653*4882a593Smuzhiyun chip->ecc.total, false);
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun /* Calculate ecc bytes */
1656*4882a593Smuzhiyun omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1659*4882a593Smuzhiyun chip->ecc.total);
1660*4882a593Smuzhiyun if (ret)
1661*4882a593Smuzhiyun return ret;
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun stat = chip->ecc.correct(chip, buf, ecc_code, ecc_calc);
1664*4882a593Smuzhiyun
1665*4882a593Smuzhiyun if (stat < 0) {
1666*4882a593Smuzhiyun mtd->ecc_stats.failed++;
1667*4882a593Smuzhiyun } else {
1668*4882a593Smuzhiyun mtd->ecc_stats.corrected += stat;
1669*4882a593Smuzhiyun max_bitflips = max_t(unsigned int, max_bitflips, stat);
1670*4882a593Smuzhiyun }
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun return max_bitflips;
1673*4882a593Smuzhiyun }
1674*4882a593Smuzhiyun
1675*4882a593Smuzhiyun /**
1676*4882a593Smuzhiyun * is_elm_present - checks for presence of ELM module by scanning DT nodes
1677*4882a593Smuzhiyun * @omap_nand_info: NAND device structure containing platform data
1678*4882a593Smuzhiyun */
is_elm_present(struct omap_nand_info * info,struct device_node * elm_node)1679*4882a593Smuzhiyun static bool is_elm_present(struct omap_nand_info *info,
1680*4882a593Smuzhiyun struct device_node *elm_node)
1681*4882a593Smuzhiyun {
1682*4882a593Smuzhiyun struct platform_device *pdev;
1683*4882a593Smuzhiyun
1684*4882a593Smuzhiyun /* check whether elm-id is passed via DT */
1685*4882a593Smuzhiyun if (!elm_node) {
1686*4882a593Smuzhiyun dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
1687*4882a593Smuzhiyun return false;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun pdev = of_find_device_by_node(elm_node);
1690*4882a593Smuzhiyun /* check whether ELM device is registered */
1691*4882a593Smuzhiyun if (!pdev) {
1692*4882a593Smuzhiyun dev_err(&info->pdev->dev, "ELM device not found\n");
1693*4882a593Smuzhiyun return false;
1694*4882a593Smuzhiyun }
1695*4882a593Smuzhiyun /* ELM module available, now configure it */
1696*4882a593Smuzhiyun info->elm_dev = &pdev->dev;
1697*4882a593Smuzhiyun return true;
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun
omap2_nand_ecc_check(struct omap_nand_info * info)1700*4882a593Smuzhiyun static bool omap2_nand_ecc_check(struct omap_nand_info *info)
1701*4882a593Smuzhiyun {
1702*4882a593Smuzhiyun bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun switch (info->ecc_opt) {
1705*4882a593Smuzhiyun case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1706*4882a593Smuzhiyun case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1707*4882a593Smuzhiyun ecc_needs_omap_bch = false;
1708*4882a593Smuzhiyun ecc_needs_bch = true;
1709*4882a593Smuzhiyun ecc_needs_elm = false;
1710*4882a593Smuzhiyun break;
1711*4882a593Smuzhiyun case OMAP_ECC_BCH4_CODE_HW:
1712*4882a593Smuzhiyun case OMAP_ECC_BCH8_CODE_HW:
1713*4882a593Smuzhiyun case OMAP_ECC_BCH16_CODE_HW:
1714*4882a593Smuzhiyun ecc_needs_omap_bch = true;
1715*4882a593Smuzhiyun ecc_needs_bch = false;
1716*4882a593Smuzhiyun ecc_needs_elm = true;
1717*4882a593Smuzhiyun break;
1718*4882a593Smuzhiyun default:
1719*4882a593Smuzhiyun ecc_needs_omap_bch = false;
1720*4882a593Smuzhiyun ecc_needs_bch = false;
1721*4882a593Smuzhiyun ecc_needs_elm = false;
1722*4882a593Smuzhiyun break;
1723*4882a593Smuzhiyun }
1724*4882a593Smuzhiyun
1725*4882a593Smuzhiyun if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)) {
1726*4882a593Smuzhiyun dev_err(&info->pdev->dev,
1727*4882a593Smuzhiyun "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
1728*4882a593Smuzhiyun return false;
1729*4882a593Smuzhiyun }
1730*4882a593Smuzhiyun if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
1731*4882a593Smuzhiyun dev_err(&info->pdev->dev,
1732*4882a593Smuzhiyun "CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
1733*4882a593Smuzhiyun return false;
1734*4882a593Smuzhiyun }
1735*4882a593Smuzhiyun if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
1736*4882a593Smuzhiyun dev_err(&info->pdev->dev, "ELM not available\n");
1737*4882a593Smuzhiyun return false;
1738*4882a593Smuzhiyun }
1739*4882a593Smuzhiyun
1740*4882a593Smuzhiyun return true;
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun
/*
 * DT "ti,nand-xfer-type" property values, indexed by the NAND_OMAP_*
 * transfer-mode constants (the array index is stored in info->xfer_type).
 */
static const char * const nand_xfer_types[] = {
	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
	[NAND_OMAP_POLLED] = "polled",
	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
};
1749*4882a593Smuzhiyun
omap_get_dt_info(struct device * dev,struct omap_nand_info * info)1750*4882a593Smuzhiyun static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
1751*4882a593Smuzhiyun {
1752*4882a593Smuzhiyun struct device_node *child = dev->of_node;
1753*4882a593Smuzhiyun int i;
1754*4882a593Smuzhiyun const char *s;
1755*4882a593Smuzhiyun u32 cs;
1756*4882a593Smuzhiyun
1757*4882a593Smuzhiyun if (of_property_read_u32(child, "reg", &cs) < 0) {
1758*4882a593Smuzhiyun dev_err(dev, "reg not found in DT\n");
1759*4882a593Smuzhiyun return -EINVAL;
1760*4882a593Smuzhiyun }
1761*4882a593Smuzhiyun
1762*4882a593Smuzhiyun info->gpmc_cs = cs;
1763*4882a593Smuzhiyun
1764*4882a593Smuzhiyun /* detect availability of ELM module. Won't be present pre-OMAP4 */
1765*4882a593Smuzhiyun info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
1766*4882a593Smuzhiyun if (!info->elm_of_node) {
1767*4882a593Smuzhiyun info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
1768*4882a593Smuzhiyun if (!info->elm_of_node)
1769*4882a593Smuzhiyun dev_dbg(dev, "ti,elm-id not in DT\n");
1770*4882a593Smuzhiyun }
1771*4882a593Smuzhiyun
1772*4882a593Smuzhiyun /* select ecc-scheme for NAND */
1773*4882a593Smuzhiyun if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
1774*4882a593Smuzhiyun dev_err(dev, "ti,nand-ecc-opt not found\n");
1775*4882a593Smuzhiyun return -EINVAL;
1776*4882a593Smuzhiyun }
1777*4882a593Smuzhiyun
1778*4882a593Smuzhiyun if (!strcmp(s, "sw")) {
1779*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
1780*4882a593Smuzhiyun } else if (!strcmp(s, "ham1") ||
1781*4882a593Smuzhiyun !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
1782*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_HAM1_CODE_HW;
1783*4882a593Smuzhiyun } else if (!strcmp(s, "bch4")) {
1784*4882a593Smuzhiyun if (info->elm_of_node)
1785*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
1786*4882a593Smuzhiyun else
1787*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
1788*4882a593Smuzhiyun } else if (!strcmp(s, "bch8")) {
1789*4882a593Smuzhiyun if (info->elm_of_node)
1790*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
1791*4882a593Smuzhiyun else
1792*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
1793*4882a593Smuzhiyun } else if (!strcmp(s, "bch16")) {
1794*4882a593Smuzhiyun info->ecc_opt = OMAP_ECC_BCH16_CODE_HW;
1795*4882a593Smuzhiyun } else {
1796*4882a593Smuzhiyun dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
1797*4882a593Smuzhiyun return -EINVAL;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun
1800*4882a593Smuzhiyun /* select data transfer mode */
1801*4882a593Smuzhiyun if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
1802*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
1803*4882a593Smuzhiyun if (!strcasecmp(s, nand_xfer_types[i])) {
1804*4882a593Smuzhiyun info->xfer_type = i;
1805*4882a593Smuzhiyun return 0;
1806*4882a593Smuzhiyun }
1807*4882a593Smuzhiyun }
1808*4882a593Smuzhiyun
1809*4882a593Smuzhiyun dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
1810*4882a593Smuzhiyun return -EINVAL;
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun return 0;
1814*4882a593Smuzhiyun }
1815*4882a593Smuzhiyun
omap_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1816*4882a593Smuzhiyun static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
1817*4882a593Smuzhiyun struct mtd_oob_region *oobregion)
1818*4882a593Smuzhiyun {
1819*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
1820*4882a593Smuzhiyun struct nand_chip *chip = &info->nand;
1821*4882a593Smuzhiyun int off = BADBLOCK_MARKER_LENGTH;
1822*4882a593Smuzhiyun
1823*4882a593Smuzhiyun if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1824*4882a593Smuzhiyun !(chip->options & NAND_BUSWIDTH_16))
1825*4882a593Smuzhiyun off = 1;
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun if (section)
1828*4882a593Smuzhiyun return -ERANGE;
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun oobregion->offset = off;
1831*4882a593Smuzhiyun oobregion->length = chip->ecc.total;
1832*4882a593Smuzhiyun
1833*4882a593Smuzhiyun return 0;
1834*4882a593Smuzhiyun }
1835*4882a593Smuzhiyun
omap_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1836*4882a593Smuzhiyun static int omap_ooblayout_free(struct mtd_info *mtd, int section,
1837*4882a593Smuzhiyun struct mtd_oob_region *oobregion)
1838*4882a593Smuzhiyun {
1839*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
1840*4882a593Smuzhiyun struct nand_chip *chip = &info->nand;
1841*4882a593Smuzhiyun int off = BADBLOCK_MARKER_LENGTH;
1842*4882a593Smuzhiyun
1843*4882a593Smuzhiyun if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
1844*4882a593Smuzhiyun !(chip->options & NAND_BUSWIDTH_16))
1845*4882a593Smuzhiyun off = 1;
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun if (section)
1848*4882a593Smuzhiyun return -ERANGE;
1849*4882a593Smuzhiyun
1850*4882a593Smuzhiyun off += chip->ecc.total;
1851*4882a593Smuzhiyun if (off >= mtd->oobsize)
1852*4882a593Smuzhiyun return -ERANGE;
1853*4882a593Smuzhiyun
1854*4882a593Smuzhiyun oobregion->offset = off;
1855*4882a593Smuzhiyun oobregion->length = mtd->oobsize - off;
1856*4882a593Smuzhiyun
1857*4882a593Smuzhiyun return 0;
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun
/* OOB layout callbacks: ECC packed directly after the bad-block marker */
static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
	.ecc = omap_ooblayout_ecc,
	.free = omap_ooblayout_free,
};
1864*4882a593Smuzhiyun
omap_sw_ooblayout_ecc(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1865*4882a593Smuzhiyun static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
1866*4882a593Smuzhiyun struct mtd_oob_region *oobregion)
1867*4882a593Smuzhiyun {
1868*4882a593Smuzhiyun struct nand_chip *chip = mtd_to_nand(mtd);
1869*4882a593Smuzhiyun int off = BADBLOCK_MARKER_LENGTH;
1870*4882a593Smuzhiyun
1871*4882a593Smuzhiyun if (section >= chip->ecc.steps)
1872*4882a593Smuzhiyun return -ERANGE;
1873*4882a593Smuzhiyun
1874*4882a593Smuzhiyun /*
1875*4882a593Smuzhiyun * When SW correction is employed, one OMAP specific marker byte is
1876*4882a593Smuzhiyun * reserved after each ECC step.
1877*4882a593Smuzhiyun */
1878*4882a593Smuzhiyun oobregion->offset = off + (section * (chip->ecc.bytes + 1));
1879*4882a593Smuzhiyun oobregion->length = chip->ecc.bytes;
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun return 0;
1882*4882a593Smuzhiyun }
1883*4882a593Smuzhiyun
omap_sw_ooblayout_free(struct mtd_info * mtd,int section,struct mtd_oob_region * oobregion)1884*4882a593Smuzhiyun static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
1885*4882a593Smuzhiyun struct mtd_oob_region *oobregion)
1886*4882a593Smuzhiyun {
1887*4882a593Smuzhiyun struct nand_chip *chip = mtd_to_nand(mtd);
1888*4882a593Smuzhiyun int off = BADBLOCK_MARKER_LENGTH;
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun if (section)
1891*4882a593Smuzhiyun return -ERANGE;
1892*4882a593Smuzhiyun
1893*4882a593Smuzhiyun /*
1894*4882a593Smuzhiyun * When SW correction is employed, one OMAP specific marker byte is
1895*4882a593Smuzhiyun * reserved after each ECC step.
1896*4882a593Smuzhiyun */
1897*4882a593Smuzhiyun off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
1898*4882a593Smuzhiyun if (off >= mtd->oobsize)
1899*4882a593Smuzhiyun return -ERANGE;
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun oobregion->offset = off;
1902*4882a593Smuzhiyun oobregion->length = mtd->oobsize - off;
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun return 0;
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun
/* OOB layout callbacks: one marker byte reserved after each ECC step (SW correction) */
static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
	.ecc = omap_sw_ooblayout_ecc,
	.free = omap_sw_ooblayout_free,
};
1911*4882a593Smuzhiyun
omap_nand_attach_chip(struct nand_chip * chip)1912*4882a593Smuzhiyun static int omap_nand_attach_chip(struct nand_chip *chip)
1913*4882a593Smuzhiyun {
1914*4882a593Smuzhiyun struct mtd_info *mtd = nand_to_mtd(chip);
1915*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
1916*4882a593Smuzhiyun struct device *dev = &info->pdev->dev;
1917*4882a593Smuzhiyun int min_oobbytes = BADBLOCK_MARKER_LENGTH;
1918*4882a593Smuzhiyun int oobbytes_per_step;
1919*4882a593Smuzhiyun dma_cap_mask_t mask;
1920*4882a593Smuzhiyun int err;
1921*4882a593Smuzhiyun
1922*4882a593Smuzhiyun if (chip->bbt_options & NAND_BBT_USE_FLASH)
1923*4882a593Smuzhiyun chip->bbt_options |= NAND_BBT_NO_OOB;
1924*4882a593Smuzhiyun else
1925*4882a593Smuzhiyun chip->options |= NAND_SKIP_BBTSCAN;
1926*4882a593Smuzhiyun
1927*4882a593Smuzhiyun /* Re-populate low-level callbacks based on xfer modes */
1928*4882a593Smuzhiyun switch (info->xfer_type) {
1929*4882a593Smuzhiyun case NAND_OMAP_PREFETCH_POLLED:
1930*4882a593Smuzhiyun chip->legacy.read_buf = omap_read_buf_pref;
1931*4882a593Smuzhiyun chip->legacy.write_buf = omap_write_buf_pref;
1932*4882a593Smuzhiyun break;
1933*4882a593Smuzhiyun
1934*4882a593Smuzhiyun case NAND_OMAP_POLLED:
1935*4882a593Smuzhiyun /* Use nand_base defaults for {read,write}_buf */
1936*4882a593Smuzhiyun break;
1937*4882a593Smuzhiyun
1938*4882a593Smuzhiyun case NAND_OMAP_PREFETCH_DMA:
1939*4882a593Smuzhiyun dma_cap_zero(mask);
1940*4882a593Smuzhiyun dma_cap_set(DMA_SLAVE, mask);
1941*4882a593Smuzhiyun info->dma = dma_request_chan(dev->parent, "rxtx");
1942*4882a593Smuzhiyun
1943*4882a593Smuzhiyun if (IS_ERR(info->dma)) {
1944*4882a593Smuzhiyun dev_err(dev, "DMA engine request failed\n");
1945*4882a593Smuzhiyun return PTR_ERR(info->dma);
1946*4882a593Smuzhiyun } else {
1947*4882a593Smuzhiyun struct dma_slave_config cfg;
1948*4882a593Smuzhiyun
1949*4882a593Smuzhiyun memset(&cfg, 0, sizeof(cfg));
1950*4882a593Smuzhiyun cfg.src_addr = info->phys_base;
1951*4882a593Smuzhiyun cfg.dst_addr = info->phys_base;
1952*4882a593Smuzhiyun cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1953*4882a593Smuzhiyun cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1954*4882a593Smuzhiyun cfg.src_maxburst = 16;
1955*4882a593Smuzhiyun cfg.dst_maxburst = 16;
1956*4882a593Smuzhiyun err = dmaengine_slave_config(info->dma, &cfg);
1957*4882a593Smuzhiyun if (err) {
1958*4882a593Smuzhiyun dev_err(dev,
1959*4882a593Smuzhiyun "DMA engine slave config failed: %d\n",
1960*4882a593Smuzhiyun err);
1961*4882a593Smuzhiyun return err;
1962*4882a593Smuzhiyun }
1963*4882a593Smuzhiyun chip->legacy.read_buf = omap_read_buf_dma_pref;
1964*4882a593Smuzhiyun chip->legacy.write_buf = omap_write_buf_dma_pref;
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun break;
1967*4882a593Smuzhiyun
1968*4882a593Smuzhiyun case NAND_OMAP_PREFETCH_IRQ:
1969*4882a593Smuzhiyun info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
1970*4882a593Smuzhiyun if (info->gpmc_irq_fifo <= 0)
1971*4882a593Smuzhiyun return -ENODEV;
1972*4882a593Smuzhiyun err = devm_request_irq(dev, info->gpmc_irq_fifo,
1973*4882a593Smuzhiyun omap_nand_irq, IRQF_SHARED,
1974*4882a593Smuzhiyun "gpmc-nand-fifo", info);
1975*4882a593Smuzhiyun if (err) {
1976*4882a593Smuzhiyun dev_err(dev, "Requesting IRQ %d, error %d\n",
1977*4882a593Smuzhiyun info->gpmc_irq_fifo, err);
1978*4882a593Smuzhiyun info->gpmc_irq_fifo = 0;
1979*4882a593Smuzhiyun return err;
1980*4882a593Smuzhiyun }
1981*4882a593Smuzhiyun
1982*4882a593Smuzhiyun info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
1983*4882a593Smuzhiyun if (info->gpmc_irq_count <= 0)
1984*4882a593Smuzhiyun return -ENODEV;
1985*4882a593Smuzhiyun err = devm_request_irq(dev, info->gpmc_irq_count,
1986*4882a593Smuzhiyun omap_nand_irq, IRQF_SHARED,
1987*4882a593Smuzhiyun "gpmc-nand-count", info);
1988*4882a593Smuzhiyun if (err) {
1989*4882a593Smuzhiyun dev_err(dev, "Requesting IRQ %d, error %d\n",
1990*4882a593Smuzhiyun info->gpmc_irq_count, err);
1991*4882a593Smuzhiyun info->gpmc_irq_count = 0;
1992*4882a593Smuzhiyun return err;
1993*4882a593Smuzhiyun }
1994*4882a593Smuzhiyun
1995*4882a593Smuzhiyun chip->legacy.read_buf = omap_read_buf_irq_pref;
1996*4882a593Smuzhiyun chip->legacy.write_buf = omap_write_buf_irq_pref;
1997*4882a593Smuzhiyun
1998*4882a593Smuzhiyun break;
1999*4882a593Smuzhiyun
2000*4882a593Smuzhiyun default:
2001*4882a593Smuzhiyun dev_err(dev, "xfer_type %d not supported!\n", info->xfer_type);
2002*4882a593Smuzhiyun return -EINVAL;
2003*4882a593Smuzhiyun }
2004*4882a593Smuzhiyun
2005*4882a593Smuzhiyun if (!omap2_nand_ecc_check(info))
2006*4882a593Smuzhiyun return -EINVAL;
2007*4882a593Smuzhiyun
2008*4882a593Smuzhiyun /*
2009*4882a593Smuzhiyun * Bail out earlier to let NAND_ECC_ENGINE_TYPE_SOFT code create its own
2010*4882a593Smuzhiyun * ooblayout instead of using ours.
2011*4882a593Smuzhiyun */
2012*4882a593Smuzhiyun if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
2013*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
2014*4882a593Smuzhiyun chip->ecc.algo = NAND_ECC_ALGO_HAMMING;
2015*4882a593Smuzhiyun return 0;
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun /* Populate MTD interface based on ECC scheme */
2019*4882a593Smuzhiyun switch (info->ecc_opt) {
2020*4882a593Smuzhiyun case OMAP_ECC_HAM1_CODE_HW:
2021*4882a593Smuzhiyun dev_info(dev, "nand: using OMAP_ECC_HAM1_CODE_HW\n");
2022*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2023*4882a593Smuzhiyun chip->ecc.bytes = 3;
2024*4882a593Smuzhiyun chip->ecc.size = 512;
2025*4882a593Smuzhiyun chip->ecc.strength = 1;
2026*4882a593Smuzhiyun chip->ecc.calculate = omap_calculate_ecc;
2027*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc;
2028*4882a593Smuzhiyun chip->ecc.correct = omap_correct_data;
2029*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2030*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes;
2031*4882a593Smuzhiyun
2032*4882a593Smuzhiyun if (!(chip->options & NAND_BUSWIDTH_16))
2033*4882a593Smuzhiyun min_oobbytes = 1;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun break;
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
2038*4882a593Smuzhiyun pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
2039*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2040*4882a593Smuzhiyun chip->ecc.size = 512;
2041*4882a593Smuzhiyun chip->ecc.bytes = 7;
2042*4882a593Smuzhiyun chip->ecc.strength = 4;
2043*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc_bch;
2044*4882a593Smuzhiyun chip->ecc.correct = nand_bch_correct_data;
2045*4882a593Smuzhiyun chip->ecc.calculate = omap_calculate_ecc_bch_sw;
2046*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
2047*4882a593Smuzhiyun /* Reserve one byte for the OMAP marker */
2048*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes + 1;
2049*4882a593Smuzhiyun /* Software BCH library is used for locating errors */
2050*4882a593Smuzhiyun chip->ecc.priv = nand_bch_init(mtd);
2051*4882a593Smuzhiyun if (!chip->ecc.priv) {
2052*4882a593Smuzhiyun dev_err(dev, "Unable to use BCH library\n");
2053*4882a593Smuzhiyun return -EINVAL;
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun break;
2056*4882a593Smuzhiyun
2057*4882a593Smuzhiyun case OMAP_ECC_BCH4_CODE_HW:
2058*4882a593Smuzhiyun pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
2059*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2060*4882a593Smuzhiyun chip->ecc.size = 512;
2061*4882a593Smuzhiyun /* 14th bit is kept reserved for ROM-code compatibility */
2062*4882a593Smuzhiyun chip->ecc.bytes = 7 + 1;
2063*4882a593Smuzhiyun chip->ecc.strength = 4;
2064*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc_bch;
2065*4882a593Smuzhiyun chip->ecc.correct = omap_elm_correct_data;
2066*4882a593Smuzhiyun chip->ecc.read_page = omap_read_page_bch;
2067*4882a593Smuzhiyun chip->ecc.write_page = omap_write_page_bch;
2068*4882a593Smuzhiyun chip->ecc.write_subpage = omap_write_subpage_bch;
2069*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2070*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes;
2071*4882a593Smuzhiyun
2072*4882a593Smuzhiyun err = elm_config(info->elm_dev, BCH4_ECC,
2073*4882a593Smuzhiyun mtd->writesize / chip->ecc.size,
2074*4882a593Smuzhiyun chip->ecc.size, chip->ecc.bytes);
2075*4882a593Smuzhiyun if (err < 0)
2076*4882a593Smuzhiyun return err;
2077*4882a593Smuzhiyun break;
2078*4882a593Smuzhiyun
2079*4882a593Smuzhiyun case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
2080*4882a593Smuzhiyun pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
2081*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2082*4882a593Smuzhiyun chip->ecc.size = 512;
2083*4882a593Smuzhiyun chip->ecc.bytes = 13;
2084*4882a593Smuzhiyun chip->ecc.strength = 8;
2085*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc_bch;
2086*4882a593Smuzhiyun chip->ecc.correct = nand_bch_correct_data;
2087*4882a593Smuzhiyun chip->ecc.calculate = omap_calculate_ecc_bch_sw;
2088*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
2089*4882a593Smuzhiyun /* Reserve one byte for the OMAP marker */
2090*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes + 1;
2091*4882a593Smuzhiyun /* Software BCH library is used for locating errors */
2092*4882a593Smuzhiyun chip->ecc.priv = nand_bch_init(mtd);
2093*4882a593Smuzhiyun if (!chip->ecc.priv) {
2094*4882a593Smuzhiyun dev_err(dev, "unable to use BCH library\n");
2095*4882a593Smuzhiyun return -EINVAL;
2096*4882a593Smuzhiyun }
2097*4882a593Smuzhiyun break;
2098*4882a593Smuzhiyun
2099*4882a593Smuzhiyun case OMAP_ECC_BCH8_CODE_HW:
2100*4882a593Smuzhiyun pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
2101*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2102*4882a593Smuzhiyun chip->ecc.size = 512;
2103*4882a593Smuzhiyun /* 14th bit is kept reserved for ROM-code compatibility */
2104*4882a593Smuzhiyun chip->ecc.bytes = 13 + 1;
2105*4882a593Smuzhiyun chip->ecc.strength = 8;
2106*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc_bch;
2107*4882a593Smuzhiyun chip->ecc.correct = omap_elm_correct_data;
2108*4882a593Smuzhiyun chip->ecc.read_page = omap_read_page_bch;
2109*4882a593Smuzhiyun chip->ecc.write_page = omap_write_page_bch;
2110*4882a593Smuzhiyun chip->ecc.write_subpage = omap_write_subpage_bch;
2111*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2112*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes;
2113*4882a593Smuzhiyun
2114*4882a593Smuzhiyun err = elm_config(info->elm_dev, BCH8_ECC,
2115*4882a593Smuzhiyun mtd->writesize / chip->ecc.size,
2116*4882a593Smuzhiyun chip->ecc.size, chip->ecc.bytes);
2117*4882a593Smuzhiyun if (err < 0)
2118*4882a593Smuzhiyun return err;
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun break;
2121*4882a593Smuzhiyun
2122*4882a593Smuzhiyun case OMAP_ECC_BCH16_CODE_HW:
2123*4882a593Smuzhiyun pr_info("Using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
2124*4882a593Smuzhiyun chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2125*4882a593Smuzhiyun chip->ecc.size = 512;
2126*4882a593Smuzhiyun chip->ecc.bytes = 26;
2127*4882a593Smuzhiyun chip->ecc.strength = 16;
2128*4882a593Smuzhiyun chip->ecc.hwctl = omap_enable_hwecc_bch;
2129*4882a593Smuzhiyun chip->ecc.correct = omap_elm_correct_data;
2130*4882a593Smuzhiyun chip->ecc.read_page = omap_read_page_bch;
2131*4882a593Smuzhiyun chip->ecc.write_page = omap_write_page_bch;
2132*4882a593Smuzhiyun chip->ecc.write_subpage = omap_write_subpage_bch;
2133*4882a593Smuzhiyun mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
2134*4882a593Smuzhiyun oobbytes_per_step = chip->ecc.bytes;
2135*4882a593Smuzhiyun
2136*4882a593Smuzhiyun err = elm_config(info->elm_dev, BCH16_ECC,
2137*4882a593Smuzhiyun mtd->writesize / chip->ecc.size,
2138*4882a593Smuzhiyun chip->ecc.size, chip->ecc.bytes);
2139*4882a593Smuzhiyun if (err < 0)
2140*4882a593Smuzhiyun return err;
2141*4882a593Smuzhiyun
2142*4882a593Smuzhiyun break;
2143*4882a593Smuzhiyun default:
2144*4882a593Smuzhiyun dev_err(dev, "Invalid or unsupported ECC scheme\n");
2145*4882a593Smuzhiyun return -EINVAL;
2146*4882a593Smuzhiyun }
2147*4882a593Smuzhiyun
2148*4882a593Smuzhiyun /* Check if NAND device's OOB is enough to store ECC signatures */
2149*4882a593Smuzhiyun min_oobbytes += (oobbytes_per_step *
2150*4882a593Smuzhiyun (mtd->writesize / chip->ecc.size));
2151*4882a593Smuzhiyun if (mtd->oobsize < min_oobbytes) {
2152*4882a593Smuzhiyun dev_err(dev,
2153*4882a593Smuzhiyun "Not enough OOB bytes: required = %d, available=%d\n",
2154*4882a593Smuzhiyun min_oobbytes, mtd->oobsize);
2155*4882a593Smuzhiyun return -EINVAL;
2156*4882a593Smuzhiyun }
2157*4882a593Smuzhiyun
2158*4882a593Smuzhiyun return 0;
2159*4882a593Smuzhiyun }
2160*4882a593Smuzhiyun
/* Controller hooks; attach_chip is invoked by nand_scan() after detection */
static const struct nand_controller_ops omap_nand_controller_ops = {
	.attach_chip = omap_nand_attach_chip,
};
2164*4882a593Smuzhiyun
/* Shared among all NAND instances to synchronize access to the ECC Engine */
static struct nand_controller omap_gpmc_controller;
/* Set once the shared controller has been initialized by the first probe */
static bool omap_gpmc_controller_initialized;
2168*4882a593Smuzhiyun
/**
 * omap_nand_probe() - bind a GPMC chip-select to the NAND framework
 * @pdev: platform device for this NAND instance
 *
 * Allocates the driver state, reads the configuration from the device
 * tree, maps the GPMC data register window, hooks the chip up to the
 * shared GPMC NAND controller, scans the chip and registers the
 * resulting MTD device.
 *
 * Return: 0 on success, a negative errno otherwise.
 */
static int omap_nand_probe(struct platform_device *pdev)
{
	struct omap_nand_info *info;
	struct mtd_info *mtd;
	struct nand_chip *nand_chip;
	int err;
	struct resource *res;
	struct device *dev = &pdev->dev;

	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->pdev = pdev;

	/* Chip-select, xfer mode, ECC scheme etc. come from the DT node */
	err = omap_get_dt_info(dev, info);
	if (err)
		return err;

	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
	if (!info->ops) {
		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
		return -ENODEV;
	}

	nand_chip = &info->nand;
	mtd = nand_to_mtd(nand_chip);
	mtd->dev.parent = &pdev->dev;
	nand_chip->ecc.priv = NULL;
	nand_set_flash_node(nand_chip, dev->of_node);

	/* Fall back to a generated name when the DT does not provide one */
	if (!mtd->name) {
		mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
					   "omap2-nand.%d", info->gpmc_cs);
		if (!mtd->name) {
			dev_err(&pdev->dev, "Failed to set MTD name\n");
			return -ENOMEM;
		}
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	nand_chip->legacy.IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(nand_chip->legacy.IO_ADDR_R))
		return PTR_ERR(nand_chip->legacy.IO_ADDR_R);

	info->phys_base = res->start;

	/* A single controller instance is shared by all GPMC chip-selects */
	if (!omap_gpmc_controller_initialized) {
		omap_gpmc_controller.ops = &omap_nand_controller_ops;
		nand_controller_init(&omap_gpmc_controller);
		omap_gpmc_controller_initialized = true;
	}

	nand_chip->controller = &omap_gpmc_controller;

	/* Reads and writes go through the same GPMC data window */
	nand_chip->legacy.IO_ADDR_W = nand_chip->legacy.IO_ADDR_R;
	nand_chip->legacy.cmd_ctrl = omap_hwcontrol;

	/* Optional ready/busy GPIO; absence is not an error */
	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
						    GPIOD_IN);
	if (IS_ERR(info->ready_gpiod)) {
		dev_err(dev, "failed to get ready gpio\n");
		return PTR_ERR(info->ready_gpiod);
	}

	/*
	 * If RDY/BSY line is connected to OMAP then use the omap ready
	 * function and the generic nand_wait function which reads the status
	 * register after monitoring the RDY/BSY line. Otherwise use a standard
	 * chip delay which is slightly more than tR (AC Timing) of the NAND
	 * device and read status register until you get a failure or success
	 */
	if (info->ready_gpiod) {
		nand_chip->legacy.dev_ready = omap_dev_ready;
		nand_chip->legacy.chip_delay = 0;
	} else {
		nand_chip->legacy.waitfunc = omap_wait;
		nand_chip->legacy.chip_delay = 50;
	}

	if (info->flash_bbt)
		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* scan NAND device connected to chip controller */
	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;

	err = nand_scan(nand_chip, 1);
	if (err)
		goto return_error;

	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		goto cleanup_nand;

	platform_set_drvdata(pdev, mtd);

	return 0;

cleanup_nand:
	nand_cleanup(nand_chip);

return_error:
	/* Resources acquired in attach_chip() are not devm-managed */
	if (!IS_ERR_OR_NULL(info->dma))
		dma_release_channel(info->dma);
	if (nand_chip->ecc.priv) {
		nand_bch_free(nand_chip->ecc.priv);
		nand_chip->ecc.priv = NULL;
	}
	return err;
}
2280*4882a593Smuzhiyun
omap_nand_remove(struct platform_device * pdev)2281*4882a593Smuzhiyun static int omap_nand_remove(struct platform_device *pdev)
2282*4882a593Smuzhiyun {
2283*4882a593Smuzhiyun struct mtd_info *mtd = platform_get_drvdata(pdev);
2284*4882a593Smuzhiyun struct nand_chip *nand_chip = mtd_to_nand(mtd);
2285*4882a593Smuzhiyun struct omap_nand_info *info = mtd_to_omap(mtd);
2286*4882a593Smuzhiyun int ret;
2287*4882a593Smuzhiyun
2288*4882a593Smuzhiyun if (nand_chip->ecc.priv) {
2289*4882a593Smuzhiyun nand_bch_free(nand_chip->ecc.priv);
2290*4882a593Smuzhiyun nand_chip->ecc.priv = NULL;
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun if (info->dma)
2293*4882a593Smuzhiyun dma_release_channel(info->dma);
2294*4882a593Smuzhiyun ret = mtd_device_unregister(mtd);
2295*4882a593Smuzhiyun WARN_ON(ret);
2296*4882a593Smuzhiyun nand_cleanup(nand_chip);
2297*4882a593Smuzhiyun return ret;
2298*4882a593Smuzhiyun }
2299*4882a593Smuzhiyun
/* Device-tree match table; also exported for module autoloading */
static const struct of_device_id omap_nand_ids[] = {
	{ .compatible = "ti,omap2-nand", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_nand_ids);
2305*4882a593Smuzhiyun
/* Platform driver glue; module_platform_driver() generates init/exit */
static struct platform_driver omap_nand_driver = {
	.probe = omap_nand_probe,
	.remove = omap_nand_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(omap_nand_ids),
	},
};

module_platform_driver(omap_nand_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
2320