1 /*
2 * drivers/mtd/nand/raw/pxa3xx_nand.c
3 *
4 * Copyright © 2005 Intel Corporation
5 * Copyright © 2006 Marvell International Ltd.
6 *
7 * SPDX-License-Identifier: GPL-2.0
8 */
9
10 #include <common.h>
11 #include <malloc.h>
12 #include <fdtdec.h>
13 #include <nand.h>
14 #include <linux/errno.h>
15 #include <asm/io.h>
16 #include <asm/arch/cpu.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/rawnand.h>
19 #include <linux/types.h>
20
21 #include "pxa3xx_nand.h"
22
23 DECLARE_GLOBAL_DATA_PTR;
24
#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200	/* command completion timeout (ms, via get_timer) */
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
/* Low 12 bits of NDCR are the per-interrupt mask bits (1 = masked) */
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types used by NFCv2 for chunked (multi-step) I/O */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))
128
129 /* error code and state */
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error (unused in PIO mode) */
	ERR_SENDCMD	= -2,	/* failed to issue a command */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error(s) */
};
138
/* driver state machine, tracked in pxa3xx_nand_info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,		/* command buffers prepared, not yet started */
	STATE_CMD_HANDLE,	/* controller requested the command buffer */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,	/* draining read data from the FIFO */
	STATE_PIO_WRITING,	/* feeding write data into the FIFO */
	STATE_CMD_DONE,
	STATE_READY,
};
151
/* Controller generation: NFCv1 (PXA SoC) or NFCv2 (Armada 370/XP) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
156
/* Per-chip-select state for one attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	int			use_ecc;	/* hardware ECC enabled for this chip */
	int			cs;		/* chip select this host sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
169
/* Controller-wide driver state */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* mapped controller registers */
	unsigned long		mmio_phys;
	/* completion flags polled by nand_cmdfunc / waitfunc */
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;	/* read offset into data_buff */
	unsigned int		buf_count;	/* valid bytes in data_buff */
	unsigned int		buf_size;
	unsigned int		data_buff_pos;	/* PIO write position (data) */
	unsigned int		oob_buff_pos;	/* PIO write position (OOB) */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* machine above */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			force_raw;	/* prevent use_ecc to be set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips seen in last op */
	unsigned int		max_bitflips;	/* max per-chunk bitflips, reported to MTD */
	int			retcode;	/* ERR_* result of last op */

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
253
/* Built-in timing sets, all values in nanoseconds (tR in ns as well) */
static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh   wp   rh   rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};
273
/* Known chips (matched by READID) for parts that do not report ONFI timings */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller(DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};
292
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad block table patterns: main table and its mirror */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
317
/* 2 KiB page, 4-bit BCH: ECC in the upper half of the 64-byte OOB */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};
327
/* 2 KiB page, 8-bit BCH: 64 ECC bytes starting at OOB offset 32 */
static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};
341
/* 4 KiB page, 4-bit BCH: two 32-byte ECC groups, one per 2 KiB chunk */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};
356
/* 8 KiB page, 4-bit BCH: four 32-byte ECC groups, one per 2 KiB chunk */
static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,

		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};
383
/*
 * 4 KiB page, 8-bit BCH.
 * NOTE(review): eccbytes is 128 but only 32 positions are listed and
 * oobfree is empty — the controller consumes the remaining ECC bytes
 * itself, outside the visible OOB area. Presumably intentional; confirm
 * against the controller spec before touching.
 */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};
393
/* 8 KiB page, 8-bit BCH */
static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};
400
/* Pack a cycle count into its NDTR0/NDTR1 field, clamped to field width */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
414
/*
 * Return the controller generation. This U-Boot driver only supports
 * the NFCv2 variant (Armada 370/XP/38x), so the value is fixed.
 */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}
420
pxa3xx_nand_set_timing(struct pxa3xx_nand_host * host,const struct pxa3xx_nand_timing * t)421 static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
422 const struct pxa3xx_nand_timing *t)
423 {
424 struct pxa3xx_nand_info *info = host->info_data;
425 unsigned long nand_clk = mvebu_get_nand_clock();
426 uint32_t ndtr0, ndtr1;
427
428 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
429 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
430 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
431 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
432 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
433 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
434
435 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
436 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
437 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
438
439 info->ndtr0cs0 = ndtr0;
440 info->ndtr1cs0 = ndtr1;
441 nand_writel(info, NDTR0CS0, ndtr0);
442 nand_writel(info, NDTR1CS0, ndtr1);
443 }
444
/*
 * Program NDTR0/NDTR1 from ONFI SDR timings. ONFI timings are given in
 * picoseconds, hence the DIV_ROUND_UP(x, 1000) conversions to ns before
 * ns2cycle() turns them into controller clock cycles.
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	/* tWP derived from the write-cycle budget left after tWH */
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	/* tRP derived from the read-cycle budget left after tREH */
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	/* chip_delay is in us; tR is needed in ns */
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	/* Cache the values so they can be restored on chip-select change */
	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
483
/*
 * Initialize controller timings for the attached chip.
 *
 * If the chip reports an ONFI async timing mode, use the corresponding
 * SDR timings. Otherwise fall back to the built-in table, matched by
 * the two-byte READID result; in that case the bus width bits in NDCR
 * are also configured from the table entry.
 *
 * Returns 0 on success, -EINVAL if the chip ID is unknown, or the
 * PTR_ERR of the ONFI timing lookup.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: identify it via READID */
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		/* Build the 16-bit id: maker byte + device byte */
		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* ONFI chip: pick the fastest advertised timing mode */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
536
537 /**
538 * NOTE: it is a must to set ND_RUN first, then write
539 * command buffer, otherwise, it does not work.
540 * We enable all the interrupt at the same time, and
541 * let pxa3xx_nand_irq to handle all logic.
542 */
pxa3xx_nand_start(struct pxa3xx_nand_info * info)543 static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
544 {
545 uint32_t ndcr;
546
547 ndcr = info->reg_ndcr;
548
549 if (info->use_ecc) {
550 ndcr |= NDCR_ECC_EN;
551 if (info->ecc_bch)
552 nand_writel(info, NDECCCTRL, 0x1);
553 } else {
554 ndcr &= ~NDCR_ECC_EN;
555 if (info->ecc_bch)
556 nand_writel(info, NDECCCTRL, 0x0);
557 }
558
559 ndcr &= ~NDCR_DMA_EN;
560
561 if (info->use_spare)
562 ndcr |= NDCR_SPARE_EN;
563 else
564 ndcr &= ~NDCR_SPARE_EN;
565
566 ndcr |= NDCR_ND_RUN;
567
568 /* clear status bits and run */
569 nand_writel(info, NDSR, NDSR_MASK);
570 nand_writel(info, NDCR, 0);
571 nand_writel(info, NDCR, ndcr);
572 }
573
/*
 * Disable the interrupts selected by @int_mask. In NDCR, interrupt
 * mask bits are set to DISABLE the corresponding interrupt, hence
 * the OR into the current register value.
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	nand_writel(info, NDCR, nand_readl(info, NDCR) | int_mask);
}
581
/*
 * Read @len 32-bit words from the controller data FIFO into @data.
 *
 * With BCH enabled (and not in raw mode) the FIFO must be drained in
 * 32-byte bursts, polling NDSR.RDDREQ between bursts as the datasheet
 * requires; the final burst (<= 8 words) is read without polling.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			/* 8 words = 32 bytes consumed */
			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}
614
/*
 * Transfer the current chunk's data (and spare area) between the
 * controller FIFO and the driver buffers, in the direction implied
 * by info->state, then advance the buffer positions.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		/* Raw reads already pulled the spare bytes with the data */
		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}
663
/*
 * "Threaded" half of the interrupt handler (called directly in U-Boot):
 * service the pending PIO data request, then acknowledge the data
 * request bits and mark the command done.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
671
/*
 * Interrupt handler, called from the nand_cmdfunc polling loop whenever
 * NDSR is non-zero. Decodes the status bits, records ECC results,
 * services data requests, loads the command buffers on WRCMDREQ, and
 * sets the cmd_complete/dev_ready flags the caller polls on.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* Ready/command-done bits differ per chip select */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* NFCv2 with BCH reports the actual bitflip count in NDSR */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
761
/* Return 1 iff every byte of buf[0..len) is 0xff (erased state), else 0 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
769
set_command_address(struct pxa3xx_nand_info * info,unsigned int page_size,uint16_t column,int page_addr)770 static void set_command_address(struct pxa3xx_nand_info *info,
771 unsigned int page_size, uint16_t column, int page_addr)
772 {
773 /* small page addr setting */
774 if (page_size < info->chunk_size) {
775 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
776 | (column & 0xFF);
777
778 info->ndcb2 = 0;
779 } else {
780 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
781 | (column & 0xFFFF);
782
783 if (page_addr & 0xFF0000)
784 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
785 else
786 info->ndcb2 = 0;
787 }
788 }
789
/*
 * Reset the per-command driver state before building @command's
 * NDCBx values: clear buffer positions, chunk counters and result
 * fields, and decide the ECC/spare policy for this command type.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size   = 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		/* Page I/O uses HW ECC, unless a raw access was requested */
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		/* Param page is read without the spare area */
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
837
/*
 * Build the NDCB0..NDCB3 command-buffer values for @command.
 *
 * @info:         controller state; receives the ndcb0..ndcb3 values
 * @command:      NAND_CMD_* opcode from the MTD layer
 * @ext_cmd_type: NFCv2 extended command type for chunked (multi-step) I/O
 * @column:       byte offset within the page
 * @page_addr:    page (row) address
 *
 * Returns 1 if the controller must execute a command, 0 when nothing
 * needs to be issued (small-page SEQIN only latches the address, ERASE2
 * is folded into ERASE1's double-byte command, blank page program is
 * skipped entirely).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Route the command to the proper chip select */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB read is a full-page read with the offset advanced */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/* Pick this step's chunk/spare sizes (last chunk may differ) */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			/* Raw read: length override includes spare + ECC bytes */
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming a page that is entirely 0xff */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Pick this step's chunk/spare sizes (last chunk may differ) */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: single SEQIN+PAGEPROG double-byte command */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* Double-byte command: ERASE1 + ERASE2 with auto read-status */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already issued as the second byte of ERASE1 */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1032
/*
 * MTD ->cmdfunc hook for devices whose page fits in a single chunk
 * (monolithic transfers). Prepares the command buffers, starts the
 * controller, and polls NDSR — dispatching to pxa3xx_nand_irq() —
 * until the command completes or CHIP_DELAY_TIMEOUT expires.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device ,then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* Poll for completion, servicing controller events inline */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
1092
nand_cmdfunc_extended(struct mtd_info * mtd,const unsigned command,int column,int page_addr)1093 static void nand_cmdfunc_extended(struct mtd_info *mtd,
1094 const unsigned command,
1095 int column, int page_addr)
1096 {
1097 struct nand_chip *chip = mtd_to_nand(mtd);
1098 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1099 struct pxa3xx_nand_info *info = host->info_data;
1100 int exec_cmd, ext_cmd_type;
1101
1102 /*
1103 * if this is a x16 device then convert the input
1104 * "byte" address into a "word" address appropriate
1105 * for indexing a word-oriented device
1106 */
1107 if (info->reg_ndcr & NDCR_DWIDTH_M)
1108 column /= 2;
1109
1110 /*
1111 * There may be different NAND chip hooked to
1112 * different chip select, so check whether
1113 * chip select has been changed, if yes, reset the timing
1114 */
1115 if (info->cs != host->cs) {
1116 info->cs = host->cs;
1117 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1118 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1119 }
1120
1121 /* Select the extended command for the first command */
1122 switch (command) {
1123 case NAND_CMD_READ0:
1124 case NAND_CMD_READOOB:
1125 ext_cmd_type = EXT_CMD_TYPE_MONO;
1126 break;
1127 case NAND_CMD_SEQIN:
1128 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1129 break;
1130 case NAND_CMD_PAGEPROG:
1131 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1132 break;
1133 default:
1134 ext_cmd_type = 0;
1135 break;
1136 }
1137
1138 prepare_start_command(info, command);
1139
1140 /*
1141 * Prepare the "is ready" completion before starting a command
1142 * transaction sequence. If the command is not executed the
1143 * completion will be completed, see below.
1144 *
1145 * We can do that inside the loop because the command variable
1146 * is invariant and thus so is the exec_cmd.
1147 */
1148 info->need_wait = 1;
1149 info->dev_ready = 0;
1150
1151 do {
1152 u32 ts;
1153
1154 info->state = STATE_PREPARED;
1155 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1156 column, page_addr);
1157 if (!exec_cmd) {
1158 info->need_wait = 0;
1159 info->dev_ready = 1;
1160 break;
1161 }
1162
1163 info->cmd_complete = 0;
1164 pxa3xx_nand_start(info);
1165
1166 ts = get_timer(0);
1167 while (1) {
1168 u32 status;
1169
1170 status = nand_readl(info, NDSR);
1171 if (status)
1172 pxa3xx_nand_irq(info);
1173
1174 if (info->cmd_complete)
1175 break;
1176
1177 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1178 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1179 return;
1180 }
1181 }
1182
1183 /* Only a few commands need several steps */
1184 if (command != NAND_CMD_PAGEPROG &&
1185 command != NAND_CMD_READ0 &&
1186 command != NAND_CMD_READOOB)
1187 break;
1188
1189 info->cur_chunk++;
1190
1191 /* Check if the sequence is complete */
1192 if (info->cur_chunk == info->ntotalchunks &&
1193 command != NAND_CMD_PAGEPROG)
1194 break;
1195
1196 /*
1197 * After a splitted program command sequence has issued
1198 * the command dispatch, the command sequence is complete.
1199 */
1200 if (info->cur_chunk == (info->ntotalchunks + 1) &&
1201 command == NAND_CMD_PAGEPROG &&
1202 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1203 break;
1204
1205 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1206 /* Last read: issue a 'last naked read' */
1207 if (info->cur_chunk == info->ntotalchunks - 1)
1208 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1209 else
1210 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1211
1212 /*
1213 * If a splitted program command has no more data to transfer,
1214 * the command dispatch must be issued to complete.
1215 */
1216 } else if (command == NAND_CMD_PAGEPROG &&
1217 info->cur_chunk == info->ntotalchunks) {
1218 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1219 }
1220 } while (1);
1221
1222 info->state = STATE_IDLE;
1223 }
1224
pxa3xx_nand_write_page_hwecc(struct mtd_info * mtd,struct nand_chip * chip,const uint8_t * buf,int oob_required,int page)1225 static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
1226 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1227 int page)
1228 {
1229 chip->write_buf(mtd, buf, mtd->writesize);
1230 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1231
1232 return 0;
1233 }
1234
/*
 * HW-ECC page read: copy data and OOB out of the driver buffer, then
 * translate the controller's ECC outcome (info->retcode) into MTD ECC
 * statistics.  Returns the maximum number of bitflips seen, as expected
 * by the rawnand read path.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	/* Data was transferred by the command path; copy it to the caller. */
	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		/* Controller corrected some bitflips: account them. */
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			/* Truly uncorrectable: not an erased page. */
			mtd->ecc_stats.failed++;
		} else if (bf) {
			/* Erased page with some 0-bits: treat as corrected. */
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1281
/*
 * Raw (ECC-off) page read, only available with the BCH engine: read the
 * page chunk by chunk, de-interleaving data, spare and ECC bytes into
 * buf and chip->oob_poi.
 */
static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	/*
	 * Use the controller-data accessor like every other hook in this
	 * driver instead of poking chip->priv directly.
	 */
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	/* ECC bytes live after all the spare areas inside the OOB buffer. */
	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	/* Handle the (possibly differently sized) last chunk, if any. */
	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}
1334
pxa3xx_nand_read_oob_raw(struct mtd_info * mtd,struct nand_chip * chip,int page)1335 static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1336 struct nand_chip *chip, int page)
1337 {
1338 /* Invalidate page cache */
1339 chip->pagebuf = -1;
1340
1341 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1342 page);
1343 }
1344
pxa3xx_nand_read_byte(struct mtd_info * mtd)1345 static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1346 {
1347 struct nand_chip *chip = mtd_to_nand(mtd);
1348 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1349 struct pxa3xx_nand_info *info = host->info_data;
1350 char retval = 0xFF;
1351
1352 if (info->buf_start < info->buf_count)
1353 /* Has just send a new command? */
1354 retval = info->data_buff[info->buf_start++];
1355
1356 return retval;
1357 }
1358
pxa3xx_nand_read_word(struct mtd_info * mtd)1359 static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1360 {
1361 struct nand_chip *chip = mtd_to_nand(mtd);
1362 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1363 struct pxa3xx_nand_info *info = host->info_data;
1364 u16 retval = 0xFFFF;
1365
1366 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1367 retval = *((u16 *)(info->data_buff+info->buf_start));
1368 info->buf_start += 2;
1369 }
1370 return retval;
1371 }
1372
/*
 * Copy up to len bytes out of the driver's data buffer, clamped to what
 * is still available, and advance the read position.
 */
static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int copy_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, copy_len);
	info->buf_start += copy_len;
}
1383
pxa3xx_nand_write_buf(struct mtd_info * mtd,const uint8_t * buf,int len)1384 static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1385 const uint8_t *buf, int len)
1386 {
1387 struct nand_chip *chip = mtd_to_nand(mtd);
1388 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1389 struct pxa3xx_nand_info *info = host->info_data;
1390 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1391
1392 memcpy(info->data_buff + info->buf_start, buf, real_len);
1393 info->buf_start += real_len;
1394 }
1395
/*
 * No-op: chip select changes are handled in the command path, which
 * reloads the timing registers when info->cs != host->cs.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1400
pxa3xx_nand_waitfunc(struct mtd_info * mtd,struct nand_chip * this)1401 static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1402 {
1403 struct nand_chip *chip = mtd_to_nand(mtd);
1404 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
1405 struct pxa3xx_nand_info *info = host->info_data;
1406
1407 if (info->need_wait) {
1408 u32 ts;
1409
1410 info->need_wait = 0;
1411
1412 ts = get_timer(0);
1413 while (1) {
1414 u32 status;
1415
1416 status = nand_readl(info, NDSR);
1417 if (status)
1418 pxa3xx_nand_irq(info);
1419
1420 if (info->dev_ready)
1421 break;
1422
1423 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1424 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1425 return NAND_STATUS_FAIL;
1426 }
1427 }
1428 }
1429
1430 /* pxa3xx_nand_send_command has waited for command complete */
1431 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1432 if (info->retcode == ERR_NONE)
1433 return 0;
1434 else
1435 return NAND_STATUS_FAIL;
1436 }
1437
1438 return NAND_STATUS_READY;
1439 }
1440
pxa3xx_nand_config_ident(struct pxa3xx_nand_info * info)1441 static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1442 {
1443 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1444
1445 /* Configure default flash values */
1446 info->reg_ndcr = 0x0; /* enable all interrupts */
1447 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1448 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1449 info->reg_ndcr |= NDCR_SPARE_EN;
1450
1451 return 0;
1452 }
1453
pxa3xx_nand_config_tail(struct pxa3xx_nand_info * info)1454 static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
1455 {
1456 struct pxa3xx_nand_host *host = info->host[info->cs];
1457 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
1458 struct nand_chip *chip = mtd_to_nand(mtd);
1459
1460 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1461 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1462 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
1463 }
1464
pxa3xx_nand_detect_config(struct pxa3xx_nand_info * info)1465 static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
1466 {
1467 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1468 uint32_t ndcr = nand_readl(info, NDCR);
1469
1470 /* Set an initial chunk size */
1471 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
1472 info->reg_ndcr = ndcr &
1473 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1474 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1475 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1476 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
1477 }
1478
pxa3xx_nand_init_buff(struct pxa3xx_nand_info * info)1479 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1480 {
1481 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1482 if (info->data_buff == NULL)
1483 return -ENOMEM;
1484 return 0;
1485 }
1486
pxa3xx_nand_sensing(struct pxa3xx_nand_host * host)1487 static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1488 {
1489 struct pxa3xx_nand_info *info = host->info_data;
1490 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1491 struct mtd_info *mtd;
1492 struct nand_chip *chip;
1493 const struct nand_sdr_timings *timings;
1494 int ret;
1495
1496 mtd = nand_to_mtd(&info->host[info->cs]->chip);
1497 chip = mtd_to_nand(mtd);
1498
1499 /* configure default flash values */
1500 info->reg_ndcr = 0x0; /* enable all interrupts */
1501 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1502 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1503 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1504
1505 /* use the common timing to make a try */
1506 timings = onfi_async_timing_mode_to_sdr_timings(0);
1507 if (IS_ERR(timings))
1508 return PTR_ERR(timings);
1509
1510 pxa3xx_nand_set_sdr_timing(host, timings);
1511
1512 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1513 ret = chip->waitfunc(mtd, chip);
1514 if (ret & NAND_STATUS_FAIL)
1515 return -ENODEV;
1516
1517 return 0;
1518 }
1519
/*
 * Select the controller chunk/spare/ECC layout matching the requested
 * ECC strength, step size and page size.
 *
 * Fills info->{nfullchunks,ntotalchunks,chunk_size,spare_size,ecc_size}
 * (plus last_chunk_size/last_spare_size for the BCH8 layouts) and the
 * generic ecc control structure (mode/size/strength/layout).
 * Returns 0 on success, -ENODEV for unsupported combinations.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* Hamming, 1-bit per 512 bytes, 2KB page */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* Hamming, 1-bit per 512 bytes, 512B page */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 *
	 * The BCH8 layouts use an extra, differently-sized trailing chunk
	 * (ntotalchunks > nfullchunks) with its own last_* geometry.
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1639
/*
 * Full chip bring-up for the current chip select: configure (or keep)
 * the controller setup, identify the flash, program timings, select the
 * ECC scheme, size the data buffer and finish the MTD scan.
 * Returns 0 on success or a negative error.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		/* Trust the configuration left by the previous boot stage. */
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(&info->pdev->dev,
				 "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/* Platform-provided ECC parameters win over detected ones. */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* 3 row-address cycles are needed beyond 65536 pages. */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1750
alloc_nand_resource(struct pxa3xx_nand_info * info)1751 static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1752 {
1753 struct pxa3xx_nand_platform_data *pdata;
1754 struct pxa3xx_nand_host *host;
1755 struct nand_chip *chip = NULL;
1756 struct mtd_info *mtd;
1757 int ret, cs;
1758
1759 pdata = info->pdata;
1760 if (pdata->num_cs <= 0)
1761 return -ENODEV;
1762
1763 info->variant = pxa3xx_nand_get_variant();
1764 for (cs = 0; cs < pdata->num_cs; cs++) {
1765 chip = (struct nand_chip *)
1766 ((u8 *)&info[1] + sizeof(*host) * cs);
1767 mtd = nand_to_mtd(chip);
1768 host = (struct pxa3xx_nand_host *)chip;
1769 info->host[cs] = host;
1770 host->cs = cs;
1771 host->info_data = info;
1772 mtd->owner = THIS_MODULE;
1773
1774 nand_set_controller_data(chip, host);
1775 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1776 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1777 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
1778 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1779 chip->controller = &info->controller;
1780 chip->waitfunc = pxa3xx_nand_waitfunc;
1781 chip->select_chip = pxa3xx_nand_select_chip;
1782 chip->read_word = pxa3xx_nand_read_word;
1783 chip->read_byte = pxa3xx_nand_read_byte;
1784 chip->read_buf = pxa3xx_nand_read_buf;
1785 chip->write_buf = pxa3xx_nand_write_buf;
1786 chip->options |= NAND_NO_SUBPAGE_WRITE;
1787 chip->cmdfunc = nand_cmdfunc;
1788 }
1789
1790 /* Allocate a buffer to allow flash detection */
1791 info->buf_size = INIT_BUFFER_SIZE;
1792 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1793 if (info->data_buff == NULL) {
1794 ret = -ENOMEM;
1795 goto fail_disable_clk;
1796 }
1797
1798 /* initialize all interrupts to be disabled */
1799 disable_int(info, NDSR_MASK);
1800
1801 return 0;
1802
1803 kfree(info->data_buff);
1804 fail_disable_clk:
1805 return ret;
1806 }
1807
pxa3xx_nand_probe_dt(struct pxa3xx_nand_info * info)1808 static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1809 {
1810 struct pxa3xx_nand_platform_data *pdata;
1811 const void *blob = gd->fdt_blob;
1812 int node = -1;
1813
1814 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1815 if (!pdata)
1816 return -ENOMEM;
1817
1818 /* Get address decoding nodes from the FDT blob */
1819 do {
1820 node = fdt_node_offset_by_compatible(blob, node,
1821 "marvell,mvebu-pxa3xx-nand");
1822 if (node < 0)
1823 break;
1824
1825 /* Bypass disabeld nodes */
1826 if (!fdtdec_get_is_enabled(blob, node))
1827 continue;
1828
1829 /* Get the first enabled NAND controler base address */
1830 info->mmio_base =
1831 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1832 blob, node, "reg", 0, NULL, true);
1833
1834 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1835 if (pdata->num_cs != 1) {
1836 pr_err("pxa3xx driver supports single CS only\n");
1837 break;
1838 }
1839
1840 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1841 pdata->enable_arbiter = 1;
1842
1843 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1844 pdata->keep_config = 1;
1845
1846 /*
1847 * ECC parameters.
1848 * If these are not set, they will be selected according
1849 * to the detected flash type.
1850 */
1851 /* ECC strength */
1852 pdata->ecc_strength = fdtdec_get_int(blob, node,
1853 "nand-ecc-strength", 0);
1854
1855 /* ECC step size */
1856 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1857 "nand-ecc-step-size", 0);
1858
1859 info->pdata = pdata;
1860
1861 /* Currently support only a single NAND controller */
1862 return 0;
1863
1864 } while (node >= 0);
1865
1866 return -EINVAL;
1867 }
1868
pxa3xx_nand_probe(struct pxa3xx_nand_info * info)1869 static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1870 {
1871 struct pxa3xx_nand_platform_data *pdata;
1872 int ret, cs, probe_success;
1873
1874 ret = pxa3xx_nand_probe_dt(info);
1875 if (ret)
1876 return ret;
1877
1878 pdata = info->pdata;
1879
1880 ret = alloc_nand_resource(info);
1881 if (ret) {
1882 dev_err(&pdev->dev, "alloc nand resource failed\n");
1883 return ret;
1884 }
1885
1886 probe_success = 0;
1887 for (cs = 0; cs < pdata->num_cs; cs++) {
1888 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
1889
1890 /*
1891 * The mtd name matches the one used in 'mtdparts' kernel
1892 * parameter. This name cannot be changed or otherwise
1893 * user's mtd partitions configuration would get broken.
1894 */
1895 mtd->name = "pxa3xx_nand-0";
1896 info->cs = cs;
1897 ret = pxa3xx_nand_scan(mtd);
1898 if (ret) {
1899 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1900 cs);
1901 continue;
1902 }
1903
1904 if (nand_register(cs, mtd))
1905 continue;
1906
1907 probe_success = 1;
1908 }
1909
1910 if (!probe_success)
1911 return -ENODEV;
1912
1913 return 0;
1914 }
1915
1916 /*
1917 * Main initialization routine
1918 */
board_nand_init(void)1919 void board_nand_init(void)
1920 {
1921 struct pxa3xx_nand_info *info;
1922 struct pxa3xx_nand_host *host;
1923 int ret;
1924
1925 info = kzalloc(sizeof(*info) +
1926 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1927 GFP_KERNEL);
1928 if (!info)
1929 return;
1930
1931 ret = pxa3xx_nand_probe(info);
1932 if (ret)
1933 return;
1934 }
1935