/*
 * drivers/mtd/nand/raw/nand_util.c
 *
 * Copyright (C) 2006 by Weiss-Electronic GmbH.
 * All rights reserved.
 *
 * @author:	Guido Classen <clagix@gmail.com>
 * @descr:	NAND Flash support
 * @references: borrowed heavily from Linux mtd-utils code:
 *		flash_eraseall.c by Arcom Control System Ltd
 *		nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
 *			   and Thomas Gleixner (tglx@linutronix.de)
 *
 * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
 * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
 *
 * Copyright 2010 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <command.h>
#include <watchdog.h>
#include <malloc.h>
#include <memalign.h>
#include <div64.h>

#include <linux/errno.h>
#include <linux/mtd/mtd.h>
#include <nand.h>
#include <jffs2/jffs2.h>

typedef struct erase_info erase_info_t;
typedef struct mtd_info mtd_info_t;

/* support only for native endian JFFS2 */
#define cpu_to_je16(x) (x)
#define cpu_to_je32(x) (x)

/**
 * nand_erase_opts: - erase NAND flash with support for various options
 *		      (jffs2 formatting)
 *
 * @param mtd		nand mtd instance to erase
 * @param opts		options, @see struct nand_erase_options
 * @return		0 in case of success
 *
 * This code is ported from flash_eraseall.c from Linux mtd utils by
 * Arcom Control System Ltd.
 */
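/*
 * Illustrative call (a sketch, not part of this file's API surface; the
 * partition offset/size names below are placeholders). Only the fields
 * used by this function are shown, all others stay zeroed:
 *
 *	nand_erase_options_t opts;
 *
 *	memset(&opts, 0, sizeof(opts));
 *	opts.offset = part_offset;	// must be block-aligned
 *	opts.length = part_size;
 *	ret = nand_erase_opts(mtd, &opts);
 */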
int nand_erase_opts(struct mtd_info *mtd,
		    const nand_erase_options_t *opts)
{
	struct jffs2_unknown_node cleanmarker;
	erase_info_t erase;
	unsigned long erase_length, erased_length; /* in blocks */
	int result;
	int percent_complete = -1;
	const char *mtd_device = mtd->name;
	struct mtd_oob_ops oob_opts;
	struct nand_chip *chip = mtd_to_nand(mtd);

	if ((opts->offset & (mtd->erasesize - 1)) != 0) {
		printf("Attempt to erase non block-aligned data\n");
		return -1;
	}

	memset(&erase, 0, sizeof(erase));
	memset(&oob_opts, 0, sizeof(oob_opts));

	erase.mtd = mtd;
	erase.len = mtd->erasesize;
	erase.addr = opts->offset;
	erase_length = lldiv(opts->length + mtd->erasesize - 1,
			     mtd->erasesize);

	cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	cleanmarker.totlen = cpu_to_je32(8);

	/*
	 * The scrub option allows bad blocks to be erased as well. To keep
	 * the internal check in the erase() method from rejecting them,
	 * drop the bad block table while erasing.
	 */
	if (opts->scrub) {
		erase.scrub = opts->scrub;
		/*
		 * We don't need the bad block table anymore...
		 * after scrub, there are no bad blocks left!
		 */
		if (chip->bbt) {
			kfree(chip->bbt);
		}
		chip->bbt = NULL;
		chip->options &= ~NAND_BBT_SCANNED;
	}

	for (erased_length = 0;
	     erased_length < erase_length;
	     erase.addr += mtd->erasesize) {

		WATCHDOG_RESET();

		if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
			puts("Size of erase exceeds limit\n");
			return -EFBIG;
		}
		if (!opts->scrub) {
			int ret = mtd_block_isbad(mtd, erase.addr);
			if (ret > 0) {
				if (!opts->quiet)
					printf("\rSkipping bad block at "
					       "0x%08llx "
					       "                     \n",
					       erase.addr);

				if (!opts->spread)
					erased_length++;

				continue;

			} else if (ret < 0) {
				printf("\n%s: MTD get bad block failed: %d\n",
				       mtd_device,
				       ret);
				return -1;
			}
		}

		erased_length++;

		result = mtd_erase(mtd, &erase);
		if (result != 0) {
			printf("\n%s: MTD Erase failure: %d\n",
			       mtd_device, result);
			continue;
		}

		/* format for JFFS2 ? */
		if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
			struct mtd_oob_ops ops;
			ops.ooblen = 8;
			ops.datbuf = NULL;
			ops.oobbuf = (uint8_t *)&cleanmarker;
			ops.ooboffs = 0;
			ops.mode = MTD_OPS_AUTO_OOB;

			result = mtd_write_oob(mtd, erase.addr, &ops);
			if (result != 0) {
				printf("\n%s: MTD writeoob failure: %d\n",
				       mtd_device, result);
				continue;
			}
		}

		if (!opts->quiet) {
			unsigned long long n = erased_length * 100ULL;
			int percent;

			do_div(n, erase_length);
			percent = (int)n;

			/*
			 * Output the progress message only at whole percent
			 * steps to reduce the number of messages printed
			 * on (slow) serial consoles.
			 */
			if (percent != percent_complete) {
				percent_complete = percent;

				printf("\rErasing at 0x%llx -- %3d%% complete.",
				       erase.addr, percent);

				if (opts->jffs2 && result == 0)
					printf(" Cleanmarker written at 0x%llx.",
					       erase.addr);
			}
		}
	}
	if (!opts->quiet)
		printf("\n");

	return 0;
}

#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK

#define NAND_CMD_LOCK_TIGHT	0x2c
#define NAND_CMD_LOCK_STATUS	0x7a

/******************************************************************************
 * Support for locking / unlocking operations of some NAND devices
 *****************************************************************************/

/**
 * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
 *	      state
 *
 * @param mtd		nand mtd instance
 * @param tight		bring device in lock tight mode
 *
 * @return		0 on success, -1 in case of error
 *
 * The lock / lock-tight command only applies to the whole chip. To get some
 * parts of the chip locked and others unlocked, use the following sequence:
 *
 * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
 * - Call nand_unlock() once for each consecutive area to be unlocked
 * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
 *
 * If the device is in the lock-tight state, software can't change the
 * current active lock/unlock state of all pages. nand_lock() / nand_unlock()
 * calls will fail. It is only possible to leave the lock-tight state by
 * a hardware signal (low pulse on _WP pin) or by power-down.
 */
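/*
 * Illustrative sequence (a sketch of the steps described above; the
 * area_off and area_len names are placeholders):
 *
 *	nand_lock(mtd, 0);				// lock the whole chip
 *	nand_unlock(mtd, area_off, area_len, 0);	// unlock one area
 *	nand_lock(mtd, 1);				// optionally lock tight
 */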
int nand_lock(struct mtd_info *mtd, int tight)
{
	int ret = 0;
	int status;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device */
	chip->select_chip(mtd, 0);

	/* check the Lock Tight Status */
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_lock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	chip->cmdfunc(mtd,
		      (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
		      -1, -1);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);

	/* see if device thinks it succeeded */
	if (status & 0x01) {
		ret = -1;
	}

out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}

/**
 * nand_get_lock_status: - query current lock state from one page of NAND
 *			   flash
 *
 * @param mtd		nand mtd instance
 * @param offset	page address to query (must be page-aligned!)
 *
 * @return		-1 in case of error
 *			>0 lock status:
 *			  bitfield with the following combinations:
 *			  NAND_LOCK_STATUS_TIGHT: page in tight state
 *			  NAND_LOCK_STATUS_UNLOCK: page unlocked
 *
 */
int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
{
	int ret = 0;
	int chipnr;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device */
	chipnr = (int)(offset >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("nand_get_lock_status: "
		       "Start address must be beginning of "
		       "nand page!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Status */
	page = (int)(offset >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);

	ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
				      | NAND_LOCK_STATUS_UNLOCK);

out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}

/**
 * nand_unlock: - Unlock area of NAND pages
 *		  only one consecutive area can be unlocked at one time!
 *
 * @param mtd		nand mtd instance
 * @param start		start byte address
 * @param length	number of bytes to unlock (must be a multiple of
 *			page size mtd->writesize)
 * @param allexcept	if set, unlock everything not selected
 *
 * @return		0 on success, -1 in case of error
 */
int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
		int allexcept)
{
	int ret = 0;
	int chipnr;
	int status;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	debug("nand_unlock%s: start: %08llx, length: %zd!\n",
	      allexcept ? " (allexcept)" : "", start, length);

	/* select the NAND device */
	chipnr = (int)(start >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* check the WP bit */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
		printf("nand_unlock: Device is write protected!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Tight Status */
	page = (int)(start >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_unlock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	if ((start & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Start address must be beginning of "
		       "nand block!\n");
		ret = -1;
		goto out;
	}

	if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Length must be a multiple of nand block "
		       "size %08x!\n", mtd->erasesize);
		ret = -1;
		goto out;
	}

	/*
	 * Set length so that the last address is set to the
	 * starting address of the last block
	 */
	length -= mtd->erasesize;

	/* submit address of first page to unlock */
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* submit ADDRESS of LAST page to unlock */
	page += (int)(length >> chip->page_shift);

	/*
	 * Page addresses for unlocking are supposed to be block-aligned.
	 * At least some NAND chips use the low bit to indicate that the
	 * page range should be inverted.
	 */
	if (allexcept)
		page |= 1;

	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);
	/* see if device thinks it succeeded */
	if (status & 0x01) {
		/* there was an error */
		ret = -1;
		goto out;
	}

out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
#endif

/**
 * check_skip_len
 *
 * Check if there are any bad blocks, and whether the length, including
 * bad blocks, fits into the device.
 *
 * @param mtd		nand mtd instance
 * @param offset	offset in flash
 * @param length	image length
 * @param used		length of flash needed for the requested length
 * @return		0 if the image fits and there are no bad blocks
 *			1 if the image fits, but there are bad blocks
 *			-1 if the image does not fit
 */
static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
			  size_t *used)
{
	size_t len_excl_bad = 0;
	int ret = 0;

	while (len_excl_bad < length) {
		size_t block_len, block_off;
		loff_t block_start;

		if (offset >= mtd->size)
			return -1;

		block_start = offset & ~(loff_t)(mtd->erasesize - 1);
		block_off = offset & (mtd->erasesize - 1);
		block_len = mtd->erasesize - block_off;

		if (!nand_block_isbad(mtd, block_start))
			len_excl_bad += block_len;
		else
			ret = 1;

		offset += block_len;
		*used += block_len;
	}

	/* If the length is not a multiple of block_len, adjust. */
	if (len_excl_bad > length)
		*used -= (len_excl_bad - length);

	return ret;
}

#ifdef CONFIG_CMD_NAND_TRIMFFS
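/**
 * drop_ffs - compute the length of data to write, dropping trailing 0xff
 *
 * Scans the buffer backwards for the last non-0xff byte and returns the
 * resulting length rounded up to the flash page size (mtd->writesize),
 * but never more than the original *len.
 *
 * @param mtd		nand mtd instance
 * @param buf		buffer holding the data to be written
 * @param len		original length of the buffer in bytes
 * @return		number of bytes that actually need to be written
 */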
static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
		       const size_t *len)
{
	size_t l = *len;
	ssize_t i;

	for (i = l - 1; i >= 0; i--)
		if (buf[i] != 0xFF)
			break;

	/* The resulting length must be aligned to the minimum flash I/O size */
	l = i + 1;
	l = (l + mtd->writesize - 1) / mtd->writesize;
	l *= mtd->writesize;

	/*
	 * since the input length may be unaligned, prevent access past the end
	 * of the buffer
	 */
	return min(l, *len);
}
#endif

/**
 * nand_verify_page_oob:
 *
 * Verify a page of NAND flash, including the OOB.
 * Reads a page of NAND and verifies the contents and OOB against the
 * values in ops.
 *
 * @param mtd		nand mtd instance
 * @param ops		MTD operations, including data to verify
 * @param ofs		offset in flash
 * @return		0 in case of success
 */
int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
			 loff_t ofs)
{
	int rval;
	struct mtd_oob_ops vops;
	size_t verlen = mtd->writesize + mtd->oobsize;

	memcpy(&vops, ops, sizeof(vops));

	vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);

	if (!vops.datbuf)
		return -ENOMEM;

	vops.oobbuf = vops.datbuf + mtd->writesize;

	rval = mtd_read_oob(mtd, ofs, &vops);
	if (!rval)
		rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
	if (!rval)
		rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);

	free(vops.datbuf);

	return rval ? -EIO : 0;
}

/**
 * nand_verify:
 *
 * Verify a region of NAND flash.
 * Reads NAND in page-sized chunks and verifies the contents against
 * the contents of a buffer. The offset into the NAND must be
 * page-aligned, and the function doesn't handle skipping bad blocks.
 *
 * @param mtd		nand mtd instance
 * @param ofs		offset in flash
 * @param len		buffer length
 * @param buf		buffer to read from
 * @return		0 in case of success
 */
int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
{
	int rval = 0;
	size_t verofs;
	size_t verlen = mtd->writesize;
	uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);

	if (!verbuf)
		return -ENOMEM;

	/* Read the NAND back in page-size groups to limit malloc size */
	for (verofs = ofs; verofs < ofs + len;
	     verofs += verlen, buf += verlen) {
		verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
		rval = nand_read(mtd, verofs, &verlen, verbuf);
		if (!rval || (rval == -EUCLEAN))
			rval = memcmp(buf, verbuf, verlen);

		if (rval)
			break;
	}

	free(verbuf);

	return rval ? -EIO : 0;
}

/**
 * nand_write_skip_bad:
 *
 * Write image to NAND flash.
 * Blocks that are marked bad are skipped and the data is written to the
 * next block instead, as long as the image is short enough to fit even
 * after skipping the bad blocks. Due to bad blocks we may not be able to
 * perform the requested write. In the case where the write would
 * extend beyond the end of the NAND device, both length and actual (if
 * not NULL) are set to 0. In the case where the write would extend
 * beyond the limit we are passed, length is set to 0 and actual is set
 * to the required length.
 *
 * @param mtd		nand mtd instance
 * @param offset	offset in flash
 * @param length	buffer length
 * @param actual	set to size required to write length worth of
 *			buffer or 0 on error, if not NULL
 * @param lim		maximum size that actual may be in order to not
 *			exceed the buffer
 * @param buffer	buffer to read from
 * @param flags		flags modifying the behaviour of the write to NAND
 * @return		0 in case of success
 */
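/*
 * Illustrative call (a sketch; image_addr, image_len, part_off and part_size
 * are placeholders for values the caller already knows):
 *
 *	size_t len = image_len;
 *
 *	ret = nand_write_skip_bad(mtd, part_off, &len, NULL, part_size,
 *				  (u_char *)image_addr, WITH_WR_VERIFY);
 *
 * On error, len is reduced to the amount that was actually written.
 */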
int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
			size_t *actual, loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	size_t used_for_write = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	if (actual)
		*actual = 0;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	need_skip = check_skip_len(mtd, offset, *length, &used_for_write);

	if (actual)
		*actual = used_for_write;

	if (need_skip < 0) {
		printf("Attempt to write outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	if (used_for_write > lim) {
		puts("Size of write exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	if (!need_skip && !(flags & WITH_DROP_FFS)) {
		rval = nand_write(mtd, offset, length, buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset, *length, buffer);

		if (rval == 0)
			return 0;

		*length = 0;
		printf("NAND write to offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;

		WATCHDOG_RESET();

		if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
			printf("Skip bad block 0x%08llx\n",
			       offset & ~(mtd->erasesize - 1));
			offset += mtd->erasesize - block_offset;
			continue;
		}

		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
#ifdef CONFIG_CMD_NAND_TRIMFFS
		if (flags & WITH_DROP_FFS)
			truncated_write_size = drop_ffs(mtd, p_buffer,
							&write_size);
#endif

		rval = nand_write(mtd, offset, &truncated_write_size,
				  p_buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset,
					   truncated_write_size, p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}

/**
 * nand_read_skip_bad:
 *
 * Read image from NAND flash.
 * Blocks that are marked bad are skipped and the next block is read
 * instead, as long as the image is short enough to fit even after
 * skipping the bad blocks. Due to bad blocks we may not be able to
 * perform the requested read. In the case where the read would extend
 * beyond the end of the NAND device, both length and actual (if not
 * NULL) are set to 0. In the case where the read would extend beyond
 * the limit we are passed, length is set to 0 and actual is set to the
 * required length.
 *
 * @param mtd		nand mtd instance
 * @param offset	offset in flash
 * @param length	buffer length, on return holds number of read bytes
 * @param actual	set to size required to read length worth of buffer or 0
 *			on error, if not NULL
 * @param lim		maximum size that actual may be in order to not exceed
 *			the buffer
 * @param buffer	buffer to write to
 * @return		0 in case of success
 */
int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
		       size_t *actual, loff_t lim, u_char *buffer)
{
	int rval;
	size_t left_to_read = *length;
	size_t used_for_read = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	need_skip = check_skip_len(mtd, offset, *length, &used_for_read);

	if (actual)
		*actual = used_for_read;

	if (need_skip < 0) {
		printf("Attempt to read outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	if (used_for_read > lim) {
		puts("Size of read exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	if (!need_skip) {
		rval = nand_read(mtd, offset, length, buffer);
		if (!rval || rval == -EUCLEAN)
			return 0;

		*length = 0;
		printf("NAND read from offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;

		WATCHDOG_RESET();

		if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
			printf("Skipping bad block 0x%08llx\n",
			       offset & ~(mtd->erasesize - 1));
			offset += mtd->erasesize - block_offset;
			continue;
		}

		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = nand_read(mtd, offset, &read_length, p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}

#ifdef CONFIG_CMD_NAND_TORTURE

/**
 * check_pattern:
 *
 * Check if buffer contains only a certain byte pattern.
 *
 * @param buf		buffer to check
 * @param patt		the pattern to check
 * @param size		buffer size in bytes
 * @return		1 if there are only patt bytes in buf
 *			0 if something else was found
 */
static int check_pattern(const u_char *buf, u_char patt, int size)
{
	int i;

	for (i = 0; i < size; i++)
		if (buf[i] != patt)
			return 0;
	return 1;
}

/**
 * nand_torture:
 *
 * Torture a block of NAND flash.
 * This is useful to determine if a block that caused a write error is still
 * good or should be marked as bad.
 *
 * @param mtd		nand mtd instance
 * @param offset	offset in flash
 * @return		0 if the block is still good
 */
int nand_torture(struct mtd_info *mtd, loff_t offset)
{
	/* alternating bit patterns plus all-zeroes, to stress every cell */
	u_char patterns[] = {0xa5, 0x5a, 0x00};
	struct erase_info instr = {
		.mtd = mtd,
		.addr = offset,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int err, ret = -1, i, patt_count;
	u_char *buf;

	if ((offset & (mtd->erasesize - 1)) != 0) {
		puts("Attempt to torture a block at a non block-aligned offset\n");
		return -EINVAL;
	}

	if (offset + mtd->erasesize > mtd->size) {
		puts("Attempt to torture a block outside the flash area\n");
		return -EINVAL;
	}

	patt_count = ARRAY_SIZE(patterns);

	buf = malloc_cache_aligned(mtd->erasesize);
	if (buf == NULL) {
		puts("Out of memory for erase block buffer\n");
		return -ENOMEM;
	}

	for (i = 0; i < patt_count; i++) {
		err = mtd_erase(mtd, &instr);
		if (err) {
			printf("%s: erase() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		/* Make sure the block contains only 0xff bytes */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, 0xff, mtd->erasesize);
		if (!err) {
			printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
			       offset);
			ret = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(buf, patterns[i], mtd->erasesize);
		err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
		if (err || retlen != mtd->erasesize) {
			printf("%s: write() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, patterns[i], mtd->erasesize);
		if (!err) {
			printf("Pattern 0x%.2x checking failed for block at "
			       "0x%llx\n", patterns[i], offset);
			ret = -EIO;
			goto out;
		}
	}

	ret = 0;

out:
	free(buf);
	return ret;
}

#endif