// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2008 Nokia Corporation
 *
 * Test OOB read and write on MTD device.
 *
 * Author: Adrian Hunter <ext-adrian.hunter@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/div64.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/random.h>

#include "mtd_test.h"

static int dev = -EINVAL;
static int bitflip_limit;
module_param(dev, int, S_IRUGO);
MODULE_PARM_DESC(dev, "MTD device number to use");
module_param(bitflip_limit, int, S_IRUGO);
MODULE_PARM_DESC(bitflip_limit, "Max. allowed bitflips per page");

static struct mtd_info *mtd;
static unsigned char *readbuf;
static unsigned char *writebuf;
static unsigned char *bbt;

static int ebcnt;
static int pgcnt;
static int errcnt;
static int use_offset;
static int use_len;
static int use_len_max;
static int vary_offset;
static struct rnd_state rnd_state;

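/*
 * Walk the OOB offset and length used for partial accesses: shrink the
 * length by one byte per page and, once it runs out, advance the start
 * offset (wrapping within the free OOB area) and refill the length.
 */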
static void do_vary_offset(void)
{
	use_len -= 1;
	if (use_len < 1) {
		use_offset += 1;
		if (use_offset >= use_len_max)
			use_offset = 0;
		use_len = use_len_max - use_offset;
	}
}

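/*
 * Write pseudo-random data to the free OOB area of every page in the
 * eraseblock, use_len bytes at a time starting at use_offset.
 */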
static int write_eraseblock(int ebnum)
{
	int i;
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;

	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = use_len;
		ops.oobretlen = 0;
		ops.ooboffs = use_offset;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf + (use_len_max * i) + use_offset;
		err = mtd_write_oob(mtd, addr, &ops);
		if (err || ops.oobretlen != use_len) {
			pr_err("error: writeoob failed at %#llx\n",
			       (long long)addr);
			pr_err("error: use_len %d, use_offset %d\n",
			       use_len, use_offset);
			errcnt += 1;
			return err ? err : -1;
		}
		if (vary_offset)
			do_vary_offset();
	}

	return err;
}

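/* Write the OOB area of every good eraseblock on the device */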
static int write_whole_device(void)
{
	int err;
	unsigned int i;

	pr_info("writing OOBs of whole device\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = write_eraseblock(i);
		if (err)
			return err;
		if (i % 256 == 0)
			pr_info("written up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			return err;
	}
	pr_info("written %u eraseblocks\n", i);
	return 0;
}

/*
 * Display the address, offset and data bytes at comparison failure.
 * Return number of bitflips encountered.
 */
static size_t memcmpshowoffset(loff_t addr, loff_t offset, const void *cs,
			       const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--, i++) {
		res = *su1 ^ *su2;
		if (res) {
			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0x%x diff 0x%x\n",
				(unsigned long)addr, (unsigned long)offset + i,
				*su1, *su2, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}

#define memcmpshow(addr, cs, ct, count) memcmpshowoffset((addr), 0, (cs), (ct),\
							 (count))

/*
 * Compare with 0xff and show the address, offset and data bytes at
 * comparison failure. Return number of bitflips encountered.
 */
static size_t memffshow(loff_t addr, loff_t offset, const void *cs,
			size_t count)
{
	const unsigned char *su1;
	int res;
	size_t i = 0;
	size_t bitflips = 0;

	for (su1 = cs; 0 < count; ++su1, count--, i++) {
		res = *su1 ^ 0xff;
		if (res) {
			pr_info("error @addr[0x%lx:0x%lx] 0x%x -> 0xff diff 0x%x\n",
				(unsigned long)addr, (unsigned long)offset + i,
				*su1, res);
			bitflips += hweight8(res);
		}
	}

	return bitflips;
}

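/*
 * Read back and verify the OOB area of every page in the eraseblock.
 * For partial accesses, also re-read the whole free OOB area and check
 * that the bytes outside [use_offset, use_offset + use_len) still read
 * back as 0xff.
 */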
static int verify_eraseblock(int ebnum)
{
	int i;
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;
	size_t bitflips;

	prandom_bytes_state(&rnd_state, writebuf, use_len_max * pgcnt);
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = use_len;
		ops.oobretlen = 0;
		ops.ooboffs = use_offset;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		err = mtd_read_oob(mtd, addr, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err || ops.oobretlen != use_len) {
			pr_err("error: readoob failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			return err ? err : -1;
		}

		bitflips = memcmpshow(addr, readbuf,
				      writebuf + (use_len_max * i) + use_offset,
				      use_len);
		if (bitflips > bitflip_limit) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				return -1;
			}
		} else if (bitflips) {
			pr_info("ignoring error as within bitflip_limit\n");
		}

		if (use_offset != 0 || use_len < mtd->oobavail) {
			int k;

			ops.mode = MTD_OPS_AUTO_OOB;
			ops.len = 0;
			ops.retlen = 0;
			ops.ooblen = mtd->oobavail;
			ops.oobretlen = 0;
			ops.ooboffs = 0;
			ops.datbuf = NULL;
			ops.oobbuf = readbuf;
			err = mtd_read_oob(mtd, addr, &ops);
			if (mtd_is_bitflip(err))
				err = 0;

			if (err || ops.oobretlen != mtd->oobavail) {
				pr_err("error: readoob failed at %#llx\n",
				       (long long)addr);
				errcnt += 1;
				return err ? err : -1;
			}
			bitflips = memcmpshowoffset(addr, use_offset,
						    readbuf + use_offset,
						    writebuf + (use_len_max * i) + use_offset,
						    use_len);

			/* verify pre-offset area for 0xff */
			bitflips += memffshow(addr, 0, readbuf, use_offset);

			/* verify post-(use_offset + use_len) area for 0xff */
			k = use_offset + use_len;
			bitflips += memffshow(addr, k, readbuf + k,
					      mtd->oobavail - k);

			if (bitflips > bitflip_limit) {
				pr_err("error: verify failed at %#llx\n",
				       (long long)addr);
				errcnt += 1;
				if (errcnt > 1000) {
					pr_err("error: too many errors\n");
					return -1;
				}
			} else if (bitflips) {
				pr_info("ignoring errors as within bitflip limit\n");
			}
		}
		if (vary_offset)
			do_vary_offset();
	}
	return err;
}

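/*
 * Read the free OOB area of a whole eraseblock in a single request,
 * then verify it page by page so the bitflip limit is enforced per page.
 */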
static int verify_eraseblock_in_one_go(int ebnum)
{
	struct mtd_oob_ops ops;
	int err = 0;
	loff_t addr = (loff_t)ebnum * mtd->erasesize;
	size_t len = mtd->oobavail * pgcnt;
	size_t oobavail = mtd->oobavail;
	size_t bitflips;
	int i;

	prandom_bytes_state(&rnd_state, writebuf, len);
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = len;
	ops.oobretlen = 0;
	ops.ooboffs = 0;
	ops.datbuf = NULL;
	ops.oobbuf = readbuf;

	/* read entire block's OOB at one go */
	err = mtd_read_oob(mtd, addr, &ops);
	if (mtd_is_bitflip(err))
		err = 0;

	if (err || ops.oobretlen != len) {
		pr_err("error: readoob failed at %#llx\n",
		       (long long)addr);
		errcnt += 1;
		return err ? err : -1;
	}

	/* verify one page OOB at a time for bitflip per page limit check */
	for (i = 0; i < pgcnt; ++i, addr += mtd->writesize) {
		bitflips = memcmpshow(addr, readbuf + (i * oobavail),
				      writebuf + (i * oobavail), oobavail);
		if (bitflips > bitflip_limit) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				return -1;
			}
		} else if (bitflips) {
			pr_info("ignoring error as within bitflip_limit\n");
		}
	}

	return err;
}

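/* Verify the OOB area of every good eraseblock on the device */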
static int verify_all_eraseblocks(void)
{
	int err;
	unsigned int i;

	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = verify_eraseblock(i);
		if (err)
			return err;
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			return err;
	}
	pr_info("verified %u eraseblocks\n", i);
	return 0;
}

static int __init mtd_oobtest_init(void)
{
	int err = 0;
	unsigned int i;
	uint64_t tmp;
	struct mtd_oob_ops ops;
	loff_t addr = 0, addr0;

	printk(KERN_INFO "\n");
	printk(KERN_INFO "=================================================\n");

	if (dev < 0) {
		pr_info("Please specify a valid mtd-device via module parameter\n");
		pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
		return -EINVAL;
	}

	pr_info("MTD device: %d\n", dev);

	mtd = get_mtd_device(NULL, dev);
	if (IS_ERR(mtd)) {
		err = PTR_ERR(mtd);
		pr_err("error: cannot get MTD device\n");
		return err;
	}

	if (!mtd_type_is_nand(mtd)) {
		pr_info("this test requires NAND flash\n");
		goto out;
	}

	tmp = mtd->size;
	do_div(tmp, mtd->erasesize);
	ebcnt = tmp;
	pgcnt = mtd->erasesize / mtd->writesize;

	pr_info("MTD device size %llu, eraseblock size %u, "
		"page size %u, count of eraseblocks %u, pages per "
		"eraseblock %u, OOB size %u\n",
		(unsigned long long)mtd->size, mtd->erasesize,
		mtd->writesize, ebcnt, pgcnt, mtd->oobsize);

	err = -ENOMEM;
	readbuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!readbuf)
		goto out;
	writebuf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!writebuf)
		goto out;
	bbt = kzalloc(ebcnt, GFP_KERNEL);
	if (!bbt)
		goto out;

	err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 0;

	/* First test: write all OOB, read it back and verify */
	pr_info("test 1 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 1);
	err = write_whole_device();
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 1);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	/*
	 * Second test: write all OOB, a block at a time, read it back and
	 * verify.
	 */
	pr_info("test 2 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	prandom_seed_state(&rnd_state, 3);
	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	prandom_seed_state(&rnd_state, 3);
	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		if (bbt[i])
			continue;
		err = verify_eraseblock_in_one_go(i);
		if (err)
			goto out;
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			goto out;
	}
	pr_info("verified %u eraseblocks\n", i);

	/*
	 * Third test: write OOB at varying offsets and lengths, read it back
	 * and verify.
	 */
	pr_info("test 3 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	/* Write all eraseblocks */
	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 1;
	prandom_seed_state(&rnd_state, 5);

	err = write_whole_device();
	if (err)
		goto out;

	/* Check all eraseblocks */
	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 1;
	prandom_seed_state(&rnd_state, 5);
	err = verify_all_eraseblocks();
	if (err)
		goto out;

	use_offset = 0;
	use_len = mtd->oobavail;
	use_len_max = mtd->oobavail;
	vary_offset = 0;

	/* Fourth test: try to write off end of device */
	pr_info("test 4 of 5\n");

	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	addr0 = 0;
	for (i = 0; i < ebcnt && bbt[i]; ++i)
		addr0 += mtd->erasesize;

	/* Attempt to write off end of OOB */
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = 1;
	ops.oobretlen = 0;
	ops.ooboffs = mtd->oobavail;
	ops.datbuf = NULL;
	ops.oobbuf = writebuf;
	pr_info("attempting to start write past end of OOB\n");
	pr_info("an error is expected...\n");
	err = mtd_write_oob(mtd, addr0, &ops);
	if (err) {
		pr_info("error occurred as expected\n");
		err = 0;
	} else {
		pr_err("error: can write past end of OOB\n");
		errcnt += 1;
	}

	/* Attempt to read off end of OOB */
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = 0;
	ops.retlen = 0;
	ops.ooblen = 1;
	ops.oobretlen = 0;
	ops.ooboffs = mtd->oobavail;
	ops.datbuf = NULL;
	ops.oobbuf = readbuf;
	pr_info("attempting to start read past end of OOB\n");
	pr_info("an error is expected...\n");
	err = mtd_read_oob(mtd, addr0, &ops);
	if (mtd_is_bitflip(err))
		err = 0;

	if (err) {
		pr_info("error occurred as expected\n");
		err = 0;
	} else {
		pr_err("error: can read past end of OOB\n");
		errcnt += 1;
	}

	if (bbt[ebcnt - 1])
		pr_info("skipping end of device tests because last "
			"block is bad\n");
	else {
		/* Attempt to write off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf;
		pr_info("attempting to write past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail + 1;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		pr_info("attempting to read past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: read past end of device\n");
			errcnt += 1;
		}

		err = mtdtest_erase_eraseblock(mtd, ebcnt - 1);
		if (err)
			goto out;

		/* Attempt to write off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs = 1;
		ops.datbuf = NULL;
		ops.oobbuf = writebuf;
		pr_info("attempting to write past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: wrote past end of device\n");
			errcnt += 1;
		}

		/* Attempt to read off end of device */
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail;
		ops.oobretlen = 0;
		ops.ooboffs = 1;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		pr_info("attempting to read past end of device\n");
		pr_info("an error is expected...\n");
		err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err) {
			pr_info("error occurred as expected\n");
			err = 0;
		} else {
			pr_err("error: read past end of device\n");
			errcnt += 1;
		}
	}

	/* Fifth test: write / read across block boundaries */
	pr_info("test 5 of 5\n");

	/* Erase all eraseblocks */
	err = mtdtest_erase_good_eraseblocks(mtd, bbt, 0, ebcnt);
	if (err)
		goto out;

	/* Write all eraseblocks */
	prandom_seed_state(&rnd_state, 11);
	pr_info("writing OOBs of whole device\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		int cnt = 2;
		int pg;
		size_t sz = mtd->oobavail;
		if (bbt[i] || bbt[i + 1])
			continue;
		addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
		prandom_bytes_state(&rnd_state, writebuf, sz * cnt);
		for (pg = 0; pg < cnt; ++pg) {
			ops.mode = MTD_OPS_AUTO_OOB;
			ops.len = 0;
			ops.retlen = 0;
			ops.ooblen = sz;
			ops.oobretlen = 0;
			ops.ooboffs = 0;
			ops.datbuf = NULL;
			ops.oobbuf = writebuf + pg * sz;
			err = mtd_write_oob(mtd, addr, &ops);
			if (err)
				goto out;
			if (i % 256 == 0)
				pr_info("written up to eraseblock %u\n", i);

			err = mtdtest_relax();
			if (err)
				goto out;

			addr += mtd->writesize;
		}
	}
	pr_info("written %u eraseblocks\n", i);

	/* Check all eraseblocks */
	prandom_seed_state(&rnd_state, 11);
	pr_info("verifying all eraseblocks\n");
	for (i = 0; i < ebcnt - 1; ++i) {
		if (bbt[i] || bbt[i + 1])
			continue;
		prandom_bytes_state(&rnd_state, writebuf, mtd->oobavail * 2);
		addr = (loff_t)(i + 1) * mtd->erasesize - mtd->writesize;
		ops.mode = MTD_OPS_AUTO_OOB;
		ops.len = 0;
		ops.retlen = 0;
		ops.ooblen = mtd->oobavail * 2;
		ops.oobretlen = 0;
		ops.ooboffs = 0;
		ops.datbuf = NULL;
		ops.oobbuf = readbuf;
		err = mtd_read_oob(mtd, addr, &ops);
		if (mtd_is_bitflip(err))
			err = 0;

		if (err)
			goto out;
		if (memcmpshow(addr, readbuf, writebuf,
			       mtd->oobavail * 2)) {
			pr_err("error: verify failed at %#llx\n",
			       (long long)addr);
			errcnt += 1;
			if (errcnt > 1000) {
				pr_err("error: too many errors\n");
				goto out;
			}
		}
		if (i % 256 == 0)
			pr_info("verified up to eraseblock %u\n", i);

		err = mtdtest_relax();
		if (err)
			goto out;
	}
	pr_info("verified %u eraseblocks\n", i);

	pr_info("finished with %d errors\n", errcnt);
out:
	kfree(bbt);
	kfree(writebuf);
	kfree(readbuf);
	put_mtd_device(mtd);
	if (err)
		pr_info("error %d occurred\n", err);
	printk(KERN_INFO "=================================================\n");
	return err;
}
module_init(mtd_oobtest_init);

static void __exit mtd_oobtest_exit(void)
{
	return;
}
module_exit(mtd_oobtest_exit);

MODULE_DESCRIPTION("Out-of-band test module");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL");