xref: /OK3568_Linux_fs/kernel/fs/jffs2/scan.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * JFFS2 -- Journalling Flash File System, Version 2.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright © 2001-2007 Red Hat, Inc.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Created by David Woodhouse <dwmw2@infradead.org>
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * For licensing information, see the file 'LICENCE' in this directory.
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/sched.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/mtd/mtd.h>
18*4882a593Smuzhiyun #include <linux/pagemap.h>
19*4882a593Smuzhiyun #include <linux/crc32.h>
20*4882a593Smuzhiyun #include <linux/compiler.h>
21*4882a593Smuzhiyun #include "nodelist.h"
22*4882a593Smuzhiyun #include "summary.h"
23*4882a593Smuzhiyun #include "debug.h"
24*4882a593Smuzhiyun 
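/* Number of bytes at the start of an eraseblock that are checked for 0xFF
 * before the block is provisionally treated as empty (capped at the sector
 * size by EMPTY_SCAN_SIZE() below). */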
25*4882a593Smuzhiyun #define DEFAULT_EMPTY_SCAN_SIZE 256
26*4882a593Smuzhiyun 
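/* Rate-limited notice for scan-time complaints: prints while *(noise) is
 * non-zero, decrements it on every message, and announces once when the
 * per-eraseblock quota runs out. */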
27*4882a593Smuzhiyun #define noisy_printk(noise, fmt, ...)					\
28*4882a593Smuzhiyun do {									\
29*4882a593Smuzhiyun 	if (*(noise)) {							\
30*4882a593Smuzhiyun 		pr_notice(fmt, ##__VA_ARGS__);				\
31*4882a593Smuzhiyun 		(*(noise))--;						\
32*4882a593Smuzhiyun 		if (!(*(noise)))					\
33*4882a593Smuzhiyun 			pr_notice("Further such events for this erase block will not be printed\n"); \
34*4882a593Smuzhiyun 	}								\
35*4882a593Smuzhiyun } while (0)
36*4882a593Smuzhiyun 
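/* Accumulated from node version numbers (and by the summary scan) while
 * scanning; jffs2_rotate_lists() below uses it to rotate the block lists
 * by a pseudo-random amount. */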
37*4882a593Smuzhiyun static uint32_t pseudo_random;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
40*4882a593Smuzhiyun 				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s);
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /* These helper functions _must_ increase ofs and also do the dirty/used space accounting.
43*4882a593Smuzhiyun  * Returning an error will abort the mount - bad checksums etc. should just mark the space
44*4882a593Smuzhiyun  * as dirty.
45*4882a593Smuzhiyun  */
46*4882a593Smuzhiyun static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
47*4882a593Smuzhiyun 				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s);
48*4882a593Smuzhiyun static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
49*4882a593Smuzhiyun 				 struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s);
50*4882a593Smuzhiyun 
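/* Smallest amount of free space still worth writing into: room for two raw
 * inodes, or at least a full write-buffer page when nodes cannot be marked
 * obsolete in place. */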
51*4882a593Smuzhiyun static inline int min_free(struct jffs2_sb_info *c)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	uint32_t min = 2 * sizeof(struct jffs2_raw_inode);
54*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
55*4882a593Smuzhiyun 	if (!jffs2_can_mark_obsolete(c) && min < c->wbuf_pagesize)
56*4882a593Smuzhiyun 		return c->wbuf_pagesize;
57*4882a593Smuzhiyun #endif
58*4882a593Smuzhiyun 	return min;
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun static inline uint32_t EMPTY_SCAN_SIZE(uint32_t sector_size) {
63*4882a593Smuzhiyun 	if (sector_size < DEFAULT_EMPTY_SCAN_SIZE)
64*4882a593Smuzhiyun 		return sector_size;
65*4882a593Smuzhiyun 	else
66*4882a593Smuzhiyun 		return DEFAULT_EMPTY_SCAN_SIZE;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun 
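/* Give up on writing into this block: convert its remaining free and wasted
 * space into dirty space and put it on the dirty or very_dirty list. */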
69*4882a593Smuzhiyun static int file_dirty(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun 	int ret;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	if ((ret = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
74*4882a593Smuzhiyun 		return ret;
75*4882a593Smuzhiyun 	if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
76*4882a593Smuzhiyun 		return ret;
77*4882a593Smuzhiyun 	/* Turn the wasted size into dirty space, since we apparently
78*4882a593Smuzhiyun 	   think it's recoverable now. */
79*4882a593Smuzhiyun 	jeb->dirty_size += jeb->wasted_size;
80*4882a593Smuzhiyun 	c->dirty_size += jeb->wasted_size;
81*4882a593Smuzhiyun 	c->wasted_size -= jeb->wasted_size;
82*4882a593Smuzhiyun 	jeb->wasted_size = 0;
83*4882a593Smuzhiyun 	if (VERYDIRTY(c, jeb->dirty_size)) {
84*4882a593Smuzhiyun 		list_add(&jeb->list, &c->very_dirty_list);
85*4882a593Smuzhiyun 	} else {
86*4882a593Smuzhiyun 		list_add(&jeb->list, &c->dirty_list);
87*4882a593Smuzhiyun 	}
88*4882a593Smuzhiyun 	return 0;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
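/* Mount-time scan entry point: map the whole flash with mtd_point() if
 * possible (XIP-style), otherwise fall back to a bounce buffer, then scan
 * every eraseblock and sort it onto the free/clean/dirty/erase-pending/bad
 * lists, keeping the partially-used block with the most free space as
 * c->nextblock. */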
91*4882a593Smuzhiyun int jffs2_scan_medium(struct jffs2_sb_info *c)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	int i, ret;
94*4882a593Smuzhiyun 	uint32_t empty_blocks = 0, bad_blocks = 0;
95*4882a593Smuzhiyun 	unsigned char *flashbuf = NULL;
96*4882a593Smuzhiyun 	uint32_t buf_size = 0;
97*4882a593Smuzhiyun 	struct jffs2_summary *s = NULL; /* summary info collected by the scan process */
98*4882a593Smuzhiyun #ifndef __ECOS
99*4882a593Smuzhiyun 	size_t pointlen, try_size;
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	ret = mtd_point(c->mtd, 0, c->mtd->size, &pointlen,
102*4882a593Smuzhiyun 			(void **)&flashbuf, NULL);
103*4882a593Smuzhiyun 	if (!ret && pointlen < c->mtd->size) {
104*4882a593Smuzhiyun 		/* Don't muck about if it won't let us point to the whole flash */
105*4882a593Smuzhiyun 		jffs2_dbg(1, "MTD point returned len too short: 0x%zx\n",
106*4882a593Smuzhiyun 			  pointlen);
107*4882a593Smuzhiyun 		mtd_unpoint(c->mtd, 0, pointlen);
108*4882a593Smuzhiyun 		flashbuf = NULL;
109*4882a593Smuzhiyun 	}
110*4882a593Smuzhiyun 	if (ret && ret != -EOPNOTSUPP)
111*4882a593Smuzhiyun 		jffs2_dbg(1, "MTD point failed %d\n", ret);
112*4882a593Smuzhiyun #endif
113*4882a593Smuzhiyun 	if (!flashbuf) {
114*4882a593Smuzhiyun 		/* For NAND it's quicker to read a whole eraseblock at a time,
115*4882a593Smuzhiyun 		   apparently */
116*4882a593Smuzhiyun 		if (jffs2_cleanmarker_oob(c))
117*4882a593Smuzhiyun 			try_size = c->sector_size;
118*4882a593Smuzhiyun 		else
119*4882a593Smuzhiyun 			try_size = PAGE_SIZE;
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 		jffs2_dbg(1, "Trying to allocate readbuf of %zu "
122*4882a593Smuzhiyun 			  "bytes\n", try_size);
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 		flashbuf = mtd_kmalloc_up_to(c->mtd, &try_size);
125*4882a593Smuzhiyun 		if (!flashbuf)
126*4882a593Smuzhiyun 			return -ENOMEM;
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 		jffs2_dbg(1, "Allocated readbuf of %zu bytes\n",
129*4882a593Smuzhiyun 			  try_size);
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 		buf_size = (uint32_t)try_size;
132*4882a593Smuzhiyun 	}
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	if (jffs2_sum_active()) {
135*4882a593Smuzhiyun 		s = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
136*4882a593Smuzhiyun 		if (!s) {
137*4882a593Smuzhiyun 			JFFS2_WARNING("Can't allocate memory for summary\n");
138*4882a593Smuzhiyun 			ret = -ENOMEM;
139*4882a593Smuzhiyun 			goto out_buf;
140*4882a593Smuzhiyun 		}
141*4882a593Smuzhiyun 	}
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	for (i=0; i<c->nr_blocks; i++) {
144*4882a593Smuzhiyun 		struct jffs2_eraseblock *jeb = &c->blocks[i];
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 		cond_resched();
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 		/* reset summary info for next eraseblock scan */
149*4882a593Smuzhiyun 		jffs2_sum_reset_collected(s);
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 		ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
152*4882a593Smuzhiyun 						buf_size, s);
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 		if (ret < 0)
155*4882a593Smuzhiyun 			goto out;
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun 		/* Now decide which list to put it on */
160*4882a593Smuzhiyun 		switch(ret) {
161*4882a593Smuzhiyun 		case BLK_STATE_ALLFF:
162*4882a593Smuzhiyun 			/*
163*4882a593Smuzhiyun 			 * Empty block.   Since we can't be sure it
164*4882a593Smuzhiyun 			 * was entirely erased, we just queue it for erase
165*4882a593Smuzhiyun 			 * again.  It will be marked as such when the erase
166*4882a593Smuzhiyun 			 * is complete.  Meanwhile we still count it as empty
167*4882a593Smuzhiyun 			 * for later checks.
168*4882a593Smuzhiyun 			 */
169*4882a593Smuzhiyun 			empty_blocks++;
170*4882a593Smuzhiyun 			list_add(&jeb->list, &c->erase_pending_list);
171*4882a593Smuzhiyun 			c->nr_erasing_blocks++;
172*4882a593Smuzhiyun 			break;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 		case BLK_STATE_CLEANMARKER:
175*4882a593Smuzhiyun 			/* Only a CLEANMARKER node is valid */
176*4882a593Smuzhiyun 			if (!jeb->dirty_size) {
177*4882a593Smuzhiyun 				/* It's actually free */
178*4882a593Smuzhiyun 				list_add(&jeb->list, &c->free_list);
179*4882a593Smuzhiyun 				c->nr_free_blocks++;
180*4882a593Smuzhiyun 			} else {
181*4882a593Smuzhiyun 				/* Dirt */
182*4882a593Smuzhiyun 				jffs2_dbg(1, "Adding all-dirty block at 0x%08x to erase_pending_list\n",
183*4882a593Smuzhiyun 					  jeb->offset);
184*4882a593Smuzhiyun 				list_add(&jeb->list, &c->erase_pending_list);
185*4882a593Smuzhiyun 				c->nr_erasing_blocks++;
186*4882a593Smuzhiyun 			}
187*4882a593Smuzhiyun 			break;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 		case BLK_STATE_CLEAN:
190*4882a593Smuzhiyun 			/* Full (or almost full) of clean data. Clean list */
191*4882a593Smuzhiyun 			list_add(&jeb->list, &c->clean_list);
192*4882a593Smuzhiyun 			break;
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun 		case BLK_STATE_PARTDIRTY:
195*4882a593Smuzhiyun 			/* Some data, but not full. Dirty list. */
196*4882a593Smuzhiyun 			/* We want to remember the block with the most free space
197*4882a593Smuzhiyun 			and stick it in the 'nextblock' position to start writing to it. */
198*4882a593Smuzhiyun 			if (jeb->free_size > min_free(c) &&
199*4882a593Smuzhiyun 					(!c->nextblock || c->nextblock->free_size < jeb->free_size)) {
200*4882a593Smuzhiyun 				/* Better candidate for the next writes to go to */
201*4882a593Smuzhiyun 				if (c->nextblock) {
202*4882a593Smuzhiyun 					ret = file_dirty(c, c->nextblock);
203*4882a593Smuzhiyun 					if (ret)
204*4882a593Smuzhiyun 						goto out;
205*4882a593Smuzhiyun 					/* deleting summary information of the old nextblock */
206*4882a593Smuzhiyun 					jffs2_sum_reset_collected(c->summary);
207*4882a593Smuzhiyun 				}
208*4882a593Smuzhiyun 				/* update collected summary information for the current nextblock */
209*4882a593Smuzhiyun 				jffs2_sum_move_collected(c, s);
210*4882a593Smuzhiyun 				jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
211*4882a593Smuzhiyun 					  __func__, jeb->offset);
212*4882a593Smuzhiyun 				c->nextblock = jeb;
213*4882a593Smuzhiyun 			} else {
214*4882a593Smuzhiyun 				ret = file_dirty(c, jeb);
215*4882a593Smuzhiyun 				if (ret)
216*4882a593Smuzhiyun 					goto out;
217*4882a593Smuzhiyun 			}
218*4882a593Smuzhiyun 			break;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 		case BLK_STATE_ALLDIRTY:
221*4882a593Smuzhiyun 			/* Nothing valid - not even a clean marker. Needs erasing. */
222*4882a593Smuzhiyun 			/* For now we just put it on the erasing list. We'll start the erases later */
223*4882a593Smuzhiyun 			jffs2_dbg(1, "Erase block at 0x%08x is not formatted. It will be erased\n",
224*4882a593Smuzhiyun 				  jeb->offset);
225*4882a593Smuzhiyun 			list_add(&jeb->list, &c->erase_pending_list);
226*4882a593Smuzhiyun 			c->nr_erasing_blocks++;
227*4882a593Smuzhiyun 			break;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 		case BLK_STATE_BADBLOCK:
230*4882a593Smuzhiyun 			jffs2_dbg(1, "Block at 0x%08x is bad\n", jeb->offset);
231*4882a593Smuzhiyun 			list_add(&jeb->list, &c->bad_list);
232*4882a593Smuzhiyun 			c->bad_size += c->sector_size;
233*4882a593Smuzhiyun 			c->free_size -= c->sector_size;
234*4882a593Smuzhiyun 			bad_blocks++;
235*4882a593Smuzhiyun 			break;
236*4882a593Smuzhiyun 		default:
237*4882a593Smuzhiyun 			pr_warn("%s(): unknown block state\n", __func__);
238*4882a593Smuzhiyun 			BUG();
239*4882a593Smuzhiyun 		}
240*4882a593Smuzhiyun 	}
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	/* Dirty space in the nextblock is always counted as wasted, because we cannot recycle it now */
243*4882a593Smuzhiyun 	if (c->nextblock && (c->nextblock->dirty_size)) {
244*4882a593Smuzhiyun 		c->nextblock->wasted_size += c->nextblock->dirty_size;
245*4882a593Smuzhiyun 		c->wasted_size += c->nextblock->dirty_size;
246*4882a593Smuzhiyun 		c->dirty_size -= c->nextblock->dirty_size;
247*4882a593Smuzhiyun 		c->nextblock->dirty_size = 0;
248*4882a593Smuzhiyun 	}
249*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
250*4882a593Smuzhiyun 	if (!jffs2_can_mark_obsolete(c) && c->wbuf_pagesize && c->nextblock && (c->nextblock->free_size % c->wbuf_pagesize)) {
251*4882a593Smuzhiyun 		/* If we're going to start writing into a block which already
252*4882a593Smuzhiyun 		   contains data, and the end of the data isn't page-aligned,
253*4882a593Smuzhiyun 		   skip a little and align it. */
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 		uint32_t skip = c->nextblock->free_size % c->wbuf_pagesize;
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 		jffs2_dbg(1, "%s(): Skipping %d bytes in nextblock to ensure page alignment\n",
258*4882a593Smuzhiyun 			  __func__, skip);
259*4882a593Smuzhiyun 		jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
260*4882a593Smuzhiyun 		jffs2_scan_dirty_space(c, c->nextblock, skip);
261*4882a593Smuzhiyun 	}
262*4882a593Smuzhiyun #endif
263*4882a593Smuzhiyun 	if (c->nr_erasing_blocks) {
264*4882a593Smuzhiyun 		if (!c->used_size && !c->unchecked_size &&
265*4882a593Smuzhiyun 			((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) {
266*4882a593Smuzhiyun 			pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n");
267*4882a593Smuzhiyun 			pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n",
268*4882a593Smuzhiyun 				  empty_blocks, bad_blocks, c->nr_blocks);
269*4882a593Smuzhiyun 			ret = -EIO;
270*4882a593Smuzhiyun 			goto out;
271*4882a593Smuzhiyun 		}
272*4882a593Smuzhiyun 		spin_lock(&c->erase_completion_lock);
273*4882a593Smuzhiyun 		jffs2_garbage_collect_trigger(c);
274*4882a593Smuzhiyun 		spin_unlock(&c->erase_completion_lock);
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 	ret = 0;
277*4882a593Smuzhiyun  out:
278*4882a593Smuzhiyun 	jffs2_sum_reset_collected(s);
279*4882a593Smuzhiyun 	kfree(s);
280*4882a593Smuzhiyun  out_buf:
281*4882a593Smuzhiyun 	if (buf_size)
282*4882a593Smuzhiyun 		kfree(flashbuf);
283*4882a593Smuzhiyun #ifndef __ECOS
284*4882a593Smuzhiyun 	else
285*4882a593Smuzhiyun 		mtd_unpoint(c->mtd, 0, c->mtd->size);
286*4882a593Smuzhiyun #endif
287*4882a593Smuzhiyun 	return ret;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
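/* Read 'len' bytes of flash at 'ofs' into the scan buffer; a short read is
 * reported as -EIO. */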
290*4882a593Smuzhiyun static int jffs2_fill_scan_buf(struct jffs2_sb_info *c, void *buf,
291*4882a593Smuzhiyun 			       uint32_t ofs, uint32_t len)
292*4882a593Smuzhiyun {
293*4882a593Smuzhiyun 	int ret;
294*4882a593Smuzhiyun 	size_t retlen;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	ret = jffs2_flash_read(c, ofs, len, &retlen, buf);
297*4882a593Smuzhiyun 	if (ret) {
298*4882a593Smuzhiyun 		jffs2_dbg(1, "mtd->read(0x%x bytes from 0x%x) returned %d\n",
299*4882a593Smuzhiyun 			  len, ofs, ret);
300*4882a593Smuzhiyun 		return ret;
301*4882a593Smuzhiyun 	}
302*4882a593Smuzhiyun 	if (retlen < len) {
303*4882a593Smuzhiyun 		jffs2_dbg(1, "Read at 0x%x gave only 0x%zx bytes\n",
304*4882a593Smuzhiyun 			  ofs, retlen);
305*4882a593Smuzhiyun 		return -EIO;
306*4882a593Smuzhiyun 	}
307*4882a593Smuzhiyun 	return 0;
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun 
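/* Translate the accounting gathered for a scanned block into a BLK_STATE_*
 * classification: cleanmarker only, clean (a few bytes of dirt are
 * reclassified as wasted), partially dirty, or entirely dirty. */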
310*4882a593Smuzhiyun int jffs2_scan_classify_jeb(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
311*4882a593Smuzhiyun {
312*4882a593Smuzhiyun 	if ((jeb->used_size + jeb->unchecked_size) == PAD(c->cleanmarker_size) && !jeb->dirty_size
313*4882a593Smuzhiyun 	    && (!jeb->first_node || !ref_next(jeb->first_node)) )
314*4882a593Smuzhiyun 		return BLK_STATE_CLEANMARKER;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	/* Move blocks with at most 4 bytes of dirty space to the clean list */
317*4882a593Smuzhiyun 	else if (!ISDIRTY(c->sector_size - (jeb->used_size + jeb->unchecked_size))) {
318*4882a593Smuzhiyun 		c->dirty_size -= jeb->dirty_size;
319*4882a593Smuzhiyun 		c->wasted_size += jeb->dirty_size;
320*4882a593Smuzhiyun 		jeb->wasted_size += jeb->dirty_size;
321*4882a593Smuzhiyun 		jeb->dirty_size = 0;
322*4882a593Smuzhiyun 		return BLK_STATE_CLEAN;
323*4882a593Smuzhiyun 	} else if (jeb->used_size || jeb->unchecked_size)
324*4882a593Smuzhiyun 		return BLK_STATE_PARTDIRTY;
325*4882a593Smuzhiyun 	else
326*4882a593Smuzhiyun 		return BLK_STATE_ALLDIRTY;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_XATTR
330*4882a593Smuzhiyun static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
331*4882a593Smuzhiyun 				 struct jffs2_raw_xattr *rx, uint32_t ofs,
332*4882a593Smuzhiyun 				 struct jffs2_summary *s)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	struct jffs2_xattr_datum *xd;
335*4882a593Smuzhiyun 	uint32_t xid, version, totlen, crc;
336*4882a593Smuzhiyun 	int err;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
339*4882a593Smuzhiyun 	if (crc != je32_to_cpu(rx->node_crc)) {
340*4882a593Smuzhiyun 		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
341*4882a593Smuzhiyun 			      ofs, je32_to_cpu(rx->node_crc), crc);
342*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
343*4882a593Smuzhiyun 			return err;
344*4882a593Smuzhiyun 		return 0;
345*4882a593Smuzhiyun 	}
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	xid = je32_to_cpu(rx->xid);
348*4882a593Smuzhiyun 	version = je32_to_cpu(rx->version);
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	totlen = PAD(sizeof(struct jffs2_raw_xattr)
351*4882a593Smuzhiyun 			+ rx->name_len + 1 + je16_to_cpu(rx->value_len));
352*4882a593Smuzhiyun 	if (totlen != je32_to_cpu(rx->totlen)) {
353*4882a593Smuzhiyun 		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
354*4882a593Smuzhiyun 			      ofs, je32_to_cpu(rx->totlen), totlen);
355*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
356*4882a593Smuzhiyun 			return err;
357*4882a593Smuzhiyun 		return 0;
358*4882a593Smuzhiyun 	}
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	xd = jffs2_setup_xattr_datum(c, xid, version);
361*4882a593Smuzhiyun 	if (IS_ERR(xd))
362*4882a593Smuzhiyun 		return PTR_ERR(xd);
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	if (xd->version > version) {
365*4882a593Smuzhiyun 		struct jffs2_raw_node_ref *raw
366*4882a593Smuzhiyun 			= jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
367*4882a593Smuzhiyun 		raw->next_in_ino = xd->node->next_in_ino;
368*4882a593Smuzhiyun 		xd->node->next_in_ino = raw;
369*4882a593Smuzhiyun 	} else {
370*4882a593Smuzhiyun 		xd->version = version;
371*4882a593Smuzhiyun 		xd->xprefix = rx->xprefix;
372*4882a593Smuzhiyun 		xd->name_len = rx->name_len;
373*4882a593Smuzhiyun 		xd->value_len = je16_to_cpu(rx->value_len);
374*4882a593Smuzhiyun 		xd->data_crc = je32_to_cpu(rx->data_crc);
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 		jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
377*4882a593Smuzhiyun 	}
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	if (jffs2_sum_active())
380*4882a593Smuzhiyun 		jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
381*4882a593Smuzhiyun 	dbg_xattr("scanning xdatum at %#08x (xid=%u, version=%u)\n",
382*4882a593Smuzhiyun 		  ofs, xd->xid, xd->version);
383*4882a593Smuzhiyun 	return 0;
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
387*4882a593Smuzhiyun 				struct jffs2_raw_xref *rr, uint32_t ofs,
388*4882a593Smuzhiyun 				struct jffs2_summary *s)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	struct jffs2_xattr_ref *ref;
391*4882a593Smuzhiyun 	uint32_t crc;
392*4882a593Smuzhiyun 	int err;
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 	crc = crc32(0, rr, sizeof(*rr) - 4);
395*4882a593Smuzhiyun 	if (crc != je32_to_cpu(rr->node_crc)) {
396*4882a593Smuzhiyun 		JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
397*4882a593Smuzhiyun 			      ofs, je32_to_cpu(rr->node_crc), crc);
398*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
399*4882a593Smuzhiyun 			return err;
400*4882a593Smuzhiyun 		return 0;
401*4882a593Smuzhiyun 	}
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	if (PAD(sizeof(struct jffs2_raw_xref)) != je32_to_cpu(rr->totlen)) {
404*4882a593Smuzhiyun 		JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%zd\n",
405*4882a593Smuzhiyun 			      ofs, je32_to_cpu(rr->totlen),
406*4882a593Smuzhiyun 			      PAD(sizeof(struct jffs2_raw_xref)));
407*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rr->totlen))))
408*4882a593Smuzhiyun 			return err;
409*4882a593Smuzhiyun 		return 0;
410*4882a593Smuzhiyun 	}
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	ref = jffs2_alloc_xattr_ref();
413*4882a593Smuzhiyun 	if (!ref)
414*4882a593Smuzhiyun 		return -ENOMEM;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	/* BEFORE jffs2_build_xattr_subsystem() is called,
417*4882a593Smuzhiyun 	 * and AFTER an xattr_ref is marked as a dead xref,
418*4882a593Smuzhiyun 	 * ref->xid is used to store the 32bit xid (xd is not used) and
419*4882a593Smuzhiyun 	 * ref->ino is used to store the 32bit inode number (ic is not used).
420*4882a593Smuzhiyun 	 * Those fields are declared as a union, so their uses are
421*4882a593Smuzhiyun 	 * mutually exclusive. In a similar way, ref->next is temporarily
422*4882a593Smuzhiyun 	 * used to chain all xattr_ref objects; they are correctly re-chained
423*4882a593Smuzhiyun 	 * to the jffs2_inode_cache in jffs2_build_xattr_subsystem().
424*4882a593Smuzhiyun 	 */
425*4882a593Smuzhiyun 	ref->ino = je32_to_cpu(rr->ino);
426*4882a593Smuzhiyun 	ref->xid = je32_to_cpu(rr->xid);
427*4882a593Smuzhiyun 	ref->xseqno = je32_to_cpu(rr->xseqno);
428*4882a593Smuzhiyun 	if (ref->xseqno > c->highest_xseqno)
429*4882a593Smuzhiyun 		c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
430*4882a593Smuzhiyun 	ref->next = c->xref_temp;
431*4882a593Smuzhiyun 	c->xref_temp = ref;
432*4882a593Smuzhiyun 
433*4882a593Smuzhiyun 	jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun 	if (jffs2_sum_active())
436*4882a593Smuzhiyun 		jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
437*4882a593Smuzhiyun 	dbg_xattr("scan xref at %#08x (xid=%u, ino=%u)\n",
438*4882a593Smuzhiyun 		  ofs, ref->xid, ref->ino);
439*4882a593Smuzhiyun 	return 0;
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun #endif
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun /* Called with 'buf_size == 0' if buf is in fact a pointer _directly_ into
444*4882a593Smuzhiyun    the flash, XIP-style */
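/* Scan one eraseblock: honour bad-block and cleanmarker state on NAND, try
 * the on-flash summary node first, and otherwise walk the block node by
 * node, checking header CRCs and dispatching to the per-nodetype helpers. */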
445*4882a593Smuzhiyun static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
446*4882a593Smuzhiyun 				  unsigned char *buf, uint32_t buf_size, struct jffs2_summary *s) {
447*4882a593Smuzhiyun 	struct jffs2_unknown_node *node;
448*4882a593Smuzhiyun 	struct jffs2_unknown_node crcnode;
449*4882a593Smuzhiyun 	uint32_t ofs, prevofs, max_ofs;
450*4882a593Smuzhiyun 	uint32_t hdr_crc, buf_ofs, buf_len;
451*4882a593Smuzhiyun 	int err;
452*4882a593Smuzhiyun 	int noise = 0;
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 
455*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
456*4882a593Smuzhiyun 	int cleanmarkerfound = 0;
457*4882a593Smuzhiyun #endif
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	ofs = jeb->offset;
460*4882a593Smuzhiyun 	prevofs = jeb->offset - 1;
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun 	jffs2_dbg(1, "%s(): Scanning block at 0x%x\n", __func__, ofs);
463*4882a593Smuzhiyun 
464*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
465*4882a593Smuzhiyun 	if (jffs2_cleanmarker_oob(c)) {
466*4882a593Smuzhiyun 		int ret;
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 		if (mtd_block_isbad(c->mtd, jeb->offset))
469*4882a593Smuzhiyun 			return BLK_STATE_BADBLOCK;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 		ret = jffs2_check_nand_cleanmarker(c, jeb);
472*4882a593Smuzhiyun 		jffs2_dbg(2, "jffs2_check_nand_cleanmarker returned %d\n", ret);
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 		/* Even if it's not found, we still scan to see
475*4882a593Smuzhiyun 		   if the block is empty. We use this information
476*4882a593Smuzhiyun 		   to decide whether to erase it or not. */
477*4882a593Smuzhiyun 		switch (ret) {
478*4882a593Smuzhiyun 		case 0:		cleanmarkerfound = 1; break;
479*4882a593Smuzhiyun 		case 1: 	break;
480*4882a593Smuzhiyun 		default: 	return ret;
481*4882a593Smuzhiyun 		}
482*4882a593Smuzhiyun 	}
483*4882a593Smuzhiyun #endif
484*4882a593Smuzhiyun 
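	/* Fast path: if a summary marker sits at the end of the block, read and
	 * parse the summary node instead of scanning every node individually. */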
485*4882a593Smuzhiyun 	if (jffs2_sum_active()) {
486*4882a593Smuzhiyun 		struct jffs2_sum_marker *sm;
487*4882a593Smuzhiyun 		void *sumptr = NULL;
488*4882a593Smuzhiyun 		uint32_t sumlen;
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 		if (!buf_size) {
491*4882a593Smuzhiyun 			/* XIP case. Just look, point at the summary if it's there */
492*4882a593Smuzhiyun 			sm = (void *)buf + c->sector_size - sizeof(*sm);
493*4882a593Smuzhiyun 			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
494*4882a593Smuzhiyun 				sumptr = buf + je32_to_cpu(sm->offset);
495*4882a593Smuzhiyun 				sumlen = c->sector_size - je32_to_cpu(sm->offset);
496*4882a593Smuzhiyun 			}
497*4882a593Smuzhiyun 		} else {
498*4882a593Smuzhiyun 			/* If NAND flash, read a whole page of it. Else just the end */
499*4882a593Smuzhiyun 			if (c->wbuf_pagesize)
500*4882a593Smuzhiyun 				buf_len = c->wbuf_pagesize;
501*4882a593Smuzhiyun 			else
502*4882a593Smuzhiyun 				buf_len = sizeof(*sm);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 			/* Read as much as we want into the _end_ of the preallocated buffer */
505*4882a593Smuzhiyun 			err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
506*4882a593Smuzhiyun 						  jeb->offset + c->sector_size - buf_len,
507*4882a593Smuzhiyun 						  buf_len);
508*4882a593Smuzhiyun 			if (err)
509*4882a593Smuzhiyun 				return err;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 			sm = (void *)buf + buf_size - sizeof(*sm);
512*4882a593Smuzhiyun 			if (je32_to_cpu(sm->magic) == JFFS2_SUM_MAGIC) {
513*4882a593Smuzhiyun 				sumlen = c->sector_size - je32_to_cpu(sm->offset);
514*4882a593Smuzhiyun 				sumptr = buf + buf_size - sumlen;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 				/* sm->offset may be wrong even though the MAGIC matched */
517*4882a593Smuzhiyun 				if (sumlen > c->sector_size)
518*4882a593Smuzhiyun 					goto full_scan;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 				/* Now, make sure the summary itself is available */
521*4882a593Smuzhiyun 				if (sumlen > buf_size) {
522*4882a593Smuzhiyun 					/* Need to kmalloc for this. */
523*4882a593Smuzhiyun 					sumptr = kmalloc(sumlen, GFP_KERNEL);
524*4882a593Smuzhiyun 					if (!sumptr)
525*4882a593Smuzhiyun 						return -ENOMEM;
526*4882a593Smuzhiyun 					memcpy(sumptr + sumlen - buf_len, buf + buf_size - buf_len, buf_len);
527*4882a593Smuzhiyun 				}
528*4882a593Smuzhiyun 				if (buf_len < sumlen) {
529*4882a593Smuzhiyun 					/* Need to read more so that the entire summary node is present */
530*4882a593Smuzhiyun 					err = jffs2_fill_scan_buf(c, sumptr,
531*4882a593Smuzhiyun 								  jeb->offset + c->sector_size - sumlen,
532*4882a593Smuzhiyun 								  sumlen - buf_len);
533*4882a593Smuzhiyun 					if (err) {
534*4882a593Smuzhiyun 						if (sumlen > buf_size)
535*4882a593Smuzhiyun 							kfree(sumptr);
536*4882a593Smuzhiyun 						return err;
537*4882a593Smuzhiyun 					}
538*4882a593Smuzhiyun 				}
539*4882a593Smuzhiyun 			}
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 		}
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 		if (sumptr) {
544*4882a593Smuzhiyun 			err = jffs2_sum_scan_sumnode(c, jeb, sumptr, sumlen, &pseudo_random);
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 			if (buf_size && sumlen > buf_size)
547*4882a593Smuzhiyun 				kfree(sumptr);
548*4882a593Smuzhiyun 			/* If it returns with a real error, bail.
549*4882a593Smuzhiyun 			   If it returns positive, that's a block classification
550*4882a593Smuzhiyun 			   (i.e. BLK_STATE_xxx) so return that too.
551*4882a593Smuzhiyun 			   If it returns zero, fall through to full scan. */
552*4882a593Smuzhiyun 			if (err)
553*4882a593Smuzhiyun 				return err;
554*4882a593Smuzhiyun 		}
555*4882a593Smuzhiyun 	}
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun full_scan:
558*4882a593Smuzhiyun 	buf_ofs = jeb->offset;
559*4882a593Smuzhiyun 
560*4882a593Smuzhiyun 	if (!buf_size) {
561*4882a593Smuzhiyun 		/* This is the XIP case -- we're reading _directly_ from the flash chip */
562*4882a593Smuzhiyun 		buf_len = c->sector_size;
563*4882a593Smuzhiyun 	} else {
564*4882a593Smuzhiyun 		buf_len = EMPTY_SCAN_SIZE(c->sector_size);
565*4882a593Smuzhiyun 		err = jffs2_fill_scan_buf(c, buf, buf_ofs, buf_len);
566*4882a593Smuzhiyun 		if (err)
567*4882a593Smuzhiyun 			return err;
568*4882a593Smuzhiyun 	}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
571*4882a593Smuzhiyun 	ofs = 0;
572*4882a593Smuzhiyun 	max_ofs = EMPTY_SCAN_SIZE(c->sector_size);
573*4882a593Smuzhiyun 	/* Scan only EMPTY_SCAN_SIZE of 0xFF before declaring it's empty */
574*4882a593Smuzhiyun 	while(ofs < max_ofs && *(uint32_t *)(&buf[ofs]) == 0xFFFFFFFF)
575*4882a593Smuzhiyun 		ofs += 4;
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	if (ofs == max_ofs) {
578*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_WRITEBUFFER
579*4882a593Smuzhiyun 		if (jffs2_cleanmarker_oob(c)) {
580*4882a593Smuzhiyun 			/* scan oob, take care of cleanmarker */
581*4882a593Smuzhiyun 			int ret = jffs2_check_oob_empty(c, jeb, cleanmarkerfound);
582*4882a593Smuzhiyun 			jffs2_dbg(2, "jffs2_check_oob_empty returned %d\n",
583*4882a593Smuzhiyun 				  ret);
584*4882a593Smuzhiyun 			switch (ret) {
585*4882a593Smuzhiyun 			case 0:		return cleanmarkerfound ? BLK_STATE_CLEANMARKER : BLK_STATE_ALLFF;
586*4882a593Smuzhiyun 			case 1: 	return BLK_STATE_ALLDIRTY;
587*4882a593Smuzhiyun 			default: 	return ret;
588*4882a593Smuzhiyun 			}
589*4882a593Smuzhiyun 		}
590*4882a593Smuzhiyun #endif
591*4882a593Smuzhiyun 		jffs2_dbg(1, "Block at 0x%08x is empty (erased)\n",
592*4882a593Smuzhiyun 			  jeb->offset);
593*4882a593Smuzhiyun 		if (c->cleanmarker_size == 0)
594*4882a593Smuzhiyun 			return BLK_STATE_CLEANMARKER;	/* don't bother with re-erase */
595*4882a593Smuzhiyun 		else
596*4882a593Smuzhiyun 			return BLK_STATE_ALLFF;	/* OK to erase if all blocks are like this */
597*4882a593Smuzhiyun 	}
598*4882a593Smuzhiyun 	if (ofs) {
599*4882a593Smuzhiyun 		jffs2_dbg(1, "Free space at %08x ends at %08x\n", jeb->offset,
600*4882a593Smuzhiyun 			  jeb->offset + ofs);
601*4882a593Smuzhiyun 		if ((err = jffs2_prealloc_raw_node_refs(c, jeb, 1)))
602*4882a593Smuzhiyun 			return err;
603*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, ofs)))
604*4882a593Smuzhiyun 			return err;
605*4882a593Smuzhiyun 	}
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	/* Now ofs is a complete physical flash offset as it always was... */
608*4882a593Smuzhiyun 	ofs += jeb->offset;
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	noise = 10;
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 	dbg_summary("no summary found in jeb 0x%08x. Apply original scan.\n",jeb->offset);
613*4882a593Smuzhiyun 
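	/* Slow path: walk the rest of the block a word at a time, skipping runs
	 * of 0xFF and accounting unrecognised data as dirty space. */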
614*4882a593Smuzhiyun scan_more:
615*4882a593Smuzhiyun 	while(ofs < jeb->offset + c->sector_size) {
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 		jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 		/* Make sure there are node refs available for use */
620*4882a593Smuzhiyun 		err = jffs2_prealloc_raw_node_refs(c, jeb, 2);
621*4882a593Smuzhiyun 		if (err)
622*4882a593Smuzhiyun 			return err;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 		cond_resched();
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 		if (ofs & 3) {
627*4882a593Smuzhiyun 			pr_warn("Eep. ofs 0x%08x not word-aligned!\n", ofs);
628*4882a593Smuzhiyun 			ofs = PAD(ofs);
629*4882a593Smuzhiyun 			continue;
630*4882a593Smuzhiyun 		}
631*4882a593Smuzhiyun 		if (ofs == prevofs) {
632*4882a593Smuzhiyun 			pr_warn("ofs 0x%08x has already been seen. Skipping\n",
633*4882a593Smuzhiyun 				ofs);
634*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
635*4882a593Smuzhiyun 				return err;
636*4882a593Smuzhiyun 			ofs += 4;
637*4882a593Smuzhiyun 			continue;
638*4882a593Smuzhiyun 		}
639*4882a593Smuzhiyun 		prevofs = ofs;
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 		if (jeb->offset + c->sector_size < ofs + sizeof(*node)) {
642*4882a593Smuzhiyun 			jffs2_dbg(1, "Fewer than %zd bytes left to end of block. (%x+%x<%x+%zx) Not reading\n",
643*4882a593Smuzhiyun 				  sizeof(struct jffs2_unknown_node),
644*4882a593Smuzhiyun 				  jeb->offset, c->sector_size, ofs,
645*4882a593Smuzhiyun 				  sizeof(*node));
646*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, (jeb->offset + c->sector_size)-ofs)))
647*4882a593Smuzhiyun 				return err;
648*4882a593Smuzhiyun 			break;
649*4882a593Smuzhiyun 		}
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun 		if (buf_ofs + buf_len < ofs + sizeof(*node)) {
652*4882a593Smuzhiyun 			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
653*4882a593Smuzhiyun 			jffs2_dbg(1, "Fewer than %zd bytes (node header) left to end of buf. Reading 0x%x at 0x%08x\n",
654*4882a593Smuzhiyun 				  sizeof(struct jffs2_unknown_node),
655*4882a593Smuzhiyun 				  buf_len, ofs);
656*4882a593Smuzhiyun 			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
657*4882a593Smuzhiyun 			if (err)
658*4882a593Smuzhiyun 				return err;
659*4882a593Smuzhiyun 			buf_ofs = ofs;
660*4882a593Smuzhiyun 		}
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 		node = (struct jffs2_unknown_node *)&buf[ofs-buf_ofs];
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 		if (*(uint32_t *)(&buf[ofs-buf_ofs]) == 0xffffffff) {
665*4882a593Smuzhiyun 			uint32_t inbuf_ofs;
666*4882a593Smuzhiyun 			uint32_t empty_start, scan_end;
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 			empty_start = ofs;
669*4882a593Smuzhiyun 			ofs += 4;
670*4882a593Smuzhiyun 			scan_end = min_t(uint32_t, EMPTY_SCAN_SIZE(c->sector_size)/8, buf_len);
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 			jffs2_dbg(1, "Found empty flash at 0x%08x\n", ofs);
673*4882a593Smuzhiyun 		more_empty:
674*4882a593Smuzhiyun 			inbuf_ofs = ofs - buf_ofs;
675*4882a593Smuzhiyun 			while (inbuf_ofs < scan_end) {
676*4882a593Smuzhiyun 				if (unlikely(*(uint32_t *)(&buf[inbuf_ofs]) != 0xffffffff)) {
677*4882a593Smuzhiyun 					pr_warn("Empty flash at 0x%08x ends at 0x%08x\n",
678*4882a593Smuzhiyun 						empty_start, ofs);
679*4882a593Smuzhiyun 					if ((err = jffs2_scan_dirty_space(c, jeb, ofs-empty_start)))
680*4882a593Smuzhiyun 						return err;
681*4882a593Smuzhiyun 					goto scan_more;
682*4882a593Smuzhiyun 				}
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 				inbuf_ofs+=4;
685*4882a593Smuzhiyun 				ofs += 4;
686*4882a593Smuzhiyun 			}
687*4882a593Smuzhiyun 			/* Ran off end. */
688*4882a593Smuzhiyun 			jffs2_dbg(1, "Empty flash to end of buffer at 0x%08x\n",
689*4882a593Smuzhiyun 				  ofs);
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 			/* If we're only checking the beginning of a block with a cleanmarker,
692*4882a593Smuzhiyun 			   bail now */
693*4882a593Smuzhiyun 			if (buf_ofs == jeb->offset && jeb->used_size == PAD(c->cleanmarker_size) &&
694*4882a593Smuzhiyun 			    c->cleanmarker_size && !jeb->dirty_size && !ref_next(jeb->first_node)) {
695*4882a593Smuzhiyun 				jffs2_dbg(1, "%d bytes at start of block seems clean... assuming all clean\n",
696*4882a593Smuzhiyun 					  EMPTY_SCAN_SIZE(c->sector_size));
697*4882a593Smuzhiyun 				return BLK_STATE_CLEANMARKER;
698*4882a593Smuzhiyun 			}
699*4882a593Smuzhiyun 			if (!buf_size && (scan_end != buf_len)) {/* XIP/point case */
700*4882a593Smuzhiyun 				scan_end = buf_len;
701*4882a593Smuzhiyun 				goto more_empty;
702*4882a593Smuzhiyun 			}
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 			/* See how much more there is to read in this eraseblock... */
705*4882a593Smuzhiyun 			buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
706*4882a593Smuzhiyun 			if (!buf_len) {
707*4882a593Smuzhiyun 				/* No more to read. Break out of main loop without marking
708*4882a593Smuzhiyun 				   this range of empty space as dirty (because it's not) */
709*4882a593Smuzhiyun 				jffs2_dbg(1, "Empty flash at %08x runs to end of block. Treating as free_space\n",
710*4882a593Smuzhiyun 					  empty_start);
711*4882a593Smuzhiyun 				break;
712*4882a593Smuzhiyun 			}
713*4882a593Smuzhiyun 			/* point never reaches here */
714*4882a593Smuzhiyun 			scan_end = buf_len;
715*4882a593Smuzhiyun 			jffs2_dbg(1, "Reading another 0x%x at 0x%08x\n",
716*4882a593Smuzhiyun 				  buf_len, ofs);
717*4882a593Smuzhiyun 			err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
718*4882a593Smuzhiyun 			if (err)
719*4882a593Smuzhiyun 				return err;
720*4882a593Smuzhiyun 			buf_ofs = ofs;
721*4882a593Smuzhiyun 			goto more_empty;
722*4882a593Smuzhiyun 		}
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 		if (ofs == jeb->offset && je16_to_cpu(node->magic) == KSAMTIB_CIGAM_2SFFJ) {
725*4882a593Smuzhiyun 			pr_warn("Magic bitmask is backwards at offset 0x%08x. Wrong endian filesystem?\n",
726*4882a593Smuzhiyun 				ofs);
727*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
728*4882a593Smuzhiyun 				return err;
729*4882a593Smuzhiyun 			ofs += 4;
730*4882a593Smuzhiyun 			continue;
731*4882a593Smuzhiyun 		}
732*4882a593Smuzhiyun 		if (je16_to_cpu(node->magic) == JFFS2_DIRTY_BITMASK) {
733*4882a593Smuzhiyun 			jffs2_dbg(1, "Dirty bitmask at 0x%08x\n", ofs);
734*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
735*4882a593Smuzhiyun 				return err;
736*4882a593Smuzhiyun 			ofs += 4;
737*4882a593Smuzhiyun 			continue;
738*4882a593Smuzhiyun 		}
739*4882a593Smuzhiyun 		if (je16_to_cpu(node->magic) == JFFS2_OLD_MAGIC_BITMASK) {
740*4882a593Smuzhiyun 			pr_warn("Old JFFS2 bitmask found at 0x%08x\n", ofs);
741*4882a593Smuzhiyun 			pr_warn("You cannot use older JFFS2 filesystems with newer kernels\n");
742*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
743*4882a593Smuzhiyun 				return err;
744*4882a593Smuzhiyun 			ofs += 4;
745*4882a593Smuzhiyun 			continue;
746*4882a593Smuzhiyun 		}
747*4882a593Smuzhiyun 		if (je16_to_cpu(node->magic) != JFFS2_MAGIC_BITMASK) {
748*4882a593Smuzhiyun 			/* OK. We're out of possibilities. Whinge and move on */
749*4882a593Smuzhiyun 			noisy_printk(&noise, "%s(): Magic bitmask 0x%04x not found at 0x%08x: 0x%04x instead\n",
750*4882a593Smuzhiyun 				     __func__,
751*4882a593Smuzhiyun 				     JFFS2_MAGIC_BITMASK, ofs,
752*4882a593Smuzhiyun 				     je16_to_cpu(node->magic));
753*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
754*4882a593Smuzhiyun 				return err;
755*4882a593Smuzhiyun 			ofs += 4;
756*4882a593Smuzhiyun 			continue;
757*4882a593Smuzhiyun 		}
758*4882a593Smuzhiyun 		/* We seem to have a node of sorts. Check the CRC */
759*4882a593Smuzhiyun 		crcnode.magic = node->magic;
760*4882a593Smuzhiyun 		crcnode.nodetype = cpu_to_je16( je16_to_cpu(node->nodetype) | JFFS2_NODE_ACCURATE);
761*4882a593Smuzhiyun 		crcnode.totlen = node->totlen;
762*4882a593Smuzhiyun 		hdr_crc = crc32(0, &crcnode, sizeof(crcnode)-4);
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 		if (hdr_crc != je32_to_cpu(node->hdr_crc)) {
765*4882a593Smuzhiyun 			noisy_printk(&noise, "%s(): Node at 0x%08x {0x%04x, 0x%04x, 0x%08x) has invalid CRC 0x%08x (calculated 0x%08x)\n",
766*4882a593Smuzhiyun 				     __func__,
767*4882a593Smuzhiyun 				     ofs, je16_to_cpu(node->magic),
768*4882a593Smuzhiyun 				     je16_to_cpu(node->nodetype),
769*4882a593Smuzhiyun 				     je32_to_cpu(node->totlen),
770*4882a593Smuzhiyun 				     je32_to_cpu(node->hdr_crc),
771*4882a593Smuzhiyun 				     hdr_crc);
772*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
773*4882a593Smuzhiyun 				return err;
774*4882a593Smuzhiyun 			ofs += 4;
775*4882a593Smuzhiyun 			continue;
776*4882a593Smuzhiyun 		}
777*4882a593Smuzhiyun 
778*4882a593Smuzhiyun 		if (ofs + je32_to_cpu(node->totlen) > jeb->offset + c->sector_size) {
779*4882a593Smuzhiyun 			/* Eep. Node goes over the end of the erase block. */
780*4882a593Smuzhiyun 			pr_warn("Node at 0x%08x with length 0x%08x would run over the end of the erase block\n",
781*4882a593Smuzhiyun 				ofs, je32_to_cpu(node->totlen));
782*4882a593Smuzhiyun 			pr_warn("Perhaps the file system was created with the wrong erase size?\n");
783*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, 4)))
784*4882a593Smuzhiyun 				return err;
785*4882a593Smuzhiyun 			ofs += 4;
786*4882a593Smuzhiyun 			continue;
787*4882a593Smuzhiyun 		}
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 		if (!(je16_to_cpu(node->nodetype) & JFFS2_NODE_ACCURATE)) {
790*4882a593Smuzhiyun 			/* Wheee. This is an obsoleted node */
791*4882a593Smuzhiyun 			jffs2_dbg(2, "Node at 0x%08x is obsolete. Skipping\n",
792*4882a593Smuzhiyun 				  ofs);
793*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
794*4882a593Smuzhiyun 				return err;
795*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
796*4882a593Smuzhiyun 			continue;
797*4882a593Smuzhiyun 		}
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 		switch(je16_to_cpu(node->nodetype)) {
800*4882a593Smuzhiyun 		case JFFS2_NODETYPE_INODE:
801*4882a593Smuzhiyun 			if (buf_ofs + buf_len < ofs + sizeof(struct jffs2_raw_inode)) {
802*4882a593Smuzhiyun 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
803*4882a593Smuzhiyun 				jffs2_dbg(1, "Fewer than %zd bytes (inode node) left to end of buf. Reading 0x%x at 0x%08x\n",
804*4882a593Smuzhiyun 					  sizeof(struct jffs2_raw_inode),
805*4882a593Smuzhiyun 					  buf_len, ofs);
806*4882a593Smuzhiyun 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
807*4882a593Smuzhiyun 				if (err)
808*4882a593Smuzhiyun 					return err;
809*4882a593Smuzhiyun 				buf_ofs = ofs;
810*4882a593Smuzhiyun 				node = (void *)buf;
811*4882a593Smuzhiyun 			}
812*4882a593Smuzhiyun 			err = jffs2_scan_inode_node(c, jeb, (void *)node, ofs, s);
813*4882a593Smuzhiyun 			if (err) return err;
814*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
815*4882a593Smuzhiyun 			break;
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 		case JFFS2_NODETYPE_DIRENT:
818*4882a593Smuzhiyun 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
819*4882a593Smuzhiyun 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
820*4882a593Smuzhiyun 				jffs2_dbg(1, "Fewer than %d bytes (dirent node) left to end of buf. Reading 0x%x at 0x%08x\n",
821*4882a593Smuzhiyun 					  je32_to_cpu(node->totlen), buf_len,
822*4882a593Smuzhiyun 					  ofs);
823*4882a593Smuzhiyun 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
824*4882a593Smuzhiyun 				if (err)
825*4882a593Smuzhiyun 					return err;
826*4882a593Smuzhiyun 				buf_ofs = ofs;
827*4882a593Smuzhiyun 				node = (void *)buf;
828*4882a593Smuzhiyun 			}
829*4882a593Smuzhiyun 			err = jffs2_scan_dirent_node(c, jeb, (void *)node, ofs, s);
830*4882a593Smuzhiyun 			if (err) return err;
831*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
832*4882a593Smuzhiyun 			break;
833*4882a593Smuzhiyun 
834*4882a593Smuzhiyun #ifdef CONFIG_JFFS2_FS_XATTR
835*4882a593Smuzhiyun 		case JFFS2_NODETYPE_XATTR:
836*4882a593Smuzhiyun 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
837*4882a593Smuzhiyun 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
838*4882a593Smuzhiyun 				jffs2_dbg(1, "Fewer than %d bytes (xattr node) left to end of buf. Reading 0x%x at 0x%08x\n",
839*4882a593Smuzhiyun 					  je32_to_cpu(node->totlen), buf_len,
840*4882a593Smuzhiyun 					  ofs);
841*4882a593Smuzhiyun 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
842*4882a593Smuzhiyun 				if (err)
843*4882a593Smuzhiyun 					return err;
844*4882a593Smuzhiyun 				buf_ofs = ofs;
845*4882a593Smuzhiyun 				node = (void *)buf;
846*4882a593Smuzhiyun 			}
847*4882a593Smuzhiyun 			err = jffs2_scan_xattr_node(c, jeb, (void *)node, ofs, s);
848*4882a593Smuzhiyun 			if (err)
849*4882a593Smuzhiyun 				return err;
850*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
851*4882a593Smuzhiyun 			break;
852*4882a593Smuzhiyun 		case JFFS2_NODETYPE_XREF:
853*4882a593Smuzhiyun 			if (buf_ofs + buf_len < ofs + je32_to_cpu(node->totlen)) {
854*4882a593Smuzhiyun 				buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
855*4882a593Smuzhiyun 				jffs2_dbg(1, "Fewer than %d bytes (xref node) left to end of buf. Reading 0x%x at 0x%08x\n",
856*4882a593Smuzhiyun 					  je32_to_cpu(node->totlen), buf_len,
857*4882a593Smuzhiyun 					  ofs);
858*4882a593Smuzhiyun 				err = jffs2_fill_scan_buf(c, buf, ofs, buf_len);
859*4882a593Smuzhiyun 				if (err)
860*4882a593Smuzhiyun 					return err;
861*4882a593Smuzhiyun 				buf_ofs = ofs;
862*4882a593Smuzhiyun 				node = (void *)buf;
863*4882a593Smuzhiyun 			}
864*4882a593Smuzhiyun 			err = jffs2_scan_xref_node(c, jeb, (void *)node, ofs, s);
865*4882a593Smuzhiyun 			if (err)
866*4882a593Smuzhiyun 				return err;
867*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
868*4882a593Smuzhiyun 			break;
869*4882a593Smuzhiyun #endif	/* CONFIG_JFFS2_FS_XATTR */
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 		case JFFS2_NODETYPE_CLEANMARKER:
872*4882a593Smuzhiyun 			jffs2_dbg(1, "CLEANMARKER node found at 0x%08x\n", ofs);
873*4882a593Smuzhiyun 			if (je32_to_cpu(node->totlen) != c->cleanmarker_size) {
874*4882a593Smuzhiyun 				pr_notice("CLEANMARKER node found at 0x%08x has totlen 0x%x != normal 0x%x\n",
875*4882a593Smuzhiyun 					  ofs, je32_to_cpu(node->totlen),
876*4882a593Smuzhiyun 					  c->cleanmarker_size);
877*4882a593Smuzhiyun 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
878*4882a593Smuzhiyun 					return err;
879*4882a593Smuzhiyun 				ofs += PAD(sizeof(struct jffs2_unknown_node));
880*4882a593Smuzhiyun 			} else if (jeb->first_node) {
881*4882a593Smuzhiyun 				pr_notice("CLEANMARKER node found at 0x%08x, not first node in block (0x%08x)\n",
882*4882a593Smuzhiyun 					  ofs, jeb->offset);
883*4882a593Smuzhiyun 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(sizeof(struct jffs2_unknown_node)))))
884*4882a593Smuzhiyun 					return err;
885*4882a593Smuzhiyun 				ofs += PAD(sizeof(struct jffs2_unknown_node));
886*4882a593Smuzhiyun 			} else {
887*4882a593Smuzhiyun 				jffs2_link_node_ref(c, jeb, ofs | REF_NORMAL, c->cleanmarker_size, NULL);
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 				ofs += PAD(c->cleanmarker_size);
890*4882a593Smuzhiyun 			}
891*4882a593Smuzhiyun 			break;
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 		case JFFS2_NODETYPE_PADDING:
894*4882a593Smuzhiyun 			if (jffs2_sum_active())
895*4882a593Smuzhiyun 				jffs2_sum_add_padding_mem(s, je32_to_cpu(node->totlen));
896*4882a593Smuzhiyun 			if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
897*4882a593Smuzhiyun 				return err;
898*4882a593Smuzhiyun 			ofs += PAD(je32_to_cpu(node->totlen));
899*4882a593Smuzhiyun 			break;
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun 		default:
902*4882a593Smuzhiyun 			switch (je16_to_cpu(node->nodetype) & JFFS2_COMPAT_MASK) {
903*4882a593Smuzhiyun 			case JFFS2_FEATURE_ROCOMPAT:
904*4882a593Smuzhiyun 				pr_notice("Read-only compatible feature node (0x%04x) found at offset 0x%08x\n",
905*4882a593Smuzhiyun 					  je16_to_cpu(node->nodetype), ofs);
906*4882a593Smuzhiyun 				c->flags |= JFFS2_SB_FLAG_RO;
907*4882a593Smuzhiyun 				if (!(jffs2_is_readonly(c)))
908*4882a593Smuzhiyun 					return -EROFS;
909*4882a593Smuzhiyun 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
910*4882a593Smuzhiyun 					return err;
911*4882a593Smuzhiyun 				ofs += PAD(je32_to_cpu(node->totlen));
912*4882a593Smuzhiyun 				break;
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 			case JFFS2_FEATURE_INCOMPAT:
915*4882a593Smuzhiyun 				pr_notice("Incompatible feature node (0x%04x) found at offset 0x%08x\n",
916*4882a593Smuzhiyun 					  je16_to_cpu(node->nodetype), ofs);
917*4882a593Smuzhiyun 				return -EINVAL;
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 			case JFFS2_FEATURE_RWCOMPAT_DELETE:
920*4882a593Smuzhiyun 				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
921*4882a593Smuzhiyun 					  je16_to_cpu(node->nodetype), ofs);
922*4882a593Smuzhiyun 				if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(node->totlen)))))
923*4882a593Smuzhiyun 					return err;
924*4882a593Smuzhiyun 				ofs += PAD(je32_to_cpu(node->totlen));
925*4882a593Smuzhiyun 				break;
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 			case JFFS2_FEATURE_RWCOMPAT_COPY: {
928*4882a593Smuzhiyun 				jffs2_dbg(1, "Unknown but compatible feature node (0x%04x) found at offset 0x%08x\n",
929*4882a593Smuzhiyun 					  je16_to_cpu(node->nodetype), ofs);
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 				jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(node->totlen)), NULL);
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 				/* We can't summarise nodes we don't grok */
934*4882a593Smuzhiyun 				jffs2_sum_disable_collecting(s);
935*4882a593Smuzhiyun 				ofs += PAD(je32_to_cpu(node->totlen));
936*4882a593Smuzhiyun 				break;
937*4882a593Smuzhiyun 				}
938*4882a593Smuzhiyun 			}
939*4882a593Smuzhiyun 		}
940*4882a593Smuzhiyun 	}
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (jffs2_sum_active()) {
943*4882a593Smuzhiyun 		if (PAD(s->sum_size + JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size) {
944*4882a593Smuzhiyun 			dbg_summary("There is not enough space for "
945*4882a593Smuzhiyun 				"summary information, disabling for this jeb!\n");
946*4882a593Smuzhiyun 			jffs2_sum_disable_collecting(s);
947*4882a593Smuzhiyun 		}
948*4882a593Smuzhiyun 	}
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun 	jffs2_dbg(1, "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
951*4882a593Smuzhiyun 		  jeb->offset, jeb->free_size, jeb->dirty_size,
952*4882a593Smuzhiyun 		  jeb->unchecked_size, jeb->used_size, jeb->wasted_size);
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	/* mark_node_obsolete can add to wasted !! */
955*4882a593Smuzhiyun 	if (jeb->wasted_size) {
956*4882a593Smuzhiyun 		jeb->dirty_size += jeb->wasted_size;
957*4882a593Smuzhiyun 		c->dirty_size += jeb->wasted_size;
958*4882a593Smuzhiyun 		c->wasted_size -= jeb->wasted_size;
959*4882a593Smuzhiyun 		jeb->wasted_size = 0;
960*4882a593Smuzhiyun 	}
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	return jffs2_scan_classify_jeb(c, jeb);
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
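/* Look up the inode cache for 'ino', allocating a fresh entry (and bumping
 * c->highest_ino) if this is the first node seen for that inode. */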
965*4882a593Smuzhiyun struct jffs2_inode_cache *jffs2_scan_make_ino_cache(struct jffs2_sb_info *c, uint32_t ino)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun 	struct jffs2_inode_cache *ic;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	ic = jffs2_get_ino_cache(c, ino);
970*4882a593Smuzhiyun 	if (ic)
971*4882a593Smuzhiyun 		return ic;
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 	if (ino > c->highest_ino)
974*4882a593Smuzhiyun 		c->highest_ino = ino;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	ic = jffs2_alloc_inode_cache();
977*4882a593Smuzhiyun 	if (!ic) {
978*4882a593Smuzhiyun 		pr_notice("%s(): allocation of inode cache failed\n", __func__);
979*4882a593Smuzhiyun 		return NULL;
980*4882a593Smuzhiyun 	}
981*4882a593Smuzhiyun 	memset(ic, 0, sizeof(*ic));
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	ic->ino = ino;
984*4882a593Smuzhiyun 	ic->nodes = (void *)ic;
985*4882a593Smuzhiyun 	jffs2_add_ino_cache(c, ic);
986*4882a593Smuzhiyun 	if (ino == 1)
987*4882a593Smuzhiyun 		ic->pino_nlink = 1;
988*4882a593Smuzhiyun 	return ic;
989*4882a593Smuzhiyun }
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun static int jffs2_scan_inode_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
992*4882a593Smuzhiyun 				 struct jffs2_raw_inode *ri, uint32_t ofs, struct jffs2_summary *s)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun 	struct jffs2_inode_cache *ic;
995*4882a593Smuzhiyun 	uint32_t crc, ino = je32_to_cpu(ri->ino);
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	/* We do very little here now. Just check the ino# to which we should attribute
1000*4882a593Smuzhiyun 	   this node; we can do all the CRC checking etc. later. There's a tradeoff here --
1001*4882a593Smuzhiyun 	   we used to scan the flash once only, reading everything we want from it into
1002*4882a593Smuzhiyun 	   memory, then building all our in-core data structures and freeing the extra
1003*4882a593Smuzhiyun 	   information. Now we allow the first part of the mount to complete a lot quicker,
1004*4882a593Smuzhiyun 	   but we have to go _back_ to the flash in order to finish the CRC checking, etc.
1005*4882a593Smuzhiyun 	   Which means that the _full_ amount of time to get to proper write mode with GC
1006*4882a593Smuzhiyun 	   operational may actually be _longer_ than before. Sucks to be me. */
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 	/* Check the node CRC in any case. */
1009*4882a593Smuzhiyun 	crc = crc32(0, ri, sizeof(*ri)-8);
1010*4882a593Smuzhiyun 	if (crc != je32_to_cpu(ri->node_crc)) {
1011*4882a593Smuzhiyun 		pr_notice("%s(): CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
1012*4882a593Smuzhiyun 			  __func__, ofs, je32_to_cpu(ri->node_crc), crc);
1013*4882a593Smuzhiyun 		/*
1014*4882a593Smuzhiyun 		 * We believe totlen because the CRC on the node
1015*4882a593Smuzhiyun 		 * _header_ was OK, just the node itself failed.
1016*4882a593Smuzhiyun 		 */
1017*4882a593Smuzhiyun 		return jffs2_scan_dirty_space(c, jeb,
1018*4882a593Smuzhiyun 					      PAD(je32_to_cpu(ri->totlen)));
1019*4882a593Smuzhiyun 	}
1020*4882a593Smuzhiyun 
1021*4882a593Smuzhiyun 	ic = jffs2_get_ino_cache(c, ino);
1022*4882a593Smuzhiyun 	if (!ic) {
1023*4882a593Smuzhiyun 		ic = jffs2_scan_make_ino_cache(c, ino);
1024*4882a593Smuzhiyun 		if (!ic)
1025*4882a593Smuzhiyun 			return -ENOMEM;
1026*4882a593Smuzhiyun 	}
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	/* Wheee. It worked */
1029*4882a593Smuzhiyun 	jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic);
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	jffs2_dbg(1, "Node is ino #%u, version %d. Range 0x%x-0x%x\n",
1032*4882a593Smuzhiyun 		  je32_to_cpu(ri->ino), je32_to_cpu(ri->version),
1033*4882a593Smuzhiyun 		  je32_to_cpu(ri->offset),
1034*4882a593Smuzhiyun 		  je32_to_cpu(ri->offset)+je32_to_cpu(ri->dsize));
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	pseudo_random += je32_to_cpu(ri->version);
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	if (jffs2_sum_active()) {
1039*4882a593Smuzhiyun 		jffs2_sum_add_inode_mem(s, ri, ofs - jeb->offset);
1040*4882a593Smuzhiyun 	}
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	return 0;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun 
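/* Record a directory-entry node: verify the node and name CRCs, build a
 * jffs2_full_dirent and hang it off the parent inode's scan_dents list. */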
1045*4882a593Smuzhiyun static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
1046*4882a593Smuzhiyun 				  struct jffs2_raw_dirent *rd, uint32_t ofs, struct jffs2_summary *s)
1047*4882a593Smuzhiyun {
1048*4882a593Smuzhiyun 	struct jffs2_full_dirent *fd;
1049*4882a593Smuzhiyun 	struct jffs2_inode_cache *ic;
1050*4882a593Smuzhiyun 	uint32_t checkedlen;
1051*4882a593Smuzhiyun 	uint32_t crc;
1052*4882a593Smuzhiyun 	int err;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	jffs2_dbg(1, "%s(): Node at 0x%08x\n", __func__, ofs);
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	/* We don't get here unless the node is still valid, so we don't have to
1057*4882a593Smuzhiyun 	   mask in the ACCURATE bit any more. */
1058*4882a593Smuzhiyun 	crc = crc32(0, rd, sizeof(*rd)-8);
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	if (crc != je32_to_cpu(rd->node_crc)) {
1061*4882a593Smuzhiyun 		pr_notice("%s(): Node CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
1062*4882a593Smuzhiyun 			  __func__, ofs, je32_to_cpu(rd->node_crc), crc);
1063*4882a593Smuzhiyun 		/* We believe totlen because the CRC on the node _header_ was OK, just the node itself failed. */
1064*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
1065*4882a593Smuzhiyun 			return err;
1066*4882a593Smuzhiyun 		return 0;
1067*4882a593Smuzhiyun 	}
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	pseudo_random += je32_to_cpu(rd->version);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	/* Should never happen. Did. (OLPC trac #4184) */
1072*4882a593Smuzhiyun 	checkedlen = strnlen(rd->name, rd->nsize);
1073*4882a593Smuzhiyun 	if (checkedlen < rd->nsize) {
1074*4882a593Smuzhiyun 		pr_err("Dirent at %08x has zeroes in name. Truncating to %d chars\n",
1075*4882a593Smuzhiyun 		       ofs, checkedlen);
1076*4882a593Smuzhiyun 	}
1077*4882a593Smuzhiyun 	fd = jffs2_alloc_full_dirent(checkedlen+1);
1078*4882a593Smuzhiyun 	if (!fd) {
1079*4882a593Smuzhiyun 		return -ENOMEM;
1080*4882a593Smuzhiyun 	}
1081*4882a593Smuzhiyun 	memcpy(&fd->name, rd->name, checkedlen);
1082*4882a593Smuzhiyun 	fd->name[checkedlen] = 0;
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun 	crc = crc32(0, fd->name, checkedlen);
1085*4882a593Smuzhiyun 	if (crc != je32_to_cpu(rd->name_crc)) {
1086*4882a593Smuzhiyun 		pr_notice("%s(): Name CRC failed on node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
1087*4882a593Smuzhiyun 			  __func__, ofs, je32_to_cpu(rd->name_crc), crc);
1088*4882a593Smuzhiyun 		jffs2_dbg(1, "Name for which CRC failed is (now) '%s', ino #%d\n",
1089*4882a593Smuzhiyun 			  fd->name, je32_to_cpu(rd->ino));
1090*4882a593Smuzhiyun 		jffs2_free_full_dirent(fd);
1091*4882a593Smuzhiyun 		/* FIXME: Why do we believe totlen? */
1092*4882a593Smuzhiyun 		/* We believe totlen because the CRC on the node _header_ was OK, just the name failed. */
1093*4882a593Smuzhiyun 		if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rd->totlen)))))
1094*4882a593Smuzhiyun 			return err;
1095*4882a593Smuzhiyun 		return 0;
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 	ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino));
1098*4882a593Smuzhiyun 	if (!ic) {
1099*4882a593Smuzhiyun 		jffs2_free_full_dirent(fd);
1100*4882a593Smuzhiyun 		return -ENOMEM;
1101*4882a593Smuzhiyun 	}
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	fd->raw = jffs2_link_node_ref(c, jeb, ofs | dirent_node_state(rd),
1104*4882a593Smuzhiyun 				      PAD(je32_to_cpu(rd->totlen)), ic);
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	fd->next = NULL;
1107*4882a593Smuzhiyun 	fd->version = je32_to_cpu(rd->version);
1108*4882a593Smuzhiyun 	fd->ino = je32_to_cpu(rd->ino);
1109*4882a593Smuzhiyun 	fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
1110*4882a593Smuzhiyun 	fd->type = rd->type;
1111*4882a593Smuzhiyun 	jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	if (jffs2_sum_active()) {
1114*4882a593Smuzhiyun 		jffs2_sum_add_dirent_mem(s, rd, ofs - jeb->offset);
1115*4882a593Smuzhiyun 	}
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	return 0;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun static int count_list(struct list_head *l)
1121*4882a593Smuzhiyun {
1122*4882a593Smuzhiyun 	uint32_t count = 0;
1123*4882a593Smuzhiyun 	struct list_head *tmp;
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	list_for_each(tmp, l) {
1126*4882a593Smuzhiyun 		count++;
1127*4882a593Smuzhiyun 	}
1128*4882a593Smuzhiyun 	return count;
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /* Note: This breaks if list_empty(head). I don't care. You
1132*4882a593Smuzhiyun    might, if you copy this code and use it elsewhere :) */
1133*4882a593Smuzhiyun static void rotate_list(struct list_head *head, uint32_t count)
1134*4882a593Smuzhiyun {
1135*4882a593Smuzhiyun 	struct list_head *n = head->next;
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	list_del(head);
1138*4882a593Smuzhiyun 	while(count--) {
1139*4882a593Smuzhiyun 		n = n->next;
1140*4882a593Smuzhiyun 	}
1141*4882a593Smuzhiyun 	list_add(head, n);
1142*4882a593Smuzhiyun }
1143*4882a593Smuzhiyun 
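/* Rotate each block list by a pseudo-random amount, derived from the scan's
 * pseudo_random value, so successive mounts do not always start allocation
 * and garbage collection from the same blocks. */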
1144*4882a593Smuzhiyun void jffs2_rotate_lists(struct jffs2_sb_info *c)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	uint32_t x;
1147*4882a593Smuzhiyun 	uint32_t rotateby;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	x = count_list(&c->clean_list);
1150*4882a593Smuzhiyun 	if (x) {
1151*4882a593Smuzhiyun 		rotateby = pseudo_random % x;
1152*4882a593Smuzhiyun 		rotate_list((&c->clean_list), rotateby);
1153*4882a593Smuzhiyun 	}
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 	x = count_list(&c->very_dirty_list);
1156*4882a593Smuzhiyun 	if (x) {
1157*4882a593Smuzhiyun 		rotateby = pseudo_random % x;
1158*4882a593Smuzhiyun 		rotate_list((&c->very_dirty_list), rotateby);
1159*4882a593Smuzhiyun 	}
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	x = count_list(&c->dirty_list);
1162*4882a593Smuzhiyun 	if (x) {
1163*4882a593Smuzhiyun 		rotateby = pseudo_random % x;
1164*4882a593Smuzhiyun 		rotate_list((&c->dirty_list), rotateby);
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	x = count_list(&c->erasable_list);
1168*4882a593Smuzhiyun 	if (x) {
1169*4882a593Smuzhiyun 		rotateby = pseudo_random % x;
1170*4882a593Smuzhiyun 		rotate_list((&c->erasable_list), rotateby);
1171*4882a593Smuzhiyun 	}
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	if (c->nr_erasing_blocks) {
1174*4882a593Smuzhiyun 		rotateby = pseudo_random % c->nr_erasing_blocks;
1175*4882a593Smuzhiyun 		rotate_list((&c->erase_pending_list), rotateby);
1176*4882a593Smuzhiyun 	}
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	if (c->nr_free_blocks) {
1179*4882a593Smuzhiyun 		rotateby = pseudo_random % c->nr_free_blocks;
1180*4882a593Smuzhiyun 		rotate_list((&c->free_list), rotateby);
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun }
1183