/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 * Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/crc32.h>
#include <linux/compiler.h>
#include <linux/stat.h>
#include "nodelist.h"
#include "compr.h"

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw);
static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					  struct jffs2_inode_info *f, struct jffs2_full_dnode *fd);
static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd);
static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end);
static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end);
static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f);

/* Called with erase_completion_lock held */
static struct jffs2_eraseblock *jffs2_find_gc_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *ret;
	struct list_head *nextlist = NULL;
	int n = jiffies % 128;
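	/* 'n' is a cheap pseudo-random value in [0,128) used to weight the
	   choice of list below: roughly 50/128 erasable, 60/128 very_dirty,
	   16/128 dirty and 2/128 clean, modulo the fallbacks taken when a
	   list happens to be empty. */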

	/* Pick an eraseblock to garbage collect next. This is where we'll
	   put the clever wear-levelling algorithms. Eventually. */
	/* We possibly want to favour the dirtier blocks more when the
	   number of free blocks is low. */
 again:
	if (!list_empty(&c->bad_used_list) && c->nr_free_blocks > c->resv_blocks_gcbad) {
		jffs2_dbg(1, "Picking block from bad_used_list to GC next\n");
		nextlist = &c->bad_used_list;
	} else if (n < 50 && !list_empty(&c->erasable_list)) {
		/* Note that most of them will have gone directly to be erased.
		   So don't favour the erasable_list _too_ much. */
		jffs2_dbg(1, "Picking block from erasable_list to GC next\n");
		nextlist = &c->erasable_list;
	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
		/* Most of the time, pick one off the very_dirty list */
		jffs2_dbg(1, "Picking block from very_dirty_list to GC next\n");
		nextlist = &c->very_dirty_list;
	} else if (n < 126 && !list_empty(&c->dirty_list)) {
		jffs2_dbg(1, "Picking block from dirty_list to GC next\n");
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->clean_list)) {
		jffs2_dbg(1, "Picking block from clean_list to GC next\n");
		nextlist = &c->clean_list;
	} else if (!list_empty(&c->dirty_list)) {
		jffs2_dbg(1, "Picking block from dirty_list to GC next (clean_list was empty)\n");
		nextlist = &c->dirty_list;
	} else if (!list_empty(&c->very_dirty_list)) {
		jffs2_dbg(1, "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n");
		nextlist = &c->very_dirty_list;
	} else if (!list_empty(&c->erasable_list)) {
		jffs2_dbg(1, "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n");
		nextlist = &c->erasable_list;
	} else if (!list_empty(&c->erasable_pending_wbuf_list)) {
		/* There are blocks waiting for the wbuf sync */
		jffs2_dbg(1, "Synching wbuf in order to reuse erasable_pending_wbuf_list blocks\n");
		spin_unlock(&c->erase_completion_lock);
		jffs2_flush_wbuf_pad(c);
		spin_lock(&c->erase_completion_lock);
		goto again;
	} else {
		/* Eep. All were empty */
		jffs2_dbg(1, "No clean, dirty _or_ erasable blocks to GC from! Where are they all?\n");
		return NULL;
	}

	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
	list_del(&ret->list);
	c->gcblock = ret;
	ret->gc_node = ret->first_node;
	if (!ret->gc_node) {
		pr_warn("Eep. ret->gc_node for block at 0x%08x is NULL\n",
			ret->offset);
		BUG();
	}

	/* Have we accidentally picked a clean block with wasted space ? */
	if (ret->wasted_size) {
		jffs2_dbg(1, "Converting wasted_size %08x to dirty_size\n",
			  ret->wasted_size);
		ret->dirty_size += ret->wasted_size;
		c->wasted_size -= ret->wasted_size;
		c->dirty_size += ret->wasted_size;
		ret->wasted_size = 0;
	}

	return ret;
}

/* jffs2_garbage_collect_pass
 * Make a single attempt to progress GC. Move one node, and possibly
 * start erasing one eraseblock.
 */
int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
{
	struct jffs2_inode_info *f;
	struct jffs2_inode_cache *ic;
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *raw;
	uint32_t gcblock_dirty;
	int ret = 0, inum, nlink;
	int xattr = 0;

	if (mutex_lock_interruptible(&c->alloc_sem))
		return -EINTR;

	for (;;) {
		/* We can't start doing GC until we've finished checking
		   the node CRCs etc. */
		int bucket, want_ino;

		spin_lock(&c->erase_completion_lock);
		if (!c->unchecked_size)
			break;
		spin_unlock(&c->erase_completion_lock);

		if (!xattr)
			xattr = jffs2_verify_xattr(c);

		spin_lock(&c->inocache_lock);
		/* Instead of doing the inodes in numeric order, doing a lookup
		 * in the hash for each possible number, just walk the hash
		 * buckets of *existing* inodes. This means that we process
		 * them out-of-order, but it can be a lot faster if there's
		 * a sparse inode# space. Which there often is. */
		want_ino = c->check_ino;
		for (bucket = c->check_ino % c->inocache_hashsize; bucket < c->inocache_hashsize; bucket++) {
			for (ic = c->inocache_list[bucket]; ic; ic = ic->next) {
				if (ic->ino < want_ino)
					continue;

				if (ic->state != INO_STATE_CHECKEDABSENT &&
				    ic->state != INO_STATE_PRESENT)
					goto got_next; /* with inocache_lock held */

				jffs2_dbg(1, "Skipping ino #%u already checked\n",
					  ic->ino);
			}
			want_ino = 0;
		}

		/* Point c->check_ino past the end of the last bucket. */
		c->check_ino = ((c->highest_ino + c->inocache_hashsize + 1) &
				~c->inocache_hashsize) - 1;

		spin_unlock(&c->inocache_lock);

		pr_crit("Checked all inodes but still 0x%x bytes of unchecked space?\n",
			c->unchecked_size);
		jffs2_dbg_dump_block_lists_nolock(c);
		mutex_unlock(&c->alloc_sem);
		return -ENOSPC;

	got_next:
		/* For next time round the loop, we want c->check_ino to indicate
		 * the *next* one we want to check. And since we're walking the
		 * buckets rather than doing it sequentially, it's: */
		c->check_ino = ic->ino + c->inocache_hashsize;

		if (!ic->pino_nlink) {
			jffs2_dbg(1, "Skipping check of ino #%d with nlink/pino zero\n",
				  ic->ino);
			spin_unlock(&c->inocache_lock);
			jffs2_xattr_delete_inode(c, ic);
			continue;
		}
		switch (ic->state) {
		case INO_STATE_CHECKEDABSENT:
		case INO_STATE_PRESENT:
			spin_unlock(&c->inocache_lock);
			continue;

		case INO_STATE_GC:
		case INO_STATE_CHECKING:
			pr_warn("Inode #%u is in state %d during CRC check phase!\n",
				ic->ino, ic->state);
			spin_unlock(&c->inocache_lock);
			BUG();

		case INO_STATE_READING:
			/* We need to wait for it to finish, lest we move on
			   and trigger the BUG() above while we haven't yet
			   finished checking all its nodes */
			jffs2_dbg(1, "Waiting for ino #%u to finish reading\n",
				  ic->ino);
			/* We need to come back again for the _same_ inode. We've
			   made no progress in this case, but that should be OK */
			c->check_ino = ic->ino;

			mutex_unlock(&c->alloc_sem);
			sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
			return 0;

		default:
			BUG();

		case INO_STATE_UNCHECKED:
			;
		}
		ic->state = INO_STATE_CHECKING;
		spin_unlock(&c->inocache_lock);

		jffs2_dbg(1, "%s(): triggering inode scan of ino#%u\n",
			  __func__, ic->ino);

		ret = jffs2_do_crccheck_inode(c, ic);
		if (ret)
			pr_warn("Returned error for crccheck of ino #%u. Expect badness...\n",
				ic->ino);

		jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT);
		mutex_unlock(&c->alloc_sem);
		return ret;
	}

	/* If there are any blocks which need erasing, erase them now */
	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list)) {
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->alloc_sem);
		jffs2_dbg(1, "%s(): erasing pending blocks\n", __func__);
		if (jffs2_erase_pending_blocks(c, 1))
			return 0;

		jffs2_dbg(1, "No progress from erasing block; doing GC anyway\n");
		mutex_lock(&c->alloc_sem);
		spin_lock(&c->erase_completion_lock);
	}

	/* First, work out which block we're garbage-collecting */
	jeb = c->gcblock;

	if (!jeb)
		jeb = jffs2_find_gc_block(c);

	if (!jeb) {
		/* Couldn't find a free block. But maybe we can just erase one and make 'progress'? */
		if (c->nr_erasing_blocks) {
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			return -EAGAIN;
		}
		jffs2_dbg(1, "Couldn't find erase block to garbage collect!\n");
		spin_unlock(&c->erase_completion_lock);
		mutex_unlock(&c->alloc_sem);
		return -EIO;
	}

	jffs2_dbg(1, "GC from block %08x, used_size %08x, dirty_size %08x, free_size %08x\n",
		  jeb->offset, jeb->used_size, jeb->dirty_size, jeb->free_size);
	D1(if (c->nextblock)
	   printk(KERN_DEBUG "Nextblock at %08x, used_size %08x, dirty_size %08x, wasted_size %08x, free_size %08x\n", c->nextblock->offset, c->nextblock->used_size, c->nextblock->dirty_size, c->nextblock->wasted_size, c->nextblock->free_size));

	if (!jeb->used_size) {
		mutex_unlock(&c->alloc_sem);
		goto eraseit;
	}

	raw = jeb->gc_node;
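	/* Snapshot the block's dirty_size so that test_gcnode below can tell
	   whether this pass actually obsoleted anything. */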
	gcblock_dirty = jeb->dirty_size;

	while (ref_obsolete(raw)) {
		jffs2_dbg(1, "Node at 0x%08x is obsolete... skipping\n",
			  ref_offset(raw));
		raw = ref_next(raw);
		if (unlikely(!raw)) {
			pr_warn("eep. End of raw list while still supposedly nodes to GC\n");
			pr_warn("erase block at 0x%08x. free_size 0x%08x, dirty_size 0x%08x, used_size 0x%08x\n",
				jeb->offset, jeb->free_size,
				jeb->dirty_size, jeb->used_size);
			jeb->gc_node = raw;
			spin_unlock(&c->erase_completion_lock);
			mutex_unlock(&c->alloc_sem);
			BUG();
		}
	}
	jeb->gc_node = raw;

	jffs2_dbg(1, "Going to garbage collect node at 0x%08x\n",
		  ref_offset(raw));

	if (!raw->next_in_ino) {
		/* Inode-less node. Clean marker, snapshot or something like that */
		spin_unlock(&c->erase_completion_lock);
		if (ref_flags(raw) == REF_PRISTINE) {
			/* It's an unknown node with JFFS2_FEATURE_RWCOMPAT_COPY */
			jffs2_garbage_collect_pristine(c, NULL, raw);
		} else {
			/* Just mark it obsolete */
			jffs2_mark_node_obsolete(c, raw);
		}
		mutex_unlock(&c->alloc_sem);
		goto eraseit_lock;
	}

	ic = jffs2_raw_ref_to_ic(raw);

#ifdef CONFIG_JFFS2_FS_XATTR
	/* When 'ic' refers to an xattr_datum/xattr_ref, this node is GCed as an xattr.
	 * We can decide whether this node is inode or xattr by ic->class. */
	if (ic->class == RAWNODE_CLASS_XATTR_DATUM
	    || ic->class == RAWNODE_CLASS_XATTR_REF) {
		spin_unlock(&c->erase_completion_lock);

		if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
			ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
		} else {
			ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
		}
		goto test_gcnode;
	}
#endif

	/* We need to hold the inocache. Either the erase_completion_lock or
	   the inocache_lock is sufficient; we trade down since the inocache_lock
	   causes less contention. */
	spin_lock(&c->inocache_lock);

	spin_unlock(&c->erase_completion_lock);

	jffs2_dbg(1, "%s(): collecting from block @0x%08x. Node @0x%08x(%d), ino #%u\n",
		  __func__, jeb->offset, ref_offset(raw), ref_flags(raw),
		  ic->ino);

	/* Three possibilities:
	   1. Inode is already in-core. We must iget it and do proper
	      updating to its fragtree, etc.
	   2. Inode is not in-core, node is REF_PRISTINE. We lock the
	      inocache to prevent a read_inode(), copy the node intact.
	   3. Inode is not in-core, node is not pristine. We must iget()
	      and take the slow path.
	*/

	switch (ic->state) {
	case INO_STATE_CHECKEDABSENT:
		/* It's been checked, but it's not currently in-core.
		   We can just copy any pristine nodes, but have
		   to prevent anyone else from doing read_inode() while
		   we're at it, so we set the state accordingly */
		if (ref_flags(raw) == REF_PRISTINE)
			ic->state = INO_STATE_GC;
		else {
			jffs2_dbg(1, "Ino #%u is absent but node not REF_PRISTINE. Reading.\n",
				  ic->ino);
		}
		break;

	case INO_STATE_PRESENT:
		/* It's in-core. GC must iget() it. */
		break;

	case INO_STATE_UNCHECKED:
	case INO_STATE_CHECKING:
	case INO_STATE_GC:
		/* Should never happen. We should have finished checking
		   by the time we actually start doing any GC, and since
		   we're holding the alloc_sem, no other garbage collection
		   can happen.
		*/
		pr_crit("Inode #%u already in state %d in jffs2_garbage_collect_pass()!\n",
			ic->ino, ic->state);
		mutex_unlock(&c->alloc_sem);
		spin_unlock(&c->inocache_lock);
		BUG();

	case INO_STATE_READING:
		/* Someone's currently trying to read it. We must wait for
		   them to finish and then go through the full iget() route
		   to do the GC. However, sometimes read_inode() needs to get
		   the alloc_sem() (for marking nodes invalid) so we must
		   drop the alloc_sem before sleeping. */

		mutex_unlock(&c->alloc_sem);
		jffs2_dbg(1, "%s(): waiting for ino #%u in state %d\n",
			  __func__, ic->ino, ic->state);
		sleep_on_spinunlock(&c->inocache_wq, &c->inocache_lock);
		/* And because we dropped the alloc_sem we must start again from the
		   beginning. Ponder chance of livelock here -- we're returning success
		   without actually making any progress.

		   Q: What are the chances that the inode is back in INO_STATE_READING
		   again by the time we next enter this function? And that this happens
		   enough times to cause a real delay?

		   A: Small enough that I don't care :)
		*/
		return 0;
	}

	/* OK. Now if the inode is in state INO_STATE_GC, we are going to copy the
	   node intact, and we don't have to muck about with the fragtree etc.
	   because we know it's not in-core. If it _was_ in-core, we go through
	   all the iget() crap anyway */

	if (ic->state == INO_STATE_GC) {
		spin_unlock(&c->inocache_lock);

		ret = jffs2_garbage_collect_pristine(c, ic, raw);

		spin_lock(&c->inocache_lock);
		ic->state = INO_STATE_CHECKEDABSENT;
		wake_up(&c->inocache_wq);

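		/* -EBADFD from jffs2_garbage_collect_pristine() means the node
		   couldn't be copied intact; fall through to the full iget()
		   path below. Anything else means we're done with this node. */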
		if (ret != -EBADFD) {
			spin_unlock(&c->inocache_lock);
			goto test_gcnode;
		}

		/* Fall through if it wanted us to, with inocache_lock held */
	}

	/* Prevent the fairly unlikely race where the gcblock is
	   entirely obsoleted by the final close of a file which had
	   the only valid nodes in the block, followed by erasure,
	   followed by freeing of the ic because the erased block(s)
	   held _all_ the nodes of that inode.... never been seen but
	   it's vaguely possible. */

	inum = ic->ino;
	nlink = ic->pino_nlink;
	spin_unlock(&c->inocache_lock);

	f = jffs2_gc_fetch_inode(c, inum, !nlink);
	if (IS_ERR(f)) {
		ret = PTR_ERR(f);
		goto release_sem;
	}
	if (!f) {
		ret = 0;
		goto release_sem;
	}

	ret = jffs2_garbage_collect_live(c, jeb, raw, f);

	jffs2_gc_release_inode(c, f);

 test_gcnode:
	if (jeb->dirty_size == gcblock_dirty && !ref_obsolete(jeb->gc_node)) {
		/* Eep. This really should never happen. GC is broken */
		pr_err("Error garbage collecting node at %08x!\n",
		       ref_offset(jeb->gc_node));
		ret = -ENOSPC;
	}
 release_sem:
	mutex_unlock(&c->alloc_sem);

 eraseit_lock:
	/* If we've finished this block, start it erasing */
	spin_lock(&c->erase_completion_lock);

 eraseit:
	if (c->gcblock && !c->gcblock->used_size) {
		jffs2_dbg(1, "Block at 0x%08x completely obsoleted by GC. Moving to erase_pending_list\n",
			  c->gcblock->offset);
		/* We're GC'ing an empty block? */
		list_add_tail(&c->gcblock->list, &c->erase_pending_list);
		c->gcblock = NULL;
		c->nr_erasing_blocks++;
		jffs2_garbage_collect_trigger(c);
	}
	spin_unlock(&c->erase_completion_lock);

	return ret;
}

static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_raw_node_ref *raw, struct jffs2_inode_info *f)
{
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *fn = NULL;
	struct jffs2_full_dirent *fd;
	uint32_t start = 0, end = 0, nrfrags = 0;
	int ret = 0;

	mutex_lock(&f->sem);

	/* Now we have the lock for this inode. Check that it's still the one at the head
	   of the list. */

	spin_lock(&c->erase_completion_lock);

	if (c->gcblock != jeb) {
		spin_unlock(&c->erase_completion_lock);
		jffs2_dbg(1, "GC block is no longer gcblock. Restart\n");
		goto upnout;
	}
	if (ref_obsolete(raw)) {
		spin_unlock(&c->erase_completion_lock);
		jffs2_dbg(1, "node to be GC'd was obsoleted in the meantime.\n");
		/* They'll call again */
		goto upnout;
	}
	spin_unlock(&c->erase_completion_lock);

	/* OK. Looks safe. And nobody can get us now because we have the semaphore. Move the block */
	if (f->metadata && f->metadata->raw == raw) {
		fn = f->metadata;
		ret = jffs2_garbage_collect_metadata(c, jeb, f, fn);
		goto upnout;
	}

	/* FIXME. Read node and do lookup? */
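	/* Walk the fragtree looking for every frag backed by this raw node,
	   recording the [start, end) range of file data those frags cover. */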
	for (frag = frag_first(&f->fragtree); frag; frag = frag_next(frag)) {
		if (frag->node && frag->node->raw == raw) {
			fn = frag->node;
			end = frag->ofs + frag->size;
			if (!nrfrags++)
				start = frag->ofs;
			if (nrfrags == frag->node->frags)
				break; /* We've found them all */
		}
	}
	if (fn) {
		if (ref_flags(raw) == REF_PRISTINE) {
			ret = jffs2_garbage_collect_pristine(c, f->inocache, raw);
			if (!ret) {
				/* Urgh. Return it sensibly. */
				frag->node->raw = f->inocache->nodes;
			}
			if (ret != -EBADFD)
				goto upnout;
		}
		/* We found a datanode. Do the GC */
		if ((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) {
			/* It crosses a page boundary. Therefore, it must be a hole. */
			ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end);
		} else {
			/* It could still be a hole. But we GC the page this way anyway */
			ret = jffs2_garbage_collect_dnode(c, jeb, f, fn, start, end);
		}
		goto upnout;
	}

	/* Wasn't a dnode. Try dirent */
	for (fd = f->dents; fd; fd = fd->next) {
		if (fd->raw == raw)
			break;
	}

	if (fd && fd->ino) {
		ret = jffs2_garbage_collect_dirent(c, jeb, f, fd);
	} else if (fd) {
		ret = jffs2_garbage_collect_deletion_dirent(c, jeb, f, fd);
	} else {
		pr_warn("Raw node at 0x%08x wasn't in node lists for ino #%u\n",
			ref_offset(raw), f->inocache->ino);
		if (ref_obsolete(raw)) {
			pr_warn("But it's obsolete so we don't mind too much\n");
		} else {
			jffs2_dbg_dump_node(c, ref_offset(raw));
			BUG();
		}
	}
 upnout:
	mutex_unlock(&f->sem);

	return ret;
}

static int jffs2_garbage_collect_pristine(struct jffs2_sb_info *c,
					  struct jffs2_inode_cache *ic,
					  struct jffs2_raw_node_ref *raw)
{
	union jffs2_node_union *node;
	size_t retlen;
	int ret;
	uint32_t phys_ofs, alloclen;
	uint32_t crc, rawlen;
	int retried = 0;

	jffs2_dbg(1, "Going to GC REF_PRISTINE node at 0x%08x\n",
		  ref_offset(raw));

	alloclen = rawlen = ref_totlen(c, c->gcblock, raw);

	/* Ask for a small amount of space (or the totlen if smaller) because we
	   don't want to force wastage of the end of a block if splitting would
	   work. */
	if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN)
		alloclen = sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN;

	ret = jffs2_reserve_space_gc(c, alloclen, &alloclen, rawlen);
	/* 'rawlen' is not the exact summary size; it is only an upper estimation */

	if (ret)
		return ret;

	if (alloclen < rawlen) {
		/* Doesn't fit untouched. We'll go the old route and split it */
		return -EBADFD;
	}

	node = kmalloc(rawlen, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)node);
	if (!ret && retlen != rawlen)
		ret = -EIO;
	if (ret)
		goto out_node;

	crc = crc32(0, node, sizeof(struct jffs2_unknown_node)-4);
	if (je32_to_cpu(node->u.hdr_crc) != crc) {
		pr_warn("Header CRC failed on REF_PRISTINE node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
			ref_offset(raw), je32_to_cpu(node->u.hdr_crc), crc);
		goto bail;
	}

	switch (je16_to_cpu(node->u.nodetype)) {
	case JFFS2_NODETYPE_INODE:
		crc = crc32(0, node, sizeof(node->i)-8);
		if (je32_to_cpu(node->i.node_crc) != crc) {
			pr_warn("Node CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				ref_offset(raw), je32_to_cpu(node->i.node_crc),
				crc);
			goto bail;
		}

		if (je32_to_cpu(node->i.dsize)) {
			crc = crc32(0, node->i.data, je32_to_cpu(node->i.csize));
			if (je32_to_cpu(node->i.data_crc) != crc) {
				pr_warn("Data CRC failed on REF_PRISTINE data node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
					ref_offset(raw),
					je32_to_cpu(node->i.data_crc), crc);
				goto bail;
			}
		}
		break;

	case JFFS2_NODETYPE_DIRENT:
		crc = crc32(0, node, sizeof(node->d)-8);
		if (je32_to_cpu(node->d.node_crc) != crc) {
			pr_warn("Node CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
				ref_offset(raw),
				je32_to_cpu(node->d.node_crc), crc);
			goto bail;
		}

		if (strnlen(node->d.name, node->d.nsize) != node->d.nsize) {
			pr_warn("Name in dirent node at 0x%08x contains zeroes\n",
				ref_offset(raw));
			goto bail;
		}

		if (node->d.nsize) {
			crc = crc32(0, node->d.name, node->d.nsize);
			if (je32_to_cpu(node->d.name_crc) != crc) {
				pr_warn("Name CRC failed on REF_PRISTINE dirent node at 0x%08x: Read 0x%08x, calculated 0x%08x\n",
					ref_offset(raw),
					je32_to_cpu(node->d.name_crc), crc);
				goto bail;
			}
		}
		break;
	default:
		/* If it's inode-less, we don't _know_ what it is. Just copy it intact */
		if (ic) {
			pr_warn("Unknown node type for REF_PRISTINE node at 0x%08x: 0x%04x\n",
				ref_offset(raw), je16_to_cpu(node->u.nodetype));
			goto bail;
		}
	}

	/* OK, all the CRCs are good; this node can just be copied as-is. */
 retry:
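	/* write_ofs() gives the physical offset at which the space reserved
	   above starts, i.e. where this copy of the node will land. */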
	phys_ofs = write_ofs(c);

	ret = jffs2_flash_write(c, phys_ofs, rawlen, &retlen, (char *)node);

	if (ret || (retlen != rawlen)) {
		pr_notice("Write of %d bytes at 0x%08x failed. returned %d, retlen %zd\n",
			  rawlen, phys_ofs, ret, retlen);
		if (retlen) {
			jffs2_add_physical_node_ref(c, phys_ofs | REF_OBSOLETE, rawlen, NULL);
		} else {
			pr_notice("Not marking the space at 0x%08x as dirty because the flash driver returned retlen zero\n",
				  phys_ofs);
		}
		if (!retried) {
			/* Try to reallocate space and retry */
			uint32_t dummy;
			struct jffs2_eraseblock *jeb = &c->blocks[phys_ofs / c->sector_size];

			retried = 1;

			jffs2_dbg(1, "Retrying failed write of REF_PRISTINE node.\n");

			jffs2_dbg_acct_sanity_check(c, jeb);
			jffs2_dbg_acct_paranoia_check(c, jeb);

			ret = jffs2_reserve_space_gc(c, rawlen, &dummy, rawlen);
			/* this is not the exact summary size of it,
			   it is only an upper estimation */

			if (!ret) {
				jffs2_dbg(1, "Allocated space at 0x%08x to retry failed write.\n",
					  phys_ofs);

				jffs2_dbg_acct_sanity_check(c, jeb);
				jffs2_dbg_acct_paranoia_check(c, jeb);

				goto retry;
			}
			jffs2_dbg(1, "Failed to allocate space to retry failed write: %d!\n",
				  ret);
		}

		if (!ret)
			ret = -EIO;
		goto out_node;
	}
	jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic);

	jffs2_mark_node_obsolete(c, raw);
	jffs2_dbg(1, "WHEEE! GC REF_PRISTINE node at 0x%08x succeeded\n",
		  ref_offset(raw));

 out_node:
	kfree(node);
	return ret;
 bail:
	ret = -EBADFD;
	goto out_node;
}

static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					  struct jffs2_inode_info *f, struct jffs2_full_dnode *fn)
{
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *last_frag;
	union jffs2_device_node dev;
	char *mdata = NULL;
	int mdatalen = 0;
	uint32_t alloclen, ilen;
	int ret;

	if (S_ISBLK(JFFS2_F_I_MODE(f)) ||
	    S_ISCHR(JFFS2_F_I_MODE(f))) {
		/* For these, we don't actually need to read the old node */
		mdatalen = jffs2_encode_dev(&dev, JFFS2_F_I_RDEV(f));
		mdata = (char *)&dev;
		jffs2_dbg(1, "%s(): Writing %d bytes of kdev_t\n",
			  __func__, mdatalen);
	} else if (S_ISLNK(JFFS2_F_I_MODE(f))) {
		mdatalen = fn->size;
		mdata = kmalloc(fn->size, GFP_KERNEL);
		if (!mdata) {
			pr_warn("kmalloc of mdata failed in jffs2_garbage_collect_metadata()\n");
			return -ENOMEM;
		}
		ret = jffs2_read_dnode(c, f, fn, mdata, 0, mdatalen);
		if (ret) {
			pr_warn("read of old metadata failed in jffs2_garbage_collect_metadata(): %d\n",
				ret);
			kfree(mdata);
			return ret;
		}
		jffs2_dbg(1, "%s(): Writing %d bytes of symlink target\n",
			  __func__, mdatalen);
	}

	ret = jffs2_reserve_space_gc(c, sizeof(ri) + mdatalen, &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_metadata failed: %d\n",
			sizeof(ri) + mdatalen, ret);
		goto out;
	}

	last_frag = frag_last(&f->fragtree);
	if (last_frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size since i_size may have not been updated yet */
		ilen = last_frag->ofs + last_frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	memset(&ri, 0, sizeof(ri));
	ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
	ri.totlen = cpu_to_je32(sizeof(ri) + mdatalen);
	ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

	ri.ino = cpu_to_je32(f->inocache->ino);
	ri.version = cpu_to_je32(++f->highest_version);
	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.offset = cpu_to_je32(0);
	ri.csize = cpu_to_je32(mdatalen);
	ri.dsize = cpu_to_je32(mdatalen);
	ri.compr = JFFS2_COMPR_NONE;
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
	ri.data_crc = cpu_to_je32(crc32(0, mdata, mdatalen));

	new_fn = jffs2_write_dnode(c, f, &ri, mdata, mdatalen, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		pr_warn("Error writing new dnode: %ld\n", PTR_ERR(new_fn));
		ret = PTR_ERR(new_fn);
		goto out;
	}
	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);
	f->metadata = new_fn;
 out:
	if (S_ISLNK(JFFS2_F_I_MODE(f)))
		kfree(mdata);
	return ret;
}

static int jffs2_garbage_collect_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent *new_fd;
	struct jffs2_raw_dirent rd;
	uint32_t alloclen;
	int ret;

	rd.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	rd.nodetype = cpu_to_je16(JFFS2_NODETYPE_DIRENT);
	rd.nsize = strlen(fd->name);
	rd.totlen = cpu_to_je32(sizeof(rd) + rd.nsize);
	rd.hdr_crc = cpu_to_je32(crc32(0, &rd, sizeof(struct jffs2_unknown_node)-4));

	rd.pino = cpu_to_je32(f->inocache->ino);
	rd.version = cpu_to_je32(++f->highest_version);
	rd.ino = cpu_to_je32(fd->ino);
	/* If the times on this inode were set by explicit utime() they can be different,
	   so refrain from splatting them. */
	if (JFFS2_F_I_MTIME(f) == JFFS2_F_I_CTIME(f))
		rd.mctime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	else
		rd.mctime = cpu_to_je32(0);
	rd.type = fd->type;
	rd.node_crc = cpu_to_je32(crc32(0, &rd, sizeof(rd)-8));
	rd.name_crc = cpu_to_je32(crc32(0, fd->name, rd.nsize));

	ret = jffs2_reserve_space_gc(c, sizeof(rd) + rd.nsize, &alloclen,
				     JFFS2_SUMMARY_DIRENT_SIZE(rd.nsize));
	if (ret) {
		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dirent failed: %d\n",
			sizeof(rd) + rd.nsize, ret);
		return ret;
	}
	new_fd = jffs2_write_dirent(c, f, &rd, fd->name, rd.nsize, ALLOC_GC);

	if (IS_ERR(new_fd)) {
		pr_warn("jffs2_write_dirent in garbage_collect_dirent failed: %ld\n",
			PTR_ERR(new_fd));
		return PTR_ERR(new_fd);
	}
	jffs2_add_fd_to_list(c, new_fd, &f->dents);
	return 0;
}

static int jffs2_garbage_collect_deletion_dirent(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
					struct jffs2_inode_info *f, struct jffs2_full_dirent *fd)
{
	struct jffs2_full_dirent **fdp = &f->dents;
	int found = 0;

	/* On a medium where we can't actually mark nodes obsolete
	   permanently, such as NAND flash, we need to work out
	   whether this deletion dirent is still needed to actively
	   delete a 'real' dirent with the same name that's still
	   somewhere else on the flash. */
	if (!jffs2_can_mark_obsolete(c)) {
		struct jffs2_raw_dirent *rd;
		struct jffs2_raw_node_ref *raw;
		int ret;
		size_t retlen;
		int name_len = strlen(fd->name);
		uint32_t name_crc = crc32(0, fd->name, name_len);
		uint32_t rawlen = ref_totlen(c, jeb, fd->raw);

		rd = kmalloc(rawlen, GFP_KERNEL);
		if (!rd)
			return -ENOMEM;

		/* Prevent the erase code from nicking the obsolete node refs while
		   we're looking at them. I really don't like this extra lock but
		   can't see any alternative. Suggestions on a postcard to... */
		mutex_lock(&c->erase_free_sem);

		for (raw = f->inocache->nodes; raw != (void *)f->inocache; raw = raw->next_in_ino) {

			cond_resched();

			/* We only care about obsolete ones */
			if (!(ref_obsolete(raw)))
				continue;

			/* Any dirent with the same name is going to have the same length... */
			if (ref_totlen(c, NULL, raw) != rawlen)
				continue;

			/* Doesn't matter if there's one in the same erase block. We're going to
			   delete it too at the same time. */
			if (SECTOR_ADDR(raw->flash_offset) == SECTOR_ADDR(fd->raw->flash_offset))
				continue;

			jffs2_dbg(1, "Check potential deletion dirent at %08x\n",
				  ref_offset(raw));

			/* This is an obsolete node belonging to the same directory, and it's of the right
			   length. We need to take a closer look...*/
			ret = jffs2_flash_read(c, ref_offset(raw), rawlen, &retlen, (char *)rd);
			if (ret) {
				pr_warn("%s(): Read error (%d) reading obsolete node at %08x\n",
					__func__, ret, ref_offset(raw));
				/* If we can't read it, we don't need to continue to obsolete it. Continue */
				continue;
			}
			if (retlen != rawlen) {
				pr_warn("%s(): Short read (%zd not %u) reading header from obsolete node at %08x\n",
					__func__, retlen, rawlen,
					ref_offset(raw));
				continue;
			}

			if (je16_to_cpu(rd->nodetype) != JFFS2_NODETYPE_DIRENT)
				continue;

			/* If the name CRC doesn't match, skip */
			if (je32_to_cpu(rd->name_crc) != name_crc)
				continue;

			/* If the name length doesn't match, or it's another deletion dirent, skip */
			if (rd->nsize != name_len || !je32_to_cpu(rd->ino))
				continue;

			/* OK, check the actual name now */
			if (memcmp(rd->name, fd->name, name_len))
				continue;

			/* OK. The name really does match. There really is still an older node on
			   the flash which our deletion dirent obsoletes. So we have to write out
			   a new deletion dirent to replace it */
			mutex_unlock(&c->erase_free_sem);

			jffs2_dbg(1, "Deletion dirent at %08x still obsoletes real dirent \"%s\" at %08x for ino #%u\n",
				  ref_offset(fd->raw), fd->name,
				  ref_offset(raw), je32_to_cpu(rd->ino));
			kfree(rd);

			return jffs2_garbage_collect_dirent(c, jeb, f, fd);
		}

		mutex_unlock(&c->erase_free_sem);
		kfree(rd);
	}

	/* FIXME: If we're deleting a dirent which contains the current mtime and ctime,
	   we should update the metadata node with those times accordingly */

	/* No need for it any more. Just mark it obsolete and remove it from the list */
	while (*fdp) {
		if ((*fdp) == fd) {
			found = 1;
			*fdp = fd->next;
			break;
		}
		fdp = &(*fdp)->next;
	}
	if (!found) {
		pr_warn("Deletion dirent \"%s\" not found in list for ino #%u\n",
			fd->name, f->inocache->ino);
	}
	jffs2_mark_node_obsolete(c, fd->raw);
	jffs2_free_full_dirent(fd);
	return 0;
}

static int jffs2_garbage_collect_hole(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb,
				      struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				      uint32_t start, uint32_t end)
{
	struct jffs2_raw_inode ri;
	struct jffs2_node_frag *frag;
	struct jffs2_full_dnode *new_fn;
	uint32_t alloclen, ilen;
	int ret;

	jffs2_dbg(1, "Writing replacement hole node for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end);

	memset(&ri, 0, sizeof(ri));

1028*4882a593Smuzhiyun if(fn->frags > 1) {
1029*4882a593Smuzhiyun size_t readlen;
1030*4882a593Smuzhiyun uint32_t crc;
1031*4882a593Smuzhiyun /* It's partially obsoleted by a later write. So we have to
1032*4882a593Smuzhiyun write it out again with the _same_ version as before */
1033*4882a593Smuzhiyun ret = jffs2_flash_read(c, ref_offset(fn->raw), sizeof(ri), &readlen, (char *)&ri);
1034*4882a593Smuzhiyun if (readlen != sizeof(ri) || ret) {
1035*4882a593Smuzhiyun pr_warn("Node read failed in jffs2_garbage_collect_hole. Ret %d, retlen %zd. Data will be lost by writing new hole node\n",
1036*4882a593Smuzhiyun ret, readlen);
1037*4882a593Smuzhiyun goto fill;
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun if (je16_to_cpu(ri.nodetype) != JFFS2_NODETYPE_INODE) {
1040*4882a593Smuzhiyun pr_warn("%s(): Node at 0x%08x had node type 0x%04x instead of JFFS2_NODETYPE_INODE(0x%04x)\n",
1041*4882a593Smuzhiyun __func__, ref_offset(fn->raw),
1042*4882a593Smuzhiyun je16_to_cpu(ri.nodetype), JFFS2_NODETYPE_INODE);
1043*4882a593Smuzhiyun return -EIO;
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun if (je32_to_cpu(ri.totlen) != sizeof(ri)) {
1046*4882a593Smuzhiyun pr_warn("%s(): Node at 0x%08x had totlen 0x%x instead of expected 0x%zx\n",
1047*4882a593Smuzhiyun __func__, ref_offset(fn->raw),
1048*4882a593Smuzhiyun je32_to_cpu(ri.totlen), sizeof(ri));
1049*4882a593Smuzhiyun return -EIO;
1050*4882a593Smuzhiyun }
1051*4882a593Smuzhiyun crc = crc32(0, &ri, sizeof(ri)-8);
1052*4882a593Smuzhiyun if (crc != je32_to_cpu(ri.node_crc)) {
1053*4882a593Smuzhiyun pr_warn("%s: Node at 0x%08x had CRC 0x%08x which doesn't match calculated CRC 0x%08x\n",
1054*4882a593Smuzhiyun __func__, ref_offset(fn->raw),
1055*4882a593Smuzhiyun je32_to_cpu(ri.node_crc), crc);
1056*4882a593Smuzhiyun /* FIXME: We could possibly deal with this by writing new holes for each frag */
1057*4882a593Smuzhiyun pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
1058*4882a593Smuzhiyun start, end, f->inocache->ino);
1059*4882a593Smuzhiyun goto fill;
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun if (ri.compr != JFFS2_COMPR_ZERO) {
1062*4882a593Smuzhiyun pr_warn("%s(): Node 0x%08x wasn't a hole node!\n",
1063*4882a593Smuzhiyun __func__, ref_offset(fn->raw));
1064*4882a593Smuzhiyun pr_warn("Data in the range 0x%08x to 0x%08x of inode #%u will be lost\n",
1065*4882a593Smuzhiyun start, end, f->inocache->ino);
1066*4882a593Smuzhiyun goto fill;
1067*4882a593Smuzhiyun }
	} else {
	fill:
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.offset = cpu_to_je32(start);
		ri.dsize = cpu_to_je32(end - start);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
	}
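	/* Illustrative example: a hole covering bytes 0x1000-0x3000 of the
	   inode is stored as a bare raw inode header with offset == 0x1000,
	   dsize == 0x2000, csize == 0 and compr == JFFS2_COMPR_ZERO; no
	   data payload follows the header on the medium. */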

	frag = frag_last(&f->fragtree);
	if (frag)
		/* Fetch the inode length from the fragtree rather than
		 * from i_size since i_size may have not been updated yet */
		ilen = frag->ofs + frag->size;
	else
		ilen = JFFS2_F_I_SIZE(f);

	ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
	ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
	ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
	ri.isize = cpu_to_je32(ilen);
	ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
	ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
	ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
	ri.data_crc = cpu_to_je32(0);
	ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));

	ret = jffs2_reserve_space_gc(c, sizeof(ri), &alloclen,
				     JFFS2_SUMMARY_INODE_SIZE);
	if (ret) {
		pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_hole failed: %d\n",
			sizeof(ri), ret);
		return ret;
	}
	new_fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_GC);

	if (IS_ERR(new_fn)) {
		pr_warn("Error writing new hole node: %ld\n", PTR_ERR(new_fn));
		return PTR_ERR(new_fn);
	}
	if (je32_to_cpu(ri.version) == f->highest_version) {
		jffs2_add_full_dnode_to_inode(c, f, new_fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		return 0;
	}

	/*
	 * We should only get here in the case where the node we are
	 * replacing had more than one frag, so we kept the same version
	 * number as before. (Except in case of error -- see 'goto fill;'
	 * above.)
	 */
	D1(if(unlikely(fn->frags <= 1)) {
		pr_warn("%s(): Replacing fn with %d frag(s) but new ver %d != highest_version %d of ino #%d\n",
			__func__, fn->frags, je32_to_cpu(ri.version),
			f->highest_version, je32_to_cpu(ri.ino));
	});

	/* This is a partially-overlapped hole node. Mark it REF_NORMAL not REF_PRISTINE */
	mark_ref_normal(new_fn->raw);

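	/* Hand every fragtree entry that still points at the old node over
	   to its replacement, keeping both per-node frag counts in step. */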
	for (frag = jffs2_lookup_node_frag(&f->fragtree, fn->ofs);
	     frag; frag = frag_next(frag)) {
		if (frag->ofs > fn->size + fn->ofs)
			break;
		if (frag->node == fn) {
			frag->node = new_fn;
			new_fn->frags++;
			fn->frags--;
		}
	}
	if (fn->frags) {
		pr_warn("%s(): Old node still has frags!\n", __func__);
		BUG();
	}
	if (!new_fn->frags) {
		pr_warn("%s(): New node has no frags!\n", __func__);
		BUG();
	}

	jffs2_mark_node_obsolete(c, fn->raw);
	jffs2_free_full_dnode(fn);

	return 0;
}

static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_eraseblock *orig_jeb,
				       struct jffs2_inode_info *f, struct jffs2_full_dnode *fn,
				       uint32_t start, uint32_t end)
{
	struct inode *inode = OFNI_EDONI_2SFFJ(f);
	struct jffs2_full_dnode *new_fn;
	struct jffs2_raw_inode ri;
	uint32_t alloclen, offset, orig_end, orig_start;
	int ret = 0;
	unsigned char *comprbuf = NULL, *writebuf;
	struct page *page;
	unsigned char *pg_ptr;

	memset(&ri, 0, sizeof(ri));

	jffs2_dbg(1, "Writing replacement dnode for ino #%u from offset 0x%x to 0x%x\n",
		  f->inocache->ino, start, end);

	orig_end = end;
	orig_start = start;

	if (c->nr_free_blocks + c->nr_erasing_blocks > c->resv_blocks_gcmerge) {
		/* Attempt to do some merging. But only expand to cover logically
		   adjacent frags if the block containing them is already considered
		   to be dirty. Otherwise we end up with GC just going round in
		   circles dirtying the nodes it already wrote out, especially
		   on NAND where we have small eraseblocks and hence a much higher
		   chance of nodes having to be split to cross boundaries. */

		struct jffs2_node_frag *frag;
		uint32_t min, max;

		min = start & ~(PAGE_SIZE-1);
		max = min + PAGE_SIZE;
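		/* The merge window is the single page containing 'start';
		   e.g. with (typical) 4 KiB pages, start == 0x1234 gives
		   min == 0x1000 and max == 0x2000, so merging never crosses
		   a page boundary. */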

		frag = jffs2_lookup_node_frag(&f->fragtree, start);

		/* BUG_ON(!frag) but that'll happen anyway... */

		BUG_ON(frag->ofs != start);

		/* First grow down... */
		while((frag = frag_prev(frag)) && frag->ofs >= min) {

			/* If the previous frag doesn't even reach the beginning, there's
			   excessive fragmentation. Just merge. */
			if (frag->ofs > min) {
				jffs2_dbg(1, "Expanding down to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size);
				start = frag->ofs;
				continue;
			}
			/* OK. This frag holds the first byte of the page. */
			if (!frag->node || !frag->node->raw) {
				jffs2_dbg(1, "First frag in page is hole (0x%x-0x%x). Not expanding down.\n",
					  frag->ofs, frag->ofs+frag->size);
				break;
			} else {

				/* OK, it's a frag which extends to the beginning of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

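				/* Map the raw node back to the eraseblock
				   that holds it. */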
				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs,
						  frag->ofs + frag->size,
						  ref_offset(raw));
					start = frag->ofs;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					jffs2_dbg(1, "Not expanding down to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs,
						  frag->ofs + frag->size,
						  jeb->offset);
					break;
				}

				jffs2_dbg(1, "Expanding down to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs,
					  frag->ofs + frag->size,
					  jeb->offset);
				start = frag->ofs;
				break;
			}
		}

		/* ... then up */

		/* Find last frag which is actually part of the node we're to GC. */
		frag = jffs2_lookup_node_frag(&f->fragtree, end-1);

		while((frag = frag_next(frag)) && frag->ofs+frag->size <= max) {

			/* If this frag doesn't even reach the end of the page, there's
			   lots of fragmentation. Just merge. */
			if (frag->ofs+frag->size < max) {
				jffs2_dbg(1, "Expanding up to cover partial frag (0x%x-0x%x)\n",
					  frag->ofs, frag->ofs+frag->size);
				end = frag->ofs + frag->size;
				continue;
			}

			if (!frag->node || !frag->node->raw) {
				jffs2_dbg(1, "Last frag in page is hole (0x%x-0x%x). Not expanding up.\n",
					  frag->ofs, frag->ofs+frag->size);
				break;
			} else {

				/* OK, it's a frag which extends to the end of the page. Does it live
				   in a block which is still considered clean? If so, don't obsolete it.
				   If not, cover it anyway. */

				struct jffs2_raw_node_ref *raw = frag->node->raw;
				struct jffs2_eraseblock *jeb;

				jeb = &c->blocks[raw->flash_offset / c->sector_size];

				if (jeb == c->gcblock) {
					jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in gcblock at %08x\n",
						  frag->ofs,
						  frag->ofs + frag->size,
						  ref_offset(raw));
					end = frag->ofs + frag->size;
					break;
				}
				if (!ISDIRTY(jeb->dirty_size + jeb->wasted_size)) {
					jffs2_dbg(1, "Not expanding up to cover frag (0x%x-0x%x) in clean block %08x\n",
						  frag->ofs,
						  frag->ofs + frag->size,
						  jeb->offset);
					break;
				}

				jffs2_dbg(1, "Expanding up to cover frag (0x%x-0x%x) in dirty block %08x\n",
					  frag->ofs,
					  frag->ofs + frag->size,
					  jeb->offset);
				end = frag->ofs + frag->size;
				break;
			}
		}
		jffs2_dbg(1, "Expanded dnode to write from (0x%x-0x%x) to (0x%x-0x%x)\n",
			  orig_start, orig_end, start, end);

		D1(BUG_ON(end > frag_last(&f->fragtree)->ofs + frag_last(&f->fragtree)->size));
		BUG_ON(end < orig_end);
		BUG_ON(start > orig_start);
	}

	/* The rules state that we must obtain the page lock *before* f->sem, so
	 * drop f->sem temporarily. Since we also hold c->alloc_sem, nothing's
	 * actually going to *change* so we're safe; we only allow reading.
	 *
	 * It is important to note that jffs2_write_begin() will ensure that its
	 * page is marked Uptodate before allocating space. That means that if we
	 * end up here trying to GC the *same* page that jffs2_write_begin() is
	 * trying to write out, read_cache_page() will not deadlock. */
	mutex_unlock(&f->sem);
	page = read_cache_page(inode->i_mapping, start >> PAGE_SHIFT,
			       jffs2_do_readpage_unlock, inode);
	if (IS_ERR(page)) {
		pr_warn("read_cache_page() returned error: %ld\n",
			PTR_ERR(page));
		mutex_lock(&f->sem);
		return PTR_ERR(page);
	}

	pg_ptr = kmap(page);
	mutex_lock(&f->sem);
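	/* pg_ptr now maps the page containing [start, end). The loop below
	   may emit several replacement dnodes from it, since compression and
	   the space reserved per pass limit how much each node can carry.
	   Note it only runs until the *original* range is covered, so any
	   expanded tail beyond orig_end rides along with the final node. */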

	offset = start;
	while(offset < orig_end) {
		uint32_t datalen;
		uint32_t cdatalen;
		uint16_t comprtype = JFFS2_COMPR_NONE;

		ret = jffs2_reserve_space_gc(c, sizeof(ri) + JFFS2_MIN_DATA_LEN,
					     &alloclen, JFFS2_SUMMARY_INODE_SIZE);

		if (ret) {
			pr_warn("jffs2_reserve_space_gc of %zd bytes for garbage_collect_dnode failed: %d\n",
				sizeof(ri) + JFFS2_MIN_DATA_LEN, ret);
			break;
		}
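		/* datalen is the uncompressed span we'd like this node to
		   cover; cdatalen caps the compressed payload to the space
		   just reserved. jffs2_compress() trims both to what it
		   actually consumed and produced. */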
		cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset);
		datalen = end - offset;

		writebuf = pg_ptr + (offset & (PAGE_SIZE -1));

		comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen);

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri) + cdatalen);
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(JFFS2_F_I_MODE(f));
		ri.uid = cpu_to_je16(JFFS2_F_I_UID(f));
		ri.gid = cpu_to_je16(JFFS2_F_I_GID(f));
		ri.isize = cpu_to_je32(JFFS2_F_I_SIZE(f));
		ri.atime = cpu_to_je32(JFFS2_F_I_ATIME(f));
		ri.ctime = cpu_to_je32(JFFS2_F_I_CTIME(f));
		ri.mtime = cpu_to_je32(JFFS2_F_I_MTIME(f));
		ri.offset = cpu_to_je32(offset);
		ri.csize = cpu_to_je32(cdatalen);
		ri.dsize = cpu_to_je32(datalen);
		ri.compr = comprtype & 0xff;
		ri.usercompr = (comprtype >> 8) & 0xff;
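		/* As with the hole node above: node_crc covers the header
		   minus its two trailing CRC fields; data_crc covers the
		   compressed payload. */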
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(crc32(0, comprbuf, cdatalen));

		new_fn = jffs2_write_dnode(c, f, &ri, comprbuf, cdatalen, ALLOC_GC);

		jffs2_free_comprbuf(comprbuf, writebuf);

		if (IS_ERR(new_fn)) {
			pr_warn("Error writing new dnode: %ld\n",
				PTR_ERR(new_fn));
			ret = PTR_ERR(new_fn);
			break;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, new_fn);
		offset += datalen;
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
	}

	kunmap(page);
	put_page(page);
	return ret;
}