/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright © 2001-2007 Red Hat, Inc.
 *
 * Created by David Woodhouse <dwmw2@infradead.org>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/compiler.h>
#include <linux/sched/signal.h>
#include "nodelist.h"
#include "debug.h"

/*
 * Check whether the user is allowed to write.
 */
static int jffs2_rp_can_write(struct jffs2_sb_info *c)
{
	uint32_t avail;
	struct jffs2_mount_opts *opts = &c->mount_opts;

	avail = c->dirty_size + c->free_size + c->unchecked_size +
		c->erasing_size - c->resv_blocks_write * c->sector_size
		- c->nospc_dirty_size;
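
	/*
	 * Worked example with illustrative numbers (not from any real
	 * device): dirty 2MiB, free 3MiB, unchecked 0, erasing 1MiB,
	 * resv_blocks_write = 5, 64KiB sectors and nospc_dirty 256KiB give
	 * avail = 2M + 3M + 0 + 1M - 5*64K - 256K = 0x570000. Once avail
	 * drops to rp_size or below, only CAP_SYS_RESOURCE may write.
	 */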

	if (avail < 2 * opts->rp_size)
		jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
			  "erasing_size %u, unchecked_size %u, "
			  "nr_erasing_blocks %u, avail %u, resrv %u\n",
			  opts->rp_size, c->dirty_size, c->free_size,
			  c->erasing_size, c->unchecked_size,
			  c->nr_erasing_blocks, avail, c->nospc_dirty_size);

	if (avail > opts->rp_size)
		return 1;

	/* Always allow root */
	if (capable(CAP_SYS_RESOURCE))
		return 1;

	jffs2_dbg(1, "forbid writing\n");
	return 0;
}

/**
 * jffs2_reserve_space - request physical space to write nodes to flash
 * @c: superblock info
 * @minsize: Minimum acceptable size of allocation
 * @len: Returned value of allocation length
 * @prio: Allocation type - ALLOC_{NORMAL,DELETION}
 * @sumsize: summary size requested, or JFFS2_SUMMARY_NOSUM_SIZE for no summary
 *
 * Requests a block of physical space on the flash. Returns zero for success
 * and puts 'len' into the appropriate place, or returns -ENOSPC or other
 * error if appropriate.
 *
 * If it returns zero, jffs2_reserve_space() also downs the per-filesystem
 * allocation semaphore, to prevent more than one allocation from being
 * active at any time. The semaphore is later released by
 * jffs2_complete_reservation().
 *
 * jffs2_reserve_space() may trigger garbage collection in order to make room
 * for the requested allocation.
 */

static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize);

int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
			uint32_t *len, int prio, uint32_t sumsize)
{
	int ret = -EAGAIN;
	int blocksneeded = c->resv_blocks_write;
	/* align it */
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);
	mutex_lock(&c->alloc_sem);

	jffs2_dbg(1, "%s(): alloc sem got\n", __func__);

	spin_lock(&c->erase_completion_lock);

	/*
	 * Check if the free space is greater than the size of the reserved pool.
	 * If not, only allow root to proceed with writing.
	 */
	if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
		ret = -ENOSPC;
		goto out;
	}

	/* this needs a little more thought (true <tglx> :)) */
	while (ret == -EAGAIN) {
		while (c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
			uint32_t dirty, avail;

			/* calculate real dirty size
			 * dirty_size contains blocks on erase_pending_list
			 * those blocks are counted in c->nr_erasing_blocks.
			 * If one block is actually erased, it is no longer counted as dirty_space
			 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
			 * with c->nr_erasing_blocks * c->sector_size again.
			 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
			 * This helps us to force gc and eventually pick a clean block to spread the load.
			 * We add unchecked_size here, as we hopefully will find some space to use.
			 * This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 */
			dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size + c->unchecked_size;
			if (dirty < c->nospc_dirty_size) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on dirty space to GC, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}
				jffs2_dbg(1, "dirty size 0x%08x + unchecked_size 0x%08x < nospc_dirty_size 0x%08x, returning -ENOSPC\n",
					  dirty, c->unchecked_size,
					  c->nospc_dirty_size);

				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			/* Calc possibly available space. Possibly available means that we
			 * don't know, if unchecked size contains obsoleted nodes, which could give us some
			 * more usable space. This will affect the sum only once, as gc first finishes checking
			 * of nodes.
			 * Return -ENOSPC, if the maximum possibly available space is less than or equal to
			 * blocksneeded * sector_size.
			 * This blocks endless gc looping on a filesystem, which is nearly full, even if
			 * the check above passes.
			 */
			avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size;
			if ((avail / c->sector_size) <= blocksneeded) {
				if (prio == ALLOC_DELETION && c->nr_free_blocks + c->nr_erasing_blocks >= c->resv_blocks_deletion) {
					jffs2_dbg(1, "%s(): Low on possibly available space, but it's a deletion. Allowing...\n",
						  __func__);
					break;
				}

				jffs2_dbg(1, "max. available size 0x%08x < blocksneeded * sector_size 0x%08x, returning -ENOSPC\n",
					  avail, blocksneeded * c->sector_size);
				spin_unlock(&c->erase_completion_lock);
				mutex_unlock(&c->alloc_sem);
				return -ENOSPC;
			}

			mutex_unlock(&c->alloc_sem);

			jffs2_dbg(1, "Triggering GC pass. nr_free_blocks %d, nr_erasing_blocks %d, free_size 0x%08x, dirty_size 0x%08x, wasted_size 0x%08x, used_size 0x%08x, erasing_size 0x%08x, bad_size 0x%08x (total 0x%08x of 0x%08x)\n",
				  c->nr_free_blocks, c->nr_erasing_blocks,
				  c->free_size, c->dirty_size, c->wasted_size,
				  c->used_size, c->erasing_size, c->bad_size,
				  c->free_size + c->dirty_size +
				  c->wasted_size + c->used_size +
				  c->erasing_size + c->bad_size,
				  c->flash_size);
			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_garbage_collect_pass(c);

			if (ret == -EAGAIN) {
				spin_lock(&c->erase_completion_lock);
				if (c->nr_erasing_blocks &&
				    list_empty(&c->erase_pending_list) &&
				    list_empty(&c->erase_complete_list)) {
					DECLARE_WAITQUEUE(wait, current);
					set_current_state(TASK_UNINTERRUPTIBLE);
					add_wait_queue(&c->erase_wait, &wait);
					jffs2_dbg(1, "%s waiting for erase to complete\n",
						  __func__);
					spin_unlock(&c->erase_completion_lock);

					schedule();
					remove_wait_queue(&c->erase_wait, &wait);
				} else
					spin_unlock(&c->erase_completion_lock);
			} else if (ret)
				return ret;

			cond_resched();

			if (signal_pending(current))
				return -EINTR;

			mutex_lock(&c->alloc_sem);
			spin_lock(&c->erase_completion_lock);
		}

		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
		}
	}

 out:
	spin_unlock(&c->erase_completion_lock);
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
	if (ret)
		mutex_unlock(&c->alloc_sem);
	return ret;
}
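
/*
 * Typical caller pattern (a sketch, not taken verbatim from any caller;
 * the real users live in write.c, gc.c etc., and the variable names here
 * are hypothetical):
 *
 *	uint32_t alloclen;
 *	ret = jffs2_reserve_space(c, sizeof(struct jffs2_raw_inode) + datalen,
 *				  &alloclen, ALLOC_NORMAL,
 *				  JFFS2_SUMMARY_INODE_SIZE);
 *	if (ret)
 *		return ret;
 *	... write the node, then jffs2_add_physical_node_ref() ...
 *	jffs2_complete_reservation(c);	// drops c->alloc_sem
 */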

int jffs2_reserve_space_gc(struct jffs2_sb_info *c, uint32_t minsize,
			   uint32_t *len, uint32_t sumsize)
{
	int ret;
	minsize = PAD(minsize);

	jffs2_dbg(1, "%s(): Requested 0x%x bytes\n", __func__, minsize);

	while (true) {
		spin_lock(&c->erase_completion_lock);
		ret = jffs2_do_reserve_space(c, minsize, len, sumsize);
		if (ret) {
			jffs2_dbg(1, "%s(): looping, ret is %d\n",
				  __func__, ret);
		}
		spin_unlock(&c->erase_completion_lock);

		if (ret == -EAGAIN)
			cond_resched();
		else
			break;
	}
	if (!ret)
		ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);

	return ret;
}
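
/*
 * Unlike jffs2_reserve_space(), the GC variant above neither takes
 * c->alloc_sem nor triggers a GC pass itself: it is called from within
 * garbage collection, which already holds the allocation semaphore, so it
 * just spins on -EAGAIN until jffs2_do_reserve_space() succeeds or fails
 * hard.
 */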

/* Classify nextblock (clean, dirty or verydirty) and force to select another one */
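/*
 * A sketch of the intent behind the thresholds (see the ISDIRTY() and
 * VERYDIRTY() macro definitions in nodelist.h for the exact cut-offs):
 * ISDIRTY() tests whether the dirty space is big enough to be worth
 * garbage collecting at all, VERYDIRTY() whether it dominates the block.
 */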

static void jffs2_close_nextblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
{

	if (c->nextblock == NULL) {
		jffs2_dbg(1, "%s(): Erase block at 0x%08x has already been placed in a list\n",
			  __func__, jeb->offset);
		return;
	}
	/* Check, if we have a dirty block now, or if it was dirty already */
	if (ISDIRTY(jeb->wasted_size + jeb->dirty_size)) {
		c->dirty_size += jeb->wasted_size;
		c->wasted_size -= jeb->wasted_size;
		jeb->dirty_size += jeb->wasted_size;
		jeb->wasted_size = 0;
		if (VERYDIRTY(c, jeb->dirty_size)) {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to very_dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->very_dirty_list);
		} else {
			jffs2_dbg(1, "Adding full erase block at 0x%08x to dirty_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
				  jeb->offset, jeb->free_size, jeb->dirty_size,
				  jeb->used_size);
			list_add_tail(&jeb->list, &c->dirty_list);
		}
	} else {
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		list_add_tail(&jeb->list, &c->clean_list);
	}
	c->nextblock = NULL;

}

/* Select a new jeb for nextblock */

static int jffs2_find_nextblock(struct jffs2_sb_info *c)
{
	struct list_head *next;

	/* Take the next block off the 'free' list */

	if (list_empty(&c->free_list)) {

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_list)) {
			struct jffs2_eraseblock *ejeb;

			ejeb = list_entry(c->erasable_list.next, struct jffs2_eraseblock, list);
			list_move_tail(&ejeb->list, &c->erase_pending_list);
			c->nr_erasing_blocks++;
			jffs2_garbage_collect_trigger(c);
			jffs2_dbg(1, "%s(): Triggering erase of erasable block at 0x%08x\n",
				  __func__, ejeb->offset);
		}

		if (!c->nr_erasing_blocks &&
		    !list_empty(&c->erasable_pending_wbuf_list)) {
			jffs2_dbg(1, "%s(): Flushing write buffer\n",
				  __func__);
			/* c->nextblock is NULL, no update to c->nextblock allowed */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
			/* Have another go. It'll be on the erasable_list now */
			return -EAGAIN;
		}

		if (!c->nr_erasing_blocks) {
			/* Ouch. We're in GC, or we wouldn't have got here.
			   And there's no space left. At all. */
			pr_crit("Argh. No free space left for GC. nr_erasing_blocks is %d. nr_free_blocks is %d. (erasableempty: %s, erasingempty: %s, erasependingempty: %s)\n",
				c->nr_erasing_blocks, c->nr_free_blocks,
				list_empty(&c->erasable_list) ? "yes" : "no",
				list_empty(&c->erasing_list) ? "yes" : "no",
				list_empty(&c->erase_pending_list) ? "yes" : "no");
			return -ENOSPC;
		}

		spin_unlock(&c->erase_completion_lock);
		/* Don't wait for it; just erase one right now */
		jffs2_erase_pending_blocks(c, 1);
		spin_lock(&c->erase_completion_lock);

		/* An erase may have failed, decreasing the
		   amount of free space available. So we must
		   restart from the beginning */
		return -EAGAIN;
	}

	next = c->free_list.next;
	list_del(next);
	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
	c->nr_free_blocks--;

	jffs2_sum_reset_collected(c->summary); /* reset collected summary */

#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
	/* adjust write buffer offset, else we get a non-contiguous write bug */
	if (!(c->wbuf_ofs % c->sector_size) && !c->wbuf_len)
		c->wbuf_ofs = 0xffffffff;
#endif
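
	/*
	 * One reading of the check above (not a statement of the wbuf.c
	 * contract): setting wbuf_ofs to 0xffffffff marks the write-buffer
	 * offset as invalid, so the next buffered write re-bases itself at
	 * the new block instead of assuming it continues from the old one.
	 */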

	jffs2_dbg(1, "%s(): new nextblock = 0x%08x\n",
		  __func__, c->nextblock->offset);

	return 0;
}

/* Called with alloc sem _and_ erase_completion_lock */
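/*
 * Flow summary: if summaries are enabled and the node plus the pending
 * summary data would no longer fit in c->nextblock, write the summary out
 * and close the block. Without summaries, a block too small for the node
 * has its tail filed as wasted space and is closed. Either way, if no
 * nextblock remains, jffs2_find_nextblock() pulls a fresh one, and the
 * caller is granted jeb->free_size - reserved_size bytes.
 */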
static int jffs2_do_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
				  uint32_t *len, uint32_t sumsize)
{
	struct jffs2_eraseblock *jeb = c->nextblock;
	uint32_t reserved_size;	/* for summary information at the end of the jeb */
	int ret;

 restart:
	reserved_size = 0;

	if (jffs2_sum_active() && (sumsize != JFFS2_SUMMARY_NOSUM_SIZE)) {
		/* NOSUM_SIZE means not to generate summary */

		if (jeb) {
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
			dbg_summary("minsize=%d , jeb->free=%d ,"
				    "summary->size=%d , sumsize=%d\n",
				    minsize, jeb->free_size,
				    c->summary->sum_size, sumsize);
		}

		/* Is there enough space for writing out the current node, or do
		   we have to write out the summary now, close this jeb and
		   select a new nextblock? */
		if (jeb && (PAD(minsize) + PAD(c->summary->sum_size + sumsize +
					JFFS2_SUMMARY_FRAME_SIZE) > jeb->free_size)) {

			/* Has summary been disabled for this jeb? */
			if (jffs2_sum_is_disabled(c->summary)) {
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			/* Writing out the collected summary information */
			dbg_summary("generating summary for 0x%08x.\n", jeb->offset);
			ret = jffs2_sum_write_sumnode(c);

			if (ret)
				return ret;

			if (jffs2_sum_is_disabled(c->summary)) {
				/* jffs2_sum_write_sumnode() couldn't write out the
				   summary information; disable summary for this jeb
				   and free the collected information */
				sumsize = JFFS2_SUMMARY_NOSUM_SIZE;
				goto restart;
			}

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
			/* always keep a valid value in reserved_size */
			reserved_size = PAD(sumsize + c->summary->sum_size + JFFS2_SUMMARY_FRAME_SIZE);
		}
	} else {
		if (jeb && minsize > jeb->free_size) {
			uint32_t waste;

			/* Skip the end of this block and file it as having some dirty space */
			/* If there's a pending write to it, flush now */

			if (jffs2_wbuf_dirty(c)) {
				spin_unlock(&c->erase_completion_lock);
				jffs2_dbg(1, "%s(): Flushing write buffer\n",
					  __func__);
				jffs2_flush_wbuf_pad(c);
				spin_lock(&c->erase_completion_lock);
				jeb = c->nextblock;
				goto restart;
			}

			spin_unlock(&c->erase_completion_lock);

			ret = jffs2_prealloc_raw_node_refs(c, jeb, 1);

			/* Just lock it again and continue. Nothing much can change because
			   we hold c->alloc_sem anyway. In fact, it's not entirely clear why
			   we hold c->erase_completion_lock in the majority of this function...
			   but that's a question for another (more caffeine-rich) day. */
			spin_lock(&c->erase_completion_lock);

			if (ret)
				return ret;

			waste = jeb->free_size;
			jffs2_link_node_ref(c, jeb,
					    (jeb->offset + c->sector_size - waste) | REF_OBSOLETE,
					    waste, NULL);
			/* FIXME: that made it count as dirty. Convert to wasted */
			jeb->dirty_size -= waste;
			c->dirty_size -= waste;
			jeb->wasted_size += waste;
			c->wasted_size += waste;

			jffs2_close_nextblock(c, jeb);
			jeb = NULL;
		}
	}

	if (!jeb) {

		ret = jffs2_find_nextblock(c);
		if (ret)
			return ret;

		jeb = c->nextblock;

		if (jeb->free_size != c->sector_size - c->cleanmarker_size) {
			pr_warn("Eep. Block 0x%08x taken from free_list had free_size of 0x%08x!!\n",
				jeb->offset, jeb->free_size);
			goto restart;
		}
	}
	/* OK, jeb (==c->nextblock) is now pointing at a block which definitely has
	   enough space */
	*len = jeb->free_size - reserved_size;

	if (c->cleanmarker_size && jeb->used_size == c->cleanmarker_size &&
	    !jeb->first_node->next_in_ino) {
		/* Only node in it beforehand was a CLEANMARKER node (we think).
		   So mark it obsolete now that there's going to be another node
		   in the block. This will reduce used_size to zero but we've
		   already set c->nextblock so that jffs2_mark_node_obsolete()
		   won't try to refile it to the dirty_list.
		*/
		spin_unlock(&c->erase_completion_lock);
		jffs2_mark_node_obsolete(c, jeb->first_node);
		spin_lock(&c->erase_completion_lock);
	}

	jffs2_dbg(1, "%s(): Giving 0x%x bytes at 0x%x\n",
		  __func__,
		  *len, jeb->offset + (c->sector_size - jeb->free_size));
	return 0;
}

/**
 * jffs2_add_physical_node_ref - add a physical node reference to the list
 * @c: superblock info
 * @ofs: physical location of this physical node
 * @len: length of this physical node
 * @ic: inode cache pointer
 *
 * Should only be used to report nodes for which space has been allocated
 * by jffs2_reserve_space.
 *
 * Must be called with the alloc_sem held.
 */

struct jffs2_raw_node_ref *jffs2_add_physical_node_ref(struct jffs2_sb_info *c,
						       uint32_t ofs, uint32_t len,
						       struct jffs2_inode_cache *ic)
{
	struct jffs2_eraseblock *jeb;
	struct jffs2_raw_node_ref *new;

	jeb = &c->blocks[ofs / c->sector_size];

	jffs2_dbg(1, "%s(): Node at 0x%x(%d), size 0x%x\n",
		  __func__, ofs & ~3, ofs & 3, len);
#if 1
	/* Allow non-obsolete nodes only to be added at the end of c->nextblock,
	   if c->nextblock is set. Note that wbuf.c will file obsolete nodes
	   even after refiling c->nextblock */
	if ((c->nextblock || ((ofs & 3) != REF_OBSOLETE))
	    && (jeb != c->nextblock || (ofs & ~3) != jeb->offset + (c->sector_size - jeb->free_size))) {
		pr_warn("argh. node added in wrong place at 0x%08x(%d)\n",
			ofs & ~3, ofs & 3);
		if (c->nextblock)
			pr_warn("nextblock 0x%08x", c->nextblock->offset);
		else
			pr_warn("No nextblock");
		pr_cont(", expected at %08x\n",
			jeb->offset + (c->sector_size - jeb->free_size));
		return ERR_PTR(-EINVAL);
	}
#endif
	spin_lock(&c->erase_completion_lock);

	new = jffs2_link_node_ref(c, jeb, ofs, len, ic);

	if (!jeb->free_size && !jeb->dirty_size && !ISDIRTY(jeb->wasted_size)) {
		/* If it lives on the dirty_list, jffs2_reserve_space will put it there */
		jffs2_dbg(1, "Adding full erase block at 0x%08x to clean_list (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
		if (jffs2_wbuf_dirty(c)) {
			/* Flush the last write in the block if it's outstanding */
			spin_unlock(&c->erase_completion_lock);
			jffs2_flush_wbuf_pad(c);
			spin_lock(&c->erase_completion_lock);
		}

		list_add_tail(&jeb->list, &c->clean_list);
		c->nextblock = NULL;
	}
	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	spin_unlock(&c->erase_completion_lock);

	return new;
}


void jffs2_complete_reservation(struct jffs2_sb_info *c)
{
	jffs2_dbg(1, "jffs2_complete_reservation()\n");
	spin_lock(&c->erase_completion_lock);
	jffs2_garbage_collect_trigger(c);
	spin_unlock(&c->erase_completion_lock);
	mutex_unlock(&c->alloc_sem);
}

static inline int on_list(struct list_head *obj, struct list_head *head)
{
	struct list_head *this;

	list_for_each(this, head) {
		if (this == obj) {
			jffs2_dbg(1, "%p is on list at %p\n", obj, head);
			return 1;
		}
	}
	return 0;
}

void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref *ref)
{
	struct jffs2_eraseblock *jeb;
	int blocknr;
	struct jffs2_unknown_node n;
	int ret, addedsize;
	size_t retlen;
	uint32_t freed_len;

	if (unlikely(!ref)) {
		pr_notice("EEEEEK. jffs2_mark_node_obsolete called with NULL node\n");
		return;
	}
	if (ref_obsolete(ref)) {
		jffs2_dbg(1, "%s(): called with already obsolete node at 0x%08x\n",
			  __func__, ref_offset(ref));
		return;
	}
	blocknr = ref->flash_offset / c->sector_size;
	if (blocknr >= c->nr_blocks) {
		pr_notice("raw node at 0x%08x is off the end of device!\n",
			  ref->flash_offset);
		BUG();
	}
	jeb = &c->blocks[blocknr];

	if (jffs2_can_mark_obsolete(c) && !jffs2_is_readonly(c) &&
	    !(c->flags & (JFFS2_SB_FLAG_SCANNING | JFFS2_SB_FLAG_BUILDING))) {
		/* Hm. This may confuse static lock analysis. If any of the above
		   three conditions is false, we're going to return from this
		   function without actually obliterating any nodes or freeing
		   any jffs2_raw_node_refs. So we don't need to stop erases from
		   happening, or protect against people holding an obsolete
		   jffs2_raw_node_ref without the erase_completion_lock. */
		mutex_lock(&c->erase_free_sem);
	}

	spin_lock(&c->erase_completion_lock);

	freed_len = ref_totlen(c, jeb, ref);

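	/*
	 * REF_UNCHECKED marks a node whose CRCs have not yet been verified
	 * by the post-mount checking pass, so its space is accounted in
	 * unchecked_size rather than used_size; the two branches below keep
	 * those counters consistent.
	 */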
	if (ref_flags(ref) == REF_UNCHECKED) {
		D1(if (unlikely(jeb->unchecked_size < freed_len)) {
				pr_notice("raw unchecked node of size 0x%08x freed from erase block %d at 0x%08x, but unchecked_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->unchecked_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting previously unchecked node at 0x%08x of len %x\n",
			  ref_offset(ref), freed_len);
		jeb->unchecked_size -= freed_len;
		c->unchecked_size -= freed_len;
	} else {
		D1(if (unlikely(jeb->used_size < freed_len)) {
				pr_notice("raw node of size 0x%08x freed from erase block %d at 0x%08x, but used_size was already 0x%08x\n",
					  freed_len, blocknr,
					  ref->flash_offset, jeb->used_size);
			BUG();
		})
		jffs2_dbg(1, "Obsoleting node at 0x%08x of len %#x: ",
			  ref_offset(ref), freed_len);
		jeb->used_size -= freed_len;
		c->used_size -= freed_len;
	}

	/* Take care that wasted size is taken into account */
	if ((jeb->dirty_size || ISDIRTY(jeb->wasted_size + freed_len)) && jeb != c->nextblock) {
		jffs2_dbg(1, "Dirtying\n");
		addedsize = freed_len;
		jeb->dirty_size += freed_len;
		c->dirty_size += freed_len;

		/* Convert wasted space to dirty, if not a bad block */
		if (jeb->wasted_size) {
			if (on_list(&jeb->list, &c->bad_used_list)) {
				jffs2_dbg(1, "Leaving block at %08x on the bad_used_list\n",
					  jeb->offset);
				addedsize = 0; /* To fool the refiling code later */
			} else {
				jffs2_dbg(1, "Converting %d bytes of wasted space to dirty in block at %08x\n",
					  jeb->wasted_size, jeb->offset);
				addedsize += jeb->wasted_size;
				jeb->dirty_size += jeb->wasted_size;
				c->dirty_size += jeb->wasted_size;
				c->wasted_size -= jeb->wasted_size;
				jeb->wasted_size = 0;
			}
		}
	} else {
		jffs2_dbg(1, "Wasting\n");
		addedsize = 0;
		jeb->wasted_size += freed_len;
		c->wasted_size += freed_len;
	}
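
	/*
	 * Node references are 4-byte aligned, so the two low bits of
	 * flash_offset are free to carry the node state (REF_OBSOLETE etc.);
	 * ref_offset()/ref_flags() mask them apart.
	 */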
	ref->flash_offset = ref_offset(ref) | REF_OBSOLETE;

	jffs2_dbg_acct_sanity_check_nolock(c, jeb);
	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);

	if (c->flags & JFFS2_SB_FLAG_SCANNING) {
		/* Flash scanning is in progress. Don't muck about with the block
		   lists because they're not ready yet, and don't actually
		   obliterate nodes that look obsolete. If they weren't
		   marked obsolete on the flash at the time they _became_
		   obsolete, there was probably a reason for that. */
		spin_unlock(&c->erase_completion_lock);
		/* We didn't lock the erase_free_sem */
		return;
	}

	if (jeb == c->nextblock) {
		jffs2_dbg(2, "Not moving nextblock 0x%08x to dirty/erase_pending list\n",
			  jeb->offset);
	} else if (!jeb->used_size && !jeb->unchecked_size) {
		if (jeb == c->gcblock) {
			jffs2_dbg(1, "gcblock at 0x%08x completely dirtied. Clearing gcblock...\n",
				  jeb->offset);
			c->gcblock = NULL;
		} else {
			jffs2_dbg(1, "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n",
				  jeb->offset);
			list_del(&jeb->list);
		}
		if (jffs2_wbuf_dirty(c)) {
			jffs2_dbg(1, "...and adding to erasable_pending_wbuf_list\n");
			list_add_tail(&jeb->list, &c->erasable_pending_wbuf_list);
		} else {
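			/* jiffies & 127 is zero only once every 128 ticks, so
			   roughly one call in 128 takes the erasable_list
			   branch instead of queueing an immediate erase. */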
			if (jiffies & 127) {
				/* Most of the time, we just erase it immediately. Otherwise we
				   spend ages scanning it on mount, etc. */
				jffs2_dbg(1, "...and adding to erase_pending_list\n");
				list_add_tail(&jeb->list, &c->erase_pending_list);
				c->nr_erasing_blocks++;
				jffs2_garbage_collect_trigger(c);
			} else {
				/* Sometimes, however, we leave it elsewhere so it doesn't get
				   immediately reused, and we spread the load a bit. */
				jffs2_dbg(1, "...and adding to erasable_list\n");
				list_add_tail(&jeb->list, &c->erasable_list);
			}
		}
		jffs2_dbg(1, "Done OK\n");
	} else if (jeb == c->gcblock) {
		jffs2_dbg(2, "Not moving gcblock 0x%08x to dirty_list\n",
			  jeb->offset);
	} else if (ISDIRTY(jeb->dirty_size) && !ISDIRTY(jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is freshly dirtied. Removing from clean list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to dirty_list\n");
		list_add_tail(&jeb->list, &c->dirty_list);
	} else if (VERYDIRTY(c, jeb->dirty_size) &&
		   !VERYDIRTY(c, jeb->dirty_size - addedsize)) {
		jffs2_dbg(1, "Eraseblock at 0x%08x is now very dirty. Removing from dirty list...\n",
			  jeb->offset);
		list_del(&jeb->list);
		jffs2_dbg(1, "...and adding to very_dirty_list\n");
		list_add_tail(&jeb->list, &c->very_dirty_list);
	} else {
		jffs2_dbg(1, "Eraseblock at 0x%08x not moved anywhere. (free 0x%08x, dirty 0x%08x, used 0x%08x)\n",
			  jeb->offset, jeb->free_size, jeb->dirty_size,
			  jeb->used_size);
	}

	spin_unlock(&c->erase_completion_lock);

	if (!jffs2_can_mark_obsolete(c) || jffs2_is_readonly(c) ||
	    (c->flags & JFFS2_SB_FLAG_BUILDING)) {
		/* We didn't lock the erase_free_sem */
		return;
	}

	/* The erase_free_sem is locked, and has been since before we marked the node obsolete
	   and potentially put its eraseblock onto the erase_pending_list. Thus, we know that
	   the block hasn't _already_ been erased, and that 'ref' itself hasn't been freed yet
	   by jffs2_free_jeb_node_refs() in erase.c. Which is nice. */

	jffs2_dbg(1, "obliterating obsoleted node at 0x%08x\n",
		  ref_offset(ref));
	ret = jffs2_flash_read(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Read error reading from obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short read from obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}
	if (PAD(je32_to_cpu(n.totlen)) != PAD(freed_len)) {
		pr_warn("Node totlen on flash (0x%08x) != totlen from node ref (0x%08x)\n",
			je32_to_cpu(n.totlen), freed_len);
		goto out_erase_sem;
	}
	if (!(je16_to_cpu(n.nodetype) & JFFS2_NODE_ACCURATE)) {
		jffs2_dbg(1, "Node at 0x%08x was already marked obsolete (nodetype 0x%04x)\n",
			  ref_offset(ref), je16_to_cpu(n.nodetype));
		goto out_erase_sem;
	}
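
	/* Clearing JFFS2_NODE_ACCURATE only flips a 1 bit to 0, which
	   NOR-style flash can do in place without an erase; that is why
	   this path is gated on jffs2_can_mark_obsolete(c) above. */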
	/* XXX FIXME: This is ugly now */
	n.nodetype = cpu_to_je16(je16_to_cpu(n.nodetype) & ~JFFS2_NODE_ACCURATE);
	ret = jffs2_flash_write(c, ref_offset(ref), sizeof(n), &retlen, (char *)&n);
	if (ret) {
		pr_warn("Write error in obliterating obsoleted node at 0x%08x: %d\n",
			ref_offset(ref), ret);
		goto out_erase_sem;
	}
	if (retlen != sizeof(n)) {
		pr_warn("Short write in obliterating obsoleted node at 0x%08x: %zd\n",
			ref_offset(ref), retlen);
		goto out_erase_sem;
	}

	/* Nodes which have been marked obsolete no longer need to be
	   associated with any inode. Remove them from the per-inode list.

	   Note we can't do this for NAND at the moment because we need
	   obsolete dirent nodes to stay on the lists, because of the
	   horridness in jffs2_garbage_collect_deletion_dirent(). Also
	   because we delete the inocache, and on NAND we need that to
	   stay around until all the nodes are actually erased, in order
	   to stop us from giving the same inode number to another newly
	   created inode. */
	if (ref->next_in_ino) {
		struct jffs2_inode_cache *ic;
		struct jffs2_raw_node_ref **p;

		spin_lock(&c->erase_completion_lock);

		ic = jffs2_raw_ref_to_ic(ref);
		for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
			;

		*p = ref->next_in_ino;
		ref->next_in_ino = NULL;

		switch (ic->class) {
#ifdef CONFIG_JFFS2_FS_XATTR
		case RAWNODE_CLASS_XATTR_DATUM:
			jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
			break;
		case RAWNODE_CLASS_XATTR_REF:
			jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
			break;
#endif
		default:
			if (ic->nodes == (void *)ic && ic->pino_nlink == 0)
				jffs2_del_ino_cache(c, ic);
			break;
		}
		spin_unlock(&c->erase_completion_lock);
	}

 out_erase_sem:
	mutex_unlock(&c->erase_free_sem);
}

int jffs2_thread_should_wake(struct jffs2_sb_info *c)
{
	int ret = 0;
	uint32_t dirty;
	int nr_very_dirty = 0;
	struct jffs2_eraseblock *jeb;

	if (!list_empty(&c->erase_complete_list) ||
	    !list_empty(&c->erase_pending_list))
		return 1;

	if (c->unchecked_size) {
		jffs2_dbg(1, "jffs2_thread_should_wake(): unchecked_size %d, check_ino #%d\n",
			  c->unchecked_size, c->check_ino);
		return 1;
	}

	/* dirty_size contains blocks on erase_pending_list
	 * those blocks are counted in c->nr_erasing_blocks.
	 * If one block is actually erased, it is no longer counted as dirty_space
	 * but it is counted in c->nr_erasing_blocks, so we add it and subtract it
	 * with c->nr_erasing_blocks * c->sector_size again.
	 * Blocks on erasable_list are counted as dirty_size, but not in c->nr_erasing_blocks.
	 * This helps us to force gc and eventually pick a clean block to spread the load.
	 */
	dirty = c->dirty_size + c->erasing_size - c->nr_erasing_blocks * c->sector_size;

	if (c->nr_free_blocks + c->nr_erasing_blocks < c->resv_blocks_gctrigger &&
	    (dirty > c->nospc_dirty_size))
		ret = 1;

	list_for_each_entry(jeb, &c->very_dirty_list, list) {
		nr_very_dirty++;
		if (nr_very_dirty == c->vdirty_blocks_gctrigger) {
			ret = 1;
			/* In debug mode, actually go through and count them all */
			D1(continue);
			break;
		}
	}

	jffs2_dbg(1, "%s(): nr_free_blocks %d, nr_erasing_blocks %d, dirty_size 0x%x, vdirty_blocks %d: %s\n",
		  __func__, c->nr_free_blocks, c->nr_erasing_blocks,
		  c->dirty_size, nr_very_dirty, ret ? "yes" : "no");

	return ret;
}