1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * This file is part of UBIFS.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2006-2008 Nokia Corporation.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Authors: Adrian Hunter
8*4882a593Smuzhiyun * Artem Bityutskiy (Битюцкий Артём)
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun /* This file implements TNC functions for committing */
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include <linux/random.h>
14*4882a593Smuzhiyun #include "ubifs.h"
15*4882a593Smuzhiyun
/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 *
 * Returns %0 on success, %-EINVAL if the znode references a child with no
 * on-flash position, or the error returned by 'insert_old_idx_znode()'.
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	u8 hash[UBIFS_HASH_ARR_SZ];
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
		/* Every referenced child must already have a flash position */
		if (!zbr->lnum || !zbr->len) {
			ubifs_err(c, "bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);

			return -EINVAL;
		}
	}
	ubifs_prepare_node(c, idx, len, 0);
	ubifs_node_calc_hash(c, idx, hash);

	/* Record the new on-flash position of this znode */
	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;

	/* Preserve the old index position for readers of the old index */
	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
		ubifs_copy_hash(c, hash, zbr->hash);
	} else {
		/* No parent - this znode is the root of the index */
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
		ubifs_copy_hash(c, hash, c->zroot.hash);
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(c, ubifs_zn_dirty(znode));
	ubifs_assert(c, ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
95*4882a593Smuzhiyun
/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap, or a
 * negative error code on failure.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	/* Gaps are bounded by obsolete nodes, which are 8-byte aligned */
	ubifs_assert(c, (gap_start & 7) == 0);
	ubifs_assert(c, (gap_end & 7) == 0);
	ubifs_assert(c, gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	/* @c->enext is the next znode on the commit list awaiting layout */
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(c, alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			/*
			 * Advance along the circular commit list; stop when
			 * it wraps back to the list head.
			 */
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun /**
156*4882a593Smuzhiyun * find_old_idx - find an index node obsoleted since the last commit start.
157*4882a593Smuzhiyun * @c: UBIFS file-system description object
158*4882a593Smuzhiyun * @lnum: LEB number of obsoleted index node
159*4882a593Smuzhiyun * @offs: offset of obsoleted index node
160*4882a593Smuzhiyun *
161*4882a593Smuzhiyun * Returns %1 if found and %0 otherwise.
162*4882a593Smuzhiyun */
find_old_idx(struct ubifs_info * c,int lnum,int offs)163*4882a593Smuzhiyun static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun struct ubifs_old_idx *o;
166*4882a593Smuzhiyun struct rb_node *p;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun p = c->old_idx.rb_node;
169*4882a593Smuzhiyun while (p) {
170*4882a593Smuzhiyun o = rb_entry(p, struct ubifs_old_idx, rb);
171*4882a593Smuzhiyun if (lnum < o->lnum)
172*4882a593Smuzhiyun p = p->rb_left;
173*4882a593Smuzhiyun else if (lnum > o->lnum)
174*4882a593Smuzhiyun p = p->rb_right;
175*4882a593Smuzhiyun else if (offs < o->offs)
176*4882a593Smuzhiyun p = p->rb_left;
177*4882a593Smuzhiyun else if (offs > o->offs)
178*4882a593Smuzhiyun p = p->rb_right;
179*4882a593Smuzhiyun else
180*4882a593Smuzhiyun return 1;
181*4882a593Smuzhiyun }
182*4882a593Smuzhiyun return 0;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun
/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete). Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 * is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int in_tnc = is_idx_node_in_tnc(c, key, level, lnum, offs);

	if (in_tnc < 0)
		return in_tnc; /* Error code */
	/* Not in the TNC, but possibly obsoleted during this commit */
	if (in_tnc == 0 && find_old_idx(c, lnum, offs))
		return 1;
	return in_tnc;
}
211*4882a593Smuzhiyun
/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number in @c->gap_lebs[p]
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 * This function merely puts the next znode into the next gap, making no attempt
 * to try to maximise the number of znodes that fit.
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	c->gap_lebs[p] = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB. We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	/* The first gap may start at the very beginning of the LEB */
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(c, snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
			return in_use; /* Error code */
		}
		if (in_use) {
			/* Dirty nodes still belong to the old index: count dirt */
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten.  This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	/* The last gap extends to the end of the LEB */
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0)
		return written; /* Error code */
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err)
		return err;
	/* Atomically update the LEB with the newly filled gaps */
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
324*4882a593Smuzhiyun
325*4882a593Smuzhiyun /**
326*4882a593Smuzhiyun * get_leb_cnt - calculate the number of empty LEBs needed to commit.
327*4882a593Smuzhiyun * @c: UBIFS file-system description object
328*4882a593Smuzhiyun * @cnt: number of znodes to commit
329*4882a593Smuzhiyun *
330*4882a593Smuzhiyun * This function returns the number of empty LEBs needed to commit @cnt znodes
331*4882a593Smuzhiyun * to the current index head. The number is not exact and may be more than
332*4882a593Smuzhiyun * needed.
333*4882a593Smuzhiyun */
get_leb_cnt(struct ubifs_info * c,int cnt)334*4882a593Smuzhiyun static int get_leb_cnt(struct ubifs_info *c, int cnt)
335*4882a593Smuzhiyun {
336*4882a593Smuzhiyun int d;
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun /* Assume maximum index node size (i.e. overestimate space needed) */
339*4882a593Smuzhiyun cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
340*4882a593Smuzhiyun if (cnt < 0)
341*4882a593Smuzhiyun cnt = 0;
342*4882a593Smuzhiyun d = c->leb_size / c->max_idx_node_sz;
343*4882a593Smuzhiyun return DIV_ROUND_UP(cnt, d);
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit.
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;

	dbg_gc("%d znodes to write", cnt);

	/* One extra slot for the -1 terminator stored at the end */
	c->gap_lebs = kmalloc_array(c->lst.idx_lebs + 1, sizeof(int),
				    GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	old_idx_lebs = c->lst.idx_lebs;
	do {
		ubifs_assert(c, p < c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn(c, "out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
		/*
		 * Dynamically change the size of @c->gap_lebs to prevent
		 * oob, because @c->lst.idx_lebs could be increased by
		 * function @get_idx_gc_leb (called by layout_leb_in_gaps->
		 * ubifs_find_dirty_idx_leb) during loop. Only enlarge
		 * @c->gap_lebs when needed.
		 *
		 */
		if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
		    old_idx_lebs < c->lst.idx_lebs) {
			old_idx_lebs = c->lst.idx_lebs;
			gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
					    (old_idx_lebs + 1), GFP_NOFS);
			if (!gap_lebs) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return -ENOMEM;
			}
			c->gap_lebs = gap_lebs;
		}
	} while (leb_needed_cnt > c->ileb_cnt);

	/* Terminate the list of LEBs that were laid out in-the-gaps */
	c->gap_lebs[p] = -1;
	return 0;
}
420*4882a593Smuzhiyun
/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/* Start laying out at the current index head position */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Model a write buffer that holds one maximum-size index node */
	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1; /* Forces allocation of an empty LEB below */

	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err(c, "out of space");
				return -ENOSPC;
			}
			/* Take the next pre-allocated empty LEB */
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			/* No parent - this znode is the root of the index */
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0; /* Wrapped around - all znodes done */
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/* Keep filling the buffer while the next node still fits */
		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				/* LEB is full - account its space in lprops */
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			/* Carry any overhang into the next buffer position */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	/* Record where the index head will be after this commit */
	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
554*4882a593Smuzhiyun
/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit. If there were
 * an insufficient number of empty LEBs allocated, then index nodes are placed
 * into the gaps created by obsolete index nodes in non-empty index LEBs. For
 * this purpose, an obsolete index node is one that was not in the index as at
 * the end of the last commit. To write "in-the-gaps" requires that those index
 * LEBs are updated atomically in-place.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	if (no_space) {
		int err = layout_in_gaps(c, cnt);

		if (err)
			return err;
	}
	/* Remaining znodes go to empty LEBs / the index head */
	return layout_in_empty_space(c);
}
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun /**
582*4882a593Smuzhiyun * find_first_dirty - find first dirty znode.
583*4882a593Smuzhiyun * @znode: znode to begin searching from
584*4882a593Smuzhiyun */
find_first_dirty(struct ubifs_znode * znode)585*4882a593Smuzhiyun static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
586*4882a593Smuzhiyun {
587*4882a593Smuzhiyun int i, cont;
588*4882a593Smuzhiyun
589*4882a593Smuzhiyun if (!znode)
590*4882a593Smuzhiyun return NULL;
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun while (1) {
593*4882a593Smuzhiyun if (znode->level == 0) {
594*4882a593Smuzhiyun if (ubifs_zn_dirty(znode))
595*4882a593Smuzhiyun return znode;
596*4882a593Smuzhiyun return NULL;
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun cont = 0;
599*4882a593Smuzhiyun for (i = 0; i < znode->child_cnt; i++) {
600*4882a593Smuzhiyun struct ubifs_zbranch *zbr = &znode->zbranch[i];
601*4882a593Smuzhiyun
602*4882a593Smuzhiyun if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
603*4882a593Smuzhiyun znode = zbr->znode;
604*4882a593Smuzhiyun cont = 1;
605*4882a593Smuzhiyun break;
606*4882a593Smuzhiyun }
607*4882a593Smuzhiyun }
608*4882a593Smuzhiyun if (!cont) {
609*4882a593Smuzhiyun if (ubifs_zn_dirty(znode))
610*4882a593Smuzhiyun return znode;
611*4882a593Smuzhiyun return NULL;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun }
614*4882a593Smuzhiyun }
615*4882a593Smuzhiyun
616*4882a593Smuzhiyun /**
617*4882a593Smuzhiyun * find_next_dirty - find next dirty znode.
618*4882a593Smuzhiyun * @znode: znode to begin searching from
619*4882a593Smuzhiyun */
find_next_dirty(struct ubifs_znode * znode)620*4882a593Smuzhiyun static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
621*4882a593Smuzhiyun {
622*4882a593Smuzhiyun int n = znode->iip + 1;
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun znode = znode->parent;
625*4882a593Smuzhiyun if (!znode)
626*4882a593Smuzhiyun return NULL;
627*4882a593Smuzhiyun for (; n < znode->child_cnt; n++) {
628*4882a593Smuzhiyun struct ubifs_zbranch *zbr = &znode->zbranch[n];
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun if (zbr->znode && ubifs_zn_dirty(zbr->znode))
631*4882a593Smuzhiyun return find_first_dirty(zbr->znode);
632*4882a593Smuzhiyun }
633*4882a593Smuzhiyun return znode;
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun
/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * Builds the circular commit list (linked via @cnext) of all dirty znodes,
 * in post-order so children are laid out before their parents.
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
	while (1) {
		ubifs_assert(c, !ubifs_zn_cow(znode));
		/* Mark the znode copy-on-write for the commit duration */
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			/* Last znode - close the list into a circle */
			znode->cnext = c->cnext;
			break;
		}
		/* Remember the parent linkage as it was at commit start */
		znode->cparent = znode->parent;
		znode->ciip = znode->iip;
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(c, cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}
673*4882a593Smuzhiyun
674*4882a593Smuzhiyun /**
675*4882a593Smuzhiyun * alloc_idx_lebs - allocate empty LEBs to be used to commit.
676*4882a593Smuzhiyun * @c: UBIFS file-system description object
677*4882a593Smuzhiyun * @cnt: number of znodes to commit
678*4882a593Smuzhiyun *
679*4882a593Smuzhiyun * This function returns %-ENOSPC if it cannot allocate a sufficient number of
680*4882a593Smuzhiyun * empty LEBs. %0 is returned on success, otherwise a negative error code
681*4882a593Smuzhiyun * is returned.
682*4882a593Smuzhiyun */
alloc_idx_lebs(struct ubifs_info * c,int cnt)683*4882a593Smuzhiyun static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
684*4882a593Smuzhiyun {
685*4882a593Smuzhiyun int i, leb_cnt, lnum;
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun c->ileb_cnt = 0;
688*4882a593Smuzhiyun c->ileb_nxt = 0;
689*4882a593Smuzhiyun leb_cnt = get_leb_cnt(c, cnt);
690*4882a593Smuzhiyun dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
691*4882a593Smuzhiyun if (!leb_cnt)
692*4882a593Smuzhiyun return 0;
693*4882a593Smuzhiyun c->ilebs = kmalloc_array(leb_cnt, sizeof(int), GFP_NOFS);
694*4882a593Smuzhiyun if (!c->ilebs)
695*4882a593Smuzhiyun return -ENOMEM;
696*4882a593Smuzhiyun for (i = 0; i < leb_cnt; i++) {
697*4882a593Smuzhiyun lnum = ubifs_find_free_leb_for_idx(c);
698*4882a593Smuzhiyun if (lnum < 0)
699*4882a593Smuzhiyun return lnum;
700*4882a593Smuzhiyun c->ilebs[c->ileb_cnt++] = lnum;
701*4882a593Smuzhiyun dbg_cmt("LEB %d", lnum);
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
704*4882a593Smuzhiyun return -ENOSPC;
705*4882a593Smuzhiyun return 0;
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
708*4882a593Smuzhiyun /**
709*4882a593Smuzhiyun * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
710*4882a593Smuzhiyun * @c: UBIFS file-system description object
711*4882a593Smuzhiyun *
712*4882a593Smuzhiyun * It is possible that we allocate more empty LEBs for the commit than we need.
713*4882a593Smuzhiyun * This functions frees the surplus.
714*4882a593Smuzhiyun *
715*4882a593Smuzhiyun * This function returns %0 on success and a negative error code on failure.
716*4882a593Smuzhiyun */
free_unused_idx_lebs(struct ubifs_info * c)717*4882a593Smuzhiyun static int free_unused_idx_lebs(struct ubifs_info *c)
718*4882a593Smuzhiyun {
719*4882a593Smuzhiyun int i, err = 0, lnum, er;
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
722*4882a593Smuzhiyun lnum = c->ilebs[i];
723*4882a593Smuzhiyun dbg_cmt("LEB %d", lnum);
724*4882a593Smuzhiyun er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
725*4882a593Smuzhiyun LPROPS_INDEX | LPROPS_TAKEN, 0);
726*4882a593Smuzhiyun if (!err)
727*4882a593Smuzhiyun err = er;
728*4882a593Smuzhiyun }
729*4882a593Smuzhiyun return err;
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun
732*4882a593Smuzhiyun /**
733*4882a593Smuzhiyun * free_idx_lebs - free unused LEBs after commit end.
734*4882a593Smuzhiyun * @c: UBIFS file-system description object
735*4882a593Smuzhiyun *
736*4882a593Smuzhiyun * This function returns %0 on success and a negative error code on failure.
737*4882a593Smuzhiyun */
free_idx_lebs(struct ubifs_info * c)738*4882a593Smuzhiyun static int free_idx_lebs(struct ubifs_info *c)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun int err;
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun err = free_unused_idx_lebs(c);
743*4882a593Smuzhiyun kfree(c->ilebs);
744*4882a593Smuzhiyun c->ilebs = NULL;
745*4882a593Smuzhiyun return err;
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun /**
749*4882a593Smuzhiyun * ubifs_tnc_start_commit - start TNC commit.
750*4882a593Smuzhiyun * @c: UBIFS file-system description object
751*4882a593Smuzhiyun * @zroot: new index root position is returned here
752*4882a593Smuzhiyun *
753*4882a593Smuzhiyun * This function prepares the list of indexing nodes to commit and lays out
754*4882a593Smuzhiyun * their positions on flash. If there is not enough free space it uses the
755*4882a593Smuzhiyun * in-gap commit method. Returns zero in case of success and a negative error
756*4882a593Smuzhiyun * code in case of failure.
757*4882a593Smuzhiyun */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	/* Sanity-check the TNC before committing (debug builds) */
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		/*
		 * Try to reserve enough empty LEBs for the whole index.  If
		 * that fails with -ENOSPC, fall back to the in-the-gaps
		 * commit method via the @no_space flag instead of failing.
		 */
		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		/* Layout must have consumed every dirty znode */
		ubifs_assert(c, atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	/* Hand the new index root position back to the caller */
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget. It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c, c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	/* Give back all reserved index LEBs on a layout/alloc failure */
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun /**
818*4882a593Smuzhiyun * write_index - write index nodes.
819*4882a593Smuzhiyun * @c: UBIFS file-system description object
820*4882a593Smuzhiyun *
821*4882a593Smuzhiyun * This function writes the index nodes whose positions were laid out in the
822*4882a593Smuzhiyun * layout_in_empty_space function.
823*4882a593Smuzhiyun */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	/* @c->enext is the first znode to write; empty list means done */
	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		/* Index head LEB is too full - release it and take a fresh
		 * pre-allocated LEB (lnum == -1 triggers that below) */
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		u8 hash[UBIFS_HASH_ARR_SZ];

		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			ubifs_copy_hash(c, zbr->hash, ubifs_branch_hash(c, br));
			/* A child branch must always refer somewhere */
			if (!zbr->lnum || !zbr->len) {
				ubifs_err(c, "bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);

				return -EINVAL;
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);
		ubifs_node_calc_hash(c, idx, hash);

		/*
		 * Propagate this node's hash into its parent's branch.  The
		 * TNC mutex is taken because the parent znode may be accessed
		 * concurrently by lookups.
		 */
		mutex_lock(&c->tnc_mutex);

		if (znode->cparent)
			ubifs_copy_hash(c, hash,
					znode->cparent->zbranch[znode->ciip].hash);

		if (znode->parent) {
			if (!ubifs_zn_obsolete(znode))
				ubifs_copy_hash(c, hash,
					znode->parent->zbranch[znode->iip].hash);
		} else {
			/* Root znode - its hash lives in @c->zroot */
			ubifs_copy_hash(c, hash, c->zroot.hash);
		}

		mutex_unlock(&c->tnc_mutex);

		/* Determine the index node position */
		if (lnum == -1) {
			/* Start a fresh pre-allocated LEB */
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		/* Position must match what layout_commit() recorded */
		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err(c, "inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(c, ubifs_zn_dirty(znode));
		ubifs_assert(c, ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE. Specifically, it matters in
		 * the 'dirty_cow_znode()' function. This is the reason for the
		 * first barrier. Also, we want the bit changes to be seen to
		 * other threads ASAP, to avoid unnecessary copying, which is
		 * the reason for the second barrier.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why we cannot increment @c->clean_zn_cnt?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *            atomic_long_inc(&c->clean_zn_cnt);
		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 * Thus, we just delay the @c->clean_zn_cnt update until we
		 * have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			/* Next node fits in this LEB: keep batching unless the
			 * commit buffer itself is full */
			if (avail > 0)
				continue;
			else
				blen = buf_len;
		} else {
			/* Last node for this LEB (or overall): pad the tail of
			 * the write up to the min I/O unit */
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				/* Current LEB exhausted - release it and move
				 * to the next pre-allocated LEB */
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			/* Shift any not-yet-written data to the buffer start */
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	/* Final head position must match what layout predicted (debug) */
	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err(c, "inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun /**
1027*4882a593Smuzhiyun * free_obsolete_znodes - free obsolete znodes.
1028*4882a593Smuzhiyun * @c: UBIFS file-system description object
1029*4882a593Smuzhiyun *
1030*4882a593Smuzhiyun * At the end of commit end, obsolete znodes are freed.
1031*4882a593Smuzhiyun */
free_obsolete_znodes(struct ubifs_info * c)1032*4882a593Smuzhiyun static void free_obsolete_znodes(struct ubifs_info *c)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun struct ubifs_znode *znode, *cnext;
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun cnext = c->cnext;
1037*4882a593Smuzhiyun do {
1038*4882a593Smuzhiyun znode = cnext;
1039*4882a593Smuzhiyun cnext = znode->cnext;
1040*4882a593Smuzhiyun if (ubifs_zn_obsolete(znode))
1041*4882a593Smuzhiyun kfree(znode);
1042*4882a593Smuzhiyun else {
1043*4882a593Smuzhiyun znode->cnext = NULL;
1044*4882a593Smuzhiyun atomic_long_inc(&c->clean_zn_cnt);
1045*4882a593Smuzhiyun atomic_long_inc(&ubifs_clean_zn_cnt);
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun } while (cnext != c->cnext);
1048*4882a593Smuzhiyun }
1049*4882a593Smuzhiyun
1050*4882a593Smuzhiyun /**
1051*4882a593Smuzhiyun * return_gap_lebs - return LEBs used by the in-gap commit method.
1052*4882a593Smuzhiyun * @c: UBIFS file-system description object
1053*4882a593Smuzhiyun *
1054*4882a593Smuzhiyun * This function clears the "taken" flag for the LEBs which were used by the
1055*4882a593Smuzhiyun * "commit in-the-gaps" method.
1056*4882a593Smuzhiyun */
return_gap_lebs(struct ubifs_info * c)1057*4882a593Smuzhiyun static int return_gap_lebs(struct ubifs_info *c)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun int *p, err;
1060*4882a593Smuzhiyun
1061*4882a593Smuzhiyun if (!c->gap_lebs)
1062*4882a593Smuzhiyun return 0;
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun dbg_cmt("");
1065*4882a593Smuzhiyun for (p = c->gap_lebs; *p != -1; p++) {
1066*4882a593Smuzhiyun err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
1067*4882a593Smuzhiyun LPROPS_TAKEN, 0);
1068*4882a593Smuzhiyun if (err)
1069*4882a593Smuzhiyun return err;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun
1072*4882a593Smuzhiyun kfree(c->gap_lebs);
1073*4882a593Smuzhiyun c->gap_lebs = NULL;
1074*4882a593Smuzhiyun return 0;
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun /**
1078*4882a593Smuzhiyun * ubifs_tnc_end_commit - update the TNC for commit end.
1079*4882a593Smuzhiyun * @c: UBIFS file-system description object
1080*4882a593Smuzhiyun *
1081*4882a593Smuzhiyun * Write the dirty znodes.
1082*4882a593Smuzhiyun */
ubifs_tnc_end_commit(struct ubifs_info * c)1083*4882a593Smuzhiyun int ubifs_tnc_end_commit(struct ubifs_info *c)
1084*4882a593Smuzhiyun {
1085*4882a593Smuzhiyun int err;
1086*4882a593Smuzhiyun
1087*4882a593Smuzhiyun if (!c->cnext)
1088*4882a593Smuzhiyun return 0;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun err = return_gap_lebs(c);
1091*4882a593Smuzhiyun if (err)
1092*4882a593Smuzhiyun return err;
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun err = write_index(c);
1095*4882a593Smuzhiyun if (err)
1096*4882a593Smuzhiyun return err;
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun mutex_lock(&c->tnc_mutex);
1099*4882a593Smuzhiyun
1100*4882a593Smuzhiyun dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun free_obsolete_znodes(c);
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun c->cnext = NULL;
1105*4882a593Smuzhiyun kfree(c->ilebs);
1106*4882a593Smuzhiyun c->ilebs = NULL;
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun mutex_unlock(&c->tnc_mutex);
1109*4882a593Smuzhiyun
1110*4882a593Smuzhiyun return 0;
1111*4882a593Smuzhiyun }
1112