// SPDX-License-Identifier: LGPL-2.1
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent.
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

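/*
 * Flush the contiguous range currently tracked in @lb into the
 * temporary inode as a single extent, restarting the journal handle
 * if more credits are needed.  A zero first_pblock means there is
 * nothing pending, so this is a no-op in that case.
 */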
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to the temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	/* Locking only for convenience since we are operating on temp inode */
	down_write(&EXT4_I(inode)->i_data_sem);
	path = ext4_find_extent(inode, lb->first_block, NULL, 0);
	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.  Since we
	 * are doing this in a loop we may accumulate extra credits, but
	 * below we try not to accumulate too many of them by restarting
	 * the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	retval = ext4_datasem_ensure_credits(handle, inode, needed, needed, 0);
	if (retval < 0)
		goto err_out;
	retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
err_out:
	up_write(&EXT4_I(inode)->i_data_sem);
	ext4_ext_drop_refs(path);
	kfree(path);
	lb->first_pblock = 0;
	return retval;
}

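/*
 * Account one mapped block: extend the range tracked in @lb if
 * @pblock is physically and logically contiguous with it, otherwise
 * flush the old range via finish_range() and start a new one.
 */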
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

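/*
 * Walk one indirect block and feed every mapped entry to
 * update_extent_range(); unmapped entries only advance the logical
 * block number.
 */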
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

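/*
 * Walk a double indirect block, descending into each referenced
 * indirect block; an empty slot skips a whole indirect block's worth
 * of logical blocks.
 */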
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

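/*
 * Walk a triple indirect block, descending into each referenced
 * double indirect block; an empty slot skips max_entries * max_entries
 * logical blocks.
 */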
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, pblock, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

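/*
 * Free the indirect blocks referenced by a double indirect block and
 * then the double indirect block itself.  Only meta-data blocks are
 * freed here; the data blocks are now owned by the extent tree.
 */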
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	struct super_block *sb = inode->i_sb;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
	int err;

	bh = ext4_sb_bread(sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			err = ext4_journal_ensure_credits(handle,
				EXT4_RESERVE_TRANS_BLOCKS,
				ext4_free_metadata_revoke_credits(sb, 1));
			if (err < 0) {
				put_bh(bh);
				return err;
			}
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	err = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(sb, 1));
	if (err < 0)
		return err;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

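/*
 * Free the meta-data blocks below a triple indirect block by calling
 * free_dind_blocks() for each entry, then free the triple indirect
 * block itself.
 */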
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

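/*
 * Free the old single, double and triple indirect meta-data blocks
 * recorded in the saved copy of the inode's i_data array.
 */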
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		retval = ext4_journal_ensure_credits(handle,
			EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
		if (retval < 0)
			return retval;
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

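/*
 * Swap the extent tree built in tmp_inode into the original inode:
 * copy i_data across, transfer the i_blocks charged to tmp_inode for
 * the new extent index blocks, and free the old indirect meta-data
 * blocks.  Fails with -EAGAIN if a block allocation raced with the
 * migration and cleared EXT4_STATE_EXT_MIGRATE.
 */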
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
				    struct inode *tmp_inode)
{
	int retval, retval2 = 0;
	__le32 i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto err_out;

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE has been cleared, a block allocation
	 * happened after we started the migration.  We need to fail the
	 * migration in that case.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because freeing the
	 * indirect meta-data blocks decrements i_blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	retval2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(retval2 && !retval))
		retval = retval2;

err_out:
	return retval;
}

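/*
 * Recursively free the extent index blocks below @ix.  Used when
 * tearing down the extent tree built in the temporary inode.
 */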
static int free_ext_idx(handle_t *handle, struct inode *inode,
			struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = ext4_sb_bread(inode->i_sb, block, 0);
	if (IS_ERR(bh))
		return PTR_ERR(bh);

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	retval = ext4_journal_ensure_credits(handle, EXT4_RESERVE_TRANS_BLOCKS,
			ext4_free_metadata_revoke_credits(inode->i_sb, 1));
	if (retval < 0)
		return retval;
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

/*
 * Free only the meta-data blocks of the extent tree.
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

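/*
 * Convert an indirect-block mapped inode to use extents.  The block
 * mappings are rebuilt as an extent tree in a temporary inode and
 * then swapped into the original inode, after which the old indirect
 * meta-data blocks are freed.
 */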
int ext4_ext_migrate(struct inode *inode)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal, tmp_csum_seed;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!ext4_has_feature_extents(inode->i_sb) ||
	    ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS) ||
	    ext4_has_inline_data(inode))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	percpu_down_write(&sbi->s_writepages_rwsem);

	/*
	 * Worst case we can touch the allocation bitmaps and a block
	 * group descriptor block.  We do need to worry about
	 * credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_unlock;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, d_inode(inode->i_sb->s_root),
				   S_IFREG, NULL, goal, owner, 0);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		goto out_unlock;
	}
	/*
	 * Use the correct seed for checksum (i.e. the seed from 'inode'). This
	 * is so that the metadata blocks will have the correct checksum after
	 * the migration.
	 */
	ei = EXT4_I(inode);
	tmp_csum_seed = EXT4_I(tmp_inode)->i_csum_seed;
	EXT4_I(tmp_inode)->i_csum_seed = ei->i_csum_seed;
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migration. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read(&EXT4_I(inode)->i_data_sem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto out_tmp_inode;
	}

	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap the inode data, free the
			 * extent details of the tmp inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	retval = ext4_journal_ensure_credits(handle, 1, 0);
	if (retval < 0)
		goto out_stop;
	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * set the i_blocks count to zero
	 * so that the ext4_evict_inode() does the
	 * right job
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;
	EXT4_I(tmp_inode)->i_csum_seed = tmp_csum_seed;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
out_stop:
	ext4_journal_stop(handle);
out_tmp_inode:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_super_block *es = sbi->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret, ret2 = 0;

	if (!ext4_has_feature_extents(inode->i_sb) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (ext4_has_feature_bigalloc(inode->i_sb))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated; otherwise delayed-allocation blocks may not
	 * be reflected in the extent tree and the checks on the extent header
	 * would be bypassed.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	percpu_down_write(&sbi->s_writepages_rwsem);

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_unlock;
	}

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ret2 = ext4_mark_inode_dirty(handle, inode);
	if (unlikely(ret2 && !ret))
		ret = ret2;
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
out_unlock:
	percpu_up_write(&sbi->s_writepages_rwsem);
	return ret;
}