xref: /OK3568_Linux_fs/kernel/fs/ocfs2/resize.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dlmglue.h"
#include "inode.h"
#include "journal.h"
#include "super.h"
#include "sysfile.h"
#include "uptodate.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"
#include "suballoc.h"
#include "resize.h"

/*
 * Check whether any new backup superblocks exist in the last group.
 * If so, set or clear their bits in the bitmap.
 *
 * Return the number of backups found in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* check if already done backup super */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}

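/*
 * Grow the last cluster group by 'new_clusters' clusters: extend the bit
 * counts in its group descriptor, mark any backup superblock locations that
 * fall into the newly added clusters, then update the matching chain record
 * and the global bitmap inode.  If the inode update fails, the group
 * descriptor changes are rolled back in memory.
 */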
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	u16 old_bg_clusters;

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * Check whether any new backup superblocks exist in this group
	 * and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode,
						     group,
						     cl_cpg, old_bg_clusters, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

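	/*
	 * The group descriptor buffer was already journal-dirtied above, so on
	 * failure undo the in-memory changes; the reverted contents are what
	 * the journal will commit.
	 */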
out_rollback:
	if (ret < 0) {
		ocfs2_calc_new_backup_super(bm_inode,
					    group,
					    cl_cpg, old_bg_clusters, 0);
		le16_add_cpu(&group->bg_free_bits_count, backups);
		le16_add_cpu(&group->bg_bits, -1 * num_bits);
		le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
	}
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}

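/*
 * Copy the updated superblock image in 'data' to every backup superblock
 * location that lies within the first 'clusters' clusters, fixing up
 * i_blkno for each copy before writing it back to disk.
 */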
static int update_backups(struct inode * inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster >= clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}

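/*
 * Write the new cluster count into the on-disk superblock and, if the
 * BACKUP_SB feature is enabled, refresh the backup superblocks from it.
 * Failures here are only logged; the resize itself has already been
 * journaled.
 */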
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}

/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
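/*
 * Reached through the OCFS2_IOC_GROUP_EXTEND ioctl (see ioctl.c), typically
 * issued by tunefs.ocfs2 during an online resize.
 */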
int ocfs2_group_extend(struct inode * inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

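	/*
	 * The extension must fit within the last cluster group; growing past
	 * the end of that group requires adding a new group with
	 * ocfs2_group_add() instead.
	 */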
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
		le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}

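/*
 * Validate the group descriptor read from group_bh against both the global
 * bitmap inode and the values supplied in the new-group input.
 */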
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}

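/*
 * Sanity-check a new-group request before it is linked into the global
 * bitmap: the group must lie beyond the current volume, target a valid
 * chain, and match the descriptor that is already on disk.
 */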
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;

	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}

/* Add a new group descriptor to global_bitmap. */
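/*
 * Reached through the OCFS2_IOC_GROUP_ADD/OCFS2_IOC_GROUP_ADD64 ioctls
 * (see ioctl.c); the new group descriptor is expected to have been written
 * to disk already by the userspace resize tool.
 */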
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	u64 bg_ptr;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
		ocfs2_group_bitmap_size(osb->sb, 0,
					osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			       input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

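	/*
	 * Link the new group at the head of its chain, remembering the old
	 * head so it can be restored if the bitmap inode update fails.
	 */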
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	bg_ptr = le64_to_cpu(group->bg_next_group);
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		group->bg_next_group = cpu_to_le64(bg_ptr);
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);

out_free_group_bh:
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}