xref: /OK3568_Linux_fs/kernel/fs/ext4/ialloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/ialloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  BSD ufs-inspired inode and directory allocation by
 *  Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/cred.h>

#include <asm/byteorder.h>

#include "ext4.h"
#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

#include <trace/events/ext4.h>

/*
 * ialloc.c contains the inode allocation and deallocation routines
 */

/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.
 */
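
/*
 * Worked example of the layout above (assuming common mke2fs defaults,
 * 4 KiB blocks and 8192 inodes per group): inode 8193 lives in block
 * group 1, and its free/used state is bit 0 of that group's inode
 * bitmap block; group 1's descriptor records where that bitmap block
 * sits and how many inodes in the group are still free.
 */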

/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
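
/*
 * Worked example for the helper above: with start_bit = 3 and
 * end_bit = 32, the loop sets bits 3..7 individually with the
 * endian-safe ext4_set_bit() and stops at i = 8, the next byte
 * boundary; memset(bitmap + 1, 0xff, 3) then marks bits 8..31
 * used in a single call, with no further atomic operations.
 */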

void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);
	}
	unlock_buffer(bh);
	put_bh(bh);
}

static int ext4_validate_inode_bitmap(struct super_block *sb,
				      struct ext4_group_desc *desc,
				      ext4_group_t block_group,
				      struct buffer_head *bh)
{
	ext4_fsblk_t	blk;
	struct ext4_group_info *grp;

	if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY)
		return 0;

	grp = ext4_get_group_info(sb, block_group);

	if (buffer_verified(bh))
		return 0;
	if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
		return -EFSCORRUPTED;

	ext4_lock_group(sb, block_group);
	if (buffer_verified(bh))
		goto verified;
	blk = ext4_inode_bitmap(sb, desc);
	if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
					   EXT4_INODES_PER_GROUP(sb) / 8) ||
	    ext4_simulate_fail(sb, EXT4_SIM_IBITMAP_CRC)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
			   "inode_bitmap = %llu", block_group, blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return -EFSBADCRC;
	}
	set_buffer_verified(bh);
verified:
	ext4_unlock_group(sb, block_group);
	return 0;
}

/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success, or an ERR_PTR on error.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);

	bitmap_blk = ext4_inode_bitmap(sb, desc);
	if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
	    (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
		ext4_error(sb, "Invalid inode bitmap blk %llu in "
			   "block_group %u", bitmap_blk, block_group);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EFSCORRUPTED);
	}
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_warning(sb, "Cannot read inode bitmap - "
			     "block_group = %u, inode_bitmap = %llu",
			     block_group, bitmap_blk);
		return ERR_PTR(-ENOMEM);
	}
	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}

	ext4_lock_group(sb, block_group);
	if (ext4_has_group_desc_csum(sb) &&
	    (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
		if (block_group == 0) {
			ext4_unlock_group(sb, block_group);
			unlock_buffer(bh);
			ext4_error(sb, "Inode bitmap for bg 0 marked "
				   "uninitialized");
			err = -EFSCORRUPTED;
			goto out;
		}
		memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
		ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
				     sb->s_blocksize * 8, bh->b_data);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		set_buffer_verified(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		return bh;
	}
	ext4_unlock_group(sb, block_group);

	if (buffer_uptodate(bh)) {
		/*
		 * The group is not marked uninit, so if bh is uptodate
		 * the bitmap data is uptodate as well.
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	trace_ext4_load_inode_bitmap(sb, block_group);
	ext4_read_bh(bh, REQ_META | REQ_PRIO, ext4_end_bitmap_read);
	ext4_simulate_fail_bh(sb, bh, EXT4_SIM_IBITMAP_EIO);
	if (!buffer_uptodate(bh)) {
		put_bh(bh);
		ext4_error_err(sb, EIO, "Cannot read inode bitmap - "
			       "block_group = %u, inode_bitmap = %llu",
			       block_group, bitmap_blk);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
				EXT4_GROUP_INFO_IBITMAP_CORRUPT);
		return ERR_PTR(-EIO);
	}

verify:
	err = ext4_validate_inode_bitmap(sb, desc, block_group, bh);
	if (err)
		goto out;
	return bh;
out:
	put_bh(bh);
	return ERR_PTR(err);
}

/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the hard disk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;

	if (!sb) {
		printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
		       "nonexistent device\n", __func__, __LINE__);
		return;
	}
	if (atomic_read(&inode->i_count) > 1) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: count=%d",
			 __func__, __LINE__, inode->i_ino,
			 atomic_read(&inode->i_count));
		return;
	}
	if (inode->i_nlink) {
		ext4_msg(sb, KERN_ERR, "%s:%d: inode #%lu: nlink=%d\n",
			 __func__, __LINE__, inode->i_ino, inode->i_nlink);
		return;
	}
	sbi = EXT4_SB(sb);

	ino = inode->i_ino;
	ext4_debug("freeing inode %lu\n", ino);
	trace_ext4_free_inode(inode);

	dquot_initialize(inode);
	dquot_free_inode(inode);

	is_directory = S_ISDIR(inode->i_mode);

	/* Do this BEFORE marking the inode not in use or returning an error */
	ext4_clear_inode(inode);

	es = sbi->s_es;
	if (ino < EXT4_FIRST_INO(sb) || ino > le32_to_cpu(es->s_inodes_count)) {
		ext4_error(sb, "reserved or nonexistent inode %lu", ino);
		goto error_return;
	}
	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
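	/*
	 * Worked example of the mapping above: inode numbers are 1-based,
	 * so with 8192 inodes per group (a typical mke2fs default), ino 12
	 * maps to block_group = 11 / 8192 = 0 and bit = 11 within that
	 * group's inode bitmap.
	 */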
	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
	/* Don't bother if the inode bitmap is corrupt. */
	if (IS_ERR(bitmap_bh)) {
		fatal = PTR_ERR(bitmap_bh);
		bitmap_bh = NULL;
		goto error_return;
	}
	if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
		grp = ext4_get_group_info(sb, block_group);
		if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
			fatal = -EFSCORRUPTED;
			goto error_return;
		}
	}

	BUFFER_TRACE(bitmap_bh, "get_write_access");
	fatal = ext4_journal_get_write_access(handle, bitmap_bh);
	if (fatal)
		goto error_return;

	fatal = -ESRCH;
	gdp = ext4_get_group_desc(sb, block_group, &bh2);
	if (gdp) {
		BUFFER_TRACE(bh2, "get_write_access");
		fatal = ext4_journal_get_write_access(handle, bh2);
	}
	ext4_lock_group(sb, block_group);
	cleared = ext4_test_and_clear_bit(bit, bitmap_bh->b_data);
	if (fatal || !cleared) {
		ext4_unlock_group(sb, block_group);
		goto out;
	}

	count = ext4_free_inodes_count(sb, gdp) + 1;
	ext4_free_inodes_set(sb, gdp, count);
	if (is_directory) {
		count = ext4_used_dirs_count(sb, gdp) - 1;
		ext4_used_dirs_set(sb, gdp, count);
		if (percpu_counter_initialized(&sbi->s_dirs_counter))
			percpu_counter_dec(&sbi->s_dirs_counter);
	}
	ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
				   EXT4_INODES_PER_GROUP(sb) / 8);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	ext4_unlock_group(sb, block_group);

	if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		struct flex_groups *fg;

		fg = sbi_array_rcu_deref(sbi, s_flex_groups,
					 ext4_flex_group(sbi, block_group));
		atomic_inc(&fg->free_inodes);
		if (is_directory)
			atomic_dec(&fg->used_dirs);
	}
	BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
	fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
	if (cleared) {
		BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
		if (!fatal)
			fatal = err;
	} else {
		ext4_error(sb, "bit already cleared for inode %lu", ino);
		ext4_mark_group_bitmap_corrupted(sb, block_group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
	}

error_return:
	brelse(bitmap_bh);
	ext4_std_error(sb, fatal);
}

struct orlov_stats {
	__u64 free_clusters;
	__u32 free_inodes;
	__u32 used_dirs;
};

/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;

	if (flex_size > 1) {
		struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
							     s_flex_groups, g);
		stats->free_inodes = atomic_read(&fg->free_inodes);
		stats->free_clusters = atomic64_read(&fg->free_clusters);
		stats->used_dirs = atomic_read(&fg->used_dirs);
		return;
	}

	desc = ext4_get_group_desc(sb, g, NULL);
	if (desc) {
		stats->free_inodes = ext4_free_inodes_count(sb, desc);
		stats->free_clusters = ext4_free_group_clusters(sb, desc);
		stats->used_dirs = ext4_used_dirs_count(sb, desc);
	} else {
		stats->free_inodes = 0;
		stats->free_clusters = 0;
		stats->used_dirs = 0;
	}
}

/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are block groups with both free inode and free cluster counts
 * no worse than average, we return the one with the smallest directory
 * count.  Otherwise we simply return a random group.
 *
 * For other directories, the rules are as follows:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 * The parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at the parent's group).
 */

static int find_group_orlov(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode,
			    const struct qstr *qstr)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_group_t real_ngroups = ext4_get_groups_count(sb);
	int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
	unsigned int freei, avefreei, grp_free;
	ext4_fsblk_t freec, avefreec;
	unsigned int ndirs;
	int max_dirs, min_inodes;
	ext4_grpblk_t min_clusters;
	ext4_group_t i, grp, g, ngroups;
	struct ext4_group_desc *desc;
	struct orlov_stats stats;
	int flex_size = ext4_flex_bg_size(sbi);
	struct dx_hash_info hinfo;

	ngroups = real_ngroups;
	if (flex_size > 1) {
		ngroups = (real_ngroups + flex_size - 1) >>
			sbi->s_log_groups_per_flex;
		parent_group >>= sbi->s_log_groups_per_flex;
	}

	freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
	avefreei = freei / ngroups;
	freec = percpu_counter_read_positive(&sbi->s_freeclusters_counter);
	avefreec = freec;
	do_div(avefreec, ngroups);
	ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);

	if (S_ISDIR(mode) &&
	    ((parent == d_inode(sb->s_root)) ||
	     (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;

		if (qstr) {
			if (ext4_hash_in_dirent(parent))
				hinfo.hash_version = DX_HASH_SIPHASH;
			else
				hinfo.hash_version = DX_HASH_HALF_MD4;
			hinfo.seed = sbi->s_hash_seed;
			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
			grp = hinfo.hash;
		} else
			grp = prandom_u32();
		parent_group = (unsigned)grp % ngroups;
		for (i = 0; i < ngroups; i++) {
			g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
			grp = g;
			ret = 0;
			best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;
	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}

		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}

	max_dirs = ndirs / ngroups + inodes_per_group*flex_size / 16;
	min_inodes = avefreei - inodes_per_group*flex_size / 4;
	if (min_inodes < 1)
		min_inodes = 1;
	min_clusters = avefreec - EXT4_CLUSTERS_PER_GROUP(sb)*flex_size / 4;
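	/*
	 * Worked example of the thresholds above (assuming 8192 inodes and
	 * 32768 clusters per group, flex_size = 16): a flex_bg is rejected
	 * once it holds more than ndirs/ngroups + 8192 directories, has
	 * fewer than avefreei - 32768 free inodes, or has fewer than
	 * avefreec - 131072 free clusters.
	 */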

	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
			parent_group >>= sbi->s_log_groups_per_flex;
	}

	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
	}

fallback:
	ngroups = real_ngroups;
	avefreei = freei / ngroups;
fallback_retry:
	parent_group = EXT4_I(parent)->i_block_group;
	for (i = 0; i < ngroups; i++) {
		grp = (parent_group + i) % ngroups;
		desc = ext4_get_group_desc(sb, grp, NULL);
		if (desc) {
			grp_free = ext4_free_inodes_count(sb, desc);
			if (grp_free && grp_free >= avefreei) {
				*group = grp;
				return 0;
			}
		}
	}

	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}

	return -1;
}

static int find_group_other(struct super_block *sb, struct inode *parent,
			    ext4_group_t *group, umode_t mode)
{
	ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));

	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that future
	 * allocations use that flex group.
	 */
	if (flex_size > 1) {
		int retry = 0;

	try_again:
		parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
	}

	/*
	 * Try to place the inode in its parent directory
	 */
	*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;

	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
	*group = (*group + parent->i_ino) % ngroups;

	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
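	/*
	 * Worked example of the probe sequence below: starting from group
	 * g, the loop tries g+1, g+3, g+7, g+15, ... (the step i doubles
	 * each pass, so the cumulative offset is 2^k - 1), wrapping modulo
	 * ngroups, until a group with free inodes and free clusters turns
	 * up or the step reaches ngroups.
	 */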
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
	}

	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
	}

	return -1;
}

/*
 * In no-journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	60
#define RECENTCY_DIRTY	300

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc	*gdp;
	struct ext4_inode	*raw_inode;
	struct buffer_head	*bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
	u32 dtime, now;

	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;

	bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
		       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out.
		 */
		goto out;

	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
	dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
		recentcy += RECENTCY_DIRTY;

	if (dtime && time_before32(dtime, now) &&
	    time_before32(now, dtime + recentcy))
		ret = 1;
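	/*
	 * Worked example of the check above: if the inode was deleted at
	 * dtime = 1000 and now = 1100, a clean inode table block gives a
	 * window of RECENTCY_MIN = 60s, so now is past dtime + 60 and the
	 * inode may be reused (ret stays 0); if the block is still dirty
	 * the window grows to 360s and ret = 1, deferring reuse.
	 */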
out:
	brelse(bh);
	return ret;
}

static int find_inode_bit(struct super_block *sb, ext4_group_t group,
			  struct buffer_head *bitmap, unsigned long *ino)
{
	bool check_recently_deleted = EXT4_SB(sb)->s_journal == NULL;
	unsigned long recently_deleted_ino = EXT4_INODES_PER_GROUP(sb);

next:
	*ino = ext4_find_next_zero_bit((unsigned long *)
				       bitmap->b_data,
				       EXT4_INODES_PER_GROUP(sb), *ino);
	if (*ino >= EXT4_INODES_PER_GROUP(sb))
		goto not_found;

	if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
		recently_deleted_ino = *ino;
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		goto not_found;
	}
	return 1;
not_found:
	if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
		return 0;
	/*
	 * Not reusing recently deleted inodes is mostly a preference. We don't
	 * want to report ENOSPC or skew allocation patterns because of that.
	 * So return even a recently deleted inode if we could not find a
	 * better one in the given range.
	 */
	*ino = recently_deleted_ino;
	return 1;
}
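
/*
 * Worked example for find_inode_bit() in no-journal mode: if bits 5 and
 * 9 are the only zero bits and inode 5 was deleted a few seconds ago,
 * the scan skips 5 (remembering it in recently_deleted_ino) and returns
 * 9; if 5 had been the only free bit, it would be returned anyway rather
 * than failing the allocation.
 */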

int ext4_mark_inode_used(struct super_block *sb, int ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
	struct ext4_group_desc *gdp;
	ext4_group_t group;
	int bit;
	int err = -EFSCORRUPTED;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		goto out;

	group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
	if (IS_ERR(inode_bitmap_bh))
		return PTR_ERR(inode_bitmap_bh);

	if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
		err = 0;
		goto out;
	}

	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp || !group_desc_bh) {
		err = -EINVAL;
		goto out;
	}

	ext4_set_bit(bit, inode_bitmap_bh->b_data);

	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(NULL, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}
	err = sync_dirty_buffer(inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(NULL, NULL, block_bitmap_bh);
		sync_dirty_buffer(block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;

		ext4_lock_group(sb, group); /* while we modify the bg desc */
		free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
			free = 0;
		}

		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater,
		 * we need to update the bg_itable_unused count.
		 */
		if (bit >= free)
			ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - bit - 1));
	} else {
		ext4_lock_group(sb, group);
	}

	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (ext4_has_group_desc_csum(sb)) {
		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
					   EXT4_INODES_PER_GROUP(sb) / 8);
		ext4_group_desc_csum_set(sb, group, gdp);
	}

	ext4_unlock_group(sb, group);
	err = ext4_handle_dirty_metadata(NULL, NULL, group_desc_bh);
	sync_dirty_buffer(group_desc_bh);
out:
	return err;
}

static int ext4_xattr_credits_for_new_inode(struct inode *dir, mode_t mode,
					    bool encrypt)
{
	struct super_block *sb = dir->i_sb;
	int nblocks = 0;
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT);

	if (IS_ERR(p))
		return PTR_ERR(p);
	if (p) {
		int acl_size = p->a_count * sizeof(ext4_acl_entry);

		nblocks += (S_ISDIR(mode) ? 2 : 1) *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, acl_size,
						 true /* is_create */);
		posix_acl_release(p);
	}
#endif

#ifdef CONFIG_SECURITY
	{
		int num_security_xattrs = 1;

#ifdef CONFIG_INTEGRITY
		num_security_xattrs++;
#endif
		/*
		 * We assume that security xattrs are never more than 1k.
		 * In practice they are under 128 bytes.
		 */
		nblocks += num_security_xattrs *
			__ext4_xattr_set_credits(sb, NULL /* inode */,
						 NULL /* block_bh */, 1024,
						 true /* is_create */);
	}
#endif
	if (encrypt)
		nblocks += __ext4_xattr_set_credits(sb,
						    NULL /* inode */,
						    NULL /* block_bh */,
						    FSCRYPT_SET_CONTEXT_MAX_SIZE,
						    true /* is_create */);
	return nblocks;
}

/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
	ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp = NULL;
	bool encrypt = false;

	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);

	sb = dir->i_sb;
	sbi = EXT4_SB(sb);

	if (unlikely(ext4_forced_shutdown(sbi)))
		return ERR_PTR(-EIO);

	ngroups = ext4_get_groups_count(sb);
	trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
	ei = EXT4_I(inode);

	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
		inode->i_mode = mode;
		i_uid_write(inode, owner[0]);
		i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
	} else
		inode_init_owner(inode, dir, mode);

	if (ext4_has_feature_project(sb) &&
	    ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT))
		ei->i_projid = EXT4_I(dir)->i_projid;
	else
		ei->i_projid = make_kprojid(&init_user_ns, EXT4_DEF_PROJID);

	if (!(i_flags & EXT4_EA_INODE_FL)) {
		err = fscrypt_prepare_new_inode(dir, inode, &encrypt);
		if (err)
			goto out;
	}

	err = dquot_initialize(inode);
	if (err)
		goto out;

	if (!handle && sbi->s_journal && !(i_flags & EXT4_EA_INODE_FL)) {
		ret2 = ext4_xattr_credits_for_new_inode(dir, mode, encrypt);
		if (ret2 < 0) {
			err = ret2;
			goto out;
		}
		nblocks += ret2;
	}

	if (!goal)
		goal = sbi->s_inode_goal;

	if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
		group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
		ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
		ret2 = 0;
		goto got_group;
	}

	if (S_ISDIR(mode))
		ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
	else
		ret2 = find_group_other(sb, dir, &group, mode);

got_group:
	EXT4_I(dir)->i_last_alloc_group = group;
	err = -ENOSPC;
	if (ret2 == -1)
		goto out;

	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
		err = -EIO;

		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;

		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;

		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			/*
			 * Skip groups with already-known suspicious inode
			 * tables
			 */
			if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
				goto next_group;
		}

		brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (((!(sbi->s_mount_state & EXT4_FC_REPLAY))
		     && EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) ||
		    IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}

repeat_in_this_group:
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;

		if (group == 0 && (ino + 1) < EXT4_FIRST_INO(sb)) {
			ext4_error(sb, "reserved inode found cleared - "
				   "inode=%lu", ino + 1);
			ext4_mark_group_bitmap_corrupted(sb, group,
					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
			goto next_group;
		}

		if ((!(sbi->s_mount_state & EXT4_FC_REPLAY)) && !handle) {
			BUG_ON(nblocks <= 0);
			handle = __ext4_journal_start_sb(dir->i_sb, line_no,
				 handle_type, nblocks, 0,
				 ext4_trans_default_revoke_credits(sb));
			if (IS_ERR(handle)) {
				err = PTR_ERR(handle);
				ext4_std_error(sb, err);
				goto out;
			}
		}
		BUFFER_TRACE(inode_bitmap_bh, "get_write_access");
		err = ext4_journal_get_write_access(handle, inode_bitmap_bh);
		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
		ext4_lock_group(sb, group);
		ret2 = ext4_test_and_set_bit(ino, inode_bitmap_bh->b_data);
		if (ret2) {
			/* Someone already took the bit. Repeat the search
			 * with lock held.
			 */
			ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
			if (ret2) {
				ext4_set_bit(ino, inode_bitmap_bh->b_data);
				ret2 = 0;
			} else {
				ret2 = 1; /* we didn't grab the inode */
			}
		}
		ext4_unlock_group(sb, group);
		ino++;		/* the inode bitmap is zero-based */
		if (!ret2)
			goto got; /* we grabbed the inode! */

		if (ino < EXT4_INODES_PER_GROUP(sb))
			goto repeat_in_this_group;
next_group:
		if (++group == ngroups)
			group = 0;
	}
	err = -ENOSPC;
	goto out;

got:
	BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
	err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	BUFFER_TRACE(group_desc_bh, "get_write_access");
	err = ext4_journal_get_write_access(handle, group_desc_bh);
	if (err) {
		ext4_std_error(sb, err);
		goto out;
	}

	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;

		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}
		BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
		err = ext4_journal_get_write_access(handle, block_bitmap_bh);
		if (err) {
			brelse(block_bitmap_bh);
			ext4_std_error(sb, err);
			goto out;
		}

		BUFFER_TRACE(block_bitmap_bh, "dirty block bitmap");
		err = ext4_handle_dirty_metadata(handle, NULL, block_bitmap_bh);

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
		    (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
			ext4_free_group_clusters_set(sb, gdp,
				ext4_free_clusters_after_init(sb, group, gdp));
			ext4_block_bitmap_csum_set(sb, group, gdp,
						   block_bitmap_bh);
			ext4_group_desc_csum_set(sb, group, gdp);
		}
		ext4_unlock_group(sb, group);
		brelse(block_bitmap_bh);

		if (err) {
			ext4_std_error(sb, err);
			goto out;
		}
	}

	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
1182*4882a593Smuzhiyun 		struct ext4_group_info *grp = NULL;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
1185*4882a593Smuzhiyun 			grp = ext4_get_group_info(sb, group);
1186*4882a593Smuzhiyun 			/* protect vs itable lazyinit */
1187*4882a593Smuzhiyun 			down_read(&grp->alloc_sem);
1190*4882a593Smuzhiyun 		}
1191*4882a593Smuzhiyun 		ext4_lock_group(sb, group); /* while we modify the bg desc */
1192*4882a593Smuzhiyun 		free = EXT4_INODES_PER_GROUP(sb) -
1193*4882a593Smuzhiyun 			ext4_itable_unused_count(sb, gdp);
1194*4882a593Smuzhiyun 		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
1195*4882a593Smuzhiyun 			gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
1196*4882a593Smuzhiyun 			free = 0;
1197*4882a593Smuzhiyun 		}
1198*4882a593Smuzhiyun 		/*
1199*4882a593Smuzhiyun 		 * Check the relative inode number against the last used
1200*4882a593Smuzhiyun 		 * relative inode number in this group. If it is greater,
1201*4882a593Smuzhiyun 		 * we need to update the bg_itable_unused count.
1202*4882a593Smuzhiyun 		 */
1203*4882a593Smuzhiyun 		if (ino > free)
1204*4882a593Smuzhiyun 			ext4_itable_unused_set(sb, gdp,
1205*4882a593Smuzhiyun 					(EXT4_INODES_PER_GROUP(sb) - ino));
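		/*
		 * Editor's worked example (hypothetical numbers): with 8192
		 * inodes per group and bg_itable_unused == 8000, the first
		 * 192 itable slots are initialized; allocating relative
		 * inode 193 crosses that watermark, so unused becomes
		 * 8192 - 193 = 7999.
		 */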
1206*4882a593Smuzhiyun 		if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
1207*4882a593Smuzhiyun 			up_read(&grp->alloc_sem);
1208*4882a593Smuzhiyun 	} else {
1209*4882a593Smuzhiyun 		ext4_lock_group(sb, group);
1210*4882a593Smuzhiyun 	}
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
1213*4882a593Smuzhiyun 	if (S_ISDIR(mode)) {
1214*4882a593Smuzhiyun 		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
1215*4882a593Smuzhiyun 		if (sbi->s_log_groups_per_flex) {
1216*4882a593Smuzhiyun 			ext4_group_t f = ext4_flex_group(sbi, group);
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun 			atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
1219*4882a593Smuzhiyun 							f)->used_dirs);
1220*4882a593Smuzhiyun 		}
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 	if (ext4_has_group_desc_csum(sb)) {
1223*4882a593Smuzhiyun 		ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
1224*4882a593Smuzhiyun 					   EXT4_INODES_PER_GROUP(sb) / 8);
1225*4882a593Smuzhiyun 		ext4_group_desc_csum_set(sb, group, gdp);
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun 	ext4_unlock_group(sb, group);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
1230*4882a593Smuzhiyun 	err = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);
1231*4882a593Smuzhiyun 	if (err) {
1232*4882a593Smuzhiyun 		ext4_std_error(sb, err);
1233*4882a593Smuzhiyun 		goto out;
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	percpu_counter_dec(&sbi->s_freeinodes_counter);
1237*4882a593Smuzhiyun 	if (S_ISDIR(mode))
1238*4882a593Smuzhiyun 		percpu_counter_inc(&sbi->s_dirs_counter);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	if (sbi->s_log_groups_per_flex) {
1241*4882a593Smuzhiyun 		flex_group = ext4_flex_group(sbi, group);
1242*4882a593Smuzhiyun 		atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
1243*4882a593Smuzhiyun 						flex_group)->free_inodes);
1244*4882a593Smuzhiyun 	}
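	/*
	 * Editor's note: with flex_bg, per-group counts are also aggregated
	 * over groups of 2^s_log_groups_per_flex block groups; the Orlov
	 * allocator consults these flex-group totals when picking a group.
	 */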
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
1247*4882a593Smuzhiyun 	/* A freshly allocated inode owns no blocks yet */
1248*4882a593Smuzhiyun 	inode->i_blocks = 0;
1249*4882a593Smuzhiyun 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
1250*4882a593Smuzhiyun 	ei->i_crtime = inode->i_mtime;
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 	memset(ei->i_data, 0, sizeof(ei->i_data));
1253*4882a593Smuzhiyun 	ei->i_dir_start_lookup = 0;
1254*4882a593Smuzhiyun 	ei->i_disksize = 0;
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	/* Don't inherit the extent flag (among others) from the directory. */
1257*4882a593Smuzhiyun 	ei->i_flags =
1258*4882a593Smuzhiyun 		ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1259*4882a593Smuzhiyun 	ei->i_flags |= i_flags;
1260*4882a593Smuzhiyun 	ei->i_file_acl = 0;
1261*4882a593Smuzhiyun 	ei->i_dtime = 0;
1262*4882a593Smuzhiyun 	ei->i_block_group = group;
1263*4882a593Smuzhiyun 	ei->i_last_alloc_group = ~0;
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	ext4_set_inode_flags(inode, true);
1266*4882a593Smuzhiyun 	if (IS_DIRSYNC(inode))
1267*4882a593Smuzhiyun 		ext4_handle_sync(handle);
1268*4882a593Smuzhiyun 	if (insert_inode_locked(inode) < 0) {
1269*4882a593Smuzhiyun 		/*
1270*4882a593Smuzhiyun 		 * Likely a bitmap corruption causing inode to be allocated
1271*4882a593Smuzhiyun 		 * twice.
1272*4882a593Smuzhiyun 		 */
1273*4882a593Smuzhiyun 		err = -EIO;
1274*4882a593Smuzhiyun 		ext4_error(sb, "failed to insert inode %lu: doubly allocated?",
1275*4882a593Smuzhiyun 			   inode->i_ino);
1276*4882a593Smuzhiyun 		ext4_mark_group_bitmap_corrupted(sb, group,
1277*4882a593Smuzhiyun 					EXT4_GROUP_INFO_IBITMAP_CORRUPT);
1278*4882a593Smuzhiyun 		goto out;
1279*4882a593Smuzhiyun 	}
1280*4882a593Smuzhiyun 	inode->i_generation = prandom_u32();
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	/* Precompute checksum seed for inode metadata */
1283*4882a593Smuzhiyun 	if (ext4_has_metadata_csum(sb)) {
1284*4882a593Smuzhiyun 		__u32 csum;
1285*4882a593Smuzhiyun 		__le32 inum = cpu_to_le32(inode->i_ino);
1286*4882a593Smuzhiyun 		__le32 gen = cpu_to_le32(inode->i_generation);
1287*4882a593Smuzhiyun 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
1288*4882a593Smuzhiyun 				   sizeof(inum));
1289*4882a593Smuzhiyun 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
1290*4882a593Smuzhiyun 					      sizeof(gen));
1291*4882a593Smuzhiyun 	}
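	/*
	 * Editor's note (illustrative): ext4_chksum() is backed by crc32c,
	 * so the per-inode seed above is effectively
	 * crc32c(crc32c(s_csum_seed, le32(i_ino)), le32(i_generation)).
	 * Folding in the generation keeps the seed unique even when an
	 * inode number is reused after deletion.
	 */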
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 	ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
1294*4882a593Smuzhiyun 	ext4_set_inode_state(inode, EXT4_STATE_NEW);
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	ei->i_extra_isize = sbi->s_want_extra_isize;
1297*4882a593Smuzhiyun 	ei->i_inline_off = 0;
1298*4882a593Smuzhiyun 	if (ext4_has_feature_inline_data(sb) &&
1299*4882a593Smuzhiyun 	    (!(ei->i_flags & EXT4_DAX_FL) || S_ISDIR(mode)))
1300*4882a593Smuzhiyun 		ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
1301*4882a593Smuzhiyun 	ret = inode;
1302*4882a593Smuzhiyun 	err = dquot_alloc_inode(inode);
1303*4882a593Smuzhiyun 	if (err)
1304*4882a593Smuzhiyun 		goto fail_drop;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	/*
1307*4882a593Smuzhiyun 	 * Since the encryption xattr will always be unique, create it first so
1308*4882a593Smuzhiyun 	 * that it's less likely to end up in an external xattr block and
1309*4882a593Smuzhiyun 	 * prevent its deduplication.
1310*4882a593Smuzhiyun 	 */
1311*4882a593Smuzhiyun 	if (encrypt) {
1312*4882a593Smuzhiyun 		err = fscrypt_set_context(inode, handle);
1313*4882a593Smuzhiyun 		if (err)
1314*4882a593Smuzhiyun 			goto fail_free_drop;
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
1318*4882a593Smuzhiyun 		err = ext4_init_acl(handle, inode, dir);
1319*4882a593Smuzhiyun 		if (err)
1320*4882a593Smuzhiyun 			goto fail_free_drop;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		err = ext4_init_security(handle, inode, dir, qstr);
1323*4882a593Smuzhiyun 		if (err)
1324*4882a593Smuzhiyun 			goto fail_free_drop;
1325*4882a593Smuzhiyun 	}
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	if (ext4_has_feature_extents(sb)) {
1328*4882a593Smuzhiyun 		/* Set the extent flag only for directories, regular files and symlinks */
1329*4882a593Smuzhiyun 		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
1330*4882a593Smuzhiyun 			ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
1331*4882a593Smuzhiyun 			ext4_ext_tree_init(handle, inode);
1332*4882a593Smuzhiyun 		}
1333*4882a593Smuzhiyun 	}
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	if (ext4_handle_valid(handle)) {
1336*4882a593Smuzhiyun 		ei->i_sync_tid = handle->h_transaction->t_tid;
1337*4882a593Smuzhiyun 		ei->i_datasync_tid = handle->h_transaction->t_tid;
1338*4882a593Smuzhiyun 	}
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	err = ext4_mark_inode_dirty(handle, inode);
1341*4882a593Smuzhiyun 	if (err) {
1342*4882a593Smuzhiyun 		ext4_std_error(sb, err);
1343*4882a593Smuzhiyun 		goto fail_free_drop;
1344*4882a593Smuzhiyun 	}
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	ext4_debug("allocating inode %lu\n", inode->i_ino);
1347*4882a593Smuzhiyun 	trace_ext4_allocate_inode(inode, dir, mode);
1348*4882a593Smuzhiyun 	brelse(inode_bitmap_bh);
1349*4882a593Smuzhiyun 	return ret;
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun fail_free_drop:
1352*4882a593Smuzhiyun 	dquot_free_inode(inode);
1353*4882a593Smuzhiyun fail_drop:
1354*4882a593Smuzhiyun 	clear_nlink(inode);
1355*4882a593Smuzhiyun 	unlock_new_inode(inode);
1356*4882a593Smuzhiyun out:
1357*4882a593Smuzhiyun 	dquot_drop(inode);
1358*4882a593Smuzhiyun 	inode->i_flags |= S_NOQUOTA;
1359*4882a593Smuzhiyun 	iput(inode);
1360*4882a593Smuzhiyun 	brelse(inode_bitmap_bh);
1361*4882a593Smuzhiyun 	return ERR_PTR(err);
1362*4882a593Smuzhiyun }
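
/*
 * Editor's sketch (not part of the original source): the mapping between a
 * global inode number and its (group, bitmap bit) pair is plain arithmetic
 * over the per-group inode count. The helper names below are illustrative
 * only; they restate what __ext4_new_inode() computes for inode->i_ino and
 * what ext4_orphan_get() inverts just below.
 */
static inline unsigned long ialloc_demo_ino(ext4_group_t group,
					    unsigned long bit,
					    unsigned long inodes_per_group)
{
	/* bitmap bits are zero-based; on-disk inode numbers start at 1 */
	return (unsigned long)group * inodes_per_group + bit + 1;
}

static inline void ialloc_demo_locate(unsigned long ino,
				      unsigned long inodes_per_group,
				      ext4_group_t *group, int *bit)
{
	*group = (ino - 1) / inodes_per_group;	/* owning block group */
	*bit = (ino - 1) % inodes_per_group;	/* bit within its bitmap */
}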
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun /* Verify that we are loading a valid orphan from disk */
1365*4882a593Smuzhiyun struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun 	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
1368*4882a593Smuzhiyun 	ext4_group_t block_group;
1369*4882a593Smuzhiyun 	int bit;
1370*4882a593Smuzhiyun 	struct buffer_head *bitmap_bh = NULL;
1371*4882a593Smuzhiyun 	struct inode *inode = NULL;
1372*4882a593Smuzhiyun 	int err = -EFSCORRUPTED;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
1375*4882a593Smuzhiyun 		goto bad_orphan;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
1378*4882a593Smuzhiyun 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
1379*4882a593Smuzhiyun 	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
1380*4882a593Smuzhiyun 	if (IS_ERR(bitmap_bh))
1381*4882a593Smuzhiyun 		return ERR_CAST(bitmap_bh);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	/* Having the inode bit set should be a 100% indicator that this
1384*4882a593Smuzhiyun 	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
1385*4882a593Smuzhiyun 	 * inodes that were being truncated, so we can't check i_nlink==0.
1386*4882a593Smuzhiyun 	 */
1387*4882a593Smuzhiyun 	if (!ext4_test_bit(bit, bitmap_bh->b_data))
1388*4882a593Smuzhiyun 		goto bad_orphan;
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
1391*4882a593Smuzhiyun 	if (IS_ERR(inode)) {
1392*4882a593Smuzhiyun 		err = PTR_ERR(inode);
1393*4882a593Smuzhiyun 		ext4_error_err(sb, -err,
1394*4882a593Smuzhiyun 			       "couldn't read orphan inode %lu (err %d)",
1395*4882a593Smuzhiyun 			       ino, err);
1396*4882a593Smuzhiyun 		brelse(bitmap_bh);
1397*4882a593Smuzhiyun 		return inode;
1398*4882a593Smuzhiyun 	}
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun 	/*
1401*4882a593Smuzhiyun 	 * If the orphan has i_nlink > 0 then it should be able to
1402*4882a593Smuzhiyun 	 * be truncated, otherwise it won't be removed from the orphan
1403*4882a593Smuzhiyun 	 * list during processing and an infinite loop will result.
1404*4882a593Smuzhiyun 	 * Similarly, it must not be a bad inode.
1405*4882a593Smuzhiyun 	 */
1406*4882a593Smuzhiyun 	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
1407*4882a593Smuzhiyun 	    is_bad_inode(inode))
1408*4882a593Smuzhiyun 		goto bad_orphan;
1409*4882a593Smuzhiyun 
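	/*
	 * Editor's note: NEXT_ORPHAN() reuses the on-disk i_dtime field to
	 * chain orphan inodes together, so a link pointing past the last
	 * valid inode number can only mean a corrupted list.
	 */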
1410*4882a593Smuzhiyun 	if (NEXT_ORPHAN(inode) > max_ino)
1411*4882a593Smuzhiyun 		goto bad_orphan;
1412*4882a593Smuzhiyun 	brelse(bitmap_bh);
1413*4882a593Smuzhiyun 	return inode;
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun bad_orphan:
1416*4882a593Smuzhiyun 	ext4_error(sb, "bad orphan inode %lu", ino);
1417*4882a593Smuzhiyun 	if (bitmap_bh)
1418*4882a593Smuzhiyun 		printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
1419*4882a593Smuzhiyun 		       bit, (unsigned long long)bitmap_bh->b_blocknr,
1420*4882a593Smuzhiyun 		       ext4_test_bit(bit, bitmap_bh->b_data));
1421*4882a593Smuzhiyun 	if (inode) {
1422*4882a593Smuzhiyun 		printk(KERN_ERR "is_bad_inode(inode)=%d\n",
1423*4882a593Smuzhiyun 		       is_bad_inode(inode));
1424*4882a593Smuzhiyun 		printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
1425*4882a593Smuzhiyun 		       NEXT_ORPHAN(inode));
1426*4882a593Smuzhiyun 		printk(KERN_ERR "max_ino=%lu\n", max_ino);
1427*4882a593Smuzhiyun 		printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink);
1428*4882a593Smuzhiyun 		/* Avoid freeing blocks if we got a bad deleted inode */
1429*4882a593Smuzhiyun 		if (inode->i_nlink == 0)
1430*4882a593Smuzhiyun 			inode->i_blocks = 0;
1431*4882a593Smuzhiyun 		iput(inode);
1432*4882a593Smuzhiyun 	}
1433*4882a593Smuzhiyun 	brelse(bitmap_bh);
1434*4882a593Smuzhiyun 	return ERR_PTR(err);
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun unsigned long ext4_count_free_inodes(struct super_block *sb)
1438*4882a593Smuzhiyun {
1439*4882a593Smuzhiyun 	unsigned long desc_count;
1440*4882a593Smuzhiyun 	struct ext4_group_desc *gdp;
1441*4882a593Smuzhiyun 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1442*4882a593Smuzhiyun #ifdef EXT4FS_DEBUG
1443*4882a593Smuzhiyun 	struct ext4_super_block *es;
1444*4882a593Smuzhiyun 	unsigned long bitmap_count, x;
1445*4882a593Smuzhiyun 	struct buffer_head *bitmap_bh = NULL;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	es = EXT4_SB(sb)->s_es;
1448*4882a593Smuzhiyun 	desc_count = 0;
1449*4882a593Smuzhiyun 	bitmap_count = 0;
1450*4882a593Smuzhiyun 	gdp = NULL;
1451*4882a593Smuzhiyun 	for (i = 0; i < ngroups; i++) {
1452*4882a593Smuzhiyun 		gdp = ext4_get_group_desc(sb, i, NULL);
1453*4882a593Smuzhiyun 		if (!gdp)
1454*4882a593Smuzhiyun 			continue;
1455*4882a593Smuzhiyun 		desc_count += ext4_free_inodes_count(sb, gdp);
1456*4882a593Smuzhiyun 		brelse(bitmap_bh);
1457*4882a593Smuzhiyun 		bitmap_bh = ext4_read_inode_bitmap(sb, i);
1458*4882a593Smuzhiyun 		if (IS_ERR(bitmap_bh)) {
1459*4882a593Smuzhiyun 			bitmap_bh = NULL;
1460*4882a593Smuzhiyun 			continue;
1461*4882a593Smuzhiyun 		}
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 		x = ext4_count_free(bitmap_bh->b_data,
1464*4882a593Smuzhiyun 				    EXT4_INODES_PER_GROUP(sb) / 8);
1465*4882a593Smuzhiyun 		printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
1466*4882a593Smuzhiyun 			(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
1467*4882a593Smuzhiyun 		bitmap_count += x;
1468*4882a593Smuzhiyun 	}
1469*4882a593Smuzhiyun 	brelse(bitmap_bh);
1470*4882a593Smuzhiyun 	printk(KERN_DEBUG "ext4_count_free_inodes: "
1471*4882a593Smuzhiyun 	       "stored = %u, computed = %lu, %lu\n",
1472*4882a593Smuzhiyun 	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
1473*4882a593Smuzhiyun 	return desc_count;
1474*4882a593Smuzhiyun #else
1475*4882a593Smuzhiyun 	desc_count = 0;
1476*4882a593Smuzhiyun 	for (i = 0; i < ngroups; i++) {
1477*4882a593Smuzhiyun 		gdp = ext4_get_group_desc(sb, i, NULL);
1478*4882a593Smuzhiyun 		if (!gdp)
1479*4882a593Smuzhiyun 			continue;
1480*4882a593Smuzhiyun 		desc_count += ext4_free_inodes_count(sb, gdp);
1481*4882a593Smuzhiyun 		cond_resched();
1482*4882a593Smuzhiyun 	}
1483*4882a593Smuzhiyun 	return desc_count;
1484*4882a593Smuzhiyun #endif
1485*4882a593Smuzhiyun }
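
/*
 * Editor's sketch (assumption: a set bit in the inode bitmap means "in
 * use", so ext4_count_free() reports the number of zero bits). A
 * self-contained equivalent of the debug-path bitmap count above; the
 * helper name is illustrative only.
 */
static inline unsigned int ialloc_demo_count_free(const unsigned char *bitmap,
						  unsigned int nbytes)
{
	unsigned int used = 0, i;

	for (i = 0; i < nbytes; i++)
		used += hweight8(bitmap[i]);	/* set bits == used inodes */
	return nbytes * 8 - used;		/* free == zero bits */
}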
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun /* Called at mount-time, super-block is locked */
1488*4882a593Smuzhiyun unsigned long ext4_count_dirs(struct super_block *sb)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun 	unsigned long count = 0;
1491*4882a593Smuzhiyun 	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	for (i = 0; i < ngroups; i++) {
1494*4882a593Smuzhiyun 		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1495*4882a593Smuzhiyun 		if (!gdp)
1496*4882a593Smuzhiyun 			continue;
1497*4882a593Smuzhiyun 		count += ext4_used_dirs_count(sb, gdp);
1498*4882a593Smuzhiyun 	}
1499*4882a593Smuzhiyun 	return count;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun /*
1503*4882a593Smuzhiyun  * Zero the not-yet-zeroed part of the inode table by writing zeroes
1504*4882a593Smuzhiyun  * through the whole table. Must be called without any spinlock held.
1505*4882a593Smuzhiyun  * On an active filesystem this is only called from the ext4lazyinit
1506*4882a593Smuzhiyun  * thread, so no special locks are needed; however, we have to prevent
1507*4882a593Smuzhiyun  * inode allocation from the current group, so we take alloc_sem to
1508*4882a593Smuzhiyun  * block ext4_new_inode() until we are finished.
1509*4882a593Smuzhiyun  */
1510*4882a593Smuzhiyun int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1511*4882a593Smuzhiyun 				 int barrier)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1514*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(sb);
1515*4882a593Smuzhiyun 	struct ext4_group_desc *gdp = NULL;
1516*4882a593Smuzhiyun 	struct buffer_head *group_desc_bh;
1517*4882a593Smuzhiyun 	handle_t *handle;
1518*4882a593Smuzhiyun 	ext4_fsblk_t blk;
1519*4882a593Smuzhiyun 	int num, ret = 0, used_blks = 0;
1520*4882a593Smuzhiyun 	unsigned long used_inos = 0;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	/* This should not happen, but check just to be sure */
1523*4882a593Smuzhiyun 	if (sb_rdonly(sb)) {
1524*4882a593Smuzhiyun 		ret = 1;
1525*4882a593Smuzhiyun 		goto out;
1526*4882a593Smuzhiyun 	}
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
1529*4882a593Smuzhiyun 	if (!gdp)
1530*4882a593Smuzhiyun 		goto out;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	/*
1533*4882a593Smuzhiyun 	 * We do not need to lock this, because we are the only one
1534*4882a593Smuzhiyun 	 * handling this flag.
1535*4882a593Smuzhiyun 	 */
1536*4882a593Smuzhiyun 	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
1537*4882a593Smuzhiyun 		goto out;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
1540*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
1541*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
1542*4882a593Smuzhiyun 		goto out;
1543*4882a593Smuzhiyun 	}
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	down_write(&grp->alloc_sem);
1546*4882a593Smuzhiyun 	/*
1547*4882a593Smuzhiyun 	 * If the inode bitmap was already initialized, there may be some
1548*4882a593Smuzhiyun 	 * used inodes, so we need to skip the inode table blocks that
1549*4882a593Smuzhiyun 	 * contain them.
1550*4882a593Smuzhiyun 	 */
1551*4882a593Smuzhiyun 	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
1552*4882a593Smuzhiyun 		used_inos = EXT4_INODES_PER_GROUP(sb) -
1553*4882a593Smuzhiyun 			    ext4_itable_unused_count(sb, gdp);
1554*4882a593Smuzhiyun 		used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 		/* Bogus inode unused count? */
1557*4882a593Smuzhiyun 		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
1558*4882a593Smuzhiyun 			ext4_error(sb, "Something is wrong with group %u: "
1559*4882a593Smuzhiyun 				   "used itable blocks: %d; "
1560*4882a593Smuzhiyun 				   "itable unused count: %u",
1561*4882a593Smuzhiyun 				   group, used_blks,
1562*4882a593Smuzhiyun 				   ext4_itable_unused_count(sb, gdp));
1563*4882a593Smuzhiyun 			ret = 1;
1564*4882a593Smuzhiyun 			goto err_out;
1565*4882a593Smuzhiyun 		}
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 		used_inos += group * EXT4_INODES_PER_GROUP(sb);
1568*4882a593Smuzhiyun 		/*
1569*4882a593Smuzhiyun 		 * Are there some uninitialized inodes in the inode table
1570*4882a593Smuzhiyun 		 * before the first normal inode?
1571*4882a593Smuzhiyun 		 */
1572*4882a593Smuzhiyun 		if ((used_blks != sbi->s_itb_per_group) &&
1573*4882a593Smuzhiyun 		     (used_inos < EXT4_FIRST_INO(sb))) {
1574*4882a593Smuzhiyun 			ext4_error(sb, "Something is wrong with group %u: "
1575*4882a593Smuzhiyun 				   "itable unused count: %u; "
1576*4882a593Smuzhiyun 				   "itables initialized count: %ld",
1577*4882a593Smuzhiyun 				   group, ext4_itable_unused_count(sb, gdp),
1578*4882a593Smuzhiyun 				   used_inos);
1579*4882a593Smuzhiyun 			ret = 1;
1580*4882a593Smuzhiyun 			goto err_out;
1581*4882a593Smuzhiyun 		}
1582*4882a593Smuzhiyun 	}
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	blk = ext4_inode_table(sb, gdp) + used_blks;
1585*4882a593Smuzhiyun 	num = sbi->s_itb_per_group - used_blks;
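	/*
	 * Editor's worked example (hypothetical geometry): with 4KiB blocks
	 * and 256-byte inodes there are 16 inodes per itable block, so a
	 * group with 8192 inodes has s_itb_per_group == 512. If 40 inodes
	 * are in use, used_blks = DIV_ROUND_UP(40, 16) = 3 and we zero the
	 * remaining 509 blocks starting at ext4_inode_table(sb, gdp) + 3.
	 */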
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	BUFFER_TRACE(group_desc_bh, "get_write_access");
1588*4882a593Smuzhiyun 	ret = ext4_journal_get_write_access(handle,
1589*4882a593Smuzhiyun 					    group_desc_bh);
1590*4882a593Smuzhiyun 	if (ret)
1591*4882a593Smuzhiyun 		goto err_out;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	/*
1594*4882a593Smuzhiyun 	 * Skip the zeroout if the inode table is full, but set the ZEROED
1595*4882a593Smuzhiyun 	 * flag anyway: a table that is already fully used needs no
1596*4882a593Smuzhiyun 	 * further zeroing.
1597*4882a593Smuzhiyun 	 */
1598*4882a593Smuzhiyun 	if (unlikely(num == 0))
1599*4882a593Smuzhiyun 		goto skip_zeroout;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	ext4_debug("going to zero out inode table in group %u\n",
1602*4882a593Smuzhiyun 		   group);
1603*4882a593Smuzhiyun 	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
1604*4882a593Smuzhiyun 	if (ret < 0)
1605*4882a593Smuzhiyun 		goto err_out;
1606*4882a593Smuzhiyun 	if (barrier)
1607*4882a593Smuzhiyun 		blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun skip_zeroout:
1610*4882a593Smuzhiyun 	ext4_lock_group(sb, group);
1611*4882a593Smuzhiyun 	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1612*4882a593Smuzhiyun 	ext4_group_desc_csum_set(sb, group, gdp);
1613*4882a593Smuzhiyun 	ext4_unlock_group(sb, group);
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	BUFFER_TRACE(group_desc_bh,
1616*4882a593Smuzhiyun 		     "call ext4_handle_dirty_metadata");
1617*4882a593Smuzhiyun 	ret = ext4_handle_dirty_metadata(handle, NULL,
1618*4882a593Smuzhiyun 					 group_desc_bh);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun err_out:
1621*4882a593Smuzhiyun 	up_write(&grp->alloc_sem);
1622*4882a593Smuzhiyun 	ext4_journal_stop(handle);
1623*4882a593Smuzhiyun out:
1624*4882a593Smuzhiyun 	return ret;
1625*4882a593Smuzhiyun }
1626