// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/dax.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>
#include <trace/events/android_fs.h>

static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
			      struct ext4_inode_info *ei)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__u16 dummy_csum = 0;
	int offset = offsetof(struct ext4_inode, i_checksum_lo);
	unsigned int csum_size = sizeof(dummy_csum);

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
	offset += csum_size;
	csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
			   EXT4_GOOD_OLD_INODE_SIZE - offset);

	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
		offset = offsetof(struct ext4_inode, i_checksum_hi);
		csum = ext4_chksum(sbi, csum, (__u8 *)raw +
				   EXT4_GOOD_OLD_INODE_SIZE,
				   offset - EXT4_GOOD_OLD_INODE_SIZE);
		if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
			csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
					   csum_size);
			offset += csum_size;
		}
		csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
				   EXT4_INODE_SIZE(inode->i_sb) - offset);
	}

	return csum;
}

static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
				  struct ext4_inode_info *ei)
{
	__u32 provided, calculated;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return 1;

	provided = le16_to_cpu(raw->i_checksum_lo);
	calculated = ext4_inode_csum(inode, raw, ei);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
	else
		calculated &= 0xFFFF;

	return provided == calculated;
}

void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
			 struct ext4_inode_info *ei)
{
	__u32 csum;

	if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
	    cpu_to_le32(EXT4_OS_LINUX) ||
	    !ext4_has_metadata_csum(inode->i_sb))
		return;

	csum = ext4_inode_csum(inode, raw, ei);
	raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
	    EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
		raw->i_checksum_hi = cpu_to_le16(csum >> 16);
}
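
/*
 * Illustrative note (editor's addition, not part of the original source):
 * the 32-bit metadata checksum is split across two 16-bit on-disk fields.
 * For example, if ext4_inode_csum() returned 0x1234abcd:
 *
 *	raw->i_checksum_lo = cpu_to_le16(0x1234abcd & 0xFFFF);	// 0xabcd
 *	raw->i_checksum_hi = cpu_to_le16(0x1234abcd >> 16);	// 0x1234
 *
 * i_checksum_hi is only stored when the inode is large enough
 * (EXT4_INODE_SIZE > EXT4_GOOD_OLD_INODE_SIZE) and the field fits in the
 * in-inode extra space; otherwise only the low 16 bits exist on disk,
 * which is why ext4_inode_csum_verify() masks the calculated value with
 * 0xFFFF in that case.
 */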

static inline int ext4_begin_ordered_truncate(struct inode *inode,
					      loff_t new_size)
{
	trace_ext4_begin_ordered_truncate(inode, new_size);
	/*
	 * If jinode is zero, then we never opened the file for
	 * writing, so there's no need to call
	 * jbd2_journal_begin_ordered_truncate() since there are no
	 * outstanding writes we need to flush.
	 */
	if (!EXT4_I(inode)->jinode)
		return 0;
	return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
						   EXT4_I(inode)->jinode,
						   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
				  int pextents);

/*
 * Test whether an inode is a fast symlink.
 * A fast symlink has its symlink data stored in ext4_inode_info->i_data.
 */
int ext4_inode_is_fast_symlink(struct inode *inode)
{
	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		int ea_blocks = EXT4_I(inode)->i_file_acl ?
				EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;

		if (ext4_has_inline_data(inode))
			return 0;

		return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
	}
	return S_ISLNK(inode->i_mode) && inode->i_size &&
	       (inode->i_size < EXT4_N_BLOCKS * 4);
}
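
/*
 * Illustrative note (editor's addition): a fast symlink keeps its target
 * string directly in the inode's i_data[] array rather than in a data
 * block. i_data has EXT4_N_BLOCKS (15) 32-bit slots, so the target must
 * fit in under 15 * 4 = 60 bytes - hence the
 * "i_size < EXT4_N_BLOCKS * 4" test above. The ea_blocks adjustment
 * excludes an external xattr block from i_blocks before concluding that
 * the symlink owns no data blocks.
 */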

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext4_evict_inode(struct inode *inode)
{
	handle_t *handle;
	int err;
	/*
	 * Credits for final inode cleanup and freeing:
	 * sb + inode (ext4_orphan_del()), block bitmap, group descriptor
	 * (xattr block freeing), bitmap, group descriptor (inode freeing)
	 */
	int extra_credits = 6;
	struct ext4_xattr_inode_array *ea_inode_array = NULL;
	bool freeze_protected = false;

	trace_ext4_evict_inode(inode);

	if (inode->i_nlink) {
		/*
		 * When journalling data, dirty buffers are tracked only in the
		 * journal. So although mm thinks everything is clean and
		 * ready for reaping, the inode might still have some pages to
		 * write in the running transaction or waiting to be
		 * checkpointed. Thus calling jbd2_journal_invalidatepage()
		 * (via truncate_inode_pages()) to discard these buffers can
		 * cause data loss. Also even if we did not discard these
		 * buffers, we would have no way to find them after the inode
		 * is reaped and thus the user could see stale data if they
		 * try to read them before the transaction is checkpointed.
		 * So be careful and force everything to disk here... We use
		 * ei->i_datasync_tid to store the newest transaction
		 * containing inode's data.
		 *
		 * Note that directories do not have this problem because they
		 * don't use page cache.
		 */
		if (inode->i_ino != EXT4_JOURNAL_INO &&
		    ext4_should_journal_data(inode) &&
		    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
		    inode->i_data.nrpages) {
			journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
			tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

			jbd2_complete_transaction(journal, commit_tid);
			filemap_write_and_wait(&inode->i_data);
		}
		truncate_inode_pages_final(&inode->i_data);

		goto no_delete;
	}

	if (is_bad_inode(inode))
		goto no_delete;
	dquot_initialize(inode);

	if (ext4_should_order_data(inode))
		ext4_begin_ordered_truncate(inode, 0);
	truncate_inode_pages_final(&inode->i_data);

	/*
	 * For inodes with journalled data, transaction commit could have
	 * dirtied the inode. Flush worker is ignoring it because of I_FREEING
	 * flag but we still need to remove the inode from the writeback lists.
	 */
	if (!list_empty_careful(&inode->i_io_list)) {
		WARN_ON_ONCE(!ext4_should_journal_data(inode));
		inode_io_list_del(inode);
	}

	/*
	 * Protect us against freezing - iput() caller didn't have to have any
	 * protection against it. When we are in a running transaction though,
	 * we are already protected against freezing and we cannot grab further
	 * protection due to lock ordering constraints.
	 */
	if (!ext4_journal_current_handle()) {
		sb_start_intwrite(inode->i_sb);
		freeze_protected = true;
	}

	if (!IS_NOQUOTA(inode))
		extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);

	/*
	 * Block bitmap, group descriptor, and inode are accounted in both
	 * ext4_blocks_for_truncate() and extra_credits. So subtract 3.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
			 ext4_blocks_for_truncate(inode) + extra_credits - 3);
	if (IS_ERR(handle)) {
		ext4_std_error(inode->i_sb, PTR_ERR(handle));
		/*
		 * If we're going to skip the normal cleanup, we still need to
		 * make sure that the in-core orphan linked list is properly
		 * cleaned up.
		 */
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		goto no_delete;
	}

	if (IS_SYNC(inode))
		ext4_handle_sync(handle);

	/*
	 * Set inode->i_size to 0 before calling ext4_truncate(). We need
	 * special handling of symlinks here because i_size is used to
	 * determine whether ext4_inode_info->i_data contains symlink data or
	 * block mappings. Setting i_size to 0 will remove its fast symlink
	 * status. Erase i_data so that it becomes a valid empty block map.
	 */
	if (ext4_inode_is_fast_symlink(inode))
		memset(EXT4_I(inode)->i_data, 0, sizeof(EXT4_I(inode)->i_data));
	inode->i_size = 0;
	err = ext4_mark_inode_dirty(handle, inode);
	if (err) {
		ext4_warning(inode->i_sb,
			     "couldn't mark inode dirty (err %d)", err);
		goto stop_handle;
	}
	if (inode->i_blocks) {
		err = ext4_truncate(inode);
		if (err) {
			ext4_error_err(inode->i_sb, -err,
				       "couldn't truncate inode %lu (err %d)",
				       inode->i_ino, err);
			goto stop_handle;
		}
	}

	/* Remove xattr references. */
	err = ext4_xattr_delete_inode(handle, inode, &ea_inode_array,
				      extra_credits);
	if (err) {
		ext4_warning(inode->i_sb, "xattr delete (err %d)", err);
stop_handle:
		ext4_journal_stop(handle);
		ext4_orphan_del(NULL, inode);
		if (freeze_protected)
			sb_end_intwrite(inode->i_sb);
		ext4_xattr_inode_array_free(ea_inode_array);
		goto no_delete;
	}

	/*
	 * Kill off the orphan record which ext4_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext4_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext4_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext4_orphan_del(handle, inode);
	EXT4_I(inode)->i_dtime	= (__u32)ktime_get_real_seconds();

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext4_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		ext4_clear_inode(inode);
	else
		ext4_free_inode(handle, inode);
	ext4_journal_stop(handle);
	if (freeze_protected)
		sb_end_intwrite(inode->i_sb);
	ext4_xattr_inode_array_free(ea_inode_array);
	return;
no_delete:
	if (!list_empty(&EXT4_I(inode)->i_fc_list))
		ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
	ext4_clear_inode(inode);	/* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
	return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
					int used, int quota_claim)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	struct ext4_inode_info *ei = EXT4_I(inode);

	spin_lock(&ei->i_block_reservation_lock);
	trace_ext4_da_update_reserve_space(inode, used, quota_claim);
	if (unlikely(used > ei->i_reserved_data_blocks)) {
		ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
			 "with only %d reserved data blocks",
			 __func__, inode->i_ino, used,
			 ei->i_reserved_data_blocks);
		WARN_ON(1);
		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

	/* Update quota subsystem for data blocks */
	if (quota_claim)
		dquot_claim_block(inode, EXT4_C2B(sbi, used));
	else {
		/*
		 * We did fallocate with an offset that is already delayed
		 * allocated. So on delayed allocated writeback we should
		 * not re-claim the quota for fallocated blocks.
		 */
		dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
	}

	/*
	 * If we have done all the pending block allocations and if
	 * there aren't any writers on the inode, we can discard the
	 * inode's preallocations.
	 */
	if ((ei->i_reserved_data_blocks == 0) &&
	    !inode_is_open_for_write(inode))
		ext4_discard_preallocations(inode, 0);
}
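
/*
 * Illustrative note (editor's addition): the reservation counters above
 * are kept in clusters, while quota is charged in filesystem blocks,
 * hence the EXT4_C2B() conversions. As a hypothetical example, with a
 * bigalloc cluster of 16 blocks, used = 2 makes EXT4_C2B(sbi, 2) charge
 * or release 32 blocks of quota. quota_claim chooses between converting
 * a prior reservation into a real charge (dquot_claim_block) and merely
 * dropping the reservation when the blocks were already charged at
 * fallocate time (dquot_release_reservation_block).
 */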

static int __check_block_validity(struct inode *inode, const char *func,
				unsigned int line,
				struct ext4_map_blocks *map)
{
	if (ext4_has_feature_journal(inode->i_sb) &&
	    (inode->i_ino ==
	     le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
		return 0;
	if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) {
		ext4_error_inode(inode, func, line, map->m_pblk,
				 "lblock %lu mapped to illegal pblock %llu "
				 "(length %d)", (unsigned long) map->m_lblk,
				 map->m_pblk, map->m_len);
		return -EFSCORRUPTED;
	}
	return 0;
}

int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
		       ext4_lblk_t len)
{
	int ret;

	if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
		return fscrypt_zeroout_range(inode, lblk, pblk, len);

	ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}

#define check_block_validity(inode, map)	\
	__check_block_validity((inode), __func__, __LINE__, (map))

#ifdef ES_AGGRESSIVE_TEST
static void ext4_map_blocks_es_recheck(handle_t *handle,
				       struct inode *inode,
				       struct ext4_map_blocks *es_map,
				       struct ext4_map_blocks *map,
				       int flags)
{
	int retval;

	map->m_flags = 0;
	/*
	 * There is a race window in which the two results may differ,
	 * e.g. xfstests #223 when dioread_nolock is enabled.  The reason
	 * is that we look up a block mapping in the extent status tree
	 * without taking i_data_sem, so the unwritten extent may have
	 * been converted in the meantime.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	up_read((&EXT4_I(inode)->i_data_sem));

	/*
	 * We don't check m_len because the extent may have been collapsed
	 * in the status tree, so the lengths need not be equal.
	 */
	if (es_map->m_lblk != map->m_lblk ||
	    es_map->m_flags != map->m_flags ||
	    es_map->m_pblk != map->m_pblk) {
		printk("ES cache assertion failed for inode: %lu "
		       "es_cached ex [%d/%d/%llu/%x] != "
		       "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
		       inode->i_ino, es_map->m_lblk, es_map->m_len,
		       es_map->m_pblk, es_map->m_flags, map->m_lblk,
		       map->m_len, map->m_pblk, map->m_flags,
		       retval, flags);
	}
}
#endif /* ES_AGGRESSIVE_TEST */

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks being mapped or allocated.  If
 * create==0 and the blocks are pre-allocated and unwritten, the resulting @map
 * is marked as unwritten. If create==1, it will mark @map as mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated); in
 * that case, @map is returned as unmapped but we still fill in map->m_len to
 * indicate the length of the hole starting at map->m_lblk.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
		    struct ext4_map_blocks *map, int flags)
{
	struct extent_status es;
	int retval;
	int ret = 0;
#ifdef ES_AGGRESSIVE_TEST
	struct ext4_map_blocks orig_map;

	memcpy(&orig_map, map, sizeof(*map));
#endif

	map->m_flags = 0;
	ext_debug(inode, "flag 0x%x, max_blocks %u, logical block %lu\n",
		  flags, map->m_len, (unsigned long) map->m_lblk);

	/*
	 * ext4_map_blocks returns an int, and m_len is an unsigned int
	 */
	if (unlikely(map->m_len > INT_MAX))
		map->m_len = INT_MAX;

	/* We can only handle block numbers less than EXT_MAX_BLOCKS */
	if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS))
		return -EFSCORRUPTED;

	/* First, look up the extent status tree */
	if (!(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY) &&
	    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
		if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
			map->m_pblk = ext4_es_pblock(&es) +
					map->m_lblk - es.es_lblk;
			map->m_flags |= ext4_es_is_written(&es) ?
					EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
		} else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
			map->m_pblk = 0;
			retval = es.es_len - (map->m_lblk - es.es_lblk);
			if (retval > map->m_len)
				retval = map->m_len;
			map->m_len = retval;
			retval = 0;
		} else {
			BUG();
		}
#ifdef ES_AGGRESSIVE_TEST
		ext4_map_blocks_es_recheck(handle, inode, map,
					   &orig_map, flags);
#endif
		goto found;
	}

	/*
	 * Try to see if we can get the block without requesting a new
	 * file system block.
	 */
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, 0);
	}
	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk,
					    map->m_len, map->m_pblk, status);
		if (ret < 0)
			retval = ret;
	}
	up_read((&EXT4_I(inode)->i_data_sem));

found:
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;
	}

	/* If it is only a block(s) lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
		return retval;

	/*
	 * Return if the blocks have already been allocated.
	 *
	 * Note that if blocks have been preallocated,
	 * ext4_ext_get_block() returns with create == 0
	 * and the buffer head unmapped.
	 */
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
		/*
		 * If we need to convert the extent to unwritten
		 * we continue and do the actual work in
		 * ext4_ext_map_blocks()
		 */
		if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
			return retval;

	/*
	 * Here we clear m_flags because after allocating a new extent,
	 * it will be set again.
	 */
	map->m_flags &= ~EXT4_MAP_FLAGS;

	/*
	 * Allocating new blocks and/or writing to an unwritten extent
	 * will possibly result in updating i_data, so we take
	 * the write lock of i_data_sem, and call get_block()
	 * with create == 1 flag.
	 */
	down_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We need to check for EXT4 here because migrate
	 * could have changed the inode type in between
	 */
	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		retval = ext4_ext_map_blocks(handle, inode, map, flags);
	} else {
		retval = ext4_ind_map_blocks(handle, inode, map, flags);

		if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
			/*
			 * We allocated new blocks which will result in
			 * i_data's format changing.  Force the migrate
			 * to fail by clearing migrate flags
			 */
			ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
		}

		/*
		 * Update reserved blocks/metadata blocks after successful
		 * block allocation which had been deferred till now. We don't
		 * support fallocate for non extent files. So we can update
		 * reserve space here.
		 */
		if ((retval > 0) &&
			(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
			ext4_da_update_reserve_space(inode, retval, 1);
	}

	if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
		}

		/*
		 * We have to zeroout blocks before inserting them into extent
		 * status tree. Otherwise someone could look them up there and
		 * use them before they are really zeroed. We also have to
		 * unmap metadata before zeroing as otherwise writeback can
		 * overwrite zeros with stale data from block device.
		 */
		if (flags & EXT4_GET_BLOCKS_ZERO &&
		    map->m_flags & EXT4_MAP_MAPPED &&
		    map->m_flags & EXT4_MAP_NEW) {
			ret = ext4_issue_zeroout(inode, map->m_lblk,
						 map->m_pblk, map->m_len);
			if (ret) {
				retval = ret;
				goto out_sem;
			}
		}

		/*
		 * If the extent has been zeroed out, we don't need to update
		 * extent status tree.
		 */
		if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
		    ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) {
			if (ext4_es_is_written(&es))
				goto out_sem;
		}
		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
		    !(status & EXTENT_STATUS_WRITTEN) &&
		    ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk,
				       map->m_lblk + map->m_len - 1))
			status |= EXTENT_STATUS_DELAYED;
		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
					    map->m_pblk, status);
		if (ret < 0) {
			retval = ret;
			goto out_sem;
		}
	}

out_sem:
	up_write((&EXT4_I(inode)->i_data_sem));
	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
		ret = check_block_validity(inode, map);
		if (ret != 0)
			return ret;

		/*
		 * Inodes with freshly allocated blocks where contents will be
		 * visible after transaction commit must be on transaction's
		 * ordered data list.
		 */
		if (map->m_flags & EXT4_MAP_NEW &&
		    !(map->m_flags & EXT4_MAP_UNWRITTEN) &&
		    !(flags & EXT4_GET_BLOCKS_ZERO) &&
		    !ext4_is_quota_file(inode) &&
		    ext4_should_order_data(inode)) {
			loff_t start_byte =
				(loff_t)map->m_lblk << inode->i_blkbits;
			loff_t length = (loff_t)map->m_len << inode->i_blkbits;

			if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
				ret = ext4_jbd2_inode_add_wait(handle, inode,
						start_byte, length);
			else
				ret = ext4_jbd2_inode_add_write(handle, inode,
						start_byte, length);
			if (ret)
				return ret;
		}
	}
	if (retval > 0 && (map->m_flags & EXT4_MAP_UNWRITTEN ||
				map->m_flags & EXT4_MAP_MAPPED))
		ext4_fc_track_range(handle, inode, map->m_lblk,
					map->m_lblk + map->m_len - 1);
	if (retval < 0)
		ext_debug(inode, "failed with err %d\n", retval);
	return retval;
}
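
/*
 * Illustrative usage sketch (editor's addition; the surrounding caller is
 * hypothetical, but the flag and the return-value handling follow the
 * contract documented above ext4_map_blocks()):
 *
 *	struct ext4_map_blocks map = {
 *		.m_lblk = lblk,
 *		.m_len = 1,
 *	};
 *	int ret = ext4_map_blocks(handle, inode, &map,
 *				  EXT4_GET_BLOCKS_CREATE);
 *	if (ret < 0)
 *		return ret;	// lookup or allocation error
 *	if (ret == 0)
 *		;		// hole; map.m_len gives the hole length
 *	else
 *		;		// ret blocks mapped starting at map.m_pblk
 */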

/*
 * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
 * we have to be careful as someone else may be manipulating b_state as well.
 */
static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
{
	unsigned long old_state;
	unsigned long new_state;

	flags &= EXT4_MAP_FLAGS;

	/* Dummy buffer_head? Set non-atomically. */
	if (!bh->b_page) {
		bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
		return;
	}
	/*
	 * Someone else may be modifying b_state. Be careful! This is ugly but
	 * once we get rid of using bh as a container for mapping information
	 * to pass to / from get_block functions, this can go away.
	 */
	do {
		old_state = READ_ONCE(bh->b_state);
		new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
	} while (unlikely(
		 cmpxchg(&bh->b_state, old_state, new_state) != old_state));
}
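
/*
 * Illustrative note (editor's addition): the loop above is the standard
 * lock-free read-modify-write pattern: re-read b_state, compute the
 * desired value, and retry whenever another CPU changed b_state between
 * the READ_ONCE() and the cmpxchg(). Only the EXT4_MAP_FLAGS bits are
 * replaced; all other buffer-state bits are preserved.
 */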

static int _ext4_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh, int flags)
{
	struct ext4_map_blocks map;
	int ret = 0;

	if (ext4_has_inline_data(inode))
		return -ERANGE;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map,
			      flags);
	if (ret > 0) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		ext4_update_bh_state(bh, map.m_flags);
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
		ret = 0;
	} else if (ret == 0) {
		/* hole case, need to fill in bh->b_size */
		bh->b_size = inode->i_sb->s_blocksize * map.m_len;
	}
	return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
		   struct buffer_head *bh, int create)
{
	return _ext4_get_block(inode, iblock, bh,
			       create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * Get block function used when preparing for buffered write if we require
 * creating an unwritten extent if blocks haven't been allocated.  The extent
 * will be converted to written after the IO is complete.
 */
int ext4_get_block_unwritten(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create)
{
	ext4_debug("ext4_get_block_unwritten: inode %lu, create flag %d\n",
		   inode->i_ino, create);
	return _ext4_get_block(inode, iblock, bh_result,
			       EXT4_GET_BLOCKS_IO_CREATE_EXT);
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
				ext4_lblk_t block, int map_flags)
{
	struct ext4_map_blocks map;
	struct buffer_head *bh;
	int create = map_flags & EXT4_GET_BLOCKS_CREATE;
	int err;

	J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
		 || handle != NULL || create == 0);

	map.m_lblk = block;
	map.m_len = 1;
	err = ext4_map_blocks(handle, inode, &map, map_flags);

	if (err == 0)
		return create ? ERR_PTR(-ENOSPC) : NULL;
	if (err < 0)
		return ERR_PTR(err);

	bh = sb_getblk(inode->i_sb, map.m_pblk);
	if (unlikely(!bh))
		return ERR_PTR(-ENOMEM);
	if (map.m_flags & EXT4_MAP_NEW) {
		J_ASSERT(create != 0);
		J_ASSERT((EXT4_SB(inode->i_sb)->s_mount_state & EXT4_FC_REPLAY)
			 || (handle != NULL));

		/*
		 * Now that we do not always journal data, we should
		 * keep in mind whether this should always journal the
		 * new buffer as metadata.  For now, regular file
		 * writes use ext4_get_block instead, so it's not a
		 * problem.
		 */
		lock_buffer(bh);
		BUFFER_TRACE(bh, "call get_create_access");
		err = ext4_journal_get_create_access(handle, bh);
		if (unlikely(err)) {
			unlock_buffer(bh);
			goto errout;
		}
		if (!buffer_uptodate(bh)) {
			memset(bh->b_data, 0, inode->i_sb->s_blocksize);
			set_buffer_uptodate(bh);
		}
		unlock_buffer(bh);
		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (unlikely(err))
			goto errout;
	} else
		BUFFER_TRACE(bh, "not a new buffer");
	return bh;
errout:
	brelse(bh);
	return ERR_PTR(err);
}

struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
			       ext4_lblk_t block, int map_flags)
{
	struct buffer_head *bh;
	int ret;

	bh = ext4_getblk(handle, inode, block, map_flags);
	if (IS_ERR(bh))
		return bh;
	if (!bh || ext4_buffer_uptodate(bh))
		return bh;

	ret = ext4_read_bh_lock(bh, REQ_META | REQ_PRIO, true);
	if (ret) {
		put_bh(bh);
		return ERR_PTR(ret);
	}
	return bh;
}

/* Read a contiguous batch of blocks. */
int ext4_bread_batch(struct inode *inode, ext4_lblk_t block, int bh_count,
		     bool wait, struct buffer_head **bhs)
{
	int i, err;

	for (i = 0; i < bh_count; i++) {
		bhs[i] = ext4_getblk(NULL, inode, block + i, 0 /* map_flags */);
		if (IS_ERR(bhs[i])) {
			err = PTR_ERR(bhs[i]);
			bh_count = i;
			goto out_brelse;
		}
	}

	for (i = 0; i < bh_count; i++)
		/* Note that NULL bhs[i] is valid because of holes. */
		if (bhs[i] && !ext4_buffer_uptodate(bhs[i]))
			ext4_read_bh_lock(bhs[i], REQ_META | REQ_PRIO, false);

	if (!wait)
		return 0;

	for (i = 0; i < bh_count; i++)
		if (bhs[i])
			wait_on_buffer(bhs[i]);

	for (i = 0; i < bh_count; i++) {
		if (bhs[i] && !buffer_uptodate(bhs[i])) {
			err = -EIO;
			goto out_brelse;
		}
	}
	return 0;

out_brelse:
	for (i = 0; i < bh_count; i++) {
		brelse(bhs[i]);
		bhs[i] = NULL;
	}
	return err;
}
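
/*
 * Illustrative usage sketch (editor's addition; caller context is
 * hypothetical): synchronously reading two adjacent metadata blocks.
 *
 *	struct buffer_head *bhs[2];
 *	int err;
 *
 *	err = ext4_bread_batch(inode, block, 2, true, bhs);	// wait == true
 *	if (err)
 *		return err;	// on error, all bhs[] were brelse'd and NULLed
 *	// bhs[i] may be NULL where the range contains a hole
 */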

int ext4_walk_page_buffers(handle_t *handle,
			   struct buffer_head *head,
			   unsigned from,
			   unsigned to,
			   int *partial,
			   int (*fn)(handle_t *handle,
				     struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;
	struct buffer_head *next;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = next) {
		next = bh->b_this_page;
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage().  In that case, we
 * *know* that ext4_writepage() has generated enough buffer credits to do the
 * whole page.  So we won't block on the journal in that case, which is good,
 * because the caller may be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
int do_journal_get_write_access(handle_t *handle,
				struct buffer_head *bh)
{
	int dirty = buffer_dirty(bh);
	int ret;

	if (!buffer_mapped(bh) || buffer_freed(bh))
		return 0;
	/*
	 * __block_write_begin() could have dirtied some buffers. Clean
	 * the dirty bit as jbd2_journal_get_write_access() could complain
	 * otherwise about fs integrity issues. Setting of the dirty bit
	 * by __block_write_begin() isn't a real problem here as we clear
	 * the bit before releasing a page lock and thus writeback cannot
	 * ever write the buffer.
	 */
	if (dirty)
		clear_buffer_dirty(bh);
	BUFFER_TRACE(bh, "get write access");
	ret = ext4_journal_get_write_access(handle, bh);
	if (!ret && dirty)
		ret = ext4_handle_dirty_metadata(handle, NULL, bh);
	return ret;
}
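
/*
 * Illustrative note (editor's addition): do_journal_get_write_access() is
 * shaped as an ext4_walk_page_buffers() callback, so a journalled
 * write_begin path can apply it to every buffer in the written range,
 * e.g. (caller context hypothetical):
 *
 *	ret = ext4_walk_page_buffers(handle, page_buffers(page),
 *				     from, to, NULL,
 *				     do_journal_get_write_access);
 */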
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION
ext4_block_write_begin(struct page * page,loff_t pos,unsigned len,get_block_t * get_block)1039*4882a593Smuzhiyun static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
1040*4882a593Smuzhiyun 				  get_block_t *get_block)
1041*4882a593Smuzhiyun {
1042*4882a593Smuzhiyun 	unsigned from = pos & (PAGE_SIZE - 1);
1043*4882a593Smuzhiyun 	unsigned to = from + len;
1044*4882a593Smuzhiyun 	struct inode *inode = page->mapping->host;
1045*4882a593Smuzhiyun 	unsigned block_start, block_end;
1046*4882a593Smuzhiyun 	sector_t block;
1047*4882a593Smuzhiyun 	int err = 0;
1048*4882a593Smuzhiyun 	unsigned blocksize = inode->i_sb->s_blocksize;
1049*4882a593Smuzhiyun 	unsigned bbits;
1050*4882a593Smuzhiyun 	struct buffer_head *bh, *head, *wait[2];
1051*4882a593Smuzhiyun 	int nr_wait = 0;
1052*4882a593Smuzhiyun 	int i;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	BUG_ON(!PageLocked(page));
1055*4882a593Smuzhiyun 	BUG_ON(from > PAGE_SIZE);
1056*4882a593Smuzhiyun 	BUG_ON(to > PAGE_SIZE);
1057*4882a593Smuzhiyun 	BUG_ON(from > to);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	if (!page_has_buffers(page))
1060*4882a593Smuzhiyun 		create_empty_buffers(page, blocksize, 0);
1061*4882a593Smuzhiyun 	head = page_buffers(page);
1062*4882a593Smuzhiyun 	bbits = ilog2(blocksize);
1063*4882a593Smuzhiyun 	block = (sector_t)page->index << (PAGE_SHIFT - bbits);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	for (bh = head, block_start = 0; bh != head || !block_start;
1066*4882a593Smuzhiyun 	    block++, block_start = block_end, bh = bh->b_this_page) {
1067*4882a593Smuzhiyun 		block_end = block_start + blocksize;
1068*4882a593Smuzhiyun 		if (block_end <= from || block_start >= to) {
1069*4882a593Smuzhiyun 			if (PageUptodate(page)) {
1070*4882a593Smuzhiyun 				if (!buffer_uptodate(bh))
1071*4882a593Smuzhiyun 					set_buffer_uptodate(bh);
1072*4882a593Smuzhiyun 			}
1073*4882a593Smuzhiyun 			continue;
1074*4882a593Smuzhiyun 		}
1075*4882a593Smuzhiyun 		if (buffer_new(bh))
1076*4882a593Smuzhiyun 			clear_buffer_new(bh);
1077*4882a593Smuzhiyun 		if (!buffer_mapped(bh)) {
1078*4882a593Smuzhiyun 			WARN_ON(bh->b_size != blocksize);
1079*4882a593Smuzhiyun 			err = get_block(inode, block, bh, 1);
1080*4882a593Smuzhiyun 			if (err)
1081*4882a593Smuzhiyun 				break;
1082*4882a593Smuzhiyun 			if (buffer_new(bh)) {
1083*4882a593Smuzhiyun 				if (PageUptodate(page)) {
1084*4882a593Smuzhiyun 					clear_buffer_new(bh);
1085*4882a593Smuzhiyun 					set_buffer_uptodate(bh);
1086*4882a593Smuzhiyun 					mark_buffer_dirty(bh);
1087*4882a593Smuzhiyun 					continue;
1088*4882a593Smuzhiyun 				}
1089*4882a593Smuzhiyun 				if (block_end > to || block_start < from)
1090*4882a593Smuzhiyun 					zero_user_segments(page, to, block_end,
1091*4882a593Smuzhiyun 							   block_start, from);
1092*4882a593Smuzhiyun 				continue;
1093*4882a593Smuzhiyun 			}
1094*4882a593Smuzhiyun 		}
1095*4882a593Smuzhiyun 		if (PageUptodate(page)) {
1096*4882a593Smuzhiyun 			if (!buffer_uptodate(bh))
1097*4882a593Smuzhiyun 				set_buffer_uptodate(bh);
1098*4882a593Smuzhiyun 			continue;
1099*4882a593Smuzhiyun 		}
1100*4882a593Smuzhiyun 		if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
1101*4882a593Smuzhiyun 		    !buffer_unwritten(bh) &&
1102*4882a593Smuzhiyun 		    (block_start < from || block_end > to)) {
1103*4882a593Smuzhiyun 			ext4_read_bh_lock(bh, 0, false);
1104*4882a593Smuzhiyun 			wait[nr_wait++] = bh;
1105*4882a593Smuzhiyun 		}
1106*4882a593Smuzhiyun 	}
1107*4882a593Smuzhiyun 	/*
1108*4882a593Smuzhiyun 	 * If we issued read requests, let them complete.
1109*4882a593Smuzhiyun 	 */
1110*4882a593Smuzhiyun 	for (i = 0; i < nr_wait; i++) {
1111*4882a593Smuzhiyun 		wait_on_buffer(wait[i]);
1112*4882a593Smuzhiyun 		if (!buffer_uptodate(wait[i]))
1113*4882a593Smuzhiyun 			err = -EIO;
1114*4882a593Smuzhiyun 	}
1115*4882a593Smuzhiyun 	if (unlikely(err)) {
1116*4882a593Smuzhiyun 		page_zero_new_buffers(page, from, to);
1117*4882a593Smuzhiyun 	} else if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
1118*4882a593Smuzhiyun 		for (i = 0; i < nr_wait; i++) {
1119*4882a593Smuzhiyun 			int err2;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 			err2 = fscrypt_decrypt_pagecache_blocks(page, blocksize,
1122*4882a593Smuzhiyun 								bh_offset(wait[i]));
1123*4882a593Smuzhiyun 			if (err2) {
1124*4882a593Smuzhiyun 				clear_buffer_uptodate(wait[i]);
1125*4882a593Smuzhiyun 				err = err2;
1126*4882a593Smuzhiyun 			}
1127*4882a593Smuzhiyun 		}
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	return err;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun #endif
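/*
 * Minimal sketch (standalone C): the per-buffer loop in
 * ext4_block_write_begin() classifies each block of the page against the
 * byte range [from, to).  The same arithmetic, extracted with example
 * numbers for a 4K page of 1K blocks and a write covering [1500, 3000):
 */
#include <stdio.h>

int main(void)
{
	unsigned page_size = 4096, blocksize = 1024;
	unsigned from = 1500, to = 3000;
	unsigned block_start, block_end;

	for (block_start = 0; block_start < page_size; block_start = block_end) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to)
			printf("block [%u,%u): untouched\n", block_start, block_end);
		else if (block_start < from || block_end > to)
			printf("block [%u,%u): partial, may need read or zeroing\n",
			       block_start, block_end);
		else
			printf("block [%u,%u): fully overwritten\n",
			       block_start, block_end);
	}
	return 0;
}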
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun static int ext4_write_begin(struct file *file, struct address_space *mapping,
1135*4882a593Smuzhiyun 			    loff_t pos, unsigned len, unsigned flags,
1136*4882a593Smuzhiyun 			    struct page **pagep, void **fsdata)
1137*4882a593Smuzhiyun {
1138*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1139*4882a593Smuzhiyun 	int ret, needed_blocks;
1140*4882a593Smuzhiyun 	handle_t *handle;
1141*4882a593Smuzhiyun 	int retries = 0;
1142*4882a593Smuzhiyun 	struct page *page;
1143*4882a593Smuzhiyun 	pgoff_t index;
1144*4882a593Smuzhiyun 	unsigned from, to;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
1147*4882a593Smuzhiyun 		return -EIO;
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	if (trace_android_fs_datawrite_start_enabled()) {
1150*4882a593Smuzhiyun 		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 		path = android_fstrace_get_pathname(pathbuf,
1153*4882a593Smuzhiyun 						    MAX_TRACE_PATHBUF_LEN,
1154*4882a593Smuzhiyun 						    inode);
1155*4882a593Smuzhiyun 		trace_android_fs_datawrite_start(inode, pos, len,
1156*4882a593Smuzhiyun 						 current->pid, path,
1157*4882a593Smuzhiyun 						 current->comm);
1158*4882a593Smuzhiyun 	}
1159*4882a593Smuzhiyun 	trace_ext4_write_begin(inode, pos, len, flags);
1160*4882a593Smuzhiyun 	/*
1161*4882a593Smuzhiyun 	 * Reserve one extra block for adding the inode to the orphan
1162*4882a593Smuzhiyun 	 * list in case we allocate blocks but the write fails.
1163*4882a593Smuzhiyun 	 */
1164*4882a593Smuzhiyun 	needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
1165*4882a593Smuzhiyun 	index = pos >> PAGE_SHIFT;
1166*4882a593Smuzhiyun 	from = pos & (PAGE_SIZE - 1);
1167*4882a593Smuzhiyun 	to = from + len;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
1170*4882a593Smuzhiyun 		ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
1171*4882a593Smuzhiyun 						    flags, pagep);
1172*4882a593Smuzhiyun 		if (ret < 0)
1173*4882a593Smuzhiyun 			return ret;
1174*4882a593Smuzhiyun 		if (ret == 1)
1175*4882a593Smuzhiyun 			return 0;
1176*4882a593Smuzhiyun 	}
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 	/*
1179*4882a593Smuzhiyun 	 * grab_cache_page_write_begin() can take a long time if the
1180*4882a593Smuzhiyun 	 * system is thrashing due to memory pressure, or if the page
1181*4882a593Smuzhiyun 	 * is being written back.  So grab it first before we start
1182*4882a593Smuzhiyun 	 * the transaction handle.  This also allows us to allocate
1183*4882a593Smuzhiyun 	 * the page (if needed) without using GFP_NOFS.
1184*4882a593Smuzhiyun 	 */
1185*4882a593Smuzhiyun retry_grab:
1186*4882a593Smuzhiyun 	page = grab_cache_page_write_begin(mapping, index, flags);
1187*4882a593Smuzhiyun 	if (!page)
1188*4882a593Smuzhiyun 		return -ENOMEM;
1189*4882a593Smuzhiyun 	/*
1190*4882a593Smuzhiyun 	 * As with the page allocation, preallocate buffer heads before
1191*4882a593Smuzhiyun 	 * starting the handle.
1192*4882a593Smuzhiyun 	 */
1193*4882a593Smuzhiyun 	if (!page_has_buffers(page))
1194*4882a593Smuzhiyun 		create_empty_buffers(page, inode->i_sb->s_blocksize, 0);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	unlock_page(page);
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun retry_journal:
1199*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
1200*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
1201*4882a593Smuzhiyun 		put_page(page);
1202*4882a593Smuzhiyun 		return PTR_ERR(handle);
1203*4882a593Smuzhiyun 	}
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	lock_page(page);
1206*4882a593Smuzhiyun 	if (page->mapping != mapping) {
1207*4882a593Smuzhiyun 		/* The page got truncated from under us */
1208*4882a593Smuzhiyun 		unlock_page(page);
1209*4882a593Smuzhiyun 		put_page(page);
1210*4882a593Smuzhiyun 		ext4_journal_stop(handle);
1211*4882a593Smuzhiyun 		goto retry_grab;
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun 	/* In case writeback began while the page was unlocked */
1214*4882a593Smuzhiyun 	wait_for_stable_page(page);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION
1217*4882a593Smuzhiyun 	if (ext4_should_dioread_nolock(inode))
1218*4882a593Smuzhiyun 		ret = ext4_block_write_begin(page, pos, len,
1219*4882a593Smuzhiyun 					     ext4_get_block_unwritten);
1220*4882a593Smuzhiyun 	else
1221*4882a593Smuzhiyun 		ret = ext4_block_write_begin(page, pos, len,
1222*4882a593Smuzhiyun 					     ext4_get_block);
1223*4882a593Smuzhiyun #else
1224*4882a593Smuzhiyun 	if (ext4_should_dioread_nolock(inode))
1225*4882a593Smuzhiyun 		ret = __block_write_begin(page, pos, len,
1226*4882a593Smuzhiyun 					  ext4_get_block_unwritten);
1227*4882a593Smuzhiyun 	else
1228*4882a593Smuzhiyun 		ret = __block_write_begin(page, pos, len, ext4_get_block);
1229*4882a593Smuzhiyun #endif
1230*4882a593Smuzhiyun 	if (!ret && ext4_should_journal_data(inode)) {
1231*4882a593Smuzhiyun 		ret = ext4_walk_page_buffers(handle, page_buffers(page),
1232*4882a593Smuzhiyun 					     from, to, NULL,
1233*4882a593Smuzhiyun 					     do_journal_get_write_access);
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	if (ret) {
1237*4882a593Smuzhiyun 		bool extended = (pos + len > inode->i_size) &&
1238*4882a593Smuzhiyun 				!ext4_verity_in_progress(inode);
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		unlock_page(page);
1241*4882a593Smuzhiyun 		/*
1242*4882a593Smuzhiyun 		 * __block_write_begin may have instantiated a few blocks
1243*4882a593Smuzhiyun 		 * outside i_size.  Trim these off again. We don't need
1244*4882a593Smuzhiyun 		 * i_size_read because we hold i_mutex.
1245*4882a593Smuzhiyun 		 *
1246*4882a593Smuzhiyun 		 * Add the inode to the orphan list in case we crash before
1247*4882a593Smuzhiyun 		 * the truncate finishes.
1248*4882a593Smuzhiyun 		 */
1249*4882a593Smuzhiyun 		if (extended && ext4_can_truncate(inode))
1250*4882a593Smuzhiyun 			ext4_orphan_add(handle, inode);
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 		ext4_journal_stop(handle);
1253*4882a593Smuzhiyun 		if (extended) {
1254*4882a593Smuzhiyun 			ext4_truncate_failed_write(inode);
1255*4882a593Smuzhiyun 			/*
1256*4882a593Smuzhiyun 			 * If truncate failed early the inode might
1257*4882a593Smuzhiyun 			 * still be on the orphan list; we need to
1258*4882a593Smuzhiyun 			 * make sure the inode is removed from the
1259*4882a593Smuzhiyun 			 * orphan list in that case.
1260*4882a593Smuzhiyun 			 */
1261*4882a593Smuzhiyun 			if (inode->i_nlink)
1262*4882a593Smuzhiyun 				ext4_orphan_del(NULL, inode);
1263*4882a593Smuzhiyun 		}
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 		if (ret == -ENOSPC &&
1266*4882a593Smuzhiyun 		    ext4_should_retry_alloc(inode->i_sb, &retries))
1267*4882a593Smuzhiyun 			goto retry_journal;
1268*4882a593Smuzhiyun 		put_page(page);
1269*4882a593Smuzhiyun 		return ret;
1270*4882a593Smuzhiyun 	}
1271*4882a593Smuzhiyun 	*pagep = page;
1272*4882a593Smuzhiyun 	return ret;
1273*4882a593Smuzhiyun }
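/*
 * Minimal sketch (standalone C, hypothetical toy_* helpers): the control
 * flow above grabs the page before starting the handle and retries the
 * whole journal start on ENOSPC.  Only the retry structure mirrors the
 * function:
 */
#include <errno.h>
#include <stdio.h>

static int attempts;
static int toy_start_handle(void)		/* stands in for ext4_journal_start() */
{
	return ++attempts < 3 ? -ENOSPC : 0;	/* succeed on the third try */
}
static int toy_should_retry(int *retries)	/* ext4_should_retry_alloc() */
{
	return (*retries)++ < 3;
}

int main(void)
{
	int retries = 0, err;

retry_journal:
	err = toy_start_handle();
	if (err == -ENOSPC && toy_should_retry(&retries))
		goto retry_journal;
	printf("journal start returned %d after %d attempt(s)\n", err, attempts);
	return 0;
}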
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun /* For write_end() in data=journal mode */
1276*4882a593Smuzhiyun static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun 	int ret;
1279*4882a593Smuzhiyun 	if (!buffer_mapped(bh) || buffer_freed(bh))
1280*4882a593Smuzhiyun 		return 0;
1281*4882a593Smuzhiyun 	set_buffer_uptodate(bh);
1282*4882a593Smuzhiyun 	ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1283*4882a593Smuzhiyun 	clear_buffer_meta(bh);
1284*4882a593Smuzhiyun 	clear_buffer_prio(bh);
1285*4882a593Smuzhiyun 	return ret;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun /*
1289*4882a593Smuzhiyun  * We need to pick up the new inode size which generic_commit_write gave us.
1290*4882a593Smuzhiyun  * `file' can be NULL - eg, when called from page_symlink().
1291*4882a593Smuzhiyun  *
1292*4882a593Smuzhiyun  * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
1293*4882a593Smuzhiyun  * buffers are managed internally.
1294*4882a593Smuzhiyun  */
1295*4882a593Smuzhiyun static int ext4_write_end(struct file *file,
1296*4882a593Smuzhiyun 			  struct address_space *mapping,
1297*4882a593Smuzhiyun 			  loff_t pos, unsigned len, unsigned copied,
1298*4882a593Smuzhiyun 			  struct page *page, void *fsdata)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	handle_t *handle = ext4_journal_current_handle();
1301*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1302*4882a593Smuzhiyun 	loff_t old_size = inode->i_size;
1303*4882a593Smuzhiyun 	int ret = 0, ret2;
1304*4882a593Smuzhiyun 	int i_size_changed = 0;
1305*4882a593Smuzhiyun 	int inline_data = ext4_has_inline_data(inode);
1306*4882a593Smuzhiyun 	bool verity = ext4_verity_in_progress(inode);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	trace_android_fs_datawrite_end(inode, pos, len);
1309*4882a593Smuzhiyun 	trace_ext4_write_end(inode, pos, len, copied);
1310*4882a593Smuzhiyun 	if (inline_data) {
1311*4882a593Smuzhiyun 		ret = ext4_write_inline_data_end(inode, pos, len,
1312*4882a593Smuzhiyun 						 copied, page);
1313*4882a593Smuzhiyun 		if (ret < 0) {
1314*4882a593Smuzhiyun 			unlock_page(page);
1315*4882a593Smuzhiyun 			put_page(page);
1316*4882a593Smuzhiyun 			goto errout;
1317*4882a593Smuzhiyun 		}
1318*4882a593Smuzhiyun 		copied = ret;
1319*4882a593Smuzhiyun 		ret = 0;
1320*4882a593Smuzhiyun 	} else
1321*4882a593Smuzhiyun 		copied = block_write_end(file, mapping, pos,
1322*4882a593Smuzhiyun 					 len, copied, page, fsdata);
1323*4882a593Smuzhiyun 	/*
1324*4882a593Smuzhiyun 	 * it's important to update i_size while still holding the page lock:
1325*4882a593Smuzhiyun 	 * page writeout could otherwise come in and zero beyond i_size.
1326*4882a593Smuzhiyun 	 *
1327*4882a593Smuzhiyun 	 * If FS_IOC_ENABLE_VERITY is running on this inode, then Merkle tree
1328*4882a593Smuzhiyun 	 * blocks are being written past EOF, so skip the i_size update.
1329*4882a593Smuzhiyun 	 */
1330*4882a593Smuzhiyun 	if (!verity)
1331*4882a593Smuzhiyun 		i_size_changed = ext4_update_inode_size(inode, pos + copied);
1332*4882a593Smuzhiyun 	unlock_page(page);
1333*4882a593Smuzhiyun 	put_page(page);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	if (old_size < pos && !verity)
1336*4882a593Smuzhiyun 		pagecache_isize_extended(inode, old_size, pos);
1337*4882a593Smuzhiyun 	/*
1338*4882a593Smuzhiyun 	 * Don't mark the inode dirty under the page lock. First, it unnecessarily
1339*4882a593Smuzhiyun 	 * lengthens the page lock hold time. Second, it forces lock
1340*4882a593Smuzhiyun 	 * ordering of page lock and transaction start for journaling
1341*4882a593Smuzhiyun 	 * filesystems.
1342*4882a593Smuzhiyun 	 */
1343*4882a593Smuzhiyun 	if (i_size_changed || inline_data)
1344*4882a593Smuzhiyun 		ret = ext4_mark_inode_dirty(handle, inode);
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun errout:
1347*4882a593Smuzhiyun 	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1348*4882a593Smuzhiyun 		/* If we have allocated more blocks than we copied,
1349*4882a593Smuzhiyun 		 * blocks will remain allocated outside inode->i_size,
1350*4882a593Smuzhiyun 		 * so truncate them off.
1351*4882a593Smuzhiyun 		 */
1352*4882a593Smuzhiyun 		ext4_orphan_add(handle, inode);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	ret2 = ext4_journal_stop(handle);
1355*4882a593Smuzhiyun 	if (!ret)
1356*4882a593Smuzhiyun 		ret = ret2;
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (pos + len > inode->i_size && !verity) {
1359*4882a593Smuzhiyun 		ext4_truncate_failed_write(inode);
1360*4882a593Smuzhiyun 		/*
1361*4882a593Smuzhiyun 		 * If truncate failed early the inode might still be
1362*4882a593Smuzhiyun 		 * on the orphan list; we need to make sure the inode
1363*4882a593Smuzhiyun 		 * is removed from the orphan list in that case.
1364*4882a593Smuzhiyun 		 */
1365*4882a593Smuzhiyun 		if (inode->i_nlink)
1366*4882a593Smuzhiyun 			ext4_orphan_del(NULL, inode);
1367*4882a593Smuzhiyun 	}
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	return ret ? ret : copied;
1370*4882a593Smuzhiyun }
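/*
 * Minimal sketch (userspace model): ext4_update_inode_size(), used above,
 * roughly ratchets the in-core size (and, in the real helper, i_disksize)
 * when pos + copied extends the file, and reports whether it did; that
 * return value gates the mark-inode-dirty call.  Toy version with a plain
 * integer and no locking:
 */
#include <stdio.h>

static long long toy_isize;

static int toy_update_inode_size(long long newsize)
{
	if (newsize > toy_isize) {
		toy_isize = newsize;	/* i_size_write() in the real code */
		return 1;		/* caller must mark the inode dirty */
	}
	return 0;			/* nothing changed, nothing to log */
}

int main(void)
{
	printf("grew? %d (size now %lld)\n", toy_update_inode_size(4096), toy_isize);
	printf("grew? %d (size now %lld)\n", toy_update_inode_size(2048), toy_isize);
	return 0;
}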
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun /*
1373*4882a593Smuzhiyun  * This is a private version of page_zero_new_buffers() which doesn't
1374*4882a593Smuzhiyun  * set the buffer to be dirty, since in data=journal mode we need
1375*4882a593Smuzhiyun  * to call ext4_handle_dirty_metadata() instead.
1376*4882a593Smuzhiyun  */
1377*4882a593Smuzhiyun static void ext4_journalled_zero_new_buffers(handle_t *handle,
1378*4882a593Smuzhiyun 					    struct page *page,
1379*4882a593Smuzhiyun 					    unsigned from, unsigned to)
1380*4882a593Smuzhiyun {
1381*4882a593Smuzhiyun 	unsigned int block_start = 0, block_end;
1382*4882a593Smuzhiyun 	struct buffer_head *head, *bh;
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	bh = head = page_buffers(page);
1385*4882a593Smuzhiyun 	do {
1386*4882a593Smuzhiyun 		block_end = block_start + bh->b_size;
1387*4882a593Smuzhiyun 		if (buffer_new(bh)) {
1388*4882a593Smuzhiyun 			if (block_end > from && block_start < to) {
1389*4882a593Smuzhiyun 				if (!PageUptodate(page)) {
1390*4882a593Smuzhiyun 					unsigned start, size;
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun 					start = max(from, block_start);
1393*4882a593Smuzhiyun 					size = min(to, block_end) - start;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 					zero_user(page, start, size);
1396*4882a593Smuzhiyun 					write_end_fn(handle, bh);
1397*4882a593Smuzhiyun 				}
1398*4882a593Smuzhiyun 				clear_buffer_new(bh);
1399*4882a593Smuzhiyun 			}
1400*4882a593Smuzhiyun 		}
1401*4882a593Smuzhiyun 		block_start = block_end;
1402*4882a593Smuzhiyun 		bh = bh->b_this_page;
1403*4882a593Smuzhiyun 	} while (bh != head);
1404*4882a593Smuzhiyun }
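/*
 * Minimal sketch (standalone C): the start/size computation above zeroes
 * the part of a new buffer that intersects [from, to).  Worked example for
 * a 1K block spanning [1024, 2048) and a range of [1500, 3000):
 */
#include <stdio.h>

int main(void)
{
	unsigned from = 1500, to = 3000;
	unsigned block_start = 1024, block_end = 2048;
	unsigned start = from > block_start ? from : block_start;	/* max() */
	unsigned end = to < block_end ? to : block_end;			/* min() */

	printf("zero_user(page, %u, %u)\n", start, end - start);
	/* prints: zero_user(page, 1500, 548) */
	return 0;
}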
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun static int ext4_journalled_write_end(struct file *file,
1407*4882a593Smuzhiyun 				     struct address_space *mapping,
1408*4882a593Smuzhiyun 				     loff_t pos, unsigned len, unsigned copied,
1409*4882a593Smuzhiyun 				     struct page *page, void *fsdata)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun 	handle_t *handle = ext4_journal_current_handle();
1412*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1413*4882a593Smuzhiyun 	loff_t old_size = inode->i_size;
1414*4882a593Smuzhiyun 	int ret = 0, ret2;
1415*4882a593Smuzhiyun 	int partial = 0;
1416*4882a593Smuzhiyun 	unsigned from, to;
1417*4882a593Smuzhiyun 	int size_changed = 0;
1418*4882a593Smuzhiyun 	int inline_data = ext4_has_inline_data(inode);
1419*4882a593Smuzhiyun 	bool verity = ext4_verity_in_progress(inode);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	trace_android_fs_datawrite_end(inode, pos, len);
1422*4882a593Smuzhiyun 	trace_ext4_journalled_write_end(inode, pos, len, copied);
1423*4882a593Smuzhiyun 	from = pos & (PAGE_SIZE - 1);
1424*4882a593Smuzhiyun 	to = from + len;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	BUG_ON(!ext4_handle_valid(handle));
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	if (inline_data) {
1429*4882a593Smuzhiyun 		ret = ext4_write_inline_data_end(inode, pos, len,
1430*4882a593Smuzhiyun 						 copied, page);
1431*4882a593Smuzhiyun 		if (ret < 0) {
1432*4882a593Smuzhiyun 			unlock_page(page);
1433*4882a593Smuzhiyun 			put_page(page);
1434*4882a593Smuzhiyun 			goto errout;
1435*4882a593Smuzhiyun 		}
1436*4882a593Smuzhiyun 		copied = ret;
1437*4882a593Smuzhiyun 		ret = 0;
1438*4882a593Smuzhiyun 	} else if (unlikely(copied < len) && !PageUptodate(page)) {
1439*4882a593Smuzhiyun 		copied = 0;
1440*4882a593Smuzhiyun 		ext4_journalled_zero_new_buffers(handle, page, from, to);
1441*4882a593Smuzhiyun 	} else {
1442*4882a593Smuzhiyun 		if (unlikely(copied < len))
1443*4882a593Smuzhiyun 			ext4_journalled_zero_new_buffers(handle, page,
1444*4882a593Smuzhiyun 							 from + copied, to);
1445*4882a593Smuzhiyun 		ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1446*4882a593Smuzhiyun 					     from + copied, &partial,
1447*4882a593Smuzhiyun 					     write_end_fn);
1448*4882a593Smuzhiyun 		if (!partial)
1449*4882a593Smuzhiyun 			SetPageUptodate(page);
1450*4882a593Smuzhiyun 	}
1451*4882a593Smuzhiyun 	if (!verity)
1452*4882a593Smuzhiyun 		size_changed = ext4_update_inode_size(inode, pos + copied);
1453*4882a593Smuzhiyun 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1454*4882a593Smuzhiyun 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1455*4882a593Smuzhiyun 	unlock_page(page);
1456*4882a593Smuzhiyun 	put_page(page);
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun 	if (old_size < pos && !verity)
1459*4882a593Smuzhiyun 		pagecache_isize_extended(inode, old_size, pos);
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 	if (size_changed || inline_data) {
1462*4882a593Smuzhiyun 		ret2 = ext4_mark_inode_dirty(handle, inode);
1463*4882a593Smuzhiyun 		if (!ret)
1464*4882a593Smuzhiyun 			ret = ret2;
1465*4882a593Smuzhiyun 	}
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun errout:
1468*4882a593Smuzhiyun 	if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
1469*4882a593Smuzhiyun 		/* If we have allocated more blocks than we copied,
1470*4882a593Smuzhiyun 		 * blocks will remain allocated outside inode->i_size,
1471*4882a593Smuzhiyun 		 * so truncate them off.
1472*4882a593Smuzhiyun 		 */
1473*4882a593Smuzhiyun 		ext4_orphan_add(handle, inode);
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	ret2 = ext4_journal_stop(handle);
1476*4882a593Smuzhiyun 	if (!ret)
1477*4882a593Smuzhiyun 		ret = ret2;
1478*4882a593Smuzhiyun 	if (pos + len > inode->i_size && !verity) {
1479*4882a593Smuzhiyun 		ext4_truncate_failed_write(inode);
1480*4882a593Smuzhiyun 		/*
1481*4882a593Smuzhiyun 		 * If truncate failed early the inode might still be
1482*4882a593Smuzhiyun 		 * on the orphan list; we need to make sure the inode
1483*4882a593Smuzhiyun 		 * is removed from the orphan list in that case.
1484*4882a593Smuzhiyun 		 */
1485*4882a593Smuzhiyun 		if (inode->i_nlink)
1486*4882a593Smuzhiyun 			ext4_orphan_del(NULL, inode);
1487*4882a593Smuzhiyun 	}
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	return ret ? ret : copied;
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun /*
1493*4882a593Smuzhiyun  * Reserve space for a single cluster
1494*4882a593Smuzhiyun  */
1495*4882a593Smuzhiyun static int ext4_da_reserve_space(struct inode *inode)
1496*4882a593Smuzhiyun {
1497*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1498*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
1499*4882a593Smuzhiyun 	int ret;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	/*
1502*4882a593Smuzhiyun 	 * We will charge metadata quota at writeout time; this saves
1503*4882a593Smuzhiyun 	 * us from metadata over-estimation, though we may go over by
1504*4882a593Smuzhiyun 	 * a small amount in the end.  Here we just reserve for data.
1505*4882a593Smuzhiyun 	 */
1506*4882a593Smuzhiyun 	ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1507*4882a593Smuzhiyun 	if (ret)
1508*4882a593Smuzhiyun 		return ret;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	spin_lock(&ei->i_block_reservation_lock);
1511*4882a593Smuzhiyun 	if (ext4_claim_free_clusters(sbi, 1, 0)) {
1512*4882a593Smuzhiyun 		spin_unlock(&ei->i_block_reservation_lock);
1513*4882a593Smuzhiyun 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1514*4882a593Smuzhiyun 		return -ENOSPC;
1515*4882a593Smuzhiyun 	}
1516*4882a593Smuzhiyun 	ei->i_reserved_data_blocks++;
1517*4882a593Smuzhiyun 	trace_ext4_da_reserve_space(inode);
1518*4882a593Smuzhiyun 	spin_unlock(&ei->i_block_reservation_lock);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	return 0;       /* success */
1521*4882a593Smuzhiyun }
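/*
 * Minimal sketch (standalone C): the reservation above is made per
 * cluster, while quota is charged in blocks via EXT4_C2B().  With
 * bigalloc, a cluster is 2^s_cluster_bits blocks, so both conversions are
 * plain shifts (the cluster_bits value below is illustrative):
 */
#include <stdio.h>

int main(void)
{
	unsigned cluster_bits = 4;				/* 16 blocks per cluster */
	unsigned long long blocks = 1ULL << cluster_bits;	/* EXT4_C2B(sbi, 1) */
	unsigned long long lblk = 37;
	unsigned long long clu = lblk >> cluster_bits;		/* EXT4_B2C(sbi, lblk) */

	printf("one cluster reserves %llu blocks of quota\n", blocks);
	printf("logical block %llu lives in cluster %llu\n", lblk, clu);
	return 0;
}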
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun void ext4_da_release_space(struct inode *inode, int to_free)
1524*4882a593Smuzhiyun {
1525*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1526*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	if (!to_free)
1529*4882a593Smuzhiyun 		return;		/* Nothing to release, exit */
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	trace_ext4_da_release_space(inode, to_free);
1534*4882a593Smuzhiyun 	if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1535*4882a593Smuzhiyun 		/*
1536*4882a593Smuzhiyun 		 * If there aren't enough reserved blocks, then the
1537*4882a593Smuzhiyun 		 * counter is messed up somewhere.  Since this
1538*4882a593Smuzhiyun 		 * function is called from invalidatepage, it's
1539*4882a593Smuzhiyun 		 * harmless to return without any action.
1540*4882a593Smuzhiyun 		 */
1541*4882a593Smuzhiyun 		ext4_warning(inode->i_sb, "ext4_da_release_space: "
1542*4882a593Smuzhiyun 			 "ino %lu, to_free %d with only %d reserved "
1543*4882a593Smuzhiyun 			 "data blocks", inode->i_ino, to_free,
1544*4882a593Smuzhiyun 			 ei->i_reserved_data_blocks);
1545*4882a593Smuzhiyun 		WARN_ON(1);
1546*4882a593Smuzhiyun 		to_free = ei->i_reserved_data_blocks;
1547*4882a593Smuzhiyun 	}
1548*4882a593Smuzhiyun 	ei->i_reserved_data_blocks -= to_free;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	/* update fs dirty data blocks counter */
1551*4882a593Smuzhiyun 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun /*
1559*4882a593Smuzhiyun  * Delayed allocation stuff
1560*4882a593Smuzhiyun  */
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun struct mpage_da_data {
1563*4882a593Smuzhiyun 	struct inode *inode;
1564*4882a593Smuzhiyun 	struct writeback_control *wbc;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	pgoff_t first_page;	/* The first page to write */
1567*4882a593Smuzhiyun 	pgoff_t next_page;	/* Current page to examine */
1568*4882a593Smuzhiyun 	pgoff_t last_page;	/* Last page to examine */
1569*4882a593Smuzhiyun 	/*
1570*4882a593Smuzhiyun 	 * Extent to map - this can start after first_page because that page
1571*4882a593Smuzhiyun 	 * may already be fully mapped. We somewhat abuse m_flags to store
1572*4882a593Smuzhiyun 	 * whether the extent is delalloc or unwritten.
1573*4882a593Smuzhiyun 	 */
1574*4882a593Smuzhiyun 	struct ext4_map_blocks map;
1575*4882a593Smuzhiyun 	struct ext4_io_submit io_submit;	/* IO submission data */
1576*4882a593Smuzhiyun 	unsigned int do_map:1;
1577*4882a593Smuzhiyun 	unsigned int scanned_until_end:1;
1578*4882a593Smuzhiyun };
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1581*4882a593Smuzhiyun 				       bool invalidate)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun 	int nr_pages, i;
1584*4882a593Smuzhiyun 	pgoff_t index, end;
1585*4882a593Smuzhiyun 	struct pagevec pvec;
1586*4882a593Smuzhiyun 	struct inode *inode = mpd->inode;
1587*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	/* This is necessary when next_page == 0. */
1590*4882a593Smuzhiyun 	if (mpd->first_page >= mpd->next_page)
1591*4882a593Smuzhiyun 		return;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	mpd->scanned_until_end = 0;
1594*4882a593Smuzhiyun 	index = mpd->first_page;
1595*4882a593Smuzhiyun 	end   = mpd->next_page - 1;
1596*4882a593Smuzhiyun 	if (invalidate) {
1597*4882a593Smuzhiyun 		ext4_lblk_t start, last;
1598*4882a593Smuzhiyun 		start = index << (PAGE_SHIFT - inode->i_blkbits);
1599*4882a593Smuzhiyun 		last = end << (PAGE_SHIFT - inode->i_blkbits);
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 		/*
1602*4882a593Smuzhiyun 		 * avoid racing with extent status tree scans made by
1603*4882a593Smuzhiyun 		 * ext4_insert_delayed_block()
1604*4882a593Smuzhiyun 		 */
1605*4882a593Smuzhiyun 		down_write(&EXT4_I(inode)->i_data_sem);
1606*4882a593Smuzhiyun 		ext4_es_remove_extent(inode, start, last - start + 1);
1607*4882a593Smuzhiyun 		up_write(&EXT4_I(inode)->i_data_sem);
1608*4882a593Smuzhiyun 	}
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	pagevec_init(&pvec);
1611*4882a593Smuzhiyun 	while (index <= end) {
1612*4882a593Smuzhiyun 		nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end);
1613*4882a593Smuzhiyun 		if (nr_pages == 0)
1614*4882a593Smuzhiyun 			break;
1615*4882a593Smuzhiyun 		for (i = 0; i < nr_pages; i++) {
1616*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 			BUG_ON(!PageLocked(page));
1619*4882a593Smuzhiyun 			BUG_ON(PageWriteback(page));
1620*4882a593Smuzhiyun 			if (invalidate) {
1621*4882a593Smuzhiyun 				if (page_mapped(page))
1622*4882a593Smuzhiyun 					clear_page_dirty_for_io(page);
1623*4882a593Smuzhiyun 				block_invalidatepage(page, 0, PAGE_SIZE);
1624*4882a593Smuzhiyun 				ClearPageUptodate(page);
1625*4882a593Smuzhiyun 			}
1626*4882a593Smuzhiyun 			unlock_page(page);
1627*4882a593Smuzhiyun 		}
1628*4882a593Smuzhiyun 		pagevec_release(&pvec);
1629*4882a593Smuzhiyun 	}
1630*4882a593Smuzhiyun }
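/*
 * Minimal sketch (standalone C): the start/last computation above converts
 * page indexes to logical block numbers; each page holds
 * 2^(PAGE_SHIFT - blkbits) blocks.  Example for 4K pages, 1K blocks, and
 * pages 3..5 (first_page = 3, next_page = 6):
 */
#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12, blkbits = 10;
	unsigned long first_page = 3, next_page = 6;
	unsigned long start = first_page << (page_shift - blkbits);
	unsigned long last = (next_page - 1) << (page_shift - blkbits);

	/* start/last are the first blocks of the first and last page */
	printf("ext4_es_remove_extent(inode, %lu, %lu)\n",
	       start, last - start + 1);	/* prints 12, 9 */
	return 0;
}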
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun static void ext4_print_free_blocks(struct inode *inode)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1635*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
1636*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1639*4882a593Smuzhiyun 	       EXT4_C2B(EXT4_SB(inode->i_sb),
1640*4882a593Smuzhiyun 			ext4_count_free_clusters(sb)));
1641*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1642*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1643*4882a593Smuzhiyun 	       (long long) EXT4_C2B(EXT4_SB(sb),
1644*4882a593Smuzhiyun 		percpu_counter_sum(&sbi->s_freeclusters_counter)));
1645*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1646*4882a593Smuzhiyun 	       (long long) EXT4_C2B(EXT4_SB(sb),
1647*4882a593Smuzhiyun 		percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1648*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
1649*4882a593Smuzhiyun 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1650*4882a593Smuzhiyun 		 ei->i_reserved_data_blocks);
1651*4882a593Smuzhiyun 	return;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1655*4882a593Smuzhiyun {
1656*4882a593Smuzhiyun 	return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1657*4882a593Smuzhiyun }
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun /*
1660*4882a593Smuzhiyun  * ext4_insert_delayed_block - adds a delayed block to the extents status
1661*4882a593Smuzhiyun  *                             tree, incrementing the reserved cluster/block
1662*4882a593Smuzhiyun  *                             count or making a pending reservation
1663*4882a593Smuzhiyun  *                             where needed
1664*4882a593Smuzhiyun  *
1665*4882a593Smuzhiyun  * @inode - file containing the newly added block
1666*4882a593Smuzhiyun  * @lblk - logical block to be added
1667*4882a593Smuzhiyun  *
1668*4882a593Smuzhiyun  * Returns 0 on success, negative error code on failure.
1669*4882a593Smuzhiyun  */
1670*4882a593Smuzhiyun static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1673*4882a593Smuzhiyun 	int ret;
1674*4882a593Smuzhiyun 	bool allocated = false;
1675*4882a593Smuzhiyun 	bool reserved = false;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	/*
1678*4882a593Smuzhiyun 	 * If the cluster containing lblk is shared with a delayed,
1679*4882a593Smuzhiyun 	 * written, or unwritten extent in a bigalloc file system, it's
1680*4882a593Smuzhiyun 	 * already been accounted for and does not need to be reserved.
1681*4882a593Smuzhiyun 	 * A pending reservation must be made for the cluster if it's
1682*4882a593Smuzhiyun 	 * shared with a written or unwritten extent and doesn't already
1683*4882a593Smuzhiyun 	 * have one.  Written and unwritten extents can be purged from the
1684*4882a593Smuzhiyun 	 * extents status tree if the system is under memory pressure, so
1685*4882a593Smuzhiyun 	 * it's necessary to examine the extent tree if a search of the
1686*4882a593Smuzhiyun 	 * extents status tree doesn't get a match.
1687*4882a593Smuzhiyun 	 */
1688*4882a593Smuzhiyun 	if (sbi->s_cluster_ratio == 1) {
1689*4882a593Smuzhiyun 		ret = ext4_da_reserve_space(inode);
1690*4882a593Smuzhiyun 		if (ret != 0)   /* ENOSPC */
1691*4882a593Smuzhiyun 			goto errout;
1692*4882a593Smuzhiyun 		reserved = true;
1693*4882a593Smuzhiyun 	} else {   /* bigalloc */
1694*4882a593Smuzhiyun 		if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
1695*4882a593Smuzhiyun 			if (!ext4_es_scan_clu(inode,
1696*4882a593Smuzhiyun 					      &ext4_es_is_mapped, lblk)) {
1697*4882a593Smuzhiyun 				ret = ext4_clu_mapped(inode,
1698*4882a593Smuzhiyun 						      EXT4_B2C(sbi, lblk));
1699*4882a593Smuzhiyun 				if (ret < 0)
1700*4882a593Smuzhiyun 					goto errout;
1701*4882a593Smuzhiyun 				if (ret == 0) {
1702*4882a593Smuzhiyun 					ret = ext4_da_reserve_space(inode);
1703*4882a593Smuzhiyun 					if (ret != 0)   /* ENOSPC */
1704*4882a593Smuzhiyun 						goto errout;
1705*4882a593Smuzhiyun 					reserved = true;
1706*4882a593Smuzhiyun 				} else {
1707*4882a593Smuzhiyun 					allocated = true;
1708*4882a593Smuzhiyun 				}
1709*4882a593Smuzhiyun 			} else {
1710*4882a593Smuzhiyun 				allocated = true;
1711*4882a593Smuzhiyun 			}
1712*4882a593Smuzhiyun 		}
1713*4882a593Smuzhiyun 	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
1716*4882a593Smuzhiyun 	if (ret && reserved)
1717*4882a593Smuzhiyun 		ext4_da_release_space(inode, 1);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun errout:
1720*4882a593Smuzhiyun 	return ret;
1721*4882a593Smuzhiyun }
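/*
 * Minimal sketch (standalone C, hypothetical booleans): the bigalloc
 * branch above reduces to a short decision ladder.  The three inputs model
 * what the real extent-status and extent-tree lookups would return:
 */
#include <stdbool.h>
#include <stdio.h>

static bool clu_has_delonly;	/* ext4_es_scan_clu(..., &ext4_es_is_delonly, ...) */
static bool clu_mapped_in_es;	/* ext4_es_scan_clu(..., &ext4_es_is_mapped, ...) */
static bool clu_mapped_on_disk;	/* ext4_clu_mapped() returned > 0 */

int main(void)
{
	bool reserved = false, allocated = false;

	if (clu_has_delonly)
		;				/* cluster already accounted for */
	else if (clu_mapped_in_es || clu_mapped_on_disk)
		allocated = true;		/* only a pending reservation is needed */
	else
		reserved = true;		/* charge one new cluster */

	printf("reserved=%d allocated=%d\n", reserved, allocated);
	return 0;
}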
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun /*
1724*4882a593Smuzhiyun  * This function borrows code from the very beginning of
1725*4882a593Smuzhiyun  * ext4_map_blocks, but assumes that it is called at delayed-write
1726*4882a593Smuzhiyun  * time. It looks up the requested blocks and sets the
1727*4882a593Smuzhiyun  * buffer delay bit under the protection of i_data_sem.
1728*4882a593Smuzhiyun  */
1729*4882a593Smuzhiyun static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1730*4882a593Smuzhiyun 			      struct ext4_map_blocks *map,
1731*4882a593Smuzhiyun 			      struct buffer_head *bh)
1732*4882a593Smuzhiyun {
1733*4882a593Smuzhiyun 	struct extent_status es;
1734*4882a593Smuzhiyun 	int retval;
1735*4882a593Smuzhiyun 	sector_t invalid_block = ~((sector_t) 0xffff);
1736*4882a593Smuzhiyun #ifdef ES_AGGRESSIVE_TEST
1737*4882a593Smuzhiyun 	struct ext4_map_blocks orig_map;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	memcpy(&orig_map, map, sizeof(*map));
1740*4882a593Smuzhiyun #endif
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun 	if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1743*4882a593Smuzhiyun 		invalid_block = ~0;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	map->m_flags = 0;
1746*4882a593Smuzhiyun 	ext_debug(inode, "max_blocks %u, logical block %lu\n", map->m_len,
1747*4882a593Smuzhiyun 		  (unsigned long) map->m_lblk);
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	/* Lookup extent status tree firstly */
1750*4882a593Smuzhiyun 	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
1751*4882a593Smuzhiyun 		if (ext4_es_is_hole(&es)) {
1752*4882a593Smuzhiyun 			retval = 0;
1753*4882a593Smuzhiyun 			down_read(&EXT4_I(inode)->i_data_sem);
1754*4882a593Smuzhiyun 			goto add_delayed;
1755*4882a593Smuzhiyun 		}
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 		/*
1758*4882a593Smuzhiyun 		 * Delayed extent could be allocated by fallocate.
1759*4882a593Smuzhiyun 		 * So we need to check it.
1760*4882a593Smuzhiyun 		 */
1761*4882a593Smuzhiyun 		if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1762*4882a593Smuzhiyun 			map_bh(bh, inode->i_sb, invalid_block);
1763*4882a593Smuzhiyun 			set_buffer_new(bh);
1764*4882a593Smuzhiyun 			set_buffer_delay(bh);
1765*4882a593Smuzhiyun 			return 0;
1766*4882a593Smuzhiyun 		}
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 		map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1769*4882a593Smuzhiyun 		retval = es.es_len - (iblock - es.es_lblk);
1770*4882a593Smuzhiyun 		if (retval > map->m_len)
1771*4882a593Smuzhiyun 			retval = map->m_len;
1772*4882a593Smuzhiyun 		map->m_len = retval;
1773*4882a593Smuzhiyun 		if (ext4_es_is_written(&es))
1774*4882a593Smuzhiyun 			map->m_flags |= EXT4_MAP_MAPPED;
1775*4882a593Smuzhiyun 		else if (ext4_es_is_unwritten(&es))
1776*4882a593Smuzhiyun 			map->m_flags |= EXT4_MAP_UNWRITTEN;
1777*4882a593Smuzhiyun 		else
1778*4882a593Smuzhiyun 			BUG();
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun #ifdef ES_AGGRESSIVE_TEST
1781*4882a593Smuzhiyun 		ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1782*4882a593Smuzhiyun #endif
1783*4882a593Smuzhiyun 		return retval;
1784*4882a593Smuzhiyun 	}
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	/*
1787*4882a593Smuzhiyun 	 * Try to see if we can get the block without requesting a new
1788*4882a593Smuzhiyun 	 * file system block.
1789*4882a593Smuzhiyun 	 */
1790*4882a593Smuzhiyun 	down_read(&EXT4_I(inode)->i_data_sem);
1791*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode))
1792*4882a593Smuzhiyun 		retval = 0;
1793*4882a593Smuzhiyun 	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1794*4882a593Smuzhiyun 		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1795*4882a593Smuzhiyun 	else
1796*4882a593Smuzhiyun 		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun add_delayed:
1799*4882a593Smuzhiyun 	if (retval == 0) {
1800*4882a593Smuzhiyun 		int ret;
1801*4882a593Smuzhiyun 
1802*4882a593Smuzhiyun 		/*
1803*4882a593Smuzhiyun 		 * XXX: __block_prepare_write() unmaps passed block,
1804*4882a593Smuzhiyun 		 * is it OK?
1805*4882a593Smuzhiyun 		 */
1806*4882a593Smuzhiyun 
1807*4882a593Smuzhiyun 		ret = ext4_insert_delayed_block(inode, map->m_lblk);
1808*4882a593Smuzhiyun 		if (ret != 0) {
1809*4882a593Smuzhiyun 			retval = ret;
1810*4882a593Smuzhiyun 			goto out_unlock;
1811*4882a593Smuzhiyun 		}
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 		map_bh(bh, inode->i_sb, invalid_block);
1814*4882a593Smuzhiyun 		set_buffer_new(bh);
1815*4882a593Smuzhiyun 		set_buffer_delay(bh);
1816*4882a593Smuzhiyun 	} else if (retval > 0) {
1817*4882a593Smuzhiyun 		int ret;
1818*4882a593Smuzhiyun 		unsigned int status;
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 		if (unlikely(retval != map->m_len)) {
1821*4882a593Smuzhiyun 			ext4_warning(inode->i_sb,
1822*4882a593Smuzhiyun 				     "ES len assertion failed for inode "
1823*4882a593Smuzhiyun 				     "%lu: retval %d != map->m_len %d",
1824*4882a593Smuzhiyun 				     inode->i_ino, retval, map->m_len);
1825*4882a593Smuzhiyun 			WARN_ON(1);
1826*4882a593Smuzhiyun 		}
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1829*4882a593Smuzhiyun 				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1830*4882a593Smuzhiyun 		ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1831*4882a593Smuzhiyun 					    map->m_pblk, status);
1832*4882a593Smuzhiyun 		if (ret != 0)
1833*4882a593Smuzhiyun 			retval = ret;
1834*4882a593Smuzhiyun 	}
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun out_unlock:
1837*4882a593Smuzhiyun 	up_read((&EXT4_I(inode)->i_data_sem));
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	return retval;
1840*4882a593Smuzhiyun }
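/*
 * Minimal sketch (standalone C): when the lookup above hits a cached
 * extent, the usable length is the remainder of that extent from iblock
 * on, clamped to the requested map length.  Worked example:
 */
#include <stdio.h>

int main(void)
{
	unsigned es_lblk = 100, es_len = 16;	/* cached extent: blocks 100..115 */
	unsigned iblock = 110, m_len = 32;	/* request starts inside it */
	unsigned retval = es_len - (iblock - es_lblk);	/* 6 blocks remain */

	if (retval > m_len)
		retval = m_len;
	printf("map covers %u block(s) starting at block %u\n", retval, iblock);
	return 0;
}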
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun /*
1843*4882a593Smuzhiyun  * This is a special get_block_t callback which is used by
1844*4882a593Smuzhiyun  * ext4_da_write_begin().  It will either return a mapped block or
1845*4882a593Smuzhiyun  * reserve space for a single block.
1846*4882a593Smuzhiyun  *
1847*4882a593Smuzhiyun  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1848*4882a593Smuzhiyun  * We also have b_blocknr = -1 and b_bdev initialized properly
1849*4882a593Smuzhiyun  *
1850*4882a593Smuzhiyun  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1851*4882a593Smuzhiyun  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1852*4882a593Smuzhiyun  * initialized properly.
1853*4882a593Smuzhiyun  */
1854*4882a593Smuzhiyun int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1855*4882a593Smuzhiyun 			   struct buffer_head *bh, int create)
1856*4882a593Smuzhiyun {
1857*4882a593Smuzhiyun 	struct ext4_map_blocks map;
1858*4882a593Smuzhiyun 	int ret = 0;
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	BUG_ON(create == 0);
1861*4882a593Smuzhiyun 	BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	map.m_lblk = iblock;
1864*4882a593Smuzhiyun 	map.m_len = 1;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	/*
1867*4882a593Smuzhiyun 	 * First, we need to know whether the block is already allocated;
1868*4882a593Smuzhiyun 	 * preallocated blocks are unmapped but should be treated
1869*4882a593Smuzhiyun 	 * the same as allocated blocks.
1870*4882a593Smuzhiyun 	 */
1871*4882a593Smuzhiyun 	ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1872*4882a593Smuzhiyun 	if (ret <= 0)
1873*4882a593Smuzhiyun 		return ret;
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	map_bh(bh, inode->i_sb, map.m_pblk);
1876*4882a593Smuzhiyun 	ext4_update_bh_state(bh, map.m_flags);
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	if (buffer_unwritten(bh)) {
1879*4882a593Smuzhiyun 		/* A delayed write to an unwritten bh should be marked
1880*4882a593Smuzhiyun 		 * new and mapped.  Mapped ensures that we don't call
1881*4882a593Smuzhiyun 		 * get_block multiple times when we write to the same
1882*4882a593Smuzhiyun 		 * offset, and new ensures that we properly zero out
1883*4882a593Smuzhiyun 		 * partial writes.
1884*4882a593Smuzhiyun 		 */
1885*4882a593Smuzhiyun 		set_buffer_new(bh);
1886*4882a593Smuzhiyun 		set_buffer_mapped(bh);
1887*4882a593Smuzhiyun 	}
1888*4882a593Smuzhiyun 	return 0;
1889*4882a593Smuzhiyun }
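/*
 * Minimal sketch (standalone C): ext4_da_map_blocks() above maps delayed
 * buffers to a sentinel block number so b_blocknr is obviously bogus:
 * ~0xffff, or ~0 if the filesystem is large enough that ~0xffff could be a
 * real block.  The same check with a 64-bit sector type:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long invalid = ~0xffffULL;	/* 0xffffffffffff0000 */
	unsigned long long blocks_count = 1ULL << 32;	/* example fs size */

	if (invalid < blocks_count)	/* sentinel could be a real block */
		invalid = ~0ULL;
	printf("delayed buffers get b_blocknr = 0x%llx\n", invalid);
	return 0;
}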
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun static int bget_one(handle_t *handle, struct buffer_head *bh)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun 	get_bh(bh);
1894*4882a593Smuzhiyun 	return 0;
1895*4882a593Smuzhiyun }
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun static int bput_one(handle_t *handle, struct buffer_head *bh)
1898*4882a593Smuzhiyun {
1899*4882a593Smuzhiyun 	put_bh(bh);
1900*4882a593Smuzhiyun 	return 0;
1901*4882a593Smuzhiyun }
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun static int __ext4_journalled_writepage(struct page *page,
1904*4882a593Smuzhiyun 				       unsigned int len)
1905*4882a593Smuzhiyun {
1906*4882a593Smuzhiyun 	struct address_space *mapping = page->mapping;
1907*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
1908*4882a593Smuzhiyun 	struct buffer_head *page_bufs = NULL;
1909*4882a593Smuzhiyun 	handle_t *handle = NULL;
1910*4882a593Smuzhiyun 	int ret = 0, err = 0;
1911*4882a593Smuzhiyun 	int inline_data = ext4_has_inline_data(inode);
1912*4882a593Smuzhiyun 	struct buffer_head *inode_bh = NULL;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	ClearPageChecked(page);
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	if (inline_data) {
1917*4882a593Smuzhiyun 		BUG_ON(page->index != 0);
1918*4882a593Smuzhiyun 		BUG_ON(len > ext4_get_max_inline_size(inode));
1919*4882a593Smuzhiyun 		inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1920*4882a593Smuzhiyun 		if (inode_bh == NULL)
1921*4882a593Smuzhiyun 			goto out;
1922*4882a593Smuzhiyun 	} else {
1923*4882a593Smuzhiyun 		page_bufs = page_buffers(page);
1924*4882a593Smuzhiyun 		if (!page_bufs) {
1925*4882a593Smuzhiyun 			BUG();
1926*4882a593Smuzhiyun 			goto out;
1927*4882a593Smuzhiyun 		}
1928*4882a593Smuzhiyun 		ext4_walk_page_buffers(handle, page_bufs, 0, len,
1929*4882a593Smuzhiyun 				       NULL, bget_one);
1930*4882a593Smuzhiyun 	}
1931*4882a593Smuzhiyun 	/*
1932*4882a593Smuzhiyun 	 * We need to release the page lock before we start the
1933*4882a593Smuzhiyun 	 * journal, so grab a reference so the page won't disappear
1934*4882a593Smuzhiyun 	 * out from under us.
1935*4882a593Smuzhiyun 	 */
1936*4882a593Smuzhiyun 	get_page(page);
1937*4882a593Smuzhiyun 	unlock_page(page);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1940*4882a593Smuzhiyun 				    ext4_writepage_trans_blocks(inode));
1941*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
1942*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
1943*4882a593Smuzhiyun 		put_page(page);
1944*4882a593Smuzhiyun 		goto out_no_pagelock;
1945*4882a593Smuzhiyun 	}
1946*4882a593Smuzhiyun 	BUG_ON(!ext4_handle_valid(handle));
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	lock_page(page);
1949*4882a593Smuzhiyun 	put_page(page);
1950*4882a593Smuzhiyun 	if (page->mapping != mapping) {
1951*4882a593Smuzhiyun 		/* The page got truncated from under us */
1952*4882a593Smuzhiyun 		ext4_journal_stop(handle);
1953*4882a593Smuzhiyun 		ret = 0;
1954*4882a593Smuzhiyun 		goto out;
1955*4882a593Smuzhiyun 	}
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	if (inline_data) {
1958*4882a593Smuzhiyun 		ret = ext4_mark_inode_dirty(handle, inode);
1959*4882a593Smuzhiyun 	} else {
1960*4882a593Smuzhiyun 		ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1961*4882a593Smuzhiyun 					     do_journal_get_write_access);
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 		err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1964*4882a593Smuzhiyun 					     write_end_fn);
1965*4882a593Smuzhiyun 	}
1966*4882a593Smuzhiyun 	if (ret == 0)
1967*4882a593Smuzhiyun 		ret = err;
1968*4882a593Smuzhiyun 	err = ext4_jbd2_inode_add_write(handle, inode, page_offset(page), len);
1969*4882a593Smuzhiyun 	if (ret == 0)
1970*4882a593Smuzhiyun 		ret = err;
1971*4882a593Smuzhiyun 	EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1972*4882a593Smuzhiyun 	err = ext4_journal_stop(handle);
1973*4882a593Smuzhiyun 	if (!ret)
1974*4882a593Smuzhiyun 		ret = err;
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1977*4882a593Smuzhiyun out:
1978*4882a593Smuzhiyun 	unlock_page(page);
1979*4882a593Smuzhiyun out_no_pagelock:
1980*4882a593Smuzhiyun 	if (!inline_data && page_bufs)
1981*4882a593Smuzhiyun 		ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1982*4882a593Smuzhiyun 				       NULL, bput_one);
1983*4882a593Smuzhiyun 	brelse(inode_bh);
1984*4882a593Smuzhiyun 	return ret;
1985*4882a593Smuzhiyun }
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun /*
1988*4882a593Smuzhiyun  * Note that we don't need to start a transaction unless we're journaling data
1989*4882a593Smuzhiyun  * Note that we don't need to start a transaction unless we're journaling data
1990*4882a593Smuzhiyun  * because we should have holes filled from ext4_page_mkwrite(). We don't even
1991*4882a593Smuzhiyun  * need to file the inode on the transaction's list in ordered mode, because if
1992*4882a593Smuzhiyun  * we are writing back data added by write(), the inode is already there, and if
1993*4882a593Smuzhiyun  * we are writing back data modified via mmap(), no one guarantees in which
1994*4882a593Smuzhiyun  * transaction the data will hit the disk. In case we are journaling data, we
1995*4882a593Smuzhiyun  * cannot start a transaction directly because transaction start ranks above the
1996*4882a593Smuzhiyun  * page lock, so we have to do some magic.
1997*4882a593Smuzhiyun  * This function can get called via...
1998*4882a593Smuzhiyun  *   - ext4_writepages after taking page lock (have journal handle)
1999*4882a593Smuzhiyun  *   - journal_submit_inode_data_buffers (no journal handle)
2000*4882a593Smuzhiyun  *   - shrink_page_list via the kswapd/direct reclaim (no journal handle)
2001*4882a593Smuzhiyun  *   - grab_page_cache when doing write_begin (have journal handle)
2002*4882a593Smuzhiyun  *
2003*4882a593Smuzhiyun  * We don't do any block allocation in this function. If we have page with
2004*4882a593Smuzhiyun  * We don't do any block allocation in this function. If we have a page with
2005*4882a593Smuzhiyun  * multiple blocks, we need to write those buffer_heads that are mapped. This
2006*4882a593Smuzhiyun  * is important for mmap-based writes. So if, with a 1K block size, we do
2007*4882a593Smuzhiyun  * truncate(f, 1024);
2008*4882a593Smuzhiyun  * a = mmap(f, 0, 4096);
2009*4882a593Smuzhiyun  * a[0] = 'a';
2010*4882a593Smuzhiyun  * truncate(f, 4096);
2011*4882a593Smuzhiyun  * then the first buffer_head in the page is mapped via the page_mkwrite
2012*4882a593Smuzhiyun  * callback, but the other buffer_heads are unmapped yet dirty (dirtied by
2013*4882a593Smuzhiyun  * do_wp_page). So writepage should write the first block. If we modify
2014*4882a593Smuzhiyun  * the mmap area beyond 1024 we will again get a page fault and the
2015*4882a593Smuzhiyun  * page_mkwrite callback will do the block allocation and mark the
2016*4882a593Smuzhiyun  * buffer_heads mapped.
2017*4882a593Smuzhiyun  * We redirty the page if it has any buffer_heads that are either delayed
2018*4882a593Smuzhiyun  * or unwritten.
2019*4882a593Smuzhiyun  *
2020*4882a593Smuzhiyun  * We can get recursively called as shown below.
2021*4882a593Smuzhiyun  *
2022*4882a593Smuzhiyun  *	ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
2023*4882a593Smuzhiyun  *		ext4_writepage()
2024*4882a593Smuzhiyun  *
2025*4882a593Smuzhiyun  * But since we don't do any block allocation we should not deadlock.
2026*4882a593Smuzhiyun  * Page also have the dirty flag cleared so we don't get recurive page_lock.
2027*4882a593Smuzhiyun  * The page also has its dirty flag cleared, so we avoid a recursive page lock. */
2028*4882a593Smuzhiyun static int ext4_writepage(struct page *page,
2029*4882a593Smuzhiyun 			  struct writeback_control *wbc)
2030*4882a593Smuzhiyun {
2031*4882a593Smuzhiyun 	int ret = 0;
2032*4882a593Smuzhiyun 	loff_t size;
2033*4882a593Smuzhiyun 	unsigned int len;
2034*4882a593Smuzhiyun 	struct buffer_head *page_bufs = NULL;
2035*4882a593Smuzhiyun 	struct inode *inode = page->mapping->host;
2036*4882a593Smuzhiyun 	struct ext4_io_submit io_submit;
2037*4882a593Smuzhiyun 	bool keep_towrite = false;
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
2040*4882a593Smuzhiyun 		inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
2041*4882a593Smuzhiyun 		unlock_page(page);
2042*4882a593Smuzhiyun 		return -EIO;
2043*4882a593Smuzhiyun 	}
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun 	trace_ext4_writepage(page);
2046*4882a593Smuzhiyun 	size = i_size_read(inode);
2047*4882a593Smuzhiyun 	if (page->index == size >> PAGE_SHIFT &&
2048*4882a593Smuzhiyun 	    !ext4_verity_in_progress(inode))
2049*4882a593Smuzhiyun 		len = size & ~PAGE_MASK;
2050*4882a593Smuzhiyun 	else
2051*4882a593Smuzhiyun 		len = PAGE_SIZE;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	/* Should never happen but for bugs in other kernel subsystems */
2054*4882a593Smuzhiyun 	if (!page_has_buffers(page)) {
2055*4882a593Smuzhiyun 		ext4_warning_inode(inode,
2056*4882a593Smuzhiyun 		   "page %lu does not have buffers attached", page->index);
2057*4882a593Smuzhiyun 		ClearPageDirty(page);
2058*4882a593Smuzhiyun 		unlock_page(page);
2059*4882a593Smuzhiyun 		return 0;
2060*4882a593Smuzhiyun 	}
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	page_bufs = page_buffers(page);
2063*4882a593Smuzhiyun 	/*
2064*4882a593Smuzhiyun 	 * We cannot do block allocation or other extent handling in this
2065*4882a593Smuzhiyun 	 * function. If there are buffers needing that, we have to redirty
2066*4882a593Smuzhiyun 	 * the page. But we may reach here when we do a journal commit via
2067*4882a593Smuzhiyun 	 * journal_submit_inode_data_buffers() and in that case we must write
2068*4882a593Smuzhiyun 	 * allocated buffers to achieve data=ordered mode guarantees.
2069*4882a593Smuzhiyun 	 *
2070*4882a593Smuzhiyun 	 * Also, if there is only one buffer per page (the fs block
2071*4882a593Smuzhiyun 	 * size == the page size), then if one buffer needs block
2072*4882a593Smuzhiyun 	 * allocation or needs to modify the extent tree to clear the
2073*4882a593Smuzhiyun 	 * unwritten flag, we know that the page can't be written at
2074*4882a593Smuzhiyun 	 * all, so we might as well refuse the write immediately.
2075*4882a593Smuzhiyun 	 * Unfortunately if the block size != page size, we can't as
2076*4882a593Smuzhiyun 	 * easily detect this case using ext4_walk_page_buffers(), but
2077*4882a593Smuzhiyun 	 * for the extremely common case, this is an optimization that
2078*4882a593Smuzhiyun 	 * skips a useless round trip through ext4_bio_write_page().
2079*4882a593Smuzhiyun 	 */
2080*4882a593Smuzhiyun 	if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
2081*4882a593Smuzhiyun 				   ext4_bh_delay_or_unwritten)) {
2082*4882a593Smuzhiyun 		redirty_page_for_writepage(wbc, page);
2083*4882a593Smuzhiyun 		if ((current->flags & PF_MEMALLOC) ||
2084*4882a593Smuzhiyun 		    (inode->i_sb->s_blocksize == PAGE_SIZE)) {
2085*4882a593Smuzhiyun 			/*
2086*4882a593Smuzhiyun 			 * For memory cleaning there's no point in writing only
2087*4882a593Smuzhiyun 			 * some buffers. So just bail out. Warn if we came here
2088*4882a593Smuzhiyun 			 * from direct reclaim.
2089*4882a593Smuzhiyun 			 */
2090*4882a593Smuzhiyun 			WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
2091*4882a593Smuzhiyun 							== PF_MEMALLOC);
2092*4882a593Smuzhiyun 			unlock_page(page);
2093*4882a593Smuzhiyun 			return 0;
2094*4882a593Smuzhiyun 		}
2095*4882a593Smuzhiyun 		keep_towrite = true;
2096*4882a593Smuzhiyun 	}
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	if (PageChecked(page) && ext4_should_journal_data(inode))
2099*4882a593Smuzhiyun 		/*
2100*4882a593Smuzhiyun 		 * It's mmapped pagecache.  Add buffers and journal it.  There
2101*4882a593Smuzhiyun 		 * doesn't seem much point in redirtying the page here.
2102*4882a593Smuzhiyun 		 */
2103*4882a593Smuzhiyun 		return __ext4_journalled_writepage(page, len);
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	ext4_io_submit_init(&io_submit, wbc);
2106*4882a593Smuzhiyun 	io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2107*4882a593Smuzhiyun 	if (!io_submit.io_end) {
2108*4882a593Smuzhiyun 		redirty_page_for_writepage(wbc, page);
2109*4882a593Smuzhiyun 		unlock_page(page);
2110*4882a593Smuzhiyun 		return -ENOMEM;
2111*4882a593Smuzhiyun 	}
2112*4882a593Smuzhiyun 	ret = ext4_bio_write_page(&io_submit, page, len, wbc, keep_towrite);
2113*4882a593Smuzhiyun 	ext4_io_submit(&io_submit);
2114*4882a593Smuzhiyun 	/* Drop io_end reference we got from init */
2115*4882a593Smuzhiyun 	ext4_put_io_end_defer(io_submit.io_end);
2116*4882a593Smuzhiyun 	return ret;
2117*4882a593Smuzhiyun }
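/*
 * Minimal sketch (standalone C): the len computation above writes a full
 * page except for the page containing EOF, which is trimmed to the in-file
 * tail.  Example with a 4K page and i_size = 10000:
 */
#include <stdio.h>

int main(void)
{
	unsigned long long size = 10000;	/* i_size */
	unsigned page_shift = 12, page_size = 4096;
	unsigned long index = 2;		/* page holding EOF: 10000 >> 12 == 2 */
	unsigned len;

	if (index == size >> page_shift)
		len = size & (page_size - 1);	/* size & ~PAGE_MASK -> 1808 */
	else
		len = page_size;
	printf("page %lu: write %u byte(s)\n", index, len);
	return 0;
}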
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
2120*4882a593Smuzhiyun {
2121*4882a593Smuzhiyun 	int len;
2122*4882a593Smuzhiyun 	loff_t size;
2123*4882a593Smuzhiyun 	int err;
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 	BUG_ON(page->index != mpd->first_page);
2126*4882a593Smuzhiyun 	clear_page_dirty_for_io(page);
2127*4882a593Smuzhiyun 	/*
2128*4882a593Smuzhiyun 	 * We have to be very careful here!  Nothing protects writeback path
2129*4882a593Smuzhiyun 	 * against i_size changes and the page can be writeably mapped into
2130*4882a593Smuzhiyun 	 * page tables. So an application can be growing i_size and writing
2131*4882a593Smuzhiyun 	 * data through mmap while writeback runs. clear_page_dirty_for_io()
2132*4882a593Smuzhiyun 	 * write-protects our page in page tables and the page cannot get
2133*4882a593Smuzhiyun 	 * written to again until we release page lock. So only after
2134*4882a593Smuzhiyun 	 * clear_page_dirty_for_io() we are safe to sample i_size for
2135*4882a593Smuzhiyun 	 * ext4_bio_write_page() to zero-out tail of the written page. We rely
2136*4882a593Smuzhiyun 	 * on the barrier provided by TestClearPageDirty in
2137*4882a593Smuzhiyun 	 * clear_page_dirty_for_io() to make sure i_size is really sampled only
2138*4882a593Smuzhiyun 	 * after page tables are updated.
2139*4882a593Smuzhiyun 	 */
2140*4882a593Smuzhiyun 	size = i_size_read(mpd->inode);
2141*4882a593Smuzhiyun 	if (page->index == size >> PAGE_SHIFT &&
2142*4882a593Smuzhiyun 	    !ext4_verity_in_progress(mpd->inode))
2143*4882a593Smuzhiyun 		len = size & ~PAGE_MASK;
2144*4882a593Smuzhiyun 	else
2145*4882a593Smuzhiyun 		len = PAGE_SIZE;
2146*4882a593Smuzhiyun 	err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false);
2147*4882a593Smuzhiyun 	if (!err)
2148*4882a593Smuzhiyun 		mpd->wbc->nr_to_write--;
2149*4882a593Smuzhiyun 	mpd->first_page++;
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	return err;
2152*4882a593Smuzhiyun }
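/*
 * Worked example for the tail-length math above (an illustrative sketch,
 * assuming PAGE_SIZE == 4096): for i_size == 10000 the last page has index
 * 10000 >> PAGE_SHIFT == 2 and is submitted with
 * len == 10000 & ~PAGE_MASK == 1808 bytes; earlier pages use the full
 * PAGE_SIZE, and ext4_bio_write_page() zeroes the tail beyond 'len'.
 * (Verity in progress forces the full-page length instead.)
 */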
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun #define BH_FLAGS (BIT(BH_Unwritten) | BIT(BH_Delay))
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun /*
2157*4882a593Smuzhiyun  * mballoc gives us at most this number of blocks...
2158*4882a593Smuzhiyun  * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
2159*4882a593Smuzhiyun  * The rest of mballoc seems to handle chunks up to full group size.
2160*4882a593Smuzhiyun  */
2161*4882a593Smuzhiyun #define MAX_WRITEPAGES_EXTENT_LEN 2048
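/*
 * Rough sizing sketch (assuming 4 KiB blocks): one mapping round therefore
 * covers at most 2048 blocks, i.e. 2048 << 12 == 8 MiB of file data,
 * before mpage_add_bh_to_extent() refuses to grow the extent any further.
 */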
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun /*
2164*4882a593Smuzhiyun  * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
2165*4882a593Smuzhiyun  *
2166*4882a593Smuzhiyun  * @mpd - extent of blocks
2167*4882a593Smuzhiyun  * @lblk - logical number of the block in the file
2168*4882a593Smuzhiyun  * @bh - buffer head we want to add to the extent
2169*4882a593Smuzhiyun  *
2170*4882a593Smuzhiyun  * The function is used to collect contiguous blocks in the same state. If the
2171*4882a593Smuzhiyun  * buffer doesn't require mapping for writeback and we haven't started the
2172*4882a593Smuzhiyun  * extent of buffers to map yet, the function returns 'true' immediately - the
2173*4882a593Smuzhiyun  * caller can write the buffer right away. Otherwise the function returns true
2174*4882a593Smuzhiyun  * if the block has been added to the extent, false if the block couldn't be
2175*4882a593Smuzhiyun  * added.
2176*4882a593Smuzhiyun  */
2177*4882a593Smuzhiyun static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
2178*4882a593Smuzhiyun 				   struct buffer_head *bh)
2179*4882a593Smuzhiyun {
2180*4882a593Smuzhiyun 	struct ext4_map_blocks *map = &mpd->map;
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	/* Buffer that doesn't need mapping for writeback? */
2183*4882a593Smuzhiyun 	if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
2184*4882a593Smuzhiyun 	    (!buffer_delay(bh) && !buffer_unwritten(bh))) {
2185*4882a593Smuzhiyun 		/* So far no extent to map => we write the buffer right away */
2186*4882a593Smuzhiyun 		if (map->m_len == 0)
2187*4882a593Smuzhiyun 			return true;
2188*4882a593Smuzhiyun 		return false;
2189*4882a593Smuzhiyun 	}
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	/* First block in the extent? */
2192*4882a593Smuzhiyun 	if (map->m_len == 0) {
2193*4882a593Smuzhiyun 		/* We cannot map unless handle is started... */
2194*4882a593Smuzhiyun 		if (!mpd->do_map)
2195*4882a593Smuzhiyun 			return false;
2196*4882a593Smuzhiyun 		map->m_lblk = lblk;
2197*4882a593Smuzhiyun 		map->m_len = 1;
2198*4882a593Smuzhiyun 		map->m_flags = bh->b_state & BH_FLAGS;
2199*4882a593Smuzhiyun 		return true;
2200*4882a593Smuzhiyun 	}
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	/* Don't go larger than mballoc is willing to allocate */
2203*4882a593Smuzhiyun 	if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
2204*4882a593Smuzhiyun 		return false;
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	/* Can we merge the block to our big extent? */
2207*4882a593Smuzhiyun 	if (lblk == map->m_lblk + map->m_len &&
2208*4882a593Smuzhiyun 	    (bh->b_state & BH_FLAGS) == map->m_flags) {
2209*4882a593Smuzhiyun 		map->m_len++;
2210*4882a593Smuzhiyun 		return true;
2211*4882a593Smuzhiyun 	}
2212*4882a593Smuzhiyun 	return false;
2213*4882a593Smuzhiyun }
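/*
 * Merge example (illustrative values only): with map->m_lblk == 100,
 * map->m_len == 3 and map->m_flags == BIT(BH_Delay), a delayed buffer at
 * lblk 103 merges and m_len grows to 4; a buffer at lblk 105, or one whose
 * BH_FLAGS state differs (unwritten instead of delayed, say), returns
 * false so that the accumulated extent gets mapped first.
 */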
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun /*
2216*4882a593Smuzhiyun  * mpage_process_page_bufs - submit page buffers for IO or add them to extent
2217*4882a593Smuzhiyun  *
2218*4882a593Smuzhiyun  * @mpd - extent of blocks for mapping
2219*4882a593Smuzhiyun  * @head - the first buffer in the page
2220*4882a593Smuzhiyun  * @bh - buffer we should start processing from
2221*4882a593Smuzhiyun  * @lblk - logical number of the block in the file corresponding to @bh
2222*4882a593Smuzhiyun  *
2223*4882a593Smuzhiyun  * Walk through page buffers from @bh up to @head (exclusive) and either submit
2224*4882a593Smuzhiyun  * the page for IO if all buffers in this page were mapped and there's no
2225*4882a593Smuzhiyun  * accumulated extent of buffers to map or add buffers in the page to the
2226*4882a593Smuzhiyun  * extent of buffers to map. The function returns 1 if the caller can continue
2227*4882a593Smuzhiyun  * by processing the next page, 0 if it should stop adding buffers to the
2228*4882a593Smuzhiyun  * extent to map because we cannot extend it anymore. It can also return value
2229*4882a593Smuzhiyun  * < 0 in case of error during IO submission.
2230*4882a593Smuzhiyun  */
2231*4882a593Smuzhiyun static int mpage_process_page_bufs(struct mpage_da_data *mpd,
2232*4882a593Smuzhiyun 				   struct buffer_head *head,
2233*4882a593Smuzhiyun 				   struct buffer_head *bh,
2234*4882a593Smuzhiyun 				   ext4_lblk_t lblk)
2235*4882a593Smuzhiyun {
2236*4882a593Smuzhiyun 	struct inode *inode = mpd->inode;
2237*4882a593Smuzhiyun 	int err;
2238*4882a593Smuzhiyun 	ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
2239*4882a593Smuzhiyun 							>> inode->i_blkbits;
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	if (ext4_verity_in_progress(inode))
2242*4882a593Smuzhiyun 		blocks = EXT_MAX_BLOCKS;
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	do {
2245*4882a593Smuzhiyun 		BUG_ON(buffer_locked(bh));
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 		if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
2248*4882a593Smuzhiyun 			/* Found extent to map? */
2249*4882a593Smuzhiyun 			if (mpd->map.m_len)
2250*4882a593Smuzhiyun 				return 0;
2251*4882a593Smuzhiyun 			/* Buffer needs mapping and handle is not started? */
2252*4882a593Smuzhiyun 			if (!mpd->do_map)
2253*4882a593Smuzhiyun 				return 0;
2254*4882a593Smuzhiyun 			/* Everything mapped so far and we hit EOF */
2255*4882a593Smuzhiyun 			break;
2256*4882a593Smuzhiyun 		}
2257*4882a593Smuzhiyun 	} while (lblk++, (bh = bh->b_this_page) != head);
2258*4882a593Smuzhiyun 	/* So far everything mapped? Submit the page for IO. */
2259*4882a593Smuzhiyun 	if (mpd->map.m_len == 0) {
2260*4882a593Smuzhiyun 		err = mpage_submit_page(mpd, head->b_page);
2261*4882a593Smuzhiyun 		if (err < 0)
2262*4882a593Smuzhiyun 			return err;
2263*4882a593Smuzhiyun 	}
2264*4882a593Smuzhiyun 	if (lblk >= blocks) {
2265*4882a593Smuzhiyun 		mpd->scanned_until_end = 1;
2266*4882a593Smuzhiyun 		return 0;
2267*4882a593Smuzhiyun 	}
2268*4882a593Smuzhiyun 	return 1;
2269*4882a593Smuzhiyun }
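/*
 * Return-convention sketch (minimal, mirroring the caller in
 * mpage_prepare_extent_to_map()):
 *
 *	err = mpage_process_page_bufs(mpd, head, head, lblk);
 *	if (err <= 0)
 *		goto out;	// < 0: IO error, 0: stop scanning
 *	err = 0;		// 1: continue with the next page
 */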
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun /*
2272*4882a593Smuzhiyun  * mpage_process_page - update page buffers corresponding to changed extent and
2273*4882a593Smuzhiyun  *		       may submit fully mapped page for IO
2274*4882a593Smuzhiyun  *
2275*4882a593Smuzhiyun  * @mpd		- description of extent to map, on return next extent to map
2276*4882a593Smuzhiyun  * @m_lblk	- logical block mapping.
2277*4882a593Smuzhiyun  * @m_pblk	- corresponding physical mapping.
2278*4882a593Smuzhiyun  * @map_bh	- determines on return whether this page requires any further
2279*4882a593Smuzhiyun  *		  mapping or not.
2280*4882a593Smuzhiyun  * Scan given page buffers corresponding to changed extent and update buffer
2281*4882a593Smuzhiyun  * state according to new extent state.
2282*4882a593Smuzhiyun  * We map delalloc buffers to their physical location, clear unwritten bits.
2283*4882a593Smuzhiyun  * If the given page is not fully mapped, we update @map to the next extent in
2284*4882a593Smuzhiyun  * the given page that needs mapping & return @map_bh as true.
2285*4882a593Smuzhiyun  */
2286*4882a593Smuzhiyun static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
2287*4882a593Smuzhiyun 			      ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
2288*4882a593Smuzhiyun 			      bool *map_bh)
2289*4882a593Smuzhiyun {
2290*4882a593Smuzhiyun 	struct buffer_head *head, *bh;
2291*4882a593Smuzhiyun 	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2292*4882a593Smuzhiyun 	ext4_lblk_t lblk = *m_lblk;
2293*4882a593Smuzhiyun 	ext4_fsblk_t pblock = *m_pblk;
2294*4882a593Smuzhiyun 	int err = 0;
2295*4882a593Smuzhiyun 	int blkbits = mpd->inode->i_blkbits;
2296*4882a593Smuzhiyun 	ssize_t io_end_size = 0;
2297*4882a593Smuzhiyun 	struct ext4_io_end_vec *io_end_vec = ext4_last_io_end_vec(io_end);
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 	bh = head = page_buffers(page);
2300*4882a593Smuzhiyun 	do {
2301*4882a593Smuzhiyun 		if (lblk < mpd->map.m_lblk)
2302*4882a593Smuzhiyun 			continue;
2303*4882a593Smuzhiyun 		if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2304*4882a593Smuzhiyun 			/*
2305*4882a593Smuzhiyun 			 * Buffer after end of mapped extent.
2306*4882a593Smuzhiyun 			 * Find next buffer in the page to map.
2307*4882a593Smuzhiyun 			 */
2308*4882a593Smuzhiyun 			mpd->map.m_len = 0;
2309*4882a593Smuzhiyun 			mpd->map.m_flags = 0;
2310*4882a593Smuzhiyun 			io_end_vec->size += io_end_size;
2311*4882a593Smuzhiyun 			io_end_size = 0;
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 			err = mpage_process_page_bufs(mpd, head, bh, lblk);
2314*4882a593Smuzhiyun 			if (err > 0)
2315*4882a593Smuzhiyun 				err = 0;
2316*4882a593Smuzhiyun 			if (!err && mpd->map.m_len && mpd->map.m_lblk > lblk) {
2317*4882a593Smuzhiyun 				io_end_vec = ext4_alloc_io_end_vec(io_end);
2318*4882a593Smuzhiyun 				if (IS_ERR(io_end_vec)) {
2319*4882a593Smuzhiyun 					err = PTR_ERR(io_end_vec);
2320*4882a593Smuzhiyun 					goto out;
2321*4882a593Smuzhiyun 				}
2322*4882a593Smuzhiyun 				io_end_vec->offset = (loff_t)mpd->map.m_lblk << blkbits;
2323*4882a593Smuzhiyun 			}
2324*4882a593Smuzhiyun 			*map_bh = true;
2325*4882a593Smuzhiyun 			goto out;
2326*4882a593Smuzhiyun 		}
2327*4882a593Smuzhiyun 		if (buffer_delay(bh)) {
2328*4882a593Smuzhiyun 			clear_buffer_delay(bh);
2329*4882a593Smuzhiyun 			bh->b_blocknr = pblock++;
2330*4882a593Smuzhiyun 		}
2331*4882a593Smuzhiyun 		clear_buffer_unwritten(bh);
2332*4882a593Smuzhiyun 		io_end_size += (1 << blkbits);
2333*4882a593Smuzhiyun 	} while (lblk++, (bh = bh->b_this_page) != head);
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 	io_end_vec->size += io_end_size;
2336*4882a593Smuzhiyun 	io_end_size = 0;
2337*4882a593Smuzhiyun 	*map_bh = false;
2338*4882a593Smuzhiyun out:
2339*4882a593Smuzhiyun 	*m_lblk = lblk;
2340*4882a593Smuzhiyun 	*m_pblk = pblock;
2341*4882a593Smuzhiyun 	return err;
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun /*
2345*4882a593Smuzhiyun  * mpage_map_buffers - update buffers corresponding to changed extent and
2346*4882a593Smuzhiyun  *		       submit fully mapped pages for IO
2347*4882a593Smuzhiyun  *
2348*4882a593Smuzhiyun  * @mpd - description of extent to map, on return next extent to map
2349*4882a593Smuzhiyun  *
2350*4882a593Smuzhiyun  * Scan buffers corresponding to changed extent (we expect corresponding pages
2351*4882a593Smuzhiyun  * to be already locked) and update buffer state according to new extent state.
2352*4882a593Smuzhiyun  * We map delalloc buffers to their physical location, clear unwritten bits,
2353*4882a593Smuzhiyun  * and mark buffers as uninit when we perform writes to unwritten extents
2354*4882a593Smuzhiyun  * and do extent conversion after IO is finished. If the last page is not fully
2355*4882a593Smuzhiyun  * mapped, we update @map to the next extent in the last page that needs
2356*4882a593Smuzhiyun  * mapping. Otherwise we submit the page for IO.
2357*4882a593Smuzhiyun  */
2358*4882a593Smuzhiyun static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2359*4882a593Smuzhiyun {
2360*4882a593Smuzhiyun 	struct pagevec pvec;
2361*4882a593Smuzhiyun 	int nr_pages, i;
2362*4882a593Smuzhiyun 	struct inode *inode = mpd->inode;
2363*4882a593Smuzhiyun 	int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
2364*4882a593Smuzhiyun 	pgoff_t start, end;
2365*4882a593Smuzhiyun 	ext4_lblk_t lblk;
2366*4882a593Smuzhiyun 	ext4_fsblk_t pblock;
2367*4882a593Smuzhiyun 	int err;
2368*4882a593Smuzhiyun 	bool map_bh = false;
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun 	start = mpd->map.m_lblk >> bpp_bits;
2371*4882a593Smuzhiyun 	end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2372*4882a593Smuzhiyun 	lblk = start << bpp_bits;
2373*4882a593Smuzhiyun 	pblock = mpd->map.m_pblk;
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 	pagevec_init(&pvec);
2376*4882a593Smuzhiyun 	while (start <= end) {
2377*4882a593Smuzhiyun 		nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping,
2378*4882a593Smuzhiyun 						&start, end);
2379*4882a593Smuzhiyun 		if (nr_pages == 0)
2380*4882a593Smuzhiyun 			break;
2381*4882a593Smuzhiyun 		for (i = 0; i < nr_pages; i++) {
2382*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 			err = mpage_process_page(mpd, page, &lblk, &pblock,
2385*4882a593Smuzhiyun 						 &map_bh);
2386*4882a593Smuzhiyun 			/*
2387*4882a593Smuzhiyun 			 * If map_bh is true, the page may require further bh
2388*4882a593Smuzhiyun 			 * mapping, or the page may already have been submitted
2389*4882a593Smuzhiyun 			 * for IO, so return to the caller for further extent mapping.
2390*4882a593Smuzhiyun 			 */
2391*4882a593Smuzhiyun 			if (err < 0 || map_bh)
2392*4882a593Smuzhiyun 				goto out;
2393*4882a593Smuzhiyun 			/* Page fully mapped - let IO run! */
2394*4882a593Smuzhiyun 			err = mpage_submit_page(mpd, page);
2395*4882a593Smuzhiyun 			if (err < 0)
2396*4882a593Smuzhiyun 				goto out;
2397*4882a593Smuzhiyun 		}
2398*4882a593Smuzhiyun 		pagevec_release(&pvec);
2399*4882a593Smuzhiyun 	}
2400*4882a593Smuzhiyun 	/* Extent fully mapped and matches with page boundary. We are done. */
2401*4882a593Smuzhiyun 	mpd->map.m_len = 0;
2402*4882a593Smuzhiyun 	mpd->map.m_flags = 0;
2403*4882a593Smuzhiyun 	return 0;
2404*4882a593Smuzhiyun out:
2405*4882a593Smuzhiyun 	pagevec_release(&pvec);
2406*4882a593Smuzhiyun 	return err;
2407*4882a593Smuzhiyun }
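/*
 * Shift-math example (illustrative, assuming 1 KiB blocks on a 4 KiB page,
 * so bpp_bits == PAGE_SHIFT - i_blkbits == 2): an extent with
 * m_lblk == 8 and m_len == 8 covers logical blocks 8..15, giving page
 * indices start == 8 >> 2 == 2 through end == 15 >> 2 == 3, and the first
 * block of page 'start' is lblk == start << 2 == 8.
 */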
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2410*4882a593Smuzhiyun {
2411*4882a593Smuzhiyun 	struct inode *inode = mpd->inode;
2412*4882a593Smuzhiyun 	struct ext4_map_blocks *map = &mpd->map;
2413*4882a593Smuzhiyun 	int get_blocks_flags;
2414*4882a593Smuzhiyun 	int err, dioread_nolock;
2415*4882a593Smuzhiyun 
2416*4882a593Smuzhiyun 	trace_ext4_da_write_pages_extent(inode, map);
2417*4882a593Smuzhiyun 	/*
2418*4882a593Smuzhiyun 	 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2419*4882a593Smuzhiyun 	 * to convert an unwritten extent to be initialized (in the case
2420*4882a593Smuzhiyun 	 * where we have written into one or more preallocated blocks).  It is
2421*4882a593Smuzhiyun 	 * possible that we're going to need more metadata blocks than
2422*4882a593Smuzhiyun 	 * previously reserved. However we must not fail because we're in
2423*4882a593Smuzhiyun 	 * writeback and there is nothing we can do about it so it might result
2424*4882a593Smuzhiyun 	 * in data loss.  So use reserved blocks to allocate metadata if
2425*4882a593Smuzhiyun 	 * possible.
2426*4882a593Smuzhiyun 	 *
2427*4882a593Smuzhiyun 	 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2428*4882a593Smuzhiyun 	 * the blocks in question are delalloc blocks.  This indicates
2429*4882a593Smuzhiyun 	 * that the blocks and quotas have already been checked when
2430*4882a593Smuzhiyun 	 * the data was copied into the page cache.
2431*4882a593Smuzhiyun 	 */
2432*4882a593Smuzhiyun 	get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2433*4882a593Smuzhiyun 			   EXT4_GET_BLOCKS_METADATA_NOFAIL |
2434*4882a593Smuzhiyun 			   EXT4_GET_BLOCKS_IO_SUBMIT;
2435*4882a593Smuzhiyun 	dioread_nolock = ext4_should_dioread_nolock(inode);
2436*4882a593Smuzhiyun 	if (dioread_nolock)
2437*4882a593Smuzhiyun 		get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2438*4882a593Smuzhiyun 	if (map->m_flags & BIT(BH_Delay))
2439*4882a593Smuzhiyun 		get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2442*4882a593Smuzhiyun 	if (err < 0)
2443*4882a593Smuzhiyun 		return err;
2444*4882a593Smuzhiyun 	if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) {
2445*4882a593Smuzhiyun 		if (!mpd->io_submit.io_end->handle &&
2446*4882a593Smuzhiyun 		    ext4_handle_valid(handle)) {
2447*4882a593Smuzhiyun 			mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2448*4882a593Smuzhiyun 			handle->h_rsv_handle = NULL;
2449*4882a593Smuzhiyun 		}
2450*4882a593Smuzhiyun 		ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2451*4882a593Smuzhiyun 	}
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	BUG_ON(map->m_len == 0);
2454*4882a593Smuzhiyun 	return 0;
2455*4882a593Smuzhiyun }
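/*
 * Flag-composition example (illustrative): for a delayed-allocation extent
 * on a dioread_nolock mount, the ext4_map_blocks() call above runs with
 *
 *	get_blocks_flags == EXT4_GET_BLOCKS_CREATE |
 *			    EXT4_GET_BLOCKS_METADATA_NOFAIL |
 *			    EXT4_GET_BLOCKS_IO_SUBMIT |
 *			    EXT4_GET_BLOCKS_IO_CREATE_EXT |
 *			    EXT4_GET_BLOCKS_DELALLOC_RESERVE;
 */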
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun /*
2458*4882a593Smuzhiyun  * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2459*4882a593Smuzhiyun  *				 mpd->len and submit pages underlying it for IO
2460*4882a593Smuzhiyun  *
2461*4882a593Smuzhiyun  * @handle - handle for journal operations
2462*4882a593Smuzhiyun  * @mpd - extent to map
2463*4882a593Smuzhiyun  * @give_up_on_write - we set this to true iff there is a fatal error and there
2464*4882a593Smuzhiyun  *                     is no hope of writing the data. The caller should discard
2465*4882a593Smuzhiyun  *                     dirty pages to avoid infinite loops.
2466*4882a593Smuzhiyun  *
2467*4882a593Smuzhiyun  * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2468*4882a593Smuzhiyun  * delayed, blocks are allocated, if it is unwritten, we may need to convert
2469*4882a593Smuzhiyun  * delayed, blocks are allocated; if it is unwritten, we may need to convert
2470*4882a593Smuzhiyun  * extent. Note that we need not map all the described range since allocation
2471*4882a593Smuzhiyun  * can return fewer blocks or the range is covered by more unwritten extents. We
2472*4882a593Smuzhiyun  * cannot map more because we are limited by reserved transaction credits. On
2473*4882a593Smuzhiyun  * the other hand we always make sure that the last touched page is fully
2474*4882a593Smuzhiyun  * mapped so that it can be written out (and thus forward progress is
2475*4882a593Smuzhiyun  * guaranteed). After mapping we submit all mapped pages for IO.
2476*4882a593Smuzhiyun  */
2477*4882a593Smuzhiyun static int mpage_map_and_submit_extent(handle_t *handle,
2478*4882a593Smuzhiyun 				       struct mpage_da_data *mpd,
2479*4882a593Smuzhiyun 				       bool *give_up_on_write)
2480*4882a593Smuzhiyun {
2481*4882a593Smuzhiyun 	struct inode *inode = mpd->inode;
2482*4882a593Smuzhiyun 	struct ext4_map_blocks *map = &mpd->map;
2483*4882a593Smuzhiyun 	int err;
2484*4882a593Smuzhiyun 	loff_t disksize;
2485*4882a593Smuzhiyun 	int progress = 0;
2486*4882a593Smuzhiyun 	ext4_io_end_t *io_end = mpd->io_submit.io_end;
2487*4882a593Smuzhiyun 	struct ext4_io_end_vec *io_end_vec;
2488*4882a593Smuzhiyun 
2489*4882a593Smuzhiyun 	io_end_vec = ext4_alloc_io_end_vec(io_end);
2490*4882a593Smuzhiyun 	if (IS_ERR(io_end_vec))
2491*4882a593Smuzhiyun 		return PTR_ERR(io_end_vec);
2492*4882a593Smuzhiyun 	io_end_vec->offset = ((loff_t)map->m_lblk) << inode->i_blkbits;
2493*4882a593Smuzhiyun 	do {
2494*4882a593Smuzhiyun 		err = mpage_map_one_extent(handle, mpd);
2495*4882a593Smuzhiyun 		if (err < 0) {
2496*4882a593Smuzhiyun 			struct super_block *sb = inode->i_sb;
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 			if (ext4_forced_shutdown(EXT4_SB(sb)) ||
2499*4882a593Smuzhiyun 			    ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED))
2500*4882a593Smuzhiyun 				goto invalidate_dirty_pages;
2501*4882a593Smuzhiyun 			/*
2502*4882a593Smuzhiyun 			 * Let the upper layers retry transient errors.
2503*4882a593Smuzhiyun 			 * In the case of ENOSPC, if ext4_count_free_clusters()
2504*4882a593Smuzhiyun 			 * is non-zero, a commit should free up blocks.
2505*4882a593Smuzhiyun 			 */
2506*4882a593Smuzhiyun 			if ((err == -ENOMEM) ||
2507*4882a593Smuzhiyun 			    (err == -ENOSPC && ext4_count_free_clusters(sb))) {
2508*4882a593Smuzhiyun 				if (progress)
2509*4882a593Smuzhiyun 					goto update_disksize;
2510*4882a593Smuzhiyun 				return err;
2511*4882a593Smuzhiyun 			}
2512*4882a593Smuzhiyun 			ext4_msg(sb, KERN_CRIT,
2513*4882a593Smuzhiyun 				 "Delayed block allocation failed for "
2514*4882a593Smuzhiyun 				 "inode %lu at logical offset %llu with"
2515*4882a593Smuzhiyun 				 " max blocks %u with error %d",
2516*4882a593Smuzhiyun 				 inode->i_ino,
2517*4882a593Smuzhiyun 				 (unsigned long long)map->m_lblk,
2518*4882a593Smuzhiyun 				 (unsigned)map->m_len, -err);
2519*4882a593Smuzhiyun 			ext4_msg(sb, KERN_CRIT,
2520*4882a593Smuzhiyun 				 "This should not happen!! Data will "
2521*4882a593Smuzhiyun 				 "be lost\n");
2522*4882a593Smuzhiyun 			if (err == -ENOSPC)
2523*4882a593Smuzhiyun 				ext4_print_free_blocks(inode);
2524*4882a593Smuzhiyun 		invalidate_dirty_pages:
2525*4882a593Smuzhiyun 			*give_up_on_write = true;
2526*4882a593Smuzhiyun 			return err;
2527*4882a593Smuzhiyun 		}
2528*4882a593Smuzhiyun 		progress = 1;
2529*4882a593Smuzhiyun 		/*
2530*4882a593Smuzhiyun 		 * Update buffer state, submit mapped pages, and get us new
2531*4882a593Smuzhiyun 		 * extent to map
2532*4882a593Smuzhiyun 		 */
2533*4882a593Smuzhiyun 		err = mpage_map_and_submit_buffers(mpd);
2534*4882a593Smuzhiyun 		if (err < 0)
2535*4882a593Smuzhiyun 			goto update_disksize;
2536*4882a593Smuzhiyun 	} while (map->m_len);
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun update_disksize:
2539*4882a593Smuzhiyun 	/*
2540*4882a593Smuzhiyun 	 * Update on-disk size after IO is submitted.  Races with
2541*4882a593Smuzhiyun 	 * truncate are avoided by checking i_size under i_data_sem.
2542*4882a593Smuzhiyun 	 */
2543*4882a593Smuzhiyun 	disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
2544*4882a593Smuzhiyun 	if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
2545*4882a593Smuzhiyun 		int err2;
2546*4882a593Smuzhiyun 		loff_t i_size;
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 		down_write(&EXT4_I(inode)->i_data_sem);
2549*4882a593Smuzhiyun 		i_size = i_size_read(inode);
2550*4882a593Smuzhiyun 		if (disksize > i_size)
2551*4882a593Smuzhiyun 			disksize = i_size;
2552*4882a593Smuzhiyun 		if (disksize > EXT4_I(inode)->i_disksize)
2553*4882a593Smuzhiyun 			EXT4_I(inode)->i_disksize = disksize;
2554*4882a593Smuzhiyun 		up_write(&EXT4_I(inode)->i_data_sem);
2555*4882a593Smuzhiyun 		err2 = ext4_mark_inode_dirty(handle, inode);
2556*4882a593Smuzhiyun 		if (err2) {
2557*4882a593Smuzhiyun 			ext4_error_err(inode->i_sb, -err2,
2558*4882a593Smuzhiyun 				       "Failed to mark inode %lu dirty",
2559*4882a593Smuzhiyun 				       inode->i_ino);
2560*4882a593Smuzhiyun 		}
2561*4882a593Smuzhiyun 		if (!err)
2562*4882a593Smuzhiyun 			err = err2;
2563*4882a593Smuzhiyun 	}
2564*4882a593Smuzhiyun 	return err;
2565*4882a593Smuzhiyun }
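/*
 * Disksize example (illustrative, PAGE_SHIFT == 12): after submitting
 * everything below mpd->first_page == 5, the on-disk size candidate is
 * 5 << 12 == 20480 bytes, which is then clamped to i_size under
 * i_data_sem so that a racing truncate cannot make i_disksize overshoot.
 */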
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun /*
2568*4882a593Smuzhiyun  * Calculate the total number of credits to reserve for one writepages
2569*4882a593Smuzhiyun  * iteration. This is called from ext4_writepages(). We map an extent of
2570*4882a593Smuzhiyun  * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2571*4882a593Smuzhiyun  * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2572*4882a593Smuzhiyun  * bpp - 1 blocks in bpp different extents.
2573*4882a593Smuzhiyun  */
2574*4882a593Smuzhiyun static int ext4_da_writepages_trans_blocks(struct inode *inode)
2575*4882a593Smuzhiyun {
2576*4882a593Smuzhiyun 	int bpp = ext4_journal_blocks_per_page(inode);
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun 	return ext4_meta_trans_blocks(inode,
2579*4882a593Smuzhiyun 				MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2580*4882a593Smuzhiyun }
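/*
 * Credit-sizing example (a sketch of the worst cases): with
 * blocksize == PAGE_SIZE, bpp == 1 and we reserve credits for mapping
 * MAX_WRITEPAGES_EXTENT_LEN == 2048 blocks in a single extent. With 1 KiB
 * blocks on a 4 KiB page, bpp == 4 and the worst case becomes 2048 + 3
 * blocks spread over 4 discontiguous extents, one per buffer of the final
 * partial page.
 */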
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun /*
2583*4882a593Smuzhiyun  * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2584*4882a593Smuzhiyun  * 				 and underlying extent to map
2585*4882a593Smuzhiyun  *
2586*4882a593Smuzhiyun  * @mpd - where to look for pages
2587*4882a593Smuzhiyun  *
2588*4882a593Smuzhiyun  * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2589*4882a593Smuzhiyun  * IO immediately. When we find a page which isn't mapped we start accumulating
2590*4882a593Smuzhiyun  * an extent of buffers underlying these pages that needs mapping (formed by
2591*4882a593Smuzhiyun  * either delayed or unwritten buffers). We also lock the pages containing
2592*4882a593Smuzhiyun  * these buffers. The extent found is returned in @mpd structure (starting at
2593*4882a593Smuzhiyun  * mpd->lblk with length mpd->len blocks).
2594*4882a593Smuzhiyun  *
2595*4882a593Smuzhiyun  * Note that this function can attach bios to one io_end structure which are
2596*4882a593Smuzhiyun  * neither logically nor physically contiguous. Although it may seem like an
2597*4882a593Smuzhiyun  * unnecessary complication, it is actually inevitable in blocksize < pagesize
2598*4882a593Smuzhiyun  * case as we need to track IO to all buffers underlying a page in one io_end.
2599*4882a593Smuzhiyun  */
2600*4882a593Smuzhiyun static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2601*4882a593Smuzhiyun {
2602*4882a593Smuzhiyun 	struct address_space *mapping = mpd->inode->i_mapping;
2603*4882a593Smuzhiyun 	struct pagevec pvec;
2604*4882a593Smuzhiyun 	unsigned int nr_pages;
2605*4882a593Smuzhiyun 	long left = mpd->wbc->nr_to_write;
2606*4882a593Smuzhiyun 	pgoff_t index = mpd->first_page;
2607*4882a593Smuzhiyun 	pgoff_t end = mpd->last_page;
2608*4882a593Smuzhiyun 	xa_mark_t tag;
2609*4882a593Smuzhiyun 	int i, err = 0;
2610*4882a593Smuzhiyun 	int blkbits = mpd->inode->i_blkbits;
2611*4882a593Smuzhiyun 	ext4_lblk_t lblk;
2612*4882a593Smuzhiyun 	struct buffer_head *head;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2615*4882a593Smuzhiyun 		tag = PAGECACHE_TAG_TOWRITE;
2616*4882a593Smuzhiyun 	else
2617*4882a593Smuzhiyun 		tag = PAGECACHE_TAG_DIRTY;
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 	pagevec_init(&pvec);
2620*4882a593Smuzhiyun 	mpd->map.m_len = 0;
2621*4882a593Smuzhiyun 	mpd->next_page = index;
2622*4882a593Smuzhiyun 	while (index <= end) {
2623*4882a593Smuzhiyun 		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
2624*4882a593Smuzhiyun 				tag);
2625*4882a593Smuzhiyun 		if (nr_pages == 0)
2626*4882a593Smuzhiyun 			break;
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun 		for (i = 0; i < nr_pages; i++) {
2629*4882a593Smuzhiyun 			struct page *page = pvec.pages[i];
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 			/*
2632*4882a593Smuzhiyun 			 * Accumulated enough dirty pages? This doesn't apply
2633*4882a593Smuzhiyun 			 * to WB_SYNC_ALL mode. For integrity sync we have to
2634*4882a593Smuzhiyun 			 * keep going because someone may be concurrently
2635*4882a593Smuzhiyun 			 * dirtying pages, and we might have synced a lot of
2636*4882a593Smuzhiyun 			 * newly appeared dirty pages, but have not synced all
2637*4882a593Smuzhiyun 			 * of the old dirty pages.
2638*4882a593Smuzhiyun 			 */
2639*4882a593Smuzhiyun 			if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2640*4882a593Smuzhiyun 				goto out;
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 			/* If we can't merge this page, we are done. */
2643*4882a593Smuzhiyun 			if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2644*4882a593Smuzhiyun 				goto out;
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 			lock_page(page);
2647*4882a593Smuzhiyun 			/*
2648*4882a593Smuzhiyun 			 * If the page is no longer dirty, or its mapping no
2649*4882a593Smuzhiyun 			 * longer corresponds to inode we are writing (which
2650*4882a593Smuzhiyun 			 * means it has been truncated or invalidated), or the
2651*4882a593Smuzhiyun 			 * page is already under writeback and we are not doing
2652*4882a593Smuzhiyun 			 * a data integrity writeback, skip the page
2653*4882a593Smuzhiyun 			 */
2654*4882a593Smuzhiyun 			if (!PageDirty(page) ||
2655*4882a593Smuzhiyun 			    (PageWriteback(page) &&
2656*4882a593Smuzhiyun 			     (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2657*4882a593Smuzhiyun 			    unlikely(page->mapping != mapping)) {
2658*4882a593Smuzhiyun 				unlock_page(page);
2659*4882a593Smuzhiyun 				continue;
2660*4882a593Smuzhiyun 			}
2661*4882a593Smuzhiyun 
2662*4882a593Smuzhiyun 			wait_on_page_writeback(page);
2663*4882a593Smuzhiyun 			BUG_ON(PageWriteback(page));
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 			/*
2666*4882a593Smuzhiyun 			 * Should never happen but for buggy code in
2667*4882a593Smuzhiyun 			 * other subsystems that call
2668*4882a593Smuzhiyun 			 * set_page_dirty() without properly warning
2669*4882a593Smuzhiyun 			 * the file system first.  See [1] for more
2670*4882a593Smuzhiyun 			 * information.
2671*4882a593Smuzhiyun 			 *
2672*4882a593Smuzhiyun 			 * [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz
2673*4882a593Smuzhiyun 			 */
2674*4882a593Smuzhiyun 			if (!page_has_buffers(page)) {
2675*4882a593Smuzhiyun 				ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index);
2676*4882a593Smuzhiyun 				ClearPageDirty(page);
2677*4882a593Smuzhiyun 				unlock_page(page);
2678*4882a593Smuzhiyun 				continue;
2679*4882a593Smuzhiyun 			}
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 			if (mpd->map.m_len == 0)
2682*4882a593Smuzhiyun 				mpd->first_page = page->index;
2683*4882a593Smuzhiyun 			mpd->next_page = page->index + 1;
2684*4882a593Smuzhiyun 			/* Add all dirty buffers to mpd */
2685*4882a593Smuzhiyun 			lblk = ((ext4_lblk_t)page->index) <<
2686*4882a593Smuzhiyun 				(PAGE_SHIFT - blkbits);
2687*4882a593Smuzhiyun 			head = page_buffers(page);
2688*4882a593Smuzhiyun 			err = mpage_process_page_bufs(mpd, head, head, lblk);
2689*4882a593Smuzhiyun 			if (err <= 0)
2690*4882a593Smuzhiyun 				goto out;
2691*4882a593Smuzhiyun 			err = 0;
2692*4882a593Smuzhiyun 			left--;
2693*4882a593Smuzhiyun 		}
2694*4882a593Smuzhiyun 		pagevec_release(&pvec);
2695*4882a593Smuzhiyun 		cond_resched();
2696*4882a593Smuzhiyun 	}
2697*4882a593Smuzhiyun 	mpd->scanned_until_end = 1;
2698*4882a593Smuzhiyun 	return 0;
2699*4882a593Smuzhiyun out:
2700*4882a593Smuzhiyun 	pagevec_release(&pvec);
2701*4882a593Smuzhiyun 	return err;
2702*4882a593Smuzhiyun }
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun static int ext4_writepages(struct address_space *mapping,
2705*4882a593Smuzhiyun 			   struct writeback_control *wbc)
2706*4882a593Smuzhiyun {
2707*4882a593Smuzhiyun 	pgoff_t	writeback_index = 0;
2708*4882a593Smuzhiyun 	long nr_to_write = wbc->nr_to_write;
2709*4882a593Smuzhiyun 	int range_whole = 0;
2710*4882a593Smuzhiyun 	int cycled = 1;
2711*4882a593Smuzhiyun 	handle_t *handle = NULL;
2712*4882a593Smuzhiyun 	struct mpage_da_data mpd;
2713*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2714*4882a593Smuzhiyun 	int needed_blocks, rsv_blocks = 0, ret = 0;
2715*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2716*4882a593Smuzhiyun 	struct blk_plug plug;
2717*4882a593Smuzhiyun 	bool give_up_on_write = false;
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2720*4882a593Smuzhiyun 		return -EIO;
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	percpu_down_read(&sbi->s_writepages_rwsem);
2723*4882a593Smuzhiyun 	trace_ext4_writepages(inode, wbc);
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 	/*
2726*4882a593Smuzhiyun 	 * No pages to write? This is mainly a kludge to avoid starting
2727*4882a593Smuzhiyun 	 * a transaction for special inodes like the journal inode on last iput()
2728*4882a593Smuzhiyun 	 * because that could violate lock ordering on umount.
2729*4882a593Smuzhiyun 	 */
2730*4882a593Smuzhiyun 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2731*4882a593Smuzhiyun 		goto out_writepages;
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode)) {
2734*4882a593Smuzhiyun 		ret = generic_writepages(mapping, wbc);
2735*4882a593Smuzhiyun 		goto out_writepages;
2736*4882a593Smuzhiyun 	}
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	/*
2739*4882a593Smuzhiyun 	 * If the filesystem has aborted, it is read-only, so return
2740*4882a593Smuzhiyun 	 * right away instead of dumping stack traces later on that
2741*4882a593Smuzhiyun 	 * will obscure the real source of the problem.  We test
2742*4882a593Smuzhiyun 	 * EXT4_MF_FS_ABORTED instead of SB_RDONLY in sb->s_flags because
2743*4882a593Smuzhiyun 	 * the latter could be true if the filesystem is mounted
2744*4882a593Smuzhiyun 	 * read-only, and in that case, ext4_writepages should
2745*4882a593Smuzhiyun 	 * *never* be called, so if that ever happens, we would want
2746*4882a593Smuzhiyun 	 * the stack trace.
2747*4882a593Smuzhiyun 	 */
2748*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) ||
2749*4882a593Smuzhiyun 		     ext4_test_mount_flag(inode->i_sb, EXT4_MF_FS_ABORTED))) {
2750*4882a593Smuzhiyun 		ret = -EROFS;
2751*4882a593Smuzhiyun 		goto out_writepages;
2752*4882a593Smuzhiyun 	}
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	/*
2755*4882a593Smuzhiyun 	 * If we have inline data and arrive here, it means that
2756*4882a593Smuzhiyun 	 * we will soon create the block for the 1st page, so
2757*4882a593Smuzhiyun 	 * we'd better clear the inline data here.
2758*4882a593Smuzhiyun 	 */
2759*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode)) {
2760*4882a593Smuzhiyun 		/* Just inode will be modified... */
2761*4882a593Smuzhiyun 		handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2762*4882a593Smuzhiyun 		if (IS_ERR(handle)) {
2763*4882a593Smuzhiyun 			ret = PTR_ERR(handle);
2764*4882a593Smuzhiyun 			goto out_writepages;
2765*4882a593Smuzhiyun 		}
2766*4882a593Smuzhiyun 		BUG_ON(ext4_test_inode_state(inode,
2767*4882a593Smuzhiyun 				EXT4_STATE_MAY_INLINE_DATA));
2768*4882a593Smuzhiyun 		ext4_destroy_inline_data(handle, inode);
2769*4882a593Smuzhiyun 		ext4_journal_stop(handle);
2770*4882a593Smuzhiyun 	}
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun 	if (ext4_should_dioread_nolock(inode)) {
2773*4882a593Smuzhiyun 		/*
2774*4882a593Smuzhiyun 		 * We may need to convert up to one extent per block in
2775*4882a593Smuzhiyun 		 * the page and we may dirty the inode.
2776*4882a593Smuzhiyun 		 */
2777*4882a593Smuzhiyun 		rsv_blocks = 1 + ext4_chunk_trans_blocks(inode,
2778*4882a593Smuzhiyun 						PAGE_SIZE >> inode->i_blkbits);
2779*4882a593Smuzhiyun 	}
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun 	if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2782*4882a593Smuzhiyun 		range_whole = 1;
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	if (wbc->range_cyclic) {
2785*4882a593Smuzhiyun 		writeback_index = mapping->writeback_index;
2786*4882a593Smuzhiyun 		if (writeback_index)
2787*4882a593Smuzhiyun 			cycled = 0;
2788*4882a593Smuzhiyun 		mpd.first_page = writeback_index;
2789*4882a593Smuzhiyun 		mpd.last_page = -1;
2790*4882a593Smuzhiyun 	} else {
2791*4882a593Smuzhiyun 		mpd.first_page = wbc->range_start >> PAGE_SHIFT;
2792*4882a593Smuzhiyun 		mpd.last_page = wbc->range_end >> PAGE_SHIFT;
2793*4882a593Smuzhiyun 	}
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	mpd.inode = inode;
2796*4882a593Smuzhiyun 	mpd.wbc = wbc;
2797*4882a593Smuzhiyun 	ext4_io_submit_init(&mpd.io_submit, wbc);
2798*4882a593Smuzhiyun retry:
2799*4882a593Smuzhiyun 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2800*4882a593Smuzhiyun 		tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2801*4882a593Smuzhiyun 	blk_start_plug(&plug);
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun 	/*
2804*4882a593Smuzhiyun 	 * First writeback pages that don't need mapping - we can avoid
2805*4882a593Smuzhiyun 	 * starting a transaction unnecessarily and also avoid being blocked
2806*4882a593Smuzhiyun 	 * in the block layer on device congestion while having a transaction
2807*4882a593Smuzhiyun 	 * started.
2808*4882a593Smuzhiyun 	 */
2809*4882a593Smuzhiyun 	mpd.do_map = 0;
2810*4882a593Smuzhiyun 	mpd.scanned_until_end = 0;
2811*4882a593Smuzhiyun 	mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2812*4882a593Smuzhiyun 	if (!mpd.io_submit.io_end) {
2813*4882a593Smuzhiyun 		ret = -ENOMEM;
2814*4882a593Smuzhiyun 		goto unplug;
2815*4882a593Smuzhiyun 	}
2816*4882a593Smuzhiyun 	ret = mpage_prepare_extent_to_map(&mpd);
2817*4882a593Smuzhiyun 	/* Unlock pages we didn't use */
2818*4882a593Smuzhiyun 	mpage_release_unused_pages(&mpd, false);
2819*4882a593Smuzhiyun 	/* Submit prepared bio */
2820*4882a593Smuzhiyun 	ext4_io_submit(&mpd.io_submit);
2821*4882a593Smuzhiyun 	ext4_put_io_end_defer(mpd.io_submit.io_end);
2822*4882a593Smuzhiyun 	mpd.io_submit.io_end = NULL;
2823*4882a593Smuzhiyun 	if (ret < 0)
2824*4882a593Smuzhiyun 		goto unplug;
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	while (!mpd.scanned_until_end && wbc->nr_to_write > 0) {
2827*4882a593Smuzhiyun 		/* For each extent of pages we use new io_end */
2828*4882a593Smuzhiyun 		mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2829*4882a593Smuzhiyun 		if (!mpd.io_submit.io_end) {
2830*4882a593Smuzhiyun 			ret = -ENOMEM;
2831*4882a593Smuzhiyun 			break;
2832*4882a593Smuzhiyun 		}
2833*4882a593Smuzhiyun 
2834*4882a593Smuzhiyun 		/*
2835*4882a593Smuzhiyun 		 * We have two constraints: we find one extent to map, and we
2836*4882a593Smuzhiyun 		 * must always write out the whole page (this makes a difference
2837*4882a593Smuzhiyun 		 * when blocksize < pagesize) so that we don't block on IO when we
2838*4882a593Smuzhiyun 		 * try to write out the rest of the page. Journalled mode is
2839*4882a593Smuzhiyun 		 * not supported by delalloc.
2840*4882a593Smuzhiyun 		 */
2841*4882a593Smuzhiyun 		BUG_ON(ext4_should_journal_data(inode));
2842*4882a593Smuzhiyun 		needed_blocks = ext4_da_writepages_trans_blocks(inode);
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 		/* start a new transaction */
2845*4882a593Smuzhiyun 		handle = ext4_journal_start_with_reserve(inode,
2846*4882a593Smuzhiyun 				EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2847*4882a593Smuzhiyun 		if (IS_ERR(handle)) {
2848*4882a593Smuzhiyun 			ret = PTR_ERR(handle);
2849*4882a593Smuzhiyun 			ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2850*4882a593Smuzhiyun 			       "%ld pages, ino %lu; err %d", __func__,
2851*4882a593Smuzhiyun 				wbc->nr_to_write, inode->i_ino, ret);
2852*4882a593Smuzhiyun 			/* Release allocated io_end */
2853*4882a593Smuzhiyun 			ext4_put_io_end(mpd.io_submit.io_end);
2854*4882a593Smuzhiyun 			mpd.io_submit.io_end = NULL;
2855*4882a593Smuzhiyun 			break;
2856*4882a593Smuzhiyun 		}
2857*4882a593Smuzhiyun 		mpd.do_map = 1;
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 		trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2860*4882a593Smuzhiyun 		ret = mpage_prepare_extent_to_map(&mpd);
2861*4882a593Smuzhiyun 		if (!ret && mpd.map.m_len)
2862*4882a593Smuzhiyun 			ret = mpage_map_and_submit_extent(handle, &mpd,
2863*4882a593Smuzhiyun 					&give_up_on_write);
2864*4882a593Smuzhiyun 		/*
2865*4882a593Smuzhiyun 		 * Caution: If the handle is synchronous,
2866*4882a593Smuzhiyun 		 * ext4_journal_stop() can wait for transaction commit
2867*4882a593Smuzhiyun 		 * to finish which may depend on writeback of pages to
2868*4882a593Smuzhiyun 		 * complete or on page lock to be released.  In that
2869*4882a593Smuzhiyun 		 * case, we have to wait until after we have
2870*4882a593Smuzhiyun 		 * submitted all the IO, released page locks we hold,
2871*4882a593Smuzhiyun 		 * and dropped io_end reference (for extent conversion
2872*4882a593Smuzhiyun 		 * to be able to complete) before stopping the handle.
2873*4882a593Smuzhiyun 		 */
2874*4882a593Smuzhiyun 		if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
2875*4882a593Smuzhiyun 			ext4_journal_stop(handle);
2876*4882a593Smuzhiyun 			handle = NULL;
2877*4882a593Smuzhiyun 			mpd.do_map = 0;
2878*4882a593Smuzhiyun 		}
2879*4882a593Smuzhiyun 		/* Unlock pages we didn't use */
2880*4882a593Smuzhiyun 		mpage_release_unused_pages(&mpd, give_up_on_write);
2881*4882a593Smuzhiyun 		/* Submit prepared bio */
2882*4882a593Smuzhiyun 		ext4_io_submit(&mpd.io_submit);
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 		/*
2885*4882a593Smuzhiyun 		 * Drop our io_end reference we got from init. We have
2886*4882a593Smuzhiyun 		 * to be careful and use deferred io_end finishing if
2887*4882a593Smuzhiyun 		 * we are still holding the transaction as we can
2888*4882a593Smuzhiyun 		 * release the last reference to io_end which may end
2889*4882a593Smuzhiyun 		 * up doing unwritten extent conversion.
2890*4882a593Smuzhiyun 		 */
2891*4882a593Smuzhiyun 		if (handle) {
2892*4882a593Smuzhiyun 			ext4_put_io_end_defer(mpd.io_submit.io_end);
2893*4882a593Smuzhiyun 			ext4_journal_stop(handle);
2894*4882a593Smuzhiyun 		} else
2895*4882a593Smuzhiyun 			ext4_put_io_end(mpd.io_submit.io_end);
2896*4882a593Smuzhiyun 		mpd.io_submit.io_end = NULL;
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 		if (ret == -ENOSPC && sbi->s_journal) {
2899*4882a593Smuzhiyun 			/*
2900*4882a593Smuzhiyun 			 * Commit the transaction which would
2901*4882a593Smuzhiyun 			 * free blocks released in the transaction
2902*4882a593Smuzhiyun 			 * and try again
2903*4882a593Smuzhiyun 			 */
2904*4882a593Smuzhiyun 			jbd2_journal_force_commit_nested(sbi->s_journal);
2905*4882a593Smuzhiyun 			ret = 0;
2906*4882a593Smuzhiyun 			continue;
2907*4882a593Smuzhiyun 		}
2908*4882a593Smuzhiyun 		/* Fatal error - ENOMEM, EIO... */
2909*4882a593Smuzhiyun 		if (ret)
2910*4882a593Smuzhiyun 			break;
2911*4882a593Smuzhiyun 	}
2912*4882a593Smuzhiyun unplug:
2913*4882a593Smuzhiyun 	blk_finish_plug(&plug);
2914*4882a593Smuzhiyun 	if (!ret && !cycled && wbc->nr_to_write > 0) {
2915*4882a593Smuzhiyun 		cycled = 1;
2916*4882a593Smuzhiyun 		mpd.last_page = writeback_index - 1;
2917*4882a593Smuzhiyun 		mpd.first_page = 0;
2918*4882a593Smuzhiyun 		goto retry;
2919*4882a593Smuzhiyun 	}
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	/* Update index */
2922*4882a593Smuzhiyun 	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2923*4882a593Smuzhiyun 		/*
2924*4882a593Smuzhiyun 		 * Set the writeback_index so that range_cyclic
2925*4882a593Smuzhiyun 		 * mode will write it back later
2926*4882a593Smuzhiyun 		 */
2927*4882a593Smuzhiyun 		mapping->writeback_index = mpd.first_page;
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun out_writepages:
2930*4882a593Smuzhiyun 	trace_ext4_writepages_result(inode, wbc, ret,
2931*4882a593Smuzhiyun 				     nr_to_write - wbc->nr_to_write);
2932*4882a593Smuzhiyun 	percpu_up_read(&sbi->s_writepages_rwsem);
2933*4882a593Smuzhiyun 	return ret;
2934*4882a593Smuzhiyun }
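/*
 * range_cyclic walk-through (illustrative): with
 * mapping->writeback_index == 42, the first pass scans pages 42..EOF with
 * cycled == 0; if nothing failed and nr_to_write remains, the retry path
 * rewinds to scan pages 0..41 with cycled == 1, completing one full cycle
 * over the mapping.
 */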
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun static int ext4_dax_writepages(struct address_space *mapping,
2937*4882a593Smuzhiyun 			       struct writeback_control *wbc)
2938*4882a593Smuzhiyun {
2939*4882a593Smuzhiyun 	int ret;
2940*4882a593Smuzhiyun 	long nr_to_write = wbc->nr_to_write;
2941*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2942*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2943*4882a593Smuzhiyun 
2944*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
2945*4882a593Smuzhiyun 		return -EIO;
2946*4882a593Smuzhiyun 
2947*4882a593Smuzhiyun 	percpu_down_read(&sbi->s_writepages_rwsem);
2948*4882a593Smuzhiyun 	trace_ext4_writepages(inode, wbc);
2949*4882a593Smuzhiyun 
2950*4882a593Smuzhiyun 	ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
2951*4882a593Smuzhiyun 	trace_ext4_writepages_result(inode, wbc, ret,
2952*4882a593Smuzhiyun 				     nr_to_write - wbc->nr_to_write);
2953*4882a593Smuzhiyun 	percpu_up_read(&sbi->s_writepages_rwsem);
2954*4882a593Smuzhiyun 	return ret;
2955*4882a593Smuzhiyun }
2956*4882a593Smuzhiyun 
2957*4882a593Smuzhiyun static int ext4_nonda_switch(struct super_block *sb)
2958*4882a593Smuzhiyun {
2959*4882a593Smuzhiyun 	s64 free_clusters, dirty_clusters;
2960*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(sb);
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 	/*
2963*4882a593Smuzhiyun 	 * Switch to non-delalloc mode if we are running low
2964*4882a593Smuzhiyun 	 * on free blocks. The free block accounting via percpu
2965*4882a593Smuzhiyun 	 * counters can get slightly wrong with percpu_counter_batch getting
2966*4882a593Smuzhiyun 	 * accumulated on each CPU without updating global counters.
2967*4882a593Smuzhiyun 	 * Delalloc needs accurate free block accounting. So switch
2968*4882a593Smuzhiyun 	 * to non-delalloc when we are near the error range.
2969*4882a593Smuzhiyun 	 */
2970*4882a593Smuzhiyun 	free_clusters =
2971*4882a593Smuzhiyun 		percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2972*4882a593Smuzhiyun 	dirty_clusters =
2973*4882a593Smuzhiyun 		percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2974*4882a593Smuzhiyun 	/*
2975*4882a593Smuzhiyun 	 * Start pushing delalloc when 1/2 of free blocks are dirty.
2976*4882a593Smuzhiyun 	 */
2977*4882a593Smuzhiyun 	if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2978*4882a593Smuzhiyun 		try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2979*4882a593Smuzhiyun 
2980*4882a593Smuzhiyun 	if (2 * free_clusters < 3 * dirty_clusters ||
2981*4882a593Smuzhiyun 	    free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2982*4882a593Smuzhiyun 		/*
2983*4882a593Smuzhiyun 		 * free block count is less than 150% of dirty blocks,
2984*4882a593Smuzhiyun 		 * or free blocks are below the watermark
2985*4882a593Smuzhiyun 		 */
2986*4882a593Smuzhiyun 		return 1;
2987*4882a593Smuzhiyun 	}
2988*4882a593Smuzhiyun 	return 0;
2989*4882a593Smuzhiyun }
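/*
 * Threshold example (illustrative numbers): with free_clusters == 1000,
 * background writeback is kicked once dirty_clusters exceeds 500 (the
 * "1/2 of free blocks are dirty" test), and the switch to non-delalloc
 * happens once 2 * 1000 < 3 * dirty_clusters, i.e. from
 * dirty_clusters == 667 on, or earlier if
 * 1000 < dirty_clusters + EXT4_FREECLUSTERS_WATERMARK.
 */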
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun /* We always reserve for an inode update; the superblock could be there too */
2992*4882a593Smuzhiyun static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun 	if (likely(ext4_has_feature_large_file(inode->i_sb)))
2995*4882a593Smuzhiyun 		return 1;
2996*4882a593Smuzhiyun 
2997*4882a593Smuzhiyun 	if (pos + len <= 0x7fffffffULL)
2998*4882a593Smuzhiyun 		return 1;
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	/* We might need to update the superblock to set LARGE_FILE */
3001*4882a593Smuzhiyun 	return 2;
3002*4882a593Smuzhiyun }
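/*
 * Example (illustrative): without the LARGE_FILE feature, a write whose
 * pos + len goes beyond 0x7fffffff (just under 2 GiB) reserves 2 credits,
 * one for the inode and one for the superblock update that may set
 * LARGE_FILE; every other case reserves a single credit.
 */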
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
3005*4882a593Smuzhiyun 			       loff_t pos, unsigned len, unsigned flags,
3006*4882a593Smuzhiyun 			       struct page **pagep, void **fsdata)
3007*4882a593Smuzhiyun {
3008*4882a593Smuzhiyun 	int ret, retries = 0;
3009*4882a593Smuzhiyun 	struct page *page;
3010*4882a593Smuzhiyun 	pgoff_t index;
3011*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3012*4882a593Smuzhiyun 	handle_t *handle;
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
3015*4882a593Smuzhiyun 		return -EIO;
3016*4882a593Smuzhiyun 
3017*4882a593Smuzhiyun 	index = pos >> PAGE_SHIFT;
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	if (ext4_nonda_switch(inode->i_sb) || S_ISLNK(inode->i_mode) ||
3020*4882a593Smuzhiyun 	    ext4_verity_in_progress(inode)) {
3021*4882a593Smuzhiyun 		*fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
3022*4882a593Smuzhiyun 		return ext4_write_begin(file, mapping, pos,
3023*4882a593Smuzhiyun 					len, flags, pagep, fsdata);
3024*4882a593Smuzhiyun 	}
3025*4882a593Smuzhiyun 	*fsdata = (void *)0;
3026*4882a593Smuzhiyun 	if (trace_android_fs_datawrite_start_enabled()) {
3027*4882a593Smuzhiyun 		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 		path = android_fstrace_get_pathname(pathbuf,
3030*4882a593Smuzhiyun 						    MAX_TRACE_PATHBUF_LEN,
3031*4882a593Smuzhiyun 						    inode);
3032*4882a593Smuzhiyun 		trace_android_fs_datawrite_start(inode, pos, len,
3033*4882a593Smuzhiyun 						 current->pid,
3034*4882a593Smuzhiyun 						 path, current->comm);
3035*4882a593Smuzhiyun 	}
3036*4882a593Smuzhiyun 	trace_ext4_da_write_begin(inode, pos, len, flags);
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
3039*4882a593Smuzhiyun 		ret = ext4_da_write_inline_data_begin(mapping, inode,
3040*4882a593Smuzhiyun 						      pos, len, flags,
3041*4882a593Smuzhiyun 						      pagep, fsdata);
3042*4882a593Smuzhiyun 		if (ret < 0)
3043*4882a593Smuzhiyun 			return ret;
3044*4882a593Smuzhiyun 		if (ret == 1)
3045*4882a593Smuzhiyun 			return 0;
3046*4882a593Smuzhiyun 	}
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 	/*
3049*4882a593Smuzhiyun 	 * grab_cache_page_write_begin() can take a long time if the
3050*4882a593Smuzhiyun 	 * system is thrashing due to memory pressure, or if the page
3051*4882a593Smuzhiyun 	 * is being written back.  So grab it first before we start
3052*4882a593Smuzhiyun 	 * the transaction handle.  This also allows us to allocate
3053*4882a593Smuzhiyun 	 * the page (if needed) without using GFP_NOFS.
3054*4882a593Smuzhiyun 	 */
3055*4882a593Smuzhiyun retry_grab:
3056*4882a593Smuzhiyun 	page = grab_cache_page_write_begin(mapping, index, flags);
3057*4882a593Smuzhiyun 	if (!page)
3058*4882a593Smuzhiyun 		return -ENOMEM;
3059*4882a593Smuzhiyun 	unlock_page(page);
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	/*
3062*4882a593Smuzhiyun 	 * With delayed allocation, we don't log the i_disksize update
3063*4882a593Smuzhiyun 	 * if there is delayed block allocation. But we still need
3064*4882a593Smuzhiyun 	 * to journal the i_disksize update if a write to the end of
3065*4882a593Smuzhiyun 	 * the file hits an already mapped buffer.
3066*4882a593Smuzhiyun 	 */
3067*4882a593Smuzhiyun retry_journal:
3068*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
3069*4882a593Smuzhiyun 				ext4_da_write_credits(inode, pos, len));
3070*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
3071*4882a593Smuzhiyun 		put_page(page);
3072*4882a593Smuzhiyun 		return PTR_ERR(handle);
3073*4882a593Smuzhiyun 	}
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	lock_page(page);
3076*4882a593Smuzhiyun 	if (page->mapping != mapping) {
3077*4882a593Smuzhiyun 		/* The page got truncated from under us */
3078*4882a593Smuzhiyun 		unlock_page(page);
3079*4882a593Smuzhiyun 		put_page(page);
3080*4882a593Smuzhiyun 		ext4_journal_stop(handle);
3081*4882a593Smuzhiyun 		goto retry_grab;
3082*4882a593Smuzhiyun 	}
3083*4882a593Smuzhiyun 	/* In case writeback began while the page was unlocked */
3084*4882a593Smuzhiyun 	wait_for_stable_page(page);
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun #ifdef CONFIG_FS_ENCRYPTION
3087*4882a593Smuzhiyun 	ret = ext4_block_write_begin(page, pos, len,
3088*4882a593Smuzhiyun 				     ext4_da_get_block_prep);
3089*4882a593Smuzhiyun #else
3090*4882a593Smuzhiyun 	ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
3091*4882a593Smuzhiyun #endif
3092*4882a593Smuzhiyun 	if (ret < 0) {
3093*4882a593Smuzhiyun 		unlock_page(page);
3094*4882a593Smuzhiyun 		ext4_journal_stop(handle);
3095*4882a593Smuzhiyun 		/*
3096*4882a593Smuzhiyun 		 * block_write_begin may have instantiated a few blocks
3097*4882a593Smuzhiyun 		 * outside i_size.  Trim these off again. Don't need
3098*4882a593Smuzhiyun 		 * i_size_read because we hold i_mutex.
3099*4882a593Smuzhiyun 		 */
3100*4882a593Smuzhiyun 		if (pos + len > inode->i_size)
3101*4882a593Smuzhiyun 			ext4_truncate_failed_write(inode);
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 		if (ret == -ENOSPC &&
3104*4882a593Smuzhiyun 		    ext4_should_retry_alloc(inode->i_sb, &retries))
3105*4882a593Smuzhiyun 			goto retry_journal;
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 		put_page(page);
3108*4882a593Smuzhiyun 		return ret;
3109*4882a593Smuzhiyun 	}
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	*pagep = page;
3112*4882a593Smuzhiyun 	return ret;
3113*4882a593Smuzhiyun }
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun /*
3116*4882a593Smuzhiyun  * Check if we should update i_disksize
3117*4882a593Smuzhiyun  * when write to the end of file but not require block allocation
3118*4882a593Smuzhiyun  */
3119*4882a593Smuzhiyun static int ext4_da_should_update_i_disksize(struct page *page,
3120*4882a593Smuzhiyun 					    unsigned long offset)
3121*4882a593Smuzhiyun {
3122*4882a593Smuzhiyun 	struct buffer_head *bh;
3123*4882a593Smuzhiyun 	struct inode *inode = page->mapping->host;
3124*4882a593Smuzhiyun 	unsigned int idx;
3125*4882a593Smuzhiyun 	int i;
3126*4882a593Smuzhiyun 
3127*4882a593Smuzhiyun 	bh = page_buffers(page);
3128*4882a593Smuzhiyun 	idx = offset >> inode->i_blkbits;
3129*4882a593Smuzhiyun 
3130*4882a593Smuzhiyun 	for (i = 0; i < idx; i++)
3131*4882a593Smuzhiyun 		bh = bh->b_this_page;
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun 	if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
3134*4882a593Smuzhiyun 		return 0;
3135*4882a593Smuzhiyun 	return 1;
3136*4882a593Smuzhiyun }
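/*
 * Index example (a sketch, assuming 1 KiB blocks): for offset == 2500
 * within the page, idx == 2500 >> 10 == 2, so the walk above stops at the
 * third buffer_head; only if that buffer is mapped and neither delayed
 * nor unwritten do we report that i_disksize may be updated right away
 * rather than waiting for writeback to allocate the block.
 */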
3137*4882a593Smuzhiyun 
3138*4882a593Smuzhiyun static int ext4_da_write_end(struct file *file,
3139*4882a593Smuzhiyun 			     struct address_space *mapping,
3140*4882a593Smuzhiyun 			     loff_t pos, unsigned len, unsigned copied,
3141*4882a593Smuzhiyun 			     struct page *page, void *fsdata)
3142*4882a593Smuzhiyun {
3143*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3144*4882a593Smuzhiyun 	int ret = 0, ret2;
3145*4882a593Smuzhiyun 	handle_t *handle = ext4_journal_current_handle();
3146*4882a593Smuzhiyun 	loff_t new_i_size;
3147*4882a593Smuzhiyun 	unsigned long start, end;
3148*4882a593Smuzhiyun 	int write_mode = (int)(unsigned long)fsdata;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun 	if (write_mode == FALL_BACK_TO_NONDELALLOC)
3151*4882a593Smuzhiyun 		return ext4_write_end(file, mapping, pos,
3152*4882a593Smuzhiyun 				      len, copied, page, fsdata);
3153*4882a593Smuzhiyun 
3154*4882a593Smuzhiyun 	trace_android_fs_datawrite_end(inode, pos, len);
3155*4882a593Smuzhiyun 	trace_ext4_da_write_end(inode, pos, len, copied);
3156*4882a593Smuzhiyun 	start = pos & (PAGE_SIZE - 1);
3157*4882a593Smuzhiyun 	end = start + copied - 1;
3158*4882a593Smuzhiyun 
3159*4882a593Smuzhiyun 	/*
3160*4882a593Smuzhiyun 	 * Since we are holding the inode lock, we are sure i_disksize <=
3161*4882a593Smuzhiyun 	 * i_size. We also know that if i_disksize < i_size, there are
3162*4882a593Smuzhiyun 	 * delalloc writes pending in the range up to i_size. If the end of
3163*4882a593Smuzhiyun 	 * the current write is <= i_size, there's no need to touch
3164*4882a593Smuzhiyun 	 * i_disksize since writeback will push i_disksize upto i_size
3165*4882a593Smuzhiyun 	 * eventually. If the end of the current write is > i_size and
3166*4882a593Smuzhiyun 	 * inside an allocated block (ext4_da_should_update_i_disksize()
3167*4882a593Smuzhiyun 	 * check), we need to update i_disksize here as neither
3168*4882a593Smuzhiyun 	 * ext4_writepage() nor certain ext4_writepages() paths not
3169*4882a593Smuzhiyun 	 * allocating blocks update i_disksize.
3170*4882a593Smuzhiyun 	 *
3171*4882a593Smuzhiyun 	 * Note that we defer inode dirtying to generic_write_end() /
3172*4882a593Smuzhiyun 	 * ext4_da_write_inline_data_end().
3173*4882a593Smuzhiyun 	 */
3174*4882a593Smuzhiyun 	new_i_size = pos + copied;
3175*4882a593Smuzhiyun 	if (copied && new_i_size > inode->i_size) {
3176*4882a593Smuzhiyun 		if (ext4_has_inline_data(inode) ||
3177*4882a593Smuzhiyun 		    ext4_da_should_update_i_disksize(page, end))
3178*4882a593Smuzhiyun 			ext4_update_i_disksize(inode, new_i_size);
3179*4882a593Smuzhiyun 	}
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun 	if (write_mode != CONVERT_INLINE_DATA &&
3182*4882a593Smuzhiyun 	    ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
3183*4882a593Smuzhiyun 	    ext4_has_inline_data(inode))
3184*4882a593Smuzhiyun 		ret = ext4_da_write_inline_data_end(inode, pos, len, copied,
3185*4882a593Smuzhiyun 						     page);
3186*4882a593Smuzhiyun 	else
3187*4882a593Smuzhiyun 		ret = generic_write_end(file, mapping, pos, len, copied,
3188*4882a593Smuzhiyun 							page, fsdata);
3189*4882a593Smuzhiyun 
3190*4882a593Smuzhiyun 	copied = ret;
3191*4882a593Smuzhiyun 	ret2 = ext4_journal_stop(handle);
3192*4882a593Smuzhiyun 	if (unlikely(ret2 && !ret))
3193*4882a593Smuzhiyun 		ret = ret2;
3194*4882a593Smuzhiyun 
3195*4882a593Smuzhiyun 	return ret ? ret : copied;
3196*4882a593Smuzhiyun }
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun /*
3199*4882a593Smuzhiyun  * Force all delayed allocation blocks to be allocated for a given inode.
3200*4882a593Smuzhiyun  */
3201*4882a593Smuzhiyun int ext4_alloc_da_blocks(struct inode *inode)
3202*4882a593Smuzhiyun {
3203*4882a593Smuzhiyun 	trace_ext4_alloc_da_blocks(inode);
3204*4882a593Smuzhiyun 
3205*4882a593Smuzhiyun 	if (!EXT4_I(inode)->i_reserved_data_blocks)
3206*4882a593Smuzhiyun 		return 0;
3207*4882a593Smuzhiyun 
3208*4882a593Smuzhiyun 	/*
3209*4882a593Smuzhiyun 	 * We do something simple for now.  The filemap_flush() will
3210*4882a593Smuzhiyun 	 * also start triggering a write of the data blocks, which is
3211*4882a593Smuzhiyun 	 * not strictly speaking necessary (and for users of
3212*4882a593Smuzhiyun 	 * laptop_mode, not even desirable).  However, to do otherwise
3213*4882a593Smuzhiyun 	 * would require replicating code paths in:
3214*4882a593Smuzhiyun 	 *
3215*4882a593Smuzhiyun 	 * ext4_writepages() ->
3216*4882a593Smuzhiyun 	 *    write_cache_pages() ---> (via passed in callback function)
3217*4882a593Smuzhiyun 	 *        __mpage_da_writepage() -->
3218*4882a593Smuzhiyun 	 *           mpage_add_bh_to_extent()
3219*4882a593Smuzhiyun 	 *           mpage_da_map_blocks()
3220*4882a593Smuzhiyun 	 *
3221*4882a593Smuzhiyun 	 * The problem is that write_cache_pages(), located in
3222*4882a593Smuzhiyun 	 * mm/page-writeback.c, marks pages clean in preparation for
3223*4882a593Smuzhiyun 	 * doing I/O, which is not desirable if we're not planning on
3224*4882a593Smuzhiyun 	 * doing I/O at all.
3225*4882a593Smuzhiyun 	 *
3226*4882a593Smuzhiyun 	 * We could call write_cache_pages(), and then redirty all of
3227*4882a593Smuzhiyun 	 * the pages by calling redirty_page_for_writepage() but that
3228*4882a593Smuzhiyun 	 * would be ugly in the extreme.  So instead we would need to
3229*4882a593Smuzhiyun 	 * replicate parts of the code in the above functions,
3230*4882a593Smuzhiyun 	 * simplifying them because we wouldn't actually intend to
3231*4882a593Smuzhiyun 	 * write out the pages, but rather only collect contiguous
3232*4882a593Smuzhiyun 	 * logical block extents, call the multi-block allocator, and
3233*4882a593Smuzhiyun 	 * then update the buffer heads with the block allocations.
3234*4882a593Smuzhiyun 	 *
3235*4882a593Smuzhiyun 	 * For now, though, we'll cheat by calling filemap_flush(),
3236*4882a593Smuzhiyun 	 * which will map the blocks, and start the I/O, but not
3237*4882a593Smuzhiyun 	 * actually wait for the I/O to complete.
3238*4882a593Smuzhiyun 	 */
3239*4882a593Smuzhiyun 	return filemap_flush(inode->i_mapping);
3240*4882a593Smuzhiyun }
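
/*
 * Illustrative sketch (not in the original source): a caller that also
 * needs durability must still wait for the I/O that filemap_flush()
 * started above. The helper name below is hypothetical.
 */
#if 0
static int example_alloc_and_wait(struct inode *inode)
{
	int err = ext4_alloc_da_blocks(inode);	/* map blocks, start I/O */

	if (err)
		return err;
	return filemap_fdatawait(inode->i_mapping);	/* wait for the I/O */
}
#endif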
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun /*
3243*4882a593Smuzhiyun  * bmap() is special.  It gets used by applications such as lilo and by
3244*4882a593Smuzhiyun  * the swapper to find the on-disk block of a specific piece of data.
3245*4882a593Smuzhiyun  *
3246*4882a593Smuzhiyun  * Naturally, this is dangerous if the block concerned is still in the
3247*4882a593Smuzhiyun  * journal.  If somebody makes a swapfile on an ext4 data-journaling
3248*4882a593Smuzhiyun  * filesystem and enables swap, then they may get a nasty shock when the
3249*4882a593Smuzhiyun  * data being swapped to that swapfile suddenly gets overwritten by
3250*4882a593Smuzhiyun  * the original zeros written out previously to the journal and
3251*4882a593Smuzhiyun  * awaiting writeback in the kernel's buffer cache.
3252*4882a593Smuzhiyun  *
3253*4882a593Smuzhiyun  * So, if we see any bmap calls here on a modified, data-journaled file,
3254*4882a593Smuzhiyun  * take extra steps to flush any blocks which might be in the cache.
3255*4882a593Smuzhiyun  */
3256*4882a593Smuzhiyun static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3257*4882a593Smuzhiyun {
3258*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3259*4882a593Smuzhiyun 	journal_t *journal;
3260*4882a593Smuzhiyun 	sector_t ret = 0;
3261*4882a593Smuzhiyun 	int err;
3262*4882a593Smuzhiyun 
3263*4882a593Smuzhiyun 	inode_lock_shared(inode);
3264*4882a593Smuzhiyun 	/*
3265*4882a593Smuzhiyun 	 * We can get here for an inline file via the FIBMAP ioctl
3266*4882a593Smuzhiyun 	 */
3267*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode))
3268*4882a593Smuzhiyun 		goto out;
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
3271*4882a593Smuzhiyun 			test_opt(inode->i_sb, DELALLOC)) {
3272*4882a593Smuzhiyun 		/*
3273*4882a593Smuzhiyun 		 * With delalloc we want to sync the file
3274*4882a593Smuzhiyun 		 * so that we can make sure blocks are
3275*4882a593Smuzhiyun 		 * allocated for the file
3276*4882a593Smuzhiyun 		 */
3277*4882a593Smuzhiyun 		filemap_write_and_wait(mapping);
3278*4882a593Smuzhiyun 	}
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	if (EXT4_JOURNAL(inode) &&
3281*4882a593Smuzhiyun 	    ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
3282*4882a593Smuzhiyun 		/*
3283*4882a593Smuzhiyun 		 * This is a REALLY heavyweight approach, but the use of
3284*4882a593Smuzhiyun 		 * bmap on dirty files is expected to be extremely rare:
3285*4882a593Smuzhiyun 		 * only if we run lilo or swapon on a freshly made file
3286*4882a593Smuzhiyun 		 * do we expect this to happen.
3287*4882a593Smuzhiyun 		 *
3288*4882a593Smuzhiyun 		 * (bmap requires CAP_SYS_RAWIO so this does not
3289*4882a593Smuzhiyun 		 * represent an unprivileged user DoS attack --- we'd be
3290*4882a593Smuzhiyun 		 * in trouble if mortal users could trigger this path at
3291*4882a593Smuzhiyun 		 * will.)
3292*4882a593Smuzhiyun 		 *
3293*4882a593Smuzhiyun 		 * NB. EXT4_STATE_JDATA is not set on files other than
3294*4882a593Smuzhiyun 		 * regular files.  If somebody wants to bmap a directory
3295*4882a593Smuzhiyun 		 * or symlink and gets confused because the buffer
3296*4882a593Smuzhiyun 		 * hasn't yet been flushed to disk, they deserve
3297*4882a593Smuzhiyun 		 * everything they get.
3298*4882a593Smuzhiyun 		 */
3299*4882a593Smuzhiyun 
3300*4882a593Smuzhiyun 		ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
3301*4882a593Smuzhiyun 		journal = EXT4_JOURNAL(inode);
3302*4882a593Smuzhiyun 		jbd2_journal_lock_updates(journal);
3303*4882a593Smuzhiyun 		err = jbd2_journal_flush(journal);
3304*4882a593Smuzhiyun 		jbd2_journal_unlock_updates(journal);
3305*4882a593Smuzhiyun 
3306*4882a593Smuzhiyun 		if (err)
3307*4882a593Smuzhiyun 			goto out;
3308*4882a593Smuzhiyun 	}
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 	ret = iomap_bmap(mapping, block, &ext4_iomap_ops);
3311*4882a593Smuzhiyun 
3312*4882a593Smuzhiyun out:
3313*4882a593Smuzhiyun 	inode_unlock_shared(inode);
3314*4882a593Smuzhiyun 	return ret;
3315*4882a593Smuzhiyun }
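
/*
 * Illustrative only: ext4_bmap() is typically reached from userspace via
 * the FIBMAP ioctl, which maps one logical block to a physical block
 * number. Hypothetical sketch (userspace, needs CAP_SYS_RAWIO):
 */
#if 0
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>		/* FIBMAP */

	int blk = 0;			/* in: logical block, out: physical */
	if (ioctl(fd, FIBMAP, &blk) == 0)
		printf("logical block 0 is physical block %d\n", blk);
#endif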
3316*4882a593Smuzhiyun 
3317*4882a593Smuzhiyun static int ext4_readpage(struct file *file, struct page *page)
3318*4882a593Smuzhiyun {
3319*4882a593Smuzhiyun 	int ret = -EAGAIN;
3320*4882a593Smuzhiyun 	struct inode *inode = page->mapping->host;
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 	trace_ext4_readpage(page);
3323*4882a593Smuzhiyun 
3324*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode))
3325*4882a593Smuzhiyun 		ret = ext4_readpage_inline(inode, page);
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	if (ret == -EAGAIN)
3328*4882a593Smuzhiyun 		return ext4_mpage_readpages(inode, NULL, page);
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun 	return ret;
3331*4882a593Smuzhiyun }
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun static void ext4_readahead(struct readahead_control *rac)
3334*4882a593Smuzhiyun {
3335*4882a593Smuzhiyun 	struct inode *inode = rac->mapping->host;
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	/* If the file has inline data, no need to do readahead. */
3338*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode))
3339*4882a593Smuzhiyun 		return;
3340*4882a593Smuzhiyun 
3341*4882a593Smuzhiyun 	ext4_mpage_readpages(inode, rac, NULL);
3342*4882a593Smuzhiyun }
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun static void ext4_invalidatepage(struct page *page, unsigned int offset,
3345*4882a593Smuzhiyun 				unsigned int length)
3346*4882a593Smuzhiyun {
3347*4882a593Smuzhiyun 	trace_ext4_invalidatepage(page, offset, length);
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun 	/* No journalling happens on data buffers when this function is used */
3350*4882a593Smuzhiyun 	WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun 	block_invalidatepage(page, offset, length);
3353*4882a593Smuzhiyun }
3354*4882a593Smuzhiyun 
3355*4882a593Smuzhiyun static int __ext4_journalled_invalidatepage(struct page *page,
3356*4882a593Smuzhiyun 					    unsigned int offset,
3357*4882a593Smuzhiyun 					    unsigned int length)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	trace_ext4_journalled_invalidatepage(page, offset, length);
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun 	/*
3364*4882a593Smuzhiyun 	 * If it's a full truncate we just forget about the pending dirtying
3365*4882a593Smuzhiyun 	 */
3366*4882a593Smuzhiyun 	if (offset == 0 && length == PAGE_SIZE)
3367*4882a593Smuzhiyun 		ClearPageChecked(page);
3368*4882a593Smuzhiyun 
3369*4882a593Smuzhiyun 	return jbd2_journal_invalidatepage(journal, page, offset, length);
3370*4882a593Smuzhiyun }
3371*4882a593Smuzhiyun 
3372*4882a593Smuzhiyun /* Wrapper for aops... */
3373*4882a593Smuzhiyun static void ext4_journalled_invalidatepage(struct page *page,
3374*4882a593Smuzhiyun 					   unsigned int offset,
3375*4882a593Smuzhiyun 					   unsigned int length)
3376*4882a593Smuzhiyun {
3377*4882a593Smuzhiyun 	WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
3378*4882a593Smuzhiyun }
3379*4882a593Smuzhiyun 
3380*4882a593Smuzhiyun static int ext4_releasepage(struct page *page, gfp_t wait)
3381*4882a593Smuzhiyun {
3382*4882a593Smuzhiyun 	journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3383*4882a593Smuzhiyun 
3384*4882a593Smuzhiyun 	trace_ext4_releasepage(page);
3385*4882a593Smuzhiyun 
3386*4882a593Smuzhiyun 	/* Page has dirty journalled data -> cannot release */
3387*4882a593Smuzhiyun 	if (PageChecked(page))
3388*4882a593Smuzhiyun 		return 0;
3389*4882a593Smuzhiyun 	if (journal)
3390*4882a593Smuzhiyun 		return jbd2_journal_try_to_free_buffers(journal, page);
3391*4882a593Smuzhiyun 	else
3392*4882a593Smuzhiyun 		return try_to_free_buffers(page);
3393*4882a593Smuzhiyun }
3394*4882a593Smuzhiyun 
3395*4882a593Smuzhiyun static bool ext4_inode_datasync_dirty(struct inode *inode)
3396*4882a593Smuzhiyun {
3397*4882a593Smuzhiyun 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
3398*4882a593Smuzhiyun 
3399*4882a593Smuzhiyun 	if (journal) {
3400*4882a593Smuzhiyun 		if (jbd2_transaction_committed(journal,
3401*4882a593Smuzhiyun 			EXT4_I(inode)->i_datasync_tid))
3402*4882a593Smuzhiyun 			return false;
3403*4882a593Smuzhiyun 		if (test_opt2(inode->i_sb, JOURNAL_FAST_COMMIT))
3404*4882a593Smuzhiyun 			return !list_empty(&EXT4_I(inode)->i_fc_list);
3405*4882a593Smuzhiyun 		return true;
3406*4882a593Smuzhiyun 	}
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun 	/* Any metadata buffers to write? */
3409*4882a593Smuzhiyun 	if (!list_empty(&inode->i_mapping->private_list))
3410*4882a593Smuzhiyun 		return true;
3411*4882a593Smuzhiyun 	return inode->i_state & I_DIRTY_DATASYNC;
3412*4882a593Smuzhiyun }
3413*4882a593Smuzhiyun 
3414*4882a593Smuzhiyun static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
3415*4882a593Smuzhiyun 			   struct ext4_map_blocks *map, loff_t offset,
3416*4882a593Smuzhiyun 			   loff_t length)
3417*4882a593Smuzhiyun {
3418*4882a593Smuzhiyun 	u8 blkbits = inode->i_blkbits;
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun 	/*
3421*4882a593Smuzhiyun 	 * Writes that span EOF might trigger an I/O size update on completion,
3422*4882a593Smuzhiyun 	 * so consider them to be dirty for the purpose of O_DSYNC, even if
3423*4882a593Smuzhiyun 	 * no other metadata changes are being made or are pending.
3424*4882a593Smuzhiyun 	 */
3425*4882a593Smuzhiyun 	iomap->flags = 0;
3426*4882a593Smuzhiyun 	if (ext4_inode_datasync_dirty(inode) ||
3427*4882a593Smuzhiyun 	    offset + length > i_size_read(inode))
3428*4882a593Smuzhiyun 		iomap->flags |= IOMAP_F_DIRTY;
3429*4882a593Smuzhiyun 
3430*4882a593Smuzhiyun 	if (map->m_flags & EXT4_MAP_NEW)
3431*4882a593Smuzhiyun 		iomap->flags |= IOMAP_F_NEW;
3432*4882a593Smuzhiyun 
3433*4882a593Smuzhiyun 	iomap->bdev = inode->i_sb->s_bdev;
3434*4882a593Smuzhiyun 	iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
3435*4882a593Smuzhiyun 	iomap->offset = (u64) map->m_lblk << blkbits;
3436*4882a593Smuzhiyun 	iomap->length = (u64) map->m_len << blkbits;
3437*4882a593Smuzhiyun 
3438*4882a593Smuzhiyun 	if ((map->m_flags & EXT4_MAP_MAPPED) &&
3439*4882a593Smuzhiyun 	    !ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3440*4882a593Smuzhiyun 		iomap->flags |= IOMAP_F_MERGED;
3441*4882a593Smuzhiyun 
3442*4882a593Smuzhiyun 	/*
3443*4882a593Smuzhiyun 	 * Flags passed to ext4_map_blocks() for direct I/O writes can result
3444*4882a593Smuzhiyun 	 * in m_flags having both EXT4_MAP_MAPPED and EXT4_MAP_UNWRITTEN bits
3445*4882a593Smuzhiyun 	 * set. In order for any allocated unwritten extents to be converted
3446*4882a593Smuzhiyun 	 * into written extents correctly within the ->end_io() handler, we
3447*4882a593Smuzhiyun 	 * need to ensure that the iomap->type is set appropriately. Hence
3448*4882a593Smuzhiyun 	 * we check first whether the EXT4_MAP_UNWRITTEN bit has been
3449*4882a593Smuzhiyun 	 * set.
3450*4882a593Smuzhiyun 	 */
3451*4882a593Smuzhiyun 	if (map->m_flags & EXT4_MAP_UNWRITTEN) {
3452*4882a593Smuzhiyun 		iomap->type = IOMAP_UNWRITTEN;
3453*4882a593Smuzhiyun 		iomap->addr = (u64) map->m_pblk << blkbits;
3454*4882a593Smuzhiyun 	} else if (map->m_flags & EXT4_MAP_MAPPED) {
3455*4882a593Smuzhiyun 		iomap->type = IOMAP_MAPPED;
3456*4882a593Smuzhiyun 		iomap->addr = (u64) map->m_pblk << blkbits;
3457*4882a593Smuzhiyun 	} else {
3458*4882a593Smuzhiyun 		iomap->type = IOMAP_HOLE;
3459*4882a593Smuzhiyun 		iomap->addr = IOMAP_NULL_ADDR;
3460*4882a593Smuzhiyun 	}
3461*4882a593Smuzhiyun }
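
/*
 * Worked example (illustrative): with 4KiB blocks (blkbits == 12), a
 * mapping of m_lblk == 3 and m_len == 2 is reported to iomap as
 * offset = 3 << 12 = 12288 and length = 2 << 12 = 8192.
 */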
3462*4882a593Smuzhiyun 
3463*4882a593Smuzhiyun static int ext4_iomap_alloc(struct inode *inode, struct ext4_map_blocks *map,
3464*4882a593Smuzhiyun 			    unsigned int flags)
3465*4882a593Smuzhiyun {
3466*4882a593Smuzhiyun 	handle_t *handle;
3467*4882a593Smuzhiyun 	u8 blkbits = inode->i_blkbits;
3468*4882a593Smuzhiyun 	int ret, dio_credits, m_flags = 0, retries = 0;
3469*4882a593Smuzhiyun 
3470*4882a593Smuzhiyun 	/*
3471*4882a593Smuzhiyun 	 * Trim the mapping request to the maximum value that we can map at
3472*4882a593Smuzhiyun 	 * once for direct I/O.
3473*4882a593Smuzhiyun 	 */
3474*4882a593Smuzhiyun 	if (map->m_len > DIO_MAX_BLOCKS)
3475*4882a593Smuzhiyun 		map->m_len = DIO_MAX_BLOCKS;
3476*4882a593Smuzhiyun 	dio_credits = ext4_chunk_trans_blocks(inode, map->m_len);
3477*4882a593Smuzhiyun 
3478*4882a593Smuzhiyun retry:
3479*4882a593Smuzhiyun 	/*
3480*4882a593Smuzhiyun 	 * Either we allocate blocks and then don't get an unwritten extent, so
3481*4882a593Smuzhiyun 	 * in that case we have reserved enough credits. Or, the blocks are
3482*4882a593Smuzhiyun 	 * already allocated and unwritten. In that case, the extent conversion
3483*4882a593Smuzhiyun 	 * fits into the credits as well.
3484*4882a593Smuzhiyun 	 */
3485*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits);
3486*4882a593Smuzhiyun 	if (IS_ERR(handle))
3487*4882a593Smuzhiyun 		return PTR_ERR(handle);
3488*4882a593Smuzhiyun 
3489*4882a593Smuzhiyun 	/*
3490*4882a593Smuzhiyun 	 * DAX and direct I/O are the only two operations that are currently
3491*4882a593Smuzhiyun 	 * supported with IOMAP_WRITE.
3492*4882a593Smuzhiyun 	 */
3493*4882a593Smuzhiyun 	WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
3494*4882a593Smuzhiyun 	if (IS_DAX(inode))
3495*4882a593Smuzhiyun 		m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
3496*4882a593Smuzhiyun 	/*
3497*4882a593Smuzhiyun 	 * We use i_size instead of i_disksize here because delalloc writeback
3498*4882a593Smuzhiyun 	 * can complete at any point during the I/O and subsequently push the
3499*4882a593Smuzhiyun 	 * i_disksize out to i_size. This could be beyond where direct I/O is
3500*4882a593Smuzhiyun 	 * happening and thus expose allocated blocks to direct I/O reads.
3501*4882a593Smuzhiyun 	 */
3502*4882a593Smuzhiyun 	else if (((loff_t)map->m_lblk << blkbits) >= i_size_read(inode))
3503*4882a593Smuzhiyun 		m_flags = EXT4_GET_BLOCKS_CREATE;
3504*4882a593Smuzhiyun 	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3505*4882a593Smuzhiyun 		m_flags = EXT4_GET_BLOCKS_IO_CREATE_EXT;
3506*4882a593Smuzhiyun 
3507*4882a593Smuzhiyun 	ret = ext4_map_blocks(handle, inode, map, m_flags);
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun 	/*
3510*4882a593Smuzhiyun 	 * We cannot fill holes in indirect tree based inodes as that could
3511*4882a593Smuzhiyun 	 * expose stale data in the case of a crash. Use the magic error code
3512*4882a593Smuzhiyun 	 * to fallback to buffered I/O.
3513*4882a593Smuzhiyun 	 */
3514*4882a593Smuzhiyun 	if (!m_flags && !ret)
3515*4882a593Smuzhiyun 		ret = -ENOTBLK;
3516*4882a593Smuzhiyun 
3517*4882a593Smuzhiyun 	ext4_journal_stop(handle);
3518*4882a593Smuzhiyun 	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
3519*4882a593Smuzhiyun 		goto retry;
3520*4882a593Smuzhiyun 
3521*4882a593Smuzhiyun 	return ret;
3522*4882a593Smuzhiyun }
3523*4882a593Smuzhiyun 
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
3526*4882a593Smuzhiyun 		unsigned flags, struct iomap *iomap, struct iomap *srcmap)
3527*4882a593Smuzhiyun {
3528*4882a593Smuzhiyun 	int ret;
3529*4882a593Smuzhiyun 	struct ext4_map_blocks map;
3530*4882a593Smuzhiyun 	u8 blkbits = inode->i_blkbits;
3531*4882a593Smuzhiyun 
3532*4882a593Smuzhiyun 	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3533*4882a593Smuzhiyun 		return -EINVAL;
3534*4882a593Smuzhiyun 
3535*4882a593Smuzhiyun 	if (WARN_ON_ONCE(ext4_has_inline_data(inode)))
3536*4882a593Smuzhiyun 		return -ERANGE;
3537*4882a593Smuzhiyun 
3538*4882a593Smuzhiyun 	/*
3539*4882a593Smuzhiyun 	 * Calculate the first and last logical blocks respectively.
3540*4882a593Smuzhiyun 	 */
3541*4882a593Smuzhiyun 	map.m_lblk = offset >> blkbits;
3542*4882a593Smuzhiyun 	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3543*4882a593Smuzhiyun 			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3544*4882a593Smuzhiyun 
3545*4882a593Smuzhiyun 	if (flags & IOMAP_WRITE) {
3546*4882a593Smuzhiyun 		/*
3547*4882a593Smuzhiyun 		 * We check here if the blocks are already allocated; if so, we
3548*4882a593Smuzhiyun 		 * don't need to start a journal txn and can directly return
3549*4882a593Smuzhiyun 		 * the mapping information. This can boost performance,
3550*4882a593Smuzhiyun 		 * especially for multi-threaded overwrite requests.
3551*4882a593Smuzhiyun 		 */
3552*4882a593Smuzhiyun 		if (offset + length <= i_size_read(inode)) {
3553*4882a593Smuzhiyun 			ret = ext4_map_blocks(NULL, inode, &map, 0);
3554*4882a593Smuzhiyun 			if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
3555*4882a593Smuzhiyun 				goto out;
3556*4882a593Smuzhiyun 		}
3557*4882a593Smuzhiyun 		ret = ext4_iomap_alloc(inode, &map, flags);
3558*4882a593Smuzhiyun 	} else {
3559*4882a593Smuzhiyun 		ret = ext4_map_blocks(NULL, inode, &map, 0);
3560*4882a593Smuzhiyun 	}
3561*4882a593Smuzhiyun 
3562*4882a593Smuzhiyun 	if (ret < 0)
3563*4882a593Smuzhiyun 		return ret;
3564*4882a593Smuzhiyun out:
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun 	/*
3567*4882a593Smuzhiyun 	 * When inline encryption is enabled, sometimes I/O to an encrypted file
3568*4882a593Smuzhiyun 	 * has to be broken up to guarantee DUN contiguity. Handle this by
3569*4882a593Smuzhiyun 	 * limiting the length of the mapping returned.
3570*4882a593Smuzhiyun 	 */
3571*4882a593Smuzhiyun 	map.m_len = fscrypt_limit_io_blocks(inode, map.m_lblk, map.m_len);
3572*4882a593Smuzhiyun 
3573*4882a593Smuzhiyun 	ext4_set_iomap(inode, iomap, &map, offset, length);
3574*4882a593Smuzhiyun 
3575*4882a593Smuzhiyun 	return 0;
3576*4882a593Smuzhiyun }
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun static int ext4_iomap_overwrite_begin(struct inode *inode, loff_t offset,
3579*4882a593Smuzhiyun 		loff_t length, unsigned flags, struct iomap *iomap,
3580*4882a593Smuzhiyun 		struct iomap *srcmap)
3581*4882a593Smuzhiyun {
3582*4882a593Smuzhiyun 	int ret;
3583*4882a593Smuzhiyun 
3584*4882a593Smuzhiyun 	/*
3585*4882a593Smuzhiyun 	 * Even for writes we don't need to allocate blocks, so just pretend
3586*4882a593Smuzhiyun 	 * we are reading to save the overhead of starting a transaction.
3587*4882a593Smuzhiyun 	 */
3588*4882a593Smuzhiyun 	flags &= ~IOMAP_WRITE;
3589*4882a593Smuzhiyun 	ret = ext4_iomap_begin(inode, offset, length, flags, iomap, srcmap);
3590*4882a593Smuzhiyun 	WARN_ON_ONCE(iomap->type != IOMAP_MAPPED);
3591*4882a593Smuzhiyun 	return ret;
3592*4882a593Smuzhiyun }
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun static int ext4_iomap_end(struct inode *inode, loff_t offset, loff_t length,
3595*4882a593Smuzhiyun 			  ssize_t written, unsigned flags, struct iomap *iomap)
3596*4882a593Smuzhiyun {
3597*4882a593Smuzhiyun 	/*
3598*4882a593Smuzhiyun 	 * Check to see whether an error occurred while writing out the data to
3599*4882a593Smuzhiyun 	 * the allocated blocks. If so, return the magic error code so that we
3600*4882a593Smuzhiyun 	 * fallback to buffered I/O and attempt to complete the remainder of
3601*4882a593Smuzhiyun 	 * the I/O. Any blocks that may have been allocated in preparation for
3602*4882a593Smuzhiyun 	 * the direct I/O will be reused during buffered I/O.
3603*4882a593Smuzhiyun 	 */
3604*4882a593Smuzhiyun 	if (flags & (IOMAP_WRITE | IOMAP_DIRECT) && written == 0)
3605*4882a593Smuzhiyun 		return -ENOTBLK;
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun 	return 0;
3608*4882a593Smuzhiyun }
3609*4882a593Smuzhiyun 
3610*4882a593Smuzhiyun const struct iomap_ops ext4_iomap_ops = {
3611*4882a593Smuzhiyun 	.iomap_begin		= ext4_iomap_begin,
3612*4882a593Smuzhiyun 	.iomap_end		= ext4_iomap_end,
3613*4882a593Smuzhiyun };
3614*4882a593Smuzhiyun 
3615*4882a593Smuzhiyun const struct iomap_ops ext4_iomap_overwrite_ops = {
3616*4882a593Smuzhiyun 	.iomap_begin		= ext4_iomap_overwrite_begin,
3617*4882a593Smuzhiyun 	.iomap_end		= ext4_iomap_end,
3618*4882a593Smuzhiyun };
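
/*
 * Illustrative only: these tables are consumed by the generic iomap layer.
 * For example, the ext4 direct-I/O read path (in fs/ext4/file.c, not shown
 * here) passes ext4_iomap_ops to iomap_dio_rw() roughly as sketched below;
 * iocb, to and ret come from the calling context.
 */
#if 0
	ret = iomap_dio_rw(iocb, to, &ext4_iomap_ops, NULL,
			   is_sync_kiocb(iocb));
#endif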
3619*4882a593Smuzhiyun 
3620*4882a593Smuzhiyun static bool ext4_iomap_is_delalloc(struct inode *inode,
3621*4882a593Smuzhiyun 				   struct ext4_map_blocks *map)
3622*4882a593Smuzhiyun {
3623*4882a593Smuzhiyun 	struct extent_status es;
3624*4882a593Smuzhiyun 	ext4_lblk_t offset = 0, end = map->m_lblk + map->m_len - 1;
3625*4882a593Smuzhiyun 
3626*4882a593Smuzhiyun 	ext4_es_find_extent_range(inode, &ext4_es_is_delayed,
3627*4882a593Smuzhiyun 				  map->m_lblk, end, &es);
3628*4882a593Smuzhiyun 
3629*4882a593Smuzhiyun 	if (!es.es_len || es.es_lblk > end)
3630*4882a593Smuzhiyun 		return false;
3631*4882a593Smuzhiyun 
3632*4882a593Smuzhiyun 	if (es.es_lblk > map->m_lblk) {
3633*4882a593Smuzhiyun 		map->m_len = es.es_lblk - map->m_lblk;
3634*4882a593Smuzhiyun 		return false;
3635*4882a593Smuzhiyun 	}
3636*4882a593Smuzhiyun 
3637*4882a593Smuzhiyun 	offset = map->m_lblk - es.es_lblk;
3638*4882a593Smuzhiyun 	map->m_len = es.es_len - offset;
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun 	return true;
3641*4882a593Smuzhiyun }
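
/*
 * Illustrative example: for a query of m_lblk == 10, m_len == 20 with a
 * cached delayed extent at es_lblk == 15, es_len == 100, the extent starts
 * past m_lblk, so m_len is trimmed to 15 - 10 = 5 and blocks 10..14 are
 * reported as a plain hole. If the extent instead started at es_lblk == 5,
 * then offset = 10 - 5 = 5, m_len = 100 - 5 = 95, and the range is
 * reported as delalloc.
 */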
3642*4882a593Smuzhiyun 
3643*4882a593Smuzhiyun static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
3644*4882a593Smuzhiyun 				   loff_t length, unsigned int flags,
3645*4882a593Smuzhiyun 				   struct iomap *iomap, struct iomap *srcmap)
3646*4882a593Smuzhiyun {
3647*4882a593Smuzhiyun 	int ret;
3648*4882a593Smuzhiyun 	bool delalloc = false;
3649*4882a593Smuzhiyun 	struct ext4_map_blocks map;
3650*4882a593Smuzhiyun 	u8 blkbits = inode->i_blkbits;
3651*4882a593Smuzhiyun 
3652*4882a593Smuzhiyun 	if ((offset >> blkbits) > EXT4_MAX_LOGICAL_BLOCK)
3653*4882a593Smuzhiyun 		return -EINVAL;
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode)) {
3656*4882a593Smuzhiyun 		ret = ext4_inline_data_iomap(inode, iomap);
3657*4882a593Smuzhiyun 		if (ret != -EAGAIN) {
3658*4882a593Smuzhiyun 			if (ret == 0 && offset >= iomap->length)
3659*4882a593Smuzhiyun 				ret = -ENOENT;
3660*4882a593Smuzhiyun 			return ret;
3661*4882a593Smuzhiyun 		}
3662*4882a593Smuzhiyun 	}
3663*4882a593Smuzhiyun 
3664*4882a593Smuzhiyun 	/*
3665*4882a593Smuzhiyun 	 * Calculate the first and last logical block respectively.
3666*4882a593Smuzhiyun 	 */
3667*4882a593Smuzhiyun 	map.m_lblk = offset >> blkbits;
3668*4882a593Smuzhiyun 	map.m_len = min_t(loff_t, (offset + length - 1) >> blkbits,
3669*4882a593Smuzhiyun 			  EXT4_MAX_LOGICAL_BLOCK) - map.m_lblk + 1;
3670*4882a593Smuzhiyun 
3671*4882a593Smuzhiyun 	/*
3672*4882a593Smuzhiyun 	 * Fiemap callers may call for an offset beyond s_bitmap_maxbytes,
3673*4882a593Smuzhiyun 	 * so handle it here instead of querying ext4_map_blocks(),
3674*4882a593Smuzhiyun 	 * which would warn about it and return an
3675*4882a593Smuzhiyun 	 * -EIO error.
3676*4882a593Smuzhiyun 	 */
3677*4882a593Smuzhiyun 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3678*4882a593Smuzhiyun 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3679*4882a593Smuzhiyun 
3680*4882a593Smuzhiyun 		if (offset >= sbi->s_bitmap_maxbytes) {
3681*4882a593Smuzhiyun 			map.m_flags = 0;
3682*4882a593Smuzhiyun 			goto set_iomap;
3683*4882a593Smuzhiyun 		}
3684*4882a593Smuzhiyun 	}
3685*4882a593Smuzhiyun 
3686*4882a593Smuzhiyun 	ret = ext4_map_blocks(NULL, inode, &map, 0);
3687*4882a593Smuzhiyun 	if (ret < 0)
3688*4882a593Smuzhiyun 		return ret;
3689*4882a593Smuzhiyun 	if (ret == 0)
3690*4882a593Smuzhiyun 		delalloc = ext4_iomap_is_delalloc(inode, &map);
3691*4882a593Smuzhiyun 
3692*4882a593Smuzhiyun set_iomap:
3693*4882a593Smuzhiyun 	ext4_set_iomap(inode, iomap, &map, offset, length);
3694*4882a593Smuzhiyun 	if (delalloc && iomap->type == IOMAP_HOLE)
3695*4882a593Smuzhiyun 		iomap->type = IOMAP_DELALLOC;
3696*4882a593Smuzhiyun 
3697*4882a593Smuzhiyun 	return 0;
3698*4882a593Smuzhiyun }
3699*4882a593Smuzhiyun 
3700*4882a593Smuzhiyun const struct iomap_ops ext4_iomap_report_ops = {
3701*4882a593Smuzhiyun 	.iomap_begin = ext4_iomap_begin_report,
3702*4882a593Smuzhiyun };
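
/*
 * Note (illustrative): ext4_iomap_report_ops is a reporting-only table; in
 * this kernel it backs lookup paths such as fiemap and SEEK_HOLE/SEEK_DATA,
 * as well as the swapfile activation helper further below, none of which
 * allocate blocks.
 */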
3703*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun /*
3705*4882a593Smuzhiyun  * Pages can be marked dirty completely asynchronously from ext4's journalling
3706*4882a593Smuzhiyun  * activity, by filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3707*4882a593Smuzhiyun  * much here because ->set_page_dirty is called under VFS locks.  The page is
3708*4882a593Smuzhiyun  * not necessarily locked.
3709*4882a593Smuzhiyun  *
3710*4882a593Smuzhiyun  * We cannot just dirty the page and leave attached buffers clean, because the
3711*4882a593Smuzhiyun  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3712*4882a593Smuzhiyun  * or jbddirty because all the journalling code will explode.
3713*4882a593Smuzhiyun  *
3714*4882a593Smuzhiyun  * So what we do is to mark the page "pending dirty" and next time writepage
3715*4882a593Smuzhiyun  * is called, propagate that into the buffers appropriately.
3716*4882a593Smuzhiyun  */
3717*4882a593Smuzhiyun static int ext4_journalled_set_page_dirty(struct page *page)
3718*4882a593Smuzhiyun {
3719*4882a593Smuzhiyun 	SetPageChecked(page);
3720*4882a593Smuzhiyun 	return __set_page_dirty_nobuffers(page);
3721*4882a593Smuzhiyun }
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun static int ext4_set_page_dirty(struct page *page)
3724*4882a593Smuzhiyun {
3725*4882a593Smuzhiyun 	WARN_ON_ONCE(!PageLocked(page) && !PageDirty(page));
3726*4882a593Smuzhiyun 	WARN_ON_ONCE(!page_has_buffers(page));
3727*4882a593Smuzhiyun 	return __set_page_dirty_buffers(page);
3728*4882a593Smuzhiyun }
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun static int ext4_iomap_swap_activate(struct swap_info_struct *sis,
3731*4882a593Smuzhiyun 				    struct file *file, sector_t *span)
3732*4882a593Smuzhiyun {
3733*4882a593Smuzhiyun 	return iomap_swapfile_activate(sis, file, span,
3734*4882a593Smuzhiyun 				       &ext4_iomap_report_ops);
3735*4882a593Smuzhiyun }
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun static const struct address_space_operations ext4_aops = {
3738*4882a593Smuzhiyun 	.readpage		= ext4_readpage,
3739*4882a593Smuzhiyun 	.readahead		= ext4_readahead,
3740*4882a593Smuzhiyun 	.writepage		= ext4_writepage,
3741*4882a593Smuzhiyun 	.writepages		= ext4_writepages,
3742*4882a593Smuzhiyun 	.write_begin		= ext4_write_begin,
3743*4882a593Smuzhiyun 	.write_end		= ext4_write_end,
3744*4882a593Smuzhiyun 	.set_page_dirty		= ext4_set_page_dirty,
3745*4882a593Smuzhiyun 	.bmap			= ext4_bmap,
3746*4882a593Smuzhiyun 	.invalidatepage		= ext4_invalidatepage,
3747*4882a593Smuzhiyun 	.releasepage		= ext4_releasepage,
3748*4882a593Smuzhiyun 	.direct_IO		= noop_direct_IO,
3749*4882a593Smuzhiyun 	.migratepage		= buffer_migrate_page,
3750*4882a593Smuzhiyun 	.is_partially_uptodate  = block_is_partially_uptodate,
3751*4882a593Smuzhiyun 	.error_remove_page	= generic_error_remove_page,
3752*4882a593Smuzhiyun 	.swap_activate		= ext4_iomap_swap_activate,
3753*4882a593Smuzhiyun };
3754*4882a593Smuzhiyun 
3755*4882a593Smuzhiyun static const struct address_space_operations ext4_journalled_aops = {
3756*4882a593Smuzhiyun 	.readpage		= ext4_readpage,
3757*4882a593Smuzhiyun 	.readahead		= ext4_readahead,
3758*4882a593Smuzhiyun 	.writepage		= ext4_writepage,
3759*4882a593Smuzhiyun 	.writepages		= ext4_writepages,
3760*4882a593Smuzhiyun 	.write_begin		= ext4_write_begin,
3761*4882a593Smuzhiyun 	.write_end		= ext4_journalled_write_end,
3762*4882a593Smuzhiyun 	.set_page_dirty		= ext4_journalled_set_page_dirty,
3763*4882a593Smuzhiyun 	.bmap			= ext4_bmap,
3764*4882a593Smuzhiyun 	.invalidatepage		= ext4_journalled_invalidatepage,
3765*4882a593Smuzhiyun 	.releasepage		= ext4_releasepage,
3766*4882a593Smuzhiyun 	.direct_IO		= noop_direct_IO,
3767*4882a593Smuzhiyun 	.is_partially_uptodate  = block_is_partially_uptodate,
3768*4882a593Smuzhiyun 	.error_remove_page	= generic_error_remove_page,
3769*4882a593Smuzhiyun 	.swap_activate		= ext4_iomap_swap_activate,
3770*4882a593Smuzhiyun };
3771*4882a593Smuzhiyun 
3772*4882a593Smuzhiyun static const struct address_space_operations ext4_da_aops = {
3773*4882a593Smuzhiyun 	.readpage		= ext4_readpage,
3774*4882a593Smuzhiyun 	.readahead		= ext4_readahead,
3775*4882a593Smuzhiyun 	.writepage		= ext4_writepage,
3776*4882a593Smuzhiyun 	.writepages		= ext4_writepages,
3777*4882a593Smuzhiyun 	.write_begin		= ext4_da_write_begin,
3778*4882a593Smuzhiyun 	.write_end		= ext4_da_write_end,
3779*4882a593Smuzhiyun 	.set_page_dirty		= ext4_set_page_dirty,
3780*4882a593Smuzhiyun 	.bmap			= ext4_bmap,
3781*4882a593Smuzhiyun 	.invalidatepage		= ext4_invalidatepage,
3782*4882a593Smuzhiyun 	.releasepage		= ext4_releasepage,
3783*4882a593Smuzhiyun 	.direct_IO		= noop_direct_IO,
3784*4882a593Smuzhiyun 	.migratepage		= buffer_migrate_page,
3785*4882a593Smuzhiyun 	.is_partially_uptodate  = block_is_partially_uptodate,
3786*4882a593Smuzhiyun 	.error_remove_page	= generic_error_remove_page,
3787*4882a593Smuzhiyun 	.swap_activate		= ext4_iomap_swap_activate,
3788*4882a593Smuzhiyun };
3789*4882a593Smuzhiyun 
3790*4882a593Smuzhiyun static const struct address_space_operations ext4_dax_aops = {
3791*4882a593Smuzhiyun 	.writepages		= ext4_dax_writepages,
3792*4882a593Smuzhiyun 	.direct_IO		= noop_direct_IO,
3793*4882a593Smuzhiyun 	.set_page_dirty		= noop_set_page_dirty,
3794*4882a593Smuzhiyun 	.bmap			= ext4_bmap,
3795*4882a593Smuzhiyun 	.invalidatepage		= noop_invalidatepage,
3796*4882a593Smuzhiyun 	.swap_activate		= ext4_iomap_swap_activate,
3797*4882a593Smuzhiyun };
3798*4882a593Smuzhiyun 
3799*4882a593Smuzhiyun void ext4_set_aops(struct inode *inode)
3800*4882a593Smuzhiyun {
3801*4882a593Smuzhiyun 	switch (ext4_inode_journal_mode(inode)) {
3802*4882a593Smuzhiyun 	case EXT4_INODE_ORDERED_DATA_MODE:
3803*4882a593Smuzhiyun 	case EXT4_INODE_WRITEBACK_DATA_MODE:
3804*4882a593Smuzhiyun 		break;
3805*4882a593Smuzhiyun 	case EXT4_INODE_JOURNAL_DATA_MODE:
3806*4882a593Smuzhiyun 		inode->i_mapping->a_ops = &ext4_journalled_aops;
3807*4882a593Smuzhiyun 		return;
3808*4882a593Smuzhiyun 	default:
3809*4882a593Smuzhiyun 		BUG();
3810*4882a593Smuzhiyun 	}
3811*4882a593Smuzhiyun 	if (IS_DAX(inode))
3812*4882a593Smuzhiyun 		inode->i_mapping->a_ops = &ext4_dax_aops;
3813*4882a593Smuzhiyun 	else if (test_opt(inode->i_sb, DELALLOC))
3814*4882a593Smuzhiyun 		inode->i_mapping->a_ops = &ext4_da_aops;
3815*4882a593Smuzhiyun 	else
3816*4882a593Smuzhiyun 		inode->i_mapping->a_ops = &ext4_aops;
3817*4882a593Smuzhiyun }
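
/*
 * Summary (illustrative): the resulting address_space_operations choice is
 *   data=journal                 -> ext4_journalled_aops
 *   DAX-capable inode            -> ext4_dax_aops
 *   delalloc (the default)       -> ext4_da_aops
 *   nodelalloc ordered/writeback -> ext4_aops
 * with DAX checked before the delalloc mount option, as in the code above.
 */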
3818*4882a593Smuzhiyun 
3819*4882a593Smuzhiyun static int __ext4_block_zero_page_range(handle_t *handle,
3820*4882a593Smuzhiyun 		struct address_space *mapping, loff_t from, loff_t length)
3821*4882a593Smuzhiyun {
3822*4882a593Smuzhiyun 	ext4_fsblk_t index = from >> PAGE_SHIFT;
3823*4882a593Smuzhiyun 	unsigned offset = from & (PAGE_SIZE-1);
3824*4882a593Smuzhiyun 	unsigned blocksize, pos;
3825*4882a593Smuzhiyun 	ext4_lblk_t iblock;
3826*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3827*4882a593Smuzhiyun 	struct buffer_head *bh;
3828*4882a593Smuzhiyun 	struct page *page;
3829*4882a593Smuzhiyun 	int err = 0;
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun 	page = find_or_create_page(mapping, from >> PAGE_SHIFT,
3832*4882a593Smuzhiyun 				   mapping_gfp_constraint(mapping, ~__GFP_FS));
3833*4882a593Smuzhiyun 	if (!page)
3834*4882a593Smuzhiyun 		return -ENOMEM;
3835*4882a593Smuzhiyun 
3836*4882a593Smuzhiyun 	blocksize = inode->i_sb->s_blocksize;
3837*4882a593Smuzhiyun 
3838*4882a593Smuzhiyun 	iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits);
3839*4882a593Smuzhiyun 
3840*4882a593Smuzhiyun 	if (!page_has_buffers(page))
3841*4882a593Smuzhiyun 		create_empty_buffers(page, blocksize, 0);
3842*4882a593Smuzhiyun 
3843*4882a593Smuzhiyun 	/* Find the buffer that contains "offset" */
3844*4882a593Smuzhiyun 	bh = page_buffers(page);
3845*4882a593Smuzhiyun 	pos = blocksize;
3846*4882a593Smuzhiyun 	while (offset >= pos) {
3847*4882a593Smuzhiyun 		bh = bh->b_this_page;
3848*4882a593Smuzhiyun 		iblock++;
3849*4882a593Smuzhiyun 		pos += blocksize;
3850*4882a593Smuzhiyun 	}
3851*4882a593Smuzhiyun 	if (buffer_freed(bh)) {
3852*4882a593Smuzhiyun 		BUFFER_TRACE(bh, "freed: skip");
3853*4882a593Smuzhiyun 		goto unlock;
3854*4882a593Smuzhiyun 	}
3855*4882a593Smuzhiyun 	if (!buffer_mapped(bh)) {
3856*4882a593Smuzhiyun 		BUFFER_TRACE(bh, "unmapped");
3857*4882a593Smuzhiyun 		ext4_get_block(inode, iblock, bh, 0);
3858*4882a593Smuzhiyun 		/* unmapped? It's a hole - nothing to do */
3859*4882a593Smuzhiyun 		if (!buffer_mapped(bh)) {
3860*4882a593Smuzhiyun 			BUFFER_TRACE(bh, "still unmapped");
3861*4882a593Smuzhiyun 			goto unlock;
3862*4882a593Smuzhiyun 		}
3863*4882a593Smuzhiyun 	}
3864*4882a593Smuzhiyun 
3865*4882a593Smuzhiyun 	/* Ok, it's mapped. Make sure it's up-to-date */
3866*4882a593Smuzhiyun 	if (PageUptodate(page))
3867*4882a593Smuzhiyun 		set_buffer_uptodate(bh);
3868*4882a593Smuzhiyun 
3869*4882a593Smuzhiyun 	if (!buffer_uptodate(bh)) {
3870*4882a593Smuzhiyun 		err = ext4_read_bh_lock(bh, 0, true);
3871*4882a593Smuzhiyun 		if (err)
3872*4882a593Smuzhiyun 			goto unlock;
3873*4882a593Smuzhiyun 		if (fscrypt_inode_uses_fs_layer_crypto(inode)) {
3874*4882a593Smuzhiyun 			/* We expect the key to be set. */
3875*4882a593Smuzhiyun 			BUG_ON(!fscrypt_has_encryption_key(inode));
3876*4882a593Smuzhiyun 			err = fscrypt_decrypt_pagecache_blocks(page, blocksize,
3877*4882a593Smuzhiyun 							       bh_offset(bh));
3878*4882a593Smuzhiyun 			if (err) {
3879*4882a593Smuzhiyun 				clear_buffer_uptodate(bh);
3880*4882a593Smuzhiyun 				goto unlock;
3881*4882a593Smuzhiyun 			}
3882*4882a593Smuzhiyun 		}
3883*4882a593Smuzhiyun 	}
3884*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode)) {
3885*4882a593Smuzhiyun 		BUFFER_TRACE(bh, "get write access");
3886*4882a593Smuzhiyun 		err = ext4_journal_get_write_access(handle, bh);
3887*4882a593Smuzhiyun 		if (err)
3888*4882a593Smuzhiyun 			goto unlock;
3889*4882a593Smuzhiyun 	}
3890*4882a593Smuzhiyun 	zero_user(page, offset, length);
3891*4882a593Smuzhiyun 	BUFFER_TRACE(bh, "zeroed end of block");
3892*4882a593Smuzhiyun 
3893*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode)) {
3894*4882a593Smuzhiyun 		err = ext4_handle_dirty_metadata(handle, inode, bh);
3895*4882a593Smuzhiyun 	} else {
3896*4882a593Smuzhiyun 		err = 0;
3897*4882a593Smuzhiyun 		mark_buffer_dirty(bh);
3898*4882a593Smuzhiyun 		if (ext4_should_order_data(inode))
3899*4882a593Smuzhiyun 			err = ext4_jbd2_inode_add_write(handle, inode, from,
3900*4882a593Smuzhiyun 					length);
3901*4882a593Smuzhiyun 	}
3902*4882a593Smuzhiyun 
3903*4882a593Smuzhiyun unlock:
3904*4882a593Smuzhiyun 	unlock_page(page);
3905*4882a593Smuzhiyun 	put_page(page);
3906*4882a593Smuzhiyun 	return err;
3907*4882a593Smuzhiyun }
3908*4882a593Smuzhiyun 
3909*4882a593Smuzhiyun /*
3910*4882a593Smuzhiyun  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3911*4882a593Smuzhiyun  * starting from file offset 'from'.  The range to be zeroed must
3912*4882a593Smuzhiyun  * be contained within one block.  If the specified range exceeds
3913*4882a593Smuzhiyun  * the end of the block, it will be shortened to the end of the block
3914*4882a593Smuzhiyun  * that corresponds to 'from'.
3915*4882a593Smuzhiyun  */
3916*4882a593Smuzhiyun static int ext4_block_zero_page_range(handle_t *handle,
3917*4882a593Smuzhiyun 		struct address_space *mapping, loff_t from, loff_t length)
3918*4882a593Smuzhiyun {
3919*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3920*4882a593Smuzhiyun 	unsigned offset = from & (PAGE_SIZE-1);
3921*4882a593Smuzhiyun 	unsigned blocksize = inode->i_sb->s_blocksize;
3922*4882a593Smuzhiyun 	unsigned max = blocksize - (offset & (blocksize - 1));
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun 	/*
3925*4882a593Smuzhiyun 	 * Correct the length if it does not fall between
3926*4882a593Smuzhiyun 	 * 'from' and the end of the block
3927*4882a593Smuzhiyun 	 */
3928*4882a593Smuzhiyun 	if (length > max || length < 0)
3929*4882a593Smuzhiyun 		length = max;
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun 	if (IS_DAX(inode)) {
3932*4882a593Smuzhiyun 		return iomap_zero_range(inode, from, length, NULL,
3933*4882a593Smuzhiyun 					&ext4_iomap_ops);
3934*4882a593Smuzhiyun 	}
3935*4882a593Smuzhiyun 	return __ext4_block_zero_page_range(handle, mapping, from, length);
3936*4882a593Smuzhiyun }
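
/*
 * Worked example (illustrative): with a 4KiB block size and from == 5000,
 * offset == 5000 & 4095 == 904, so max == 4096 - 904 == 3192. Any request
 * longer than 3192 bytes is shortened so the zeroed range never crosses
 * the block boundary at byte 8192.
 */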
3937*4882a593Smuzhiyun 
3938*4882a593Smuzhiyun /*
3939*4882a593Smuzhiyun  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3940*4882a593Smuzhiyun  * up to the end of the block which corresponds to `from'.
3941*4882a593Smuzhiyun  * This is required during truncate. We need to physically zero the tail end
3942*4882a593Smuzhiyun  * of that block so it doesn't yield old data if the file is later grown.
3943*4882a593Smuzhiyun  */
3944*4882a593Smuzhiyun static int ext4_block_truncate_page(handle_t *handle,
3945*4882a593Smuzhiyun 		struct address_space *mapping, loff_t from)
3946*4882a593Smuzhiyun {
3947*4882a593Smuzhiyun 	unsigned offset = from & (PAGE_SIZE-1);
3948*4882a593Smuzhiyun 	unsigned length;
3949*4882a593Smuzhiyun 	unsigned blocksize;
3950*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
3951*4882a593Smuzhiyun 
3952*4882a593Smuzhiyun 	/* If we are processing an encrypted inode during orphan list handling, the key may be unavailable, so skip zeroing */
3953*4882a593Smuzhiyun 	if (IS_ENCRYPTED(inode) && !fscrypt_has_encryption_key(inode))
3954*4882a593Smuzhiyun 		return 0;
3955*4882a593Smuzhiyun 
3956*4882a593Smuzhiyun 	blocksize = inode->i_sb->s_blocksize;
3957*4882a593Smuzhiyun 	length = blocksize - (offset & (blocksize - 1));
3958*4882a593Smuzhiyun 
3959*4882a593Smuzhiyun 	return ext4_block_zero_page_range(handle, mapping, from, length);
3960*4882a593Smuzhiyun }
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3963*4882a593Smuzhiyun 			     loff_t lstart, loff_t length)
3964*4882a593Smuzhiyun {
3965*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
3966*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
3967*4882a593Smuzhiyun 	unsigned partial_start, partial_end;
3968*4882a593Smuzhiyun 	ext4_fsblk_t start, end;
3969*4882a593Smuzhiyun 	loff_t byte_end = (lstart + length - 1);
3970*4882a593Smuzhiyun 	int err = 0;
3971*4882a593Smuzhiyun 
3972*4882a593Smuzhiyun 	partial_start = lstart & (sb->s_blocksize - 1);
3973*4882a593Smuzhiyun 	partial_end = byte_end & (sb->s_blocksize - 1);
3974*4882a593Smuzhiyun 
3975*4882a593Smuzhiyun 	start = lstart >> sb->s_blocksize_bits;
3976*4882a593Smuzhiyun 	end = byte_end >> sb->s_blocksize_bits;
3977*4882a593Smuzhiyun 
3978*4882a593Smuzhiyun 	/* Handle partial zero within the single block */
3979*4882a593Smuzhiyun 	if (start == end &&
3980*4882a593Smuzhiyun 	    (partial_start || (partial_end != sb->s_blocksize - 1))) {
3981*4882a593Smuzhiyun 		err = ext4_block_zero_page_range(handle, mapping,
3982*4882a593Smuzhiyun 						 lstart, length);
3983*4882a593Smuzhiyun 		return err;
3984*4882a593Smuzhiyun 	}
3985*4882a593Smuzhiyun 	/* Handle partial zero out on the start of the range */
3986*4882a593Smuzhiyun 	if (partial_start) {
3987*4882a593Smuzhiyun 		err = ext4_block_zero_page_range(handle, mapping,
3988*4882a593Smuzhiyun 						 lstart, sb->s_blocksize);
3989*4882a593Smuzhiyun 		if (err)
3990*4882a593Smuzhiyun 			return err;
3991*4882a593Smuzhiyun 	}
3992*4882a593Smuzhiyun 	/* Handle partial zero out on the end of the range */
3993*4882a593Smuzhiyun 	if (partial_end != sb->s_blocksize - 1)
3994*4882a593Smuzhiyun 		err = ext4_block_zero_page_range(handle, mapping,
3995*4882a593Smuzhiyun 						 byte_end - partial_end,
3996*4882a593Smuzhiyun 						 partial_end + 1);
3997*4882a593Smuzhiyun 	return err;
3998*4882a593Smuzhiyun }
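
/*
 * Worked example (illustrative): with 4KiB blocks, lstart == 1000 and
 * length == 10000 give byte_end == 10999, partial_start == 1000,
 * partial_end == 10999 & 4095 == 2807, start == 0 and end == 2. Two calls
 * result: one zeroing bytes 1000..4095 (the head, in block 0) and one
 * zeroing bytes 8192..10999 (the tail, in block 2).
 */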
3999*4882a593Smuzhiyun 
4000*4882a593Smuzhiyun int ext4_can_truncate(struct inode *inode)
4001*4882a593Smuzhiyun {
4002*4882a593Smuzhiyun 	if (S_ISREG(inode->i_mode))
4003*4882a593Smuzhiyun 		return 1;
4004*4882a593Smuzhiyun 	if (S_ISDIR(inode->i_mode))
4005*4882a593Smuzhiyun 		return 1;
4006*4882a593Smuzhiyun 	if (S_ISLNK(inode->i_mode))
4007*4882a593Smuzhiyun 		return !ext4_inode_is_fast_symlink(inode);
4008*4882a593Smuzhiyun 	return 0;
4009*4882a593Smuzhiyun }
4010*4882a593Smuzhiyun 
4011*4882a593Smuzhiyun /*
4012*4882a593Smuzhiyun  * We have to make sure i_disksize gets properly updated before we truncate
4013*4882a593Smuzhiyun  * page cache due to hole punching or zero range. Otherwise i_disksize update
4014*4882a593Smuzhiyun  * can get lost as it may have been postponed to submission of writeback but
4015*4882a593Smuzhiyun  * that will never happen after we truncate page cache.
4016*4882a593Smuzhiyun  */
4017*4882a593Smuzhiyun int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4018*4882a593Smuzhiyun 				      loff_t len)
4019*4882a593Smuzhiyun {
4020*4882a593Smuzhiyun 	handle_t *handle;
4021*4882a593Smuzhiyun 	int ret;
4022*4882a593Smuzhiyun 
4023*4882a593Smuzhiyun 	loff_t size = i_size_read(inode);
4024*4882a593Smuzhiyun 
4025*4882a593Smuzhiyun 	WARN_ON(!inode_is_locked(inode));
4026*4882a593Smuzhiyun 	if (offset > size || offset + len < size)
4027*4882a593Smuzhiyun 		return 0;
4028*4882a593Smuzhiyun 
4029*4882a593Smuzhiyun 	if (EXT4_I(inode)->i_disksize >= size)
4030*4882a593Smuzhiyun 		return 0;
4031*4882a593Smuzhiyun 
4032*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4033*4882a593Smuzhiyun 	if (IS_ERR(handle))
4034*4882a593Smuzhiyun 		return PTR_ERR(handle);
4035*4882a593Smuzhiyun 	ext4_update_i_disksize(inode, size);
4036*4882a593Smuzhiyun 	ret = ext4_mark_inode_dirty(handle, inode);
4037*4882a593Smuzhiyun 	ext4_journal_stop(handle);
4038*4882a593Smuzhiyun 
4039*4882a593Smuzhiyun 	return ret;
4040*4882a593Smuzhiyun }
4041*4882a593Smuzhiyun 
4042*4882a593Smuzhiyun static void ext4_wait_dax_page(struct ext4_inode_info *ei)
4043*4882a593Smuzhiyun {
4044*4882a593Smuzhiyun 	up_write(&ei->i_mmap_sem);
4045*4882a593Smuzhiyun 	schedule();
4046*4882a593Smuzhiyun 	down_write(&ei->i_mmap_sem);
4047*4882a593Smuzhiyun }
4048*4882a593Smuzhiyun 
4049*4882a593Smuzhiyun int ext4_break_layouts(struct inode *inode)
4050*4882a593Smuzhiyun {
4051*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
4052*4882a593Smuzhiyun 	struct page *page;
4053*4882a593Smuzhiyun 	int error;
4054*4882a593Smuzhiyun 
4055*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!rwsem_is_locked(&ei->i_mmap_sem)))
4056*4882a593Smuzhiyun 		return -EINVAL;
4057*4882a593Smuzhiyun 
4058*4882a593Smuzhiyun 	do {
4059*4882a593Smuzhiyun 		page = dax_layout_busy_page(inode->i_mapping);
4060*4882a593Smuzhiyun 		if (!page)
4061*4882a593Smuzhiyun 			return 0;
4062*4882a593Smuzhiyun 
4063*4882a593Smuzhiyun 		error = ___wait_var_event(&page->_refcount,
4064*4882a593Smuzhiyun 				atomic_read(&page->_refcount) == 1,
4065*4882a593Smuzhiyun 				TASK_INTERRUPTIBLE, 0, 0,
4066*4882a593Smuzhiyun 				ext4_wait_dax_page(ei));
4067*4882a593Smuzhiyun 	} while (error == 0);
4068*4882a593Smuzhiyun 
4069*4882a593Smuzhiyun 	return error;
4070*4882a593Smuzhiyun }
4071*4882a593Smuzhiyun 
4072*4882a593Smuzhiyun /*
4073*4882a593Smuzhiyun  * ext4_punch_hole: punches a hole in a file by releasing the blocks
4074*4882a593Smuzhiyun  * associated with the given offset and length
4075*4882a593Smuzhiyun  *
4076*4882a593Smuzhiyun  * @inode:  File inode
4077*4882a593Smuzhiyun  * @offset: The offset where the hole will begin
4078*4882a593Smuzhiyun  * @len:    The length of the hole
4079*4882a593Smuzhiyun  *
4080*4882a593Smuzhiyun  * Returns: 0 on success or negative on failure
4081*4882a593Smuzhiyun  */
4082*4882a593Smuzhiyun 
4083*4882a593Smuzhiyun int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
4084*4882a593Smuzhiyun {
4085*4882a593Smuzhiyun 	struct inode *inode = file_inode(file);
4086*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
4087*4882a593Smuzhiyun 	ext4_lblk_t first_block, stop_block;
4088*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
4089*4882a593Smuzhiyun 	loff_t first_block_offset, last_block_offset, max_length;
4090*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4091*4882a593Smuzhiyun 	handle_t *handle;
4092*4882a593Smuzhiyun 	unsigned int credits;
4093*4882a593Smuzhiyun 	int ret = 0, ret2 = 0;
4094*4882a593Smuzhiyun 
4095*4882a593Smuzhiyun 	trace_ext4_punch_hole(inode, offset, length, 0);
4096*4882a593Smuzhiyun 
4097*4882a593Smuzhiyun 	/*
4098*4882a593Smuzhiyun 	 * Write out all dirty pages to avoid race conditions,
4099*4882a593Smuzhiyun 	 * then release them.
4100*4882a593Smuzhiyun 	 */
4101*4882a593Smuzhiyun 	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4102*4882a593Smuzhiyun 		ret = filemap_write_and_wait_range(mapping, offset,
4103*4882a593Smuzhiyun 						   offset + length - 1);
4104*4882a593Smuzhiyun 		if (ret)
4105*4882a593Smuzhiyun 			return ret;
4106*4882a593Smuzhiyun 	}
4107*4882a593Smuzhiyun 
4108*4882a593Smuzhiyun 	inode_lock(inode);
4109*4882a593Smuzhiyun 
4110*4882a593Smuzhiyun 	/* No need to punch hole beyond i_size */
4111*4882a593Smuzhiyun 	if (offset >= inode->i_size)
4112*4882a593Smuzhiyun 		goto out_mutex;
4113*4882a593Smuzhiyun 
4114*4882a593Smuzhiyun 	/*
4115*4882a593Smuzhiyun 	 * If the hole extends beyond i_size, set the hole
4116*4882a593Smuzhiyun 	 * to end after the page that contains i_size
4117*4882a593Smuzhiyun 	 */
4118*4882a593Smuzhiyun 	if (offset + length > inode->i_size) {
4119*4882a593Smuzhiyun 		length = inode->i_size +
4120*4882a593Smuzhiyun 		   PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) -
4121*4882a593Smuzhiyun 		   offset;
4122*4882a593Smuzhiyun 	}
4123*4882a593Smuzhiyun 
4124*4882a593Smuzhiyun 	/*
4125*4882a593Smuzhiyun 	 * For punch hole, offset + length must stay at least one block
4126*4882a593Smuzhiyun 	 * below s_bitmap_maxbytes. Adjust the length if it goes beyond that limit.
4127*4882a593Smuzhiyun 	 */
4128*4882a593Smuzhiyun 	max_length = sbi->s_bitmap_maxbytes - inode->i_sb->s_blocksize;
4129*4882a593Smuzhiyun 	if (offset + length > max_length)
4130*4882a593Smuzhiyun 		length = max_length - offset;
4131*4882a593Smuzhiyun 
4132*4882a593Smuzhiyun 	if (offset & (sb->s_blocksize - 1) ||
4133*4882a593Smuzhiyun 	    (offset + length) & (sb->s_blocksize - 1)) {
4134*4882a593Smuzhiyun 		/*
4135*4882a593Smuzhiyun 		 * Attach jinode to inode for jbd2 if we do any zeroing of
4136*4882a593Smuzhiyun 		 * partial block
4137*4882a593Smuzhiyun 		 */
4138*4882a593Smuzhiyun 		ret = ext4_inode_attach_jinode(inode);
4139*4882a593Smuzhiyun 		if (ret < 0)
4140*4882a593Smuzhiyun 			goto out_mutex;
4141*4882a593Smuzhiyun 
4142*4882a593Smuzhiyun 	}
4143*4882a593Smuzhiyun 
4144*4882a593Smuzhiyun 	/* Wait for all existing dio workers; newcomers will block on i_mutex */
4145*4882a593Smuzhiyun 	inode_dio_wait(inode);
4146*4882a593Smuzhiyun 
4147*4882a593Smuzhiyun 	ret = file_modified(file);
4148*4882a593Smuzhiyun 	if (ret)
4149*4882a593Smuzhiyun 		goto out_mutex;
4150*4882a593Smuzhiyun 
4151*4882a593Smuzhiyun 	/*
4152*4882a593Smuzhiyun 	 * Prevent page faults from reinstantiating pages we have released from
4153*4882a593Smuzhiyun 	 * page cache.
4154*4882a593Smuzhiyun 	 */
4155*4882a593Smuzhiyun 	down_write(&EXT4_I(inode)->i_mmap_sem);
4156*4882a593Smuzhiyun 
4157*4882a593Smuzhiyun 	ret = ext4_break_layouts(inode);
4158*4882a593Smuzhiyun 	if (ret)
4159*4882a593Smuzhiyun 		goto out_dio;
4160*4882a593Smuzhiyun 
4161*4882a593Smuzhiyun 	first_block_offset = round_up(offset, sb->s_blocksize);
4162*4882a593Smuzhiyun 	last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4163*4882a593Smuzhiyun 
4164*4882a593Smuzhiyun 	/* Now release the pages and zero the block-aligned part of the pages */
4165*4882a593Smuzhiyun 	if (last_block_offset > first_block_offset) {
4166*4882a593Smuzhiyun 		ret = ext4_update_disksize_before_punch(inode, offset, length);
4167*4882a593Smuzhiyun 		if (ret)
4168*4882a593Smuzhiyun 			goto out_dio;
4169*4882a593Smuzhiyun 		truncate_pagecache_range(inode, first_block_offset,
4170*4882a593Smuzhiyun 					 last_block_offset);
4171*4882a593Smuzhiyun 	}
4172*4882a593Smuzhiyun 
4173*4882a593Smuzhiyun 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4174*4882a593Smuzhiyun 		credits = ext4_writepage_trans_blocks(inode);
4175*4882a593Smuzhiyun 	else
4176*4882a593Smuzhiyun 		credits = ext4_blocks_for_truncate(inode);
4177*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4178*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
4179*4882a593Smuzhiyun 		ret = PTR_ERR(handle);
4180*4882a593Smuzhiyun 		ext4_std_error(sb, ret);
4181*4882a593Smuzhiyun 		goto out_dio;
4182*4882a593Smuzhiyun 	}
4183*4882a593Smuzhiyun 
4184*4882a593Smuzhiyun 	ret = ext4_zero_partial_blocks(handle, inode, offset,
4185*4882a593Smuzhiyun 				       length);
4186*4882a593Smuzhiyun 	if (ret)
4187*4882a593Smuzhiyun 		goto out_stop;
4188*4882a593Smuzhiyun 
4189*4882a593Smuzhiyun 	first_block = (offset + sb->s_blocksize - 1) >>
4190*4882a593Smuzhiyun 		EXT4_BLOCK_SIZE_BITS(sb);
4191*4882a593Smuzhiyun 	stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
4192*4882a593Smuzhiyun 
4193*4882a593Smuzhiyun 	/* If there are blocks to remove, do it */
4194*4882a593Smuzhiyun 	if (stop_block > first_block) {
4195*4882a593Smuzhiyun 
4196*4882a593Smuzhiyun 		down_write(&EXT4_I(inode)->i_data_sem);
4197*4882a593Smuzhiyun 		ext4_discard_preallocations(inode, 0);
4198*4882a593Smuzhiyun 
4199*4882a593Smuzhiyun 		ret = ext4_es_remove_extent(inode, first_block,
4200*4882a593Smuzhiyun 					    stop_block - first_block);
4201*4882a593Smuzhiyun 		if (ret) {
4202*4882a593Smuzhiyun 			up_write(&EXT4_I(inode)->i_data_sem);
4203*4882a593Smuzhiyun 			goto out_stop;
4204*4882a593Smuzhiyun 		}
4205*4882a593Smuzhiyun 
4206*4882a593Smuzhiyun 		if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4207*4882a593Smuzhiyun 			ret = ext4_ext_remove_space(inode, first_block,
4208*4882a593Smuzhiyun 						    stop_block - 1);
4209*4882a593Smuzhiyun 		else
4210*4882a593Smuzhiyun 			ret = ext4_ind_remove_space(handle, inode, first_block,
4211*4882a593Smuzhiyun 						    stop_block);
4212*4882a593Smuzhiyun 
4213*4882a593Smuzhiyun 		up_write(&EXT4_I(inode)->i_data_sem);
4214*4882a593Smuzhiyun 	}
4215*4882a593Smuzhiyun 	ext4_fc_track_range(handle, inode, first_block, stop_block);
4216*4882a593Smuzhiyun 	if (IS_SYNC(inode))
4217*4882a593Smuzhiyun 		ext4_handle_sync(handle);
4218*4882a593Smuzhiyun 
4219*4882a593Smuzhiyun 	inode->i_mtime = inode->i_ctime = current_time(inode);
4220*4882a593Smuzhiyun 	ret2 = ext4_mark_inode_dirty(handle, inode);
4221*4882a593Smuzhiyun 	if (unlikely(ret2))
4222*4882a593Smuzhiyun 		ret = ret2;
4223*4882a593Smuzhiyun 	if (ret >= 0)
4224*4882a593Smuzhiyun 		ext4_update_inode_fsync_trans(handle, inode, 1);
4225*4882a593Smuzhiyun out_stop:
4226*4882a593Smuzhiyun 	ext4_journal_stop(handle);
4227*4882a593Smuzhiyun out_dio:
4228*4882a593Smuzhiyun 	up_write(&EXT4_I(inode)->i_mmap_sem);
4229*4882a593Smuzhiyun out_mutex:
4230*4882a593Smuzhiyun 	inode_unlock(inode);
4231*4882a593Smuzhiyun 	return ret;
4232*4882a593Smuzhiyun }
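
/*
 * Editor's illustration (not part of the kernel source, kept out of the
 * build with #if 0): the punch-hole range arithmetic above, worked for a
 * 4k block size, offset 5000 and length 20000.  Only whole blocks inside
 * the range are freed; the partial head and tail are zeroed in place by
 * ext4_zero_partial_blocks().
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long blocksize = 4096, offset = 5000, length = 20000;
	/* round_up(offset, blocksize) */
	unsigned long long first_off = (offset + blocksize - 1) / blocksize * blocksize;
	/* round_down(offset + length, blocksize) - 1 */
	unsigned long long last_off = (offset + length) / blocksize * blocksize - 1;
	unsigned long long first_block = (offset + blocksize - 1) / blocksize;
	unsigned long long stop_block = (offset + length) / blocksize;

	/* prints: cache range [8192, 24575], blocks [2, 6) */
	printf("cache range [%llu, %llu], blocks [%llu, %llu)\n",
	       first_off, last_off, first_block, stop_block);
	return 0;
}
#endif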
4233*4882a593Smuzhiyun 
4234*4882a593Smuzhiyun int ext4_inode_attach_jinode(struct inode *inode)
4235*4882a593Smuzhiyun {
4236*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
4237*4882a593Smuzhiyun 	struct jbd2_inode *jinode;
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun 	if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
4240*4882a593Smuzhiyun 		return 0;
4241*4882a593Smuzhiyun 
4242*4882a593Smuzhiyun 	jinode = jbd2_alloc_inode(GFP_KERNEL);
4243*4882a593Smuzhiyun 	spin_lock(&inode->i_lock);
4244*4882a593Smuzhiyun 	if (!ei->jinode) {
4245*4882a593Smuzhiyun 		if (!jinode) {
4246*4882a593Smuzhiyun 			spin_unlock(&inode->i_lock);
4247*4882a593Smuzhiyun 			return -ENOMEM;
4248*4882a593Smuzhiyun 		}
4249*4882a593Smuzhiyun 		ei->jinode = jinode;
4250*4882a593Smuzhiyun 		jbd2_journal_init_jbd_inode(ei->jinode, inode);
4251*4882a593Smuzhiyun 		jinode = NULL;
4252*4882a593Smuzhiyun 	}
4253*4882a593Smuzhiyun 	spin_unlock(&inode->i_lock);
4254*4882a593Smuzhiyun 	if (unlikely(jinode != NULL))
4255*4882a593Smuzhiyun 		jbd2_free_inode(jinode);
4256*4882a593Smuzhiyun 	return 0;
4257*4882a593Smuzhiyun }
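
/*
 * Editor's note: ext4_inode_attach_jinode() above uses the classic
 * "allocate optimistically outside the lock, recheck under it" idiom, so
 * the (possibly sleeping) allocation never happens under i_lock.  A
 * hedged userspace sketch of the same pattern (illustrative names only,
 * kept out of the build with #if 0):
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

static struct obj { int payload; } *cached;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int attach_once(void)
{
	struct obj *new = malloc(sizeof(*new));	/* may block: done unlocked */

	pthread_mutex_lock(&lock);
	if (!cached) {
		if (!new) {
			pthread_mutex_unlock(&lock);
			return -1;		/* -ENOMEM analogue */
		}
		cached = new;			/* we installed ours */
		new = NULL;
	}
	pthread_mutex_unlock(&lock);
	free(new);	/* someone beat us to it: discard the spare */
	return 0;
}
#endif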
4258*4882a593Smuzhiyun 
4259*4882a593Smuzhiyun /*
4260*4882a593Smuzhiyun  * ext4_truncate()
4261*4882a593Smuzhiyun  *
4262*4882a593Smuzhiyun  * We block out ext4_get_block() block instantiations across the entire
4263*4882a593Smuzhiyun  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
4264*4882a593Smuzhiyun  * simultaneously on behalf of the same inode.
4265*4882a593Smuzhiyun  *
4266*4882a593Smuzhiyun  * As we work through the truncate and commit bits of it to the journal there
4267*4882a593Smuzhiyun  * is one core, guiding principle: the file's tree must always be consistent on
4268*4882a593Smuzhiyun  * disk.  We must be able to restart the truncate after a crash.
4269*4882a593Smuzhiyun  *
4270*4882a593Smuzhiyun  * The file's tree may be transiently inconsistent in memory (although it
4271*4882a593Smuzhiyun  * probably isn't), but whenever we close off and commit a journal transaction,
4272*4882a593Smuzhiyun  * the contents of (the filesystem + the journal) must be consistent and
4273*4882a593Smuzhiyun  * restartable.  It's pretty simple, really: bottom up, right to left (although
4274*4882a593Smuzhiyun  * left-to-right works OK too).
4275*4882a593Smuzhiyun  *
4276*4882a593Smuzhiyun  * Note that at recovery time, journal replay occurs *before* the restart of
4277*4882a593Smuzhiyun  * truncate against the orphan inode list.
4278*4882a593Smuzhiyun  *
4279*4882a593Smuzhiyun  * The committed inode has the new, desired i_size (which is the same as
4280*4882a593Smuzhiyun  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
4281*4882a593Smuzhiyun  * that this inode's truncate did not complete and it will again call
4282*4882a593Smuzhiyun  * ext4_truncate() to have another go.  So there will be instantiated blocks
4283*4882a593Smuzhiyun  * to the right of the truncation point in a crashed ext4 filesystem.  But
4284*4882a593Smuzhiyun  * that's fine - as long as they are linked from the inode, the post-crash
4285*4882a593Smuzhiyun  * ext4_truncate() run will find them and release them.
4286*4882a593Smuzhiyun  */
4287*4882a593Smuzhiyun int ext4_truncate(struct inode *inode)
4288*4882a593Smuzhiyun {
4289*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
4290*4882a593Smuzhiyun 	unsigned int credits;
4291*4882a593Smuzhiyun 	int err = 0, err2;
4292*4882a593Smuzhiyun 	handle_t *handle;
4293*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 	/*
4296*4882a593Smuzhiyun 	 * There is a possibility that we're either freeing the inode
4297*4882a593Smuzhiyun 	 * or it's a completely new inode. In those cases we might not
4298*4882a593Smuzhiyun 	 * have i_mutex locked because it's not necessary.
4299*4882a593Smuzhiyun 	 */
4300*4882a593Smuzhiyun 	if (!(inode->i_state & (I_NEW|I_FREEING)))
4301*4882a593Smuzhiyun 		WARN_ON(!inode_is_locked(inode));
4302*4882a593Smuzhiyun 	trace_ext4_truncate_enter(inode);
4303*4882a593Smuzhiyun 
4304*4882a593Smuzhiyun 	if (!ext4_can_truncate(inode))
4305*4882a593Smuzhiyun 		goto out_trace;
4306*4882a593Smuzhiyun 
4307*4882a593Smuzhiyun 	if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
4308*4882a593Smuzhiyun 		ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
4309*4882a593Smuzhiyun 
4310*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode)) {
4311*4882a593Smuzhiyun 		int has_inline = 1;
4312*4882a593Smuzhiyun 
4313*4882a593Smuzhiyun 		err = ext4_inline_data_truncate(inode, &has_inline);
4314*4882a593Smuzhiyun 		if (err || has_inline)
4315*4882a593Smuzhiyun 			goto out_trace;
4316*4882a593Smuzhiyun 	}
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun 	/* If we zero out the tail of the page, we have to create a jinode for jbd2 */
4319*4882a593Smuzhiyun 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
4320*4882a593Smuzhiyun 		if (ext4_inode_attach_jinode(inode) < 0)
4321*4882a593Smuzhiyun 			goto out_trace;
4322*4882a593Smuzhiyun 	}
4323*4882a593Smuzhiyun 
4324*4882a593Smuzhiyun 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4325*4882a593Smuzhiyun 		credits = ext4_writepage_trans_blocks(inode);
4326*4882a593Smuzhiyun 	else
4327*4882a593Smuzhiyun 		credits = ext4_blocks_for_truncate(inode);
4328*4882a593Smuzhiyun 
4329*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4330*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
4331*4882a593Smuzhiyun 		err = PTR_ERR(handle);
4332*4882a593Smuzhiyun 		goto out_trace;
4333*4882a593Smuzhiyun 	}
4334*4882a593Smuzhiyun 
4335*4882a593Smuzhiyun 	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
4336*4882a593Smuzhiyun 		ext4_block_truncate_page(handle, mapping, inode->i_size);
4337*4882a593Smuzhiyun 
4338*4882a593Smuzhiyun 	/*
4339*4882a593Smuzhiyun 	 * We add the inode to the orphan list, so that if this
4340*4882a593Smuzhiyun 	 * truncate spans multiple transactions, and we crash, we will
4341*4882a593Smuzhiyun 	 * resume the truncate when the filesystem recovers.  It also
4342*4882a593Smuzhiyun 	 * marks the inode dirty, to catch the new size.
4343*4882a593Smuzhiyun 	 *
4344*4882a593Smuzhiyun 	 * Implication: the file must always be in a sane, consistent
4345*4882a593Smuzhiyun 	 * truncatable state while each transaction commits.
4346*4882a593Smuzhiyun 	 */
4347*4882a593Smuzhiyun 	err = ext4_orphan_add(handle, inode);
4348*4882a593Smuzhiyun 	if (err)
4349*4882a593Smuzhiyun 		goto out_stop;
4350*4882a593Smuzhiyun 
4351*4882a593Smuzhiyun 	down_write(&EXT4_I(inode)->i_data_sem);
4352*4882a593Smuzhiyun 
4353*4882a593Smuzhiyun 	ext4_discard_preallocations(inode, 0);
4354*4882a593Smuzhiyun 
4355*4882a593Smuzhiyun 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4356*4882a593Smuzhiyun 		err = ext4_ext_truncate(handle, inode);
4357*4882a593Smuzhiyun 	else
4358*4882a593Smuzhiyun 		ext4_ind_truncate(handle, inode);
4359*4882a593Smuzhiyun 
4360*4882a593Smuzhiyun 	up_write(&ei->i_data_sem);
4361*4882a593Smuzhiyun 	if (err)
4362*4882a593Smuzhiyun 		goto out_stop;
4363*4882a593Smuzhiyun 
4364*4882a593Smuzhiyun 	if (IS_SYNC(inode))
4365*4882a593Smuzhiyun 		ext4_handle_sync(handle);
4366*4882a593Smuzhiyun 
4367*4882a593Smuzhiyun out_stop:
4368*4882a593Smuzhiyun 	/*
4369*4882a593Smuzhiyun 	 * If this was a simple ftruncate() and the file will remain alive,
4370*4882a593Smuzhiyun 	 * then we need to clear up the orphan record which we created above.
4371*4882a593Smuzhiyun 	 * However, if this was a real unlink then we were called by
4372*4882a593Smuzhiyun 	 * ext4_evict_inode(), and we allow that function to clean up the
4373*4882a593Smuzhiyun 	 * orphan info for us.
4374*4882a593Smuzhiyun 	 */
4375*4882a593Smuzhiyun 	if (inode->i_nlink)
4376*4882a593Smuzhiyun 		ext4_orphan_del(handle, inode);
4377*4882a593Smuzhiyun 
4378*4882a593Smuzhiyun 	inode->i_mtime = inode->i_ctime = current_time(inode);
4379*4882a593Smuzhiyun 	err2 = ext4_mark_inode_dirty(handle, inode);
4380*4882a593Smuzhiyun 	if (unlikely(err2 && !err))
4381*4882a593Smuzhiyun 		err = err2;
4382*4882a593Smuzhiyun 	ext4_journal_stop(handle);
4383*4882a593Smuzhiyun 
4384*4882a593Smuzhiyun out_trace:
4385*4882a593Smuzhiyun 	trace_ext4_truncate_exit(inode);
4386*4882a593Smuzhiyun 	return err;
4387*4882a593Smuzhiyun }
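
/*
 * Editor's illustration: this path is normally entered from userspace via
 * truncate(2)/ftruncate(2) (through ext4_setattr()).  A new size that is
 * not block-aligned takes the ext4_block_truncate_page() branch above to
 * zero the tail of the last block.  Kept out of the build with #if 0:
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/trunc-demo", O_CREAT | O_RDWR, 0644);

	if (fd < 0)
		return 1;
	/* 5000 is not a multiple of 4096: the tail of the last block is zeroed */
	if (ftruncate(fd, 5000) != 0)
		perror("ftruncate");
	close(fd);
	return 0;
}
#endif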
4388*4882a593Smuzhiyun 
4389*4882a593Smuzhiyun /*
4390*4882a593Smuzhiyun  * ext4_get_inode_loc returns with an extra refcount against the inode's
4391*4882a593Smuzhiyun  * underlying buffer_head on success. If 'in_mem' is true, we have all
4392*4882a593Smuzhiyun  * data in memory that is needed to recreate the on-disk version of this
4393*4882a593Smuzhiyun  * inode.
4394*4882a593Smuzhiyun  */
4395*4882a593Smuzhiyun static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
4396*4882a593Smuzhiyun 				struct ext4_iloc *iloc, int in_mem,
4397*4882a593Smuzhiyun 				ext4_fsblk_t *ret_block)
4398*4882a593Smuzhiyun {
4399*4882a593Smuzhiyun 	struct ext4_group_desc	*gdp;
4400*4882a593Smuzhiyun 	struct buffer_head	*bh;
4401*4882a593Smuzhiyun 	ext4_fsblk_t		block;
4402*4882a593Smuzhiyun 	struct blk_plug		plug;
4403*4882a593Smuzhiyun 	int			inodes_per_block, inode_offset;
4404*4882a593Smuzhiyun 
4405*4882a593Smuzhiyun 	iloc->bh = NULL;
4406*4882a593Smuzhiyun 	if (ino < EXT4_ROOT_INO ||
4407*4882a593Smuzhiyun 	    ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
4408*4882a593Smuzhiyun 		return -EFSCORRUPTED;
4409*4882a593Smuzhiyun 
4410*4882a593Smuzhiyun 	iloc->block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
4411*4882a593Smuzhiyun 	gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
4412*4882a593Smuzhiyun 	if (!gdp)
4413*4882a593Smuzhiyun 		return -EIO;
4414*4882a593Smuzhiyun 
4415*4882a593Smuzhiyun 	/*
4416*4882a593Smuzhiyun 	 * Figure out the offset within the block group inode table
4417*4882a593Smuzhiyun 	 */
4418*4882a593Smuzhiyun 	inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
4419*4882a593Smuzhiyun 	inode_offset = ((ino - 1) %
4420*4882a593Smuzhiyun 			EXT4_INODES_PER_GROUP(sb));
4421*4882a593Smuzhiyun 	block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
4422*4882a593Smuzhiyun 	iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
4423*4882a593Smuzhiyun 
4424*4882a593Smuzhiyun 	bh = sb_getblk(sb, block);
4425*4882a593Smuzhiyun 	if (unlikely(!bh))
4426*4882a593Smuzhiyun 		return -ENOMEM;
4427*4882a593Smuzhiyun 	if (ext4_simulate_fail(sb, EXT4_SIM_INODE_EIO))
4428*4882a593Smuzhiyun 		goto simulate_eio;
4429*4882a593Smuzhiyun 	if (!buffer_uptodate(bh)) {
4430*4882a593Smuzhiyun 		lock_buffer(bh);
4431*4882a593Smuzhiyun 
4432*4882a593Smuzhiyun 		if (ext4_buffer_uptodate(bh)) {
4433*4882a593Smuzhiyun 			/* someone brought it uptodate while we waited */
4434*4882a593Smuzhiyun 			unlock_buffer(bh);
4435*4882a593Smuzhiyun 			goto has_buffer;
4436*4882a593Smuzhiyun 		}
4437*4882a593Smuzhiyun 
4438*4882a593Smuzhiyun 		/*
4439*4882a593Smuzhiyun 		 * If we have all of the inode's information in memory and this
4440*4882a593Smuzhiyun 		 * is the only valid inode in the block, we need not read the
4441*4882a593Smuzhiyun 		 * block.
4442*4882a593Smuzhiyun 		 */
4443*4882a593Smuzhiyun 		if (in_mem) {
4444*4882a593Smuzhiyun 			struct buffer_head *bitmap_bh;
4445*4882a593Smuzhiyun 			int i, start;
4446*4882a593Smuzhiyun 
4447*4882a593Smuzhiyun 			start = inode_offset & ~(inodes_per_block - 1);
4448*4882a593Smuzhiyun 
4449*4882a593Smuzhiyun 			/* Is the inode bitmap in cache? */
4450*4882a593Smuzhiyun 			bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
4451*4882a593Smuzhiyun 			if (unlikely(!bitmap_bh))
4452*4882a593Smuzhiyun 				goto make_io;
4453*4882a593Smuzhiyun 
4454*4882a593Smuzhiyun 			/*
4455*4882a593Smuzhiyun 			 * If the inode bitmap isn't in cache then the
4456*4882a593Smuzhiyun 			 * optimisation may end up performing two reads instead
4457*4882a593Smuzhiyun 			 * of one, so skip it.
4458*4882a593Smuzhiyun 			 */
4459*4882a593Smuzhiyun 			if (!buffer_uptodate(bitmap_bh)) {
4460*4882a593Smuzhiyun 				brelse(bitmap_bh);
4461*4882a593Smuzhiyun 				goto make_io;
4462*4882a593Smuzhiyun 			}
4463*4882a593Smuzhiyun 			for (i = start; i < start + inodes_per_block; i++) {
4464*4882a593Smuzhiyun 				if (i == inode_offset)
4465*4882a593Smuzhiyun 					continue;
4466*4882a593Smuzhiyun 				if (ext4_test_bit(i, bitmap_bh->b_data))
4467*4882a593Smuzhiyun 					break;
4468*4882a593Smuzhiyun 			}
4469*4882a593Smuzhiyun 			brelse(bitmap_bh);
4470*4882a593Smuzhiyun 			if (i == start + inodes_per_block) {
4471*4882a593Smuzhiyun 				/* all other inodes are free, so skip I/O */
4472*4882a593Smuzhiyun 				memset(bh->b_data, 0, bh->b_size);
4473*4882a593Smuzhiyun 				set_buffer_uptodate(bh);
4474*4882a593Smuzhiyun 				unlock_buffer(bh);
4475*4882a593Smuzhiyun 				goto has_buffer;
4476*4882a593Smuzhiyun 			}
4477*4882a593Smuzhiyun 		}
4478*4882a593Smuzhiyun 
4479*4882a593Smuzhiyun make_io:
4480*4882a593Smuzhiyun 		/*
4481*4882a593Smuzhiyun 		 * If we need to do any I/O, try to read ahead extra
4482*4882a593Smuzhiyun 		 * blocks from the inode table.
4483*4882a593Smuzhiyun 		 */
4484*4882a593Smuzhiyun 		blk_start_plug(&plug);
4485*4882a593Smuzhiyun 		if (EXT4_SB(sb)->s_inode_readahead_blks) {
4486*4882a593Smuzhiyun 			ext4_fsblk_t b, end, table;
4487*4882a593Smuzhiyun 			unsigned num;
4488*4882a593Smuzhiyun 			__u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
4489*4882a593Smuzhiyun 
4490*4882a593Smuzhiyun 			table = ext4_inode_table(sb, gdp);
4491*4882a593Smuzhiyun 			/* s_inode_readahead_blks is always a power of 2 */
4492*4882a593Smuzhiyun 			b = block & ~((ext4_fsblk_t) ra_blks - 1);
4493*4882a593Smuzhiyun 			if (table > b)
4494*4882a593Smuzhiyun 				b = table;
4495*4882a593Smuzhiyun 			end = b + ra_blks;
4496*4882a593Smuzhiyun 			num = EXT4_INODES_PER_GROUP(sb);
4497*4882a593Smuzhiyun 			if (ext4_has_group_desc_csum(sb))
4498*4882a593Smuzhiyun 				num -= ext4_itable_unused_count(sb, gdp);
4499*4882a593Smuzhiyun 			table += num / inodes_per_block;
4500*4882a593Smuzhiyun 			if (end > table)
4501*4882a593Smuzhiyun 				end = table;
4502*4882a593Smuzhiyun 			while (b <= end)
4503*4882a593Smuzhiyun 				ext4_sb_breadahead_unmovable(sb, b++);
4504*4882a593Smuzhiyun 		}
4505*4882a593Smuzhiyun 
4506*4882a593Smuzhiyun 		/*
4507*4882a593Smuzhiyun 		 * There are other valid inodes in the buffer, this inode
4508*4882a593Smuzhiyun 		 * has in-inode xattrs, or we don't have this inode in memory.
4509*4882a593Smuzhiyun 		 * Read the block from disk.
4510*4882a593Smuzhiyun 		 */
4511*4882a593Smuzhiyun 		trace_ext4_load_inode(sb, ino);
4512*4882a593Smuzhiyun 		ext4_read_bh_nowait(bh, REQ_META | REQ_PRIO, NULL);
4513*4882a593Smuzhiyun 		blk_finish_plug(&plug);
4514*4882a593Smuzhiyun 		wait_on_buffer(bh);
4515*4882a593Smuzhiyun 		if (!buffer_uptodate(bh)) {
4516*4882a593Smuzhiyun 		simulate_eio:
4517*4882a593Smuzhiyun 			if (ret_block)
4518*4882a593Smuzhiyun 				*ret_block = block;
4519*4882a593Smuzhiyun 			brelse(bh);
4520*4882a593Smuzhiyun 			return -EIO;
4521*4882a593Smuzhiyun 		}
4522*4882a593Smuzhiyun 	}
4523*4882a593Smuzhiyun has_buffer:
4524*4882a593Smuzhiyun 	iloc->bh = bh;
4525*4882a593Smuzhiyun 	return 0;
4526*4882a593Smuzhiyun }
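
/*
 * Editor's illustration of the location arithmetic above: with 8192
 * inodes per group, 256-byte inodes and 4k blocks (16 inodes per block),
 * inode #12345 lands in group 1, index 4152 within the group, i.e. the
 * 259th block of that group's inode table at byte offset 2048.  Kept out
 * of the build with #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ino = 12345, inodes_per_group = 8192;
	unsigned int inode_size = 256, block_size = 4096;
	unsigned int inodes_per_block = block_size / inode_size;
	unsigned long group = (ino - 1) / inodes_per_group;
	unsigned long index = (ino - 1) % inodes_per_group;
	unsigned long block = index / inodes_per_block;	/* from itable start */
	unsigned int offset = (index % inodes_per_block) * inode_size;

	/* prints: group 1, itable block +259, offset 2048 */
	printf("group %lu, itable block +%lu, offset %u\n", group, block, offset);
	return 0;
}
#endif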
4527*4882a593Smuzhiyun 
4528*4882a593Smuzhiyun static int __ext4_get_inode_loc_noinmem(struct inode *inode,
4529*4882a593Smuzhiyun 					struct ext4_iloc *iloc)
4530*4882a593Smuzhiyun {
4531*4882a593Smuzhiyun 	ext4_fsblk_t err_blk = 0;
4532*4882a593Smuzhiyun 	int ret;
4533*4882a593Smuzhiyun 
4534*4882a593Smuzhiyun 	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc, 0,
4535*4882a593Smuzhiyun 					&err_blk);
4536*4882a593Smuzhiyun 
4537*4882a593Smuzhiyun 	if (ret == -EIO)
4538*4882a593Smuzhiyun 		ext4_error_inode_block(inode, err_blk, EIO,
4539*4882a593Smuzhiyun 					"unable to read itable block");
4540*4882a593Smuzhiyun 
4541*4882a593Smuzhiyun 	return ret;
4542*4882a593Smuzhiyun }
4543*4882a593Smuzhiyun 
4544*4882a593Smuzhiyun int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
4545*4882a593Smuzhiyun {
4546*4882a593Smuzhiyun 	ext4_fsblk_t err_blk = 0;
4547*4882a593Smuzhiyun 	int ret;
4548*4882a593Smuzhiyun 
4549*4882a593Smuzhiyun 	/* We have all inode data except xattrs in memory here. */
4550*4882a593Smuzhiyun 	ret = __ext4_get_inode_loc(inode->i_sb, inode->i_ino, iloc,
4551*4882a593Smuzhiyun 		!ext4_test_inode_state(inode, EXT4_STATE_XATTR), &err_blk);
4552*4882a593Smuzhiyun 
4553*4882a593Smuzhiyun 	if (ret == -EIO)
4554*4882a593Smuzhiyun 		ext4_error_inode_block(inode, err_blk, EIO,
4555*4882a593Smuzhiyun 					"unable to read itable block");
4556*4882a593Smuzhiyun 
4557*4882a593Smuzhiyun 	return ret;
4558*4882a593Smuzhiyun }
4559*4882a593Smuzhiyun 
4560*4882a593Smuzhiyun 
4561*4882a593Smuzhiyun int ext4_get_fc_inode_loc(struct super_block *sb, unsigned long ino,
4562*4882a593Smuzhiyun 			  struct ext4_iloc *iloc)
4563*4882a593Smuzhiyun {
4564*4882a593Smuzhiyun 	return __ext4_get_inode_loc(sb, ino, iloc, 0, NULL);
4565*4882a593Smuzhiyun }
4566*4882a593Smuzhiyun 
4567*4882a593Smuzhiyun static bool ext4_should_enable_dax(struct inode *inode)
4568*4882a593Smuzhiyun {
4569*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4570*4882a593Smuzhiyun 
4571*4882a593Smuzhiyun 	if (test_opt2(inode->i_sb, DAX_NEVER))
4572*4882a593Smuzhiyun 		return false;
4573*4882a593Smuzhiyun 	if (!S_ISREG(inode->i_mode))
4574*4882a593Smuzhiyun 		return false;
4575*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode))
4576*4882a593Smuzhiyun 		return false;
4577*4882a593Smuzhiyun 	if (ext4_has_inline_data(inode))
4578*4882a593Smuzhiyun 		return false;
4579*4882a593Smuzhiyun 	if (ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT))
4580*4882a593Smuzhiyun 		return false;
4581*4882a593Smuzhiyun 	if (ext4_test_inode_flag(inode, EXT4_INODE_VERITY))
4582*4882a593Smuzhiyun 		return false;
4583*4882a593Smuzhiyun 	if (!test_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags))
4584*4882a593Smuzhiyun 		return false;
4585*4882a593Smuzhiyun 	if (test_opt(inode->i_sb, DAX_ALWAYS))
4586*4882a593Smuzhiyun 		return true;
4587*4882a593Smuzhiyun 
4588*4882a593Smuzhiyun 	return ext4_test_inode_flag(inode, EXT4_INODE_DAX);
4589*4882a593Smuzhiyun }
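
/*
 * Editor's note, a hedged distillation of the precedence encoded above:
 * dax=never wins outright, then any disqualifying inode property
 * (non-regular file, journalled data, inline data, encryption, verity,
 * non-DAX bdev), then dax=always, and only then the per-inode DAX flag.
 * Kept out of the build with #if 0:
 */
#if 0
enum dax_opt { OPT_DAX_NEVER, OPT_DAX_ALWAYS, OPT_DAX_INODE };

static int dax_enabled(enum dax_opt opt, int disqualified, int inode_flag)
{
	if (opt == OPT_DAX_NEVER || disqualified)
		return 0;
	if (opt == OPT_DAX_ALWAYS)
		return 1;
	return inode_flag;	/* dax=inode: honour the per-inode flag */
}
#endif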
4590*4882a593Smuzhiyun 
4591*4882a593Smuzhiyun void ext4_set_inode_flags(struct inode *inode, bool init)
4592*4882a593Smuzhiyun {
4593*4882a593Smuzhiyun 	unsigned int flags = EXT4_I(inode)->i_flags;
4594*4882a593Smuzhiyun 	unsigned int new_fl = 0;
4595*4882a593Smuzhiyun 
4596*4882a593Smuzhiyun 	WARN_ON_ONCE(IS_DAX(inode) && init);
4597*4882a593Smuzhiyun 
4598*4882a593Smuzhiyun 	if (flags & EXT4_SYNC_FL)
4599*4882a593Smuzhiyun 		new_fl |= S_SYNC;
4600*4882a593Smuzhiyun 	if (flags & EXT4_APPEND_FL)
4601*4882a593Smuzhiyun 		new_fl |= S_APPEND;
4602*4882a593Smuzhiyun 	if (flags & EXT4_IMMUTABLE_FL)
4603*4882a593Smuzhiyun 		new_fl |= S_IMMUTABLE;
4604*4882a593Smuzhiyun 	if (flags & EXT4_NOATIME_FL)
4605*4882a593Smuzhiyun 		new_fl |= S_NOATIME;
4606*4882a593Smuzhiyun 	if (flags & EXT4_DIRSYNC_FL)
4607*4882a593Smuzhiyun 		new_fl |= S_DIRSYNC;
4608*4882a593Smuzhiyun 
4609*4882a593Smuzhiyun 	/* Because of the way inode_set_flags() works, we must preserve S_DAX
4610*4882a593Smuzhiyun 	 * here if already set. */
4611*4882a593Smuzhiyun 	new_fl |= (inode->i_flags & S_DAX);
4612*4882a593Smuzhiyun 	if (init && ext4_should_enable_dax(inode))
4613*4882a593Smuzhiyun 		new_fl |= S_DAX;
4614*4882a593Smuzhiyun 
4615*4882a593Smuzhiyun 	if (flags & EXT4_ENCRYPT_FL)
4616*4882a593Smuzhiyun 		new_fl |= S_ENCRYPTED;
4617*4882a593Smuzhiyun 	if (flags & EXT4_CASEFOLD_FL)
4618*4882a593Smuzhiyun 		new_fl |= S_CASEFOLD;
4619*4882a593Smuzhiyun 	if (flags & EXT4_VERITY_FL)
4620*4882a593Smuzhiyun 		new_fl |= S_VERITY;
4621*4882a593Smuzhiyun 	inode_set_flags(inode, new_fl,
4622*4882a593Smuzhiyun 			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX|
4623*4882a593Smuzhiyun 			S_ENCRYPTED|S_CASEFOLD|S_VERITY);
4624*4882a593Smuzhiyun }
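
/*
 * Editor's sketch (an alternative, not the kernel's implementation): the
 * EXT4_*_FL -> S_* translation above could equally be table-driven, which
 * keeps each flag pair in one place.  The bit values below are samples
 * only; kept out of the build with #if 0:
 */
#if 0
#include <stdio.h>

struct flag_map { unsigned int from, to; };

static const struct flag_map map[] = {
	{ 0x08 /* sample X_SYNC */,   0x01 /* sample V_SYNC */ },
	{ 0x20 /* sample X_APPEND */, 0x02 /* sample V_APPEND */ },
};

static unsigned int translate(unsigned int flags)
{
	unsigned int out = 0, i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (flags & map[i].from)
			out |= map[i].to;
	return out;
}

int main(void)
{
	printf("0x%x\n", translate(0x08 | 0x20));	/* prints 0x3 */
	return 0;
}
#endif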
4625*4882a593Smuzhiyun 
4626*4882a593Smuzhiyun static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
4627*4882a593Smuzhiyun 				  struct ext4_inode_info *ei)
4628*4882a593Smuzhiyun {
4629*4882a593Smuzhiyun 	blkcnt_t i_blocks;
4630*4882a593Smuzhiyun 	struct inode *inode = &(ei->vfs_inode);
4631*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
4632*4882a593Smuzhiyun 
4633*4882a593Smuzhiyun 	if (ext4_has_feature_huge_file(sb)) {
4634*4882a593Smuzhiyun 		/* we are using combined 48 bit field */
4635*4882a593Smuzhiyun 		i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
4636*4882a593Smuzhiyun 					le32_to_cpu(raw_inode->i_blocks_lo);
4637*4882a593Smuzhiyun 		if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
4638*4882a593Smuzhiyun 			/* i_blocks is stored in units of the filesystem block size */
4639*4882a593Smuzhiyun 			return i_blocks << (inode->i_blkbits - 9);
4640*4882a593Smuzhiyun 		} else {
4641*4882a593Smuzhiyun 			return i_blocks;
4642*4882a593Smuzhiyun 		}
4643*4882a593Smuzhiyun 	} else {
4644*4882a593Smuzhiyun 		return le32_to_cpu(raw_inode->i_blocks_lo);
4645*4882a593Smuzhiyun 	}
4646*4882a593Smuzhiyun }
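
/*
 * Editor's illustration of the decode above: with the huge_file feature
 * the on-disk count is a 48-bit value split across
 * i_blocks_high:i_blocks_lo, and the per-inode HUGE_FILE flag switches
 * its unit from 512-byte sectors to filesystem blocks, hence the
 * << (i_blkbits - 9) rescale.  Kept out of the build with #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int lo = 0x10, hi = 0x2;	/* sample raw fields */
	unsigned int blkbits = 12;		/* 4k filesystem blocks */
	unsigned long long blocks = ((unsigned long long)hi << 32) | lo;

	/* HUGE_FILE set: rescale fs blocks to 512-byte units for i_blocks */
	printf("%llu sectors\n", blocks << (blkbits - 9));
	return 0;
}
#endif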
4647*4882a593Smuzhiyun 
4648*4882a593Smuzhiyun static inline int ext4_iget_extra_inode(struct inode *inode,
4649*4882a593Smuzhiyun 					 struct ext4_inode *raw_inode,
4650*4882a593Smuzhiyun 					 struct ext4_inode_info *ei)
4651*4882a593Smuzhiyun {
4652*4882a593Smuzhiyun 	__le32 *magic = (void *)raw_inode +
4653*4882a593Smuzhiyun 			EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
4654*4882a593Smuzhiyun 
4655*4882a593Smuzhiyun 	if (EXT4_INODE_HAS_XATTR_SPACE(inode) &&
4656*4882a593Smuzhiyun 	    *magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
4657*4882a593Smuzhiyun 		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
4658*4882a593Smuzhiyun 		return ext4_find_inline_data_nolock(inode);
4659*4882a593Smuzhiyun 	} else
4660*4882a593Smuzhiyun 		EXT4_I(inode)->i_inline_off = 0;
4661*4882a593Smuzhiyun 	return 0;
4662*4882a593Smuzhiyun }
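
/*
 * Editor's note on the layout probed above: the xattr magic sits right
 * after the fixed 128-byte inode plus this inode's i_extra_isize, i.e. at
 * the start of the in-inode xattr area.  For a 256-byte on-disk inode:
 *
 *   [0 .. 127]                  good-old inode fields
 *   [128 .. 128+extra_isize-1]  extra fields (nanosecond times, version, ...)
 *   [128+extra_isize .. 255]    in-inode xattr area, led by the magic
 */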
4663*4882a593Smuzhiyun 
4664*4882a593Smuzhiyun int ext4_get_projid(struct inode *inode, kprojid_t *projid)
4665*4882a593Smuzhiyun {
4666*4882a593Smuzhiyun 	if (!ext4_has_feature_project(inode->i_sb))
4667*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4668*4882a593Smuzhiyun 	*projid = EXT4_I(inode)->i_projid;
4669*4882a593Smuzhiyun 	return 0;
4670*4882a593Smuzhiyun }
4671*4882a593Smuzhiyun 
4672*4882a593Smuzhiyun /*
4673*4882a593Smuzhiyun  * ext4 has self-managed i_version for ea inodes, it stores the lower 32bit of
4674*4882a593Smuzhiyun  * refcount in i_version, so use raw values if inode has EXT4_EA_INODE_FL flag
4675*4882a593Smuzhiyun  * set.
4676*4882a593Smuzhiyun  */
4677*4882a593Smuzhiyun static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
4678*4882a593Smuzhiyun {
4679*4882a593Smuzhiyun 	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4680*4882a593Smuzhiyun 		inode_set_iversion_raw(inode, val);
4681*4882a593Smuzhiyun 	else
4682*4882a593Smuzhiyun 		inode_set_iversion_queried(inode, val);
4683*4882a593Smuzhiyun }
4684*4882a593Smuzhiyun static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
4685*4882a593Smuzhiyun {
4686*4882a593Smuzhiyun 	if (unlikely(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
4687*4882a593Smuzhiyun 		return inode_peek_iversion_raw(inode);
4688*4882a593Smuzhiyun 	else
4689*4882a593Smuzhiyun 		return inode_peek_iversion(inode);
4690*4882a593Smuzhiyun }
4691*4882a593Smuzhiyun 
4692*4882a593Smuzhiyun struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
4693*4882a593Smuzhiyun 			  ext4_iget_flags flags, const char *function,
4694*4882a593Smuzhiyun 			  unsigned int line)
4695*4882a593Smuzhiyun {
4696*4882a593Smuzhiyun 	struct ext4_iloc iloc;
4697*4882a593Smuzhiyun 	struct ext4_inode *raw_inode;
4698*4882a593Smuzhiyun 	struct ext4_inode_info *ei;
4699*4882a593Smuzhiyun 	struct inode *inode;
4700*4882a593Smuzhiyun 	journal_t *journal = EXT4_SB(sb)->s_journal;
4701*4882a593Smuzhiyun 	long ret;
4702*4882a593Smuzhiyun 	loff_t size;
4703*4882a593Smuzhiyun 	int block;
4704*4882a593Smuzhiyun 	uid_t i_uid;
4705*4882a593Smuzhiyun 	gid_t i_gid;
4706*4882a593Smuzhiyun 	projid_t i_projid;
4707*4882a593Smuzhiyun 
4708*4882a593Smuzhiyun 	if ((!(flags & EXT4_IGET_SPECIAL) &&
4709*4882a593Smuzhiyun 	     (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
4710*4882a593Smuzhiyun 	    (ino < EXT4_ROOT_INO) ||
4711*4882a593Smuzhiyun 	    (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
4712*4882a593Smuzhiyun 		if (flags & EXT4_IGET_HANDLE)
4713*4882a593Smuzhiyun 			return ERR_PTR(-ESTALE);
4714*4882a593Smuzhiyun 		__ext4_error(sb, function, line, EFSCORRUPTED, 0,
4715*4882a593Smuzhiyun 			     "inode #%lu: comm %s: iget: illegal inode #",
4716*4882a593Smuzhiyun 			     ino, current->comm);
4717*4882a593Smuzhiyun 		return ERR_PTR(-EFSCORRUPTED);
4718*4882a593Smuzhiyun 	}
4719*4882a593Smuzhiyun 
4720*4882a593Smuzhiyun 	inode = iget_locked(sb, ino);
4721*4882a593Smuzhiyun 	if (!inode)
4722*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
4723*4882a593Smuzhiyun 	if (!(inode->i_state & I_NEW))
4724*4882a593Smuzhiyun 		return inode;
4725*4882a593Smuzhiyun 
4726*4882a593Smuzhiyun 	ei = EXT4_I(inode);
4727*4882a593Smuzhiyun 	iloc.bh = NULL;
4728*4882a593Smuzhiyun 
4729*4882a593Smuzhiyun 	ret = __ext4_get_inode_loc_noinmem(inode, &iloc);
4730*4882a593Smuzhiyun 	if (ret < 0)
4731*4882a593Smuzhiyun 		goto bad_inode;
4732*4882a593Smuzhiyun 	raw_inode = ext4_raw_inode(&iloc);
4733*4882a593Smuzhiyun 
4734*4882a593Smuzhiyun 	if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4735*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4736*4882a593Smuzhiyun 				 "iget: root inode unallocated");
4737*4882a593Smuzhiyun 		ret = -EFSCORRUPTED;
4738*4882a593Smuzhiyun 		goto bad_inode;
4739*4882a593Smuzhiyun 	}
4740*4882a593Smuzhiyun 
4741*4882a593Smuzhiyun 	if ((flags & EXT4_IGET_HANDLE) &&
4742*4882a593Smuzhiyun 	    (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
4743*4882a593Smuzhiyun 		ret = -ESTALE;
4744*4882a593Smuzhiyun 		goto bad_inode;
4745*4882a593Smuzhiyun 	}
4746*4882a593Smuzhiyun 
4747*4882a593Smuzhiyun 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4748*4882a593Smuzhiyun 		ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4749*4882a593Smuzhiyun 		if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4750*4882a593Smuzhiyun 			EXT4_INODE_SIZE(inode->i_sb) ||
4751*4882a593Smuzhiyun 		    (ei->i_extra_isize & 3)) {
4752*4882a593Smuzhiyun 			ext4_error_inode(inode, function, line, 0,
4753*4882a593Smuzhiyun 					 "iget: bad extra_isize %u "
4754*4882a593Smuzhiyun 					 "(inode size %u)",
4755*4882a593Smuzhiyun 					 ei->i_extra_isize,
4756*4882a593Smuzhiyun 					 EXT4_INODE_SIZE(inode->i_sb));
4757*4882a593Smuzhiyun 			ret = -EFSCORRUPTED;
4758*4882a593Smuzhiyun 			goto bad_inode;
4759*4882a593Smuzhiyun 		}
4760*4882a593Smuzhiyun 	} else
4761*4882a593Smuzhiyun 		ei->i_extra_isize = 0;
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun 	/* Precompute checksum seed for inode metadata */
4764*4882a593Smuzhiyun 	if (ext4_has_metadata_csum(sb)) {
4765*4882a593Smuzhiyun 		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4766*4882a593Smuzhiyun 		__u32 csum;
4767*4882a593Smuzhiyun 		__le32 inum = cpu_to_le32(inode->i_ino);
4768*4882a593Smuzhiyun 		__le32 gen = raw_inode->i_generation;
4769*4882a593Smuzhiyun 		csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4770*4882a593Smuzhiyun 				   sizeof(inum));
4771*4882a593Smuzhiyun 		ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4772*4882a593Smuzhiyun 					      sizeof(gen));
4773*4882a593Smuzhiyun 	}
4774*4882a593Smuzhiyun 
4775*4882a593Smuzhiyun 	if ((!ext4_inode_csum_verify(inode, raw_inode, ei) ||
4776*4882a593Smuzhiyun 	    ext4_simulate_fail(sb, EXT4_SIM_INODE_CRC)) &&
4777*4882a593Smuzhiyun 	     (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))) {
4778*4882a593Smuzhiyun 		ext4_error_inode_err(inode, function, line, 0,
4779*4882a593Smuzhiyun 				EFSBADCRC, "iget: checksum invalid");
4780*4882a593Smuzhiyun 		ret = -EFSBADCRC;
4781*4882a593Smuzhiyun 		goto bad_inode;
4782*4882a593Smuzhiyun 	}
4783*4882a593Smuzhiyun 
4784*4882a593Smuzhiyun 	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4785*4882a593Smuzhiyun 	i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4786*4882a593Smuzhiyun 	i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4787*4882a593Smuzhiyun 	if (ext4_has_feature_project(sb) &&
4788*4882a593Smuzhiyun 	    EXT4_INODE_SIZE(sb) > EXT4_GOOD_OLD_INODE_SIZE &&
4789*4882a593Smuzhiyun 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
4790*4882a593Smuzhiyun 		i_projid = (projid_t)le32_to_cpu(raw_inode->i_projid);
4791*4882a593Smuzhiyun 	else
4792*4882a593Smuzhiyun 		i_projid = EXT4_DEF_PROJID;
4793*4882a593Smuzhiyun 
4794*4882a593Smuzhiyun 	if (!(test_opt(inode->i_sb, NO_UID32))) {
4795*4882a593Smuzhiyun 		i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4796*4882a593Smuzhiyun 		i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4797*4882a593Smuzhiyun 	}
4798*4882a593Smuzhiyun 	i_uid_write(inode, i_uid);
4799*4882a593Smuzhiyun 	i_gid_write(inode, i_gid);
4800*4882a593Smuzhiyun 	ei->i_projid = make_kprojid(&init_user_ns, i_projid);
4801*4882a593Smuzhiyun 	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4802*4882a593Smuzhiyun 
4803*4882a593Smuzhiyun 	ext4_clear_state_flags(ei);	/* Only relevant on 32-bit archs */
4804*4882a593Smuzhiyun 	ei->i_inline_off = 0;
4805*4882a593Smuzhiyun 	ei->i_dir_start_lookup = 0;
4806*4882a593Smuzhiyun 	ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4807*4882a593Smuzhiyun 	/* We now have enough fields to check if the inode was active or not.
4808*4882a593Smuzhiyun 	 * This is needed because nfsd might try to access dead inodes;
4809*4882a593Smuzhiyun 	 * the test is the same one that e2fsck uses.
4810*4882a593Smuzhiyun 	 * NeilBrown 1999oct15
4811*4882a593Smuzhiyun 	 */
4812*4882a593Smuzhiyun 	if (inode->i_nlink == 0) {
4813*4882a593Smuzhiyun 		if ((inode->i_mode == 0 ||
4814*4882a593Smuzhiyun 		     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4815*4882a593Smuzhiyun 		    ino != EXT4_BOOT_LOADER_INO) {
4816*4882a593Smuzhiyun 			/* this inode is deleted */
4817*4882a593Smuzhiyun 			ret = -ESTALE;
4818*4882a593Smuzhiyun 			goto bad_inode;
4819*4882a593Smuzhiyun 		}
4820*4882a593Smuzhiyun 		/* The only unlinked inodes we let through here have
4821*4882a593Smuzhiyun 		 * valid i_mode and are being read by the orphan
4822*4882a593Smuzhiyun 		 * recovery code: that's fine, we're about to complete
4823*4882a593Smuzhiyun 		 * the process of deleting those.
4824*4882a593Smuzhiyun 		 * OR it is the EXT4_BOOT_LOADER_INO which is
4825*4882a593Smuzhiyun 		 * not initialized on a new filesystem. */
4826*4882a593Smuzhiyun 	}
4827*4882a593Smuzhiyun 	ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4828*4882a593Smuzhiyun 	ext4_set_inode_flags(inode, true);
4829*4882a593Smuzhiyun 	inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4830*4882a593Smuzhiyun 	ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4831*4882a593Smuzhiyun 	if (ext4_has_feature_64bit(sb))
4832*4882a593Smuzhiyun 		ei->i_file_acl |=
4833*4882a593Smuzhiyun 			((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4834*4882a593Smuzhiyun 	inode->i_size = ext4_isize(sb, raw_inode);
4835*4882a593Smuzhiyun 	if ((size = i_size_read(inode)) < 0) {
4836*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4837*4882a593Smuzhiyun 				 "iget: bad i_size value: %lld", size);
4838*4882a593Smuzhiyun 		ret = -EFSCORRUPTED;
4839*4882a593Smuzhiyun 		goto bad_inode;
4840*4882a593Smuzhiyun 	}
4841*4882a593Smuzhiyun 	/*
4842*4882a593Smuzhiyun 	 * If dir_index is not enabled but there's a dir with the INDEX flag set,
4843*4882a593Smuzhiyun 	 * we'd normally treat htree data as empty space. But with metadata
4844*4882a593Smuzhiyun 	 * checksumming that would corrupt the checksums, so forbid it.
4845*4882a593Smuzhiyun 	 */
4846*4882a593Smuzhiyun 	if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
4847*4882a593Smuzhiyun 	    ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
4848*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4849*4882a593Smuzhiyun 			 "iget: Dir with htree data on filesystem without dir_index feature.");
4850*4882a593Smuzhiyun 		ret = -EFSCORRUPTED;
4851*4882a593Smuzhiyun 		goto bad_inode;
4852*4882a593Smuzhiyun 	}
4853*4882a593Smuzhiyun 	ei->i_disksize = inode->i_size;
4854*4882a593Smuzhiyun #ifdef CONFIG_QUOTA
4855*4882a593Smuzhiyun 	ei->i_reserved_quota = 0;
4856*4882a593Smuzhiyun #endif
4857*4882a593Smuzhiyun 	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4858*4882a593Smuzhiyun 	ei->i_block_group = iloc.block_group;
4859*4882a593Smuzhiyun 	ei->i_last_alloc_group = ~0;
4860*4882a593Smuzhiyun 	/*
4861*4882a593Smuzhiyun 	 * NOTE! The in-memory inode i_data array is in little-endian order
4862*4882a593Smuzhiyun 	 * even on big-endian machines: we do NOT byteswap the block numbers!
4863*4882a593Smuzhiyun 	 */
4864*4882a593Smuzhiyun 	for (block = 0; block < EXT4_N_BLOCKS; block++)
4865*4882a593Smuzhiyun 		ei->i_data[block] = raw_inode->i_block[block];
4866*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ei->i_orphan);
4867*4882a593Smuzhiyun 	ext4_fc_init_inode(&ei->vfs_inode);
4868*4882a593Smuzhiyun 
4869*4882a593Smuzhiyun 	/*
4870*4882a593Smuzhiyun 	 * Set transaction id's of transactions that have to be committed
4871*4882a593Smuzhiyun 	 * to finish f[data]sync. We set them to currently running transaction
4872*4882a593Smuzhiyun 	 * as we cannot be sure that the inode or some of its metadata isn't
4873*4882a593Smuzhiyun 	 * part of the transaction - the inode could have been reclaimed and
4874*4882a593Smuzhiyun 	 * now it is reread from disk.
4875*4882a593Smuzhiyun 	 */
4876*4882a593Smuzhiyun 	if (journal) {
4877*4882a593Smuzhiyun 		transaction_t *transaction;
4878*4882a593Smuzhiyun 		tid_t tid;
4879*4882a593Smuzhiyun 
4880*4882a593Smuzhiyun 		read_lock(&journal->j_state_lock);
4881*4882a593Smuzhiyun 		if (journal->j_running_transaction)
4882*4882a593Smuzhiyun 			transaction = journal->j_running_transaction;
4883*4882a593Smuzhiyun 		else
4884*4882a593Smuzhiyun 			transaction = journal->j_committing_transaction;
4885*4882a593Smuzhiyun 		if (transaction)
4886*4882a593Smuzhiyun 			tid = transaction->t_tid;
4887*4882a593Smuzhiyun 		else
4888*4882a593Smuzhiyun 			tid = journal->j_commit_sequence;
4889*4882a593Smuzhiyun 		read_unlock(&journal->j_state_lock);
4890*4882a593Smuzhiyun 		ei->i_sync_tid = tid;
4891*4882a593Smuzhiyun 		ei->i_datasync_tid = tid;
4892*4882a593Smuzhiyun 	}
4893*4882a593Smuzhiyun 
4894*4882a593Smuzhiyun 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4895*4882a593Smuzhiyun 		if (ei->i_extra_isize == 0) {
4896*4882a593Smuzhiyun 			/* The extra space is currently unused. Use it. */
4897*4882a593Smuzhiyun 			BUILD_BUG_ON(sizeof(struct ext4_inode) & 3);
4898*4882a593Smuzhiyun 			ei->i_extra_isize = sizeof(struct ext4_inode) -
4899*4882a593Smuzhiyun 					    EXT4_GOOD_OLD_INODE_SIZE;
4900*4882a593Smuzhiyun 		} else {
4901*4882a593Smuzhiyun 			ret = ext4_iget_extra_inode(inode, raw_inode, ei);
4902*4882a593Smuzhiyun 			if (ret)
4903*4882a593Smuzhiyun 				goto bad_inode;
4904*4882a593Smuzhiyun 		}
4905*4882a593Smuzhiyun 	}
4906*4882a593Smuzhiyun 
4907*4882a593Smuzhiyun 	EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4908*4882a593Smuzhiyun 	EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4909*4882a593Smuzhiyun 	EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4910*4882a593Smuzhiyun 	EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4911*4882a593Smuzhiyun 
4912*4882a593Smuzhiyun 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
4913*4882a593Smuzhiyun 		u64 ivers = le32_to_cpu(raw_inode->i_disk_version);
4914*4882a593Smuzhiyun 
4915*4882a593Smuzhiyun 		if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4916*4882a593Smuzhiyun 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4917*4882a593Smuzhiyun 				ivers |=
4918*4882a593Smuzhiyun 		    (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4919*4882a593Smuzhiyun 		}
4920*4882a593Smuzhiyun 		ext4_inode_set_iversion_queried(inode, ivers);
4921*4882a593Smuzhiyun 	}
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 	ret = 0;
4924*4882a593Smuzhiyun 	if (ei->i_file_acl &&
4925*4882a593Smuzhiyun 	    !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) {
4926*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4927*4882a593Smuzhiyun 				 "iget: bad extended attribute block %llu",
4928*4882a593Smuzhiyun 				 ei->i_file_acl);
4929*4882a593Smuzhiyun 		ret = -EFSCORRUPTED;
4930*4882a593Smuzhiyun 		goto bad_inode;
4931*4882a593Smuzhiyun 	} else if (!ext4_has_inline_data(inode)) {
4932*4882a593Smuzhiyun 		/* validate the block references in the inode */
4933*4882a593Smuzhiyun 		if (!(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) &&
4934*4882a593Smuzhiyun 			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4935*4882a593Smuzhiyun 			(S_ISLNK(inode->i_mode) &&
4936*4882a593Smuzhiyun 			!ext4_inode_is_fast_symlink(inode)))) {
4937*4882a593Smuzhiyun 			if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4938*4882a593Smuzhiyun 				ret = ext4_ext_check_inode(inode);
4939*4882a593Smuzhiyun 			else
4940*4882a593Smuzhiyun 				ret = ext4_ind_check_inode(inode);
4941*4882a593Smuzhiyun 		}
4942*4882a593Smuzhiyun 	}
4943*4882a593Smuzhiyun 	if (ret)
4944*4882a593Smuzhiyun 		goto bad_inode;
4945*4882a593Smuzhiyun 
4946*4882a593Smuzhiyun 	if (S_ISREG(inode->i_mode)) {
4947*4882a593Smuzhiyun 		inode->i_op = &ext4_file_inode_operations;
4948*4882a593Smuzhiyun 		inode->i_fop = &ext4_file_operations;
4949*4882a593Smuzhiyun 		ext4_set_aops(inode);
4950*4882a593Smuzhiyun 	} else if (S_ISDIR(inode->i_mode)) {
4951*4882a593Smuzhiyun 		inode->i_op = &ext4_dir_inode_operations;
4952*4882a593Smuzhiyun 		inode->i_fop = &ext4_dir_operations;
4953*4882a593Smuzhiyun 	} else if (S_ISLNK(inode->i_mode)) {
4954*4882a593Smuzhiyun 		/* VFS does not allow setting these so must be corruption */
4955*4882a593Smuzhiyun 		if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
4956*4882a593Smuzhiyun 			ext4_error_inode(inode, function, line, 0,
4957*4882a593Smuzhiyun 					 "iget: immutable or append flags "
4958*4882a593Smuzhiyun 					 "not allowed on symlinks");
4959*4882a593Smuzhiyun 			ret = -EFSCORRUPTED;
4960*4882a593Smuzhiyun 			goto bad_inode;
4961*4882a593Smuzhiyun 		}
4962*4882a593Smuzhiyun 		if (IS_ENCRYPTED(inode)) {
4963*4882a593Smuzhiyun 			inode->i_op = &ext4_encrypted_symlink_inode_operations;
4964*4882a593Smuzhiyun 			ext4_set_aops(inode);
4965*4882a593Smuzhiyun 		} else if (ext4_inode_is_fast_symlink(inode)) {
4966*4882a593Smuzhiyun 			inode->i_link = (char *)ei->i_data;
4967*4882a593Smuzhiyun 			inode->i_op = &ext4_fast_symlink_inode_operations;
4968*4882a593Smuzhiyun 			nd_terminate_link(ei->i_data, inode->i_size,
4969*4882a593Smuzhiyun 				sizeof(ei->i_data) - 1);
4970*4882a593Smuzhiyun 		} else {
4971*4882a593Smuzhiyun 			inode->i_op = &ext4_symlink_inode_operations;
4972*4882a593Smuzhiyun 			ext4_set_aops(inode);
4973*4882a593Smuzhiyun 		}
4974*4882a593Smuzhiyun 		inode_nohighmem(inode);
4975*4882a593Smuzhiyun 	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4976*4882a593Smuzhiyun 	      S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4977*4882a593Smuzhiyun 		inode->i_op = &ext4_special_inode_operations;
4978*4882a593Smuzhiyun 		if (raw_inode->i_block[0])
4979*4882a593Smuzhiyun 			init_special_inode(inode, inode->i_mode,
4980*4882a593Smuzhiyun 			   old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4981*4882a593Smuzhiyun 		else
4982*4882a593Smuzhiyun 			init_special_inode(inode, inode->i_mode,
4983*4882a593Smuzhiyun 			   new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4984*4882a593Smuzhiyun 	} else if (ino == EXT4_BOOT_LOADER_INO) {
4985*4882a593Smuzhiyun 		make_bad_inode(inode);
4986*4882a593Smuzhiyun 	} else {
4987*4882a593Smuzhiyun 		ret = -EFSCORRUPTED;
4988*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4989*4882a593Smuzhiyun 				 "iget: bogus i_mode (%o)", inode->i_mode);
4990*4882a593Smuzhiyun 		goto bad_inode;
4991*4882a593Smuzhiyun 	}
4992*4882a593Smuzhiyun 	if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
4993*4882a593Smuzhiyun 		ext4_error_inode(inode, function, line, 0,
4994*4882a593Smuzhiyun 				 "casefold flag without casefold feature");
4995*4882a593Smuzhiyun 	brelse(iloc.bh);
4996*4882a593Smuzhiyun 
4997*4882a593Smuzhiyun 	unlock_new_inode(inode);
4998*4882a593Smuzhiyun 	return inode;
4999*4882a593Smuzhiyun 
5000*4882a593Smuzhiyun bad_inode:
5001*4882a593Smuzhiyun 	brelse(iloc.bh);
5002*4882a593Smuzhiyun 	iget_failed(inode);
5003*4882a593Smuzhiyun 	return ERR_PTR(ret);
5004*4882a593Smuzhiyun }
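
/*
 * Editor's illustration: the special-file branch above keeps ext2's dual
 * device-number encoding -- the legacy 16-bit form in i_block[0], the
 * "new" 32-bit form in i_block[1].  Decoding the new form as mainline's
 * new_decode_dev() does (12-bit major in bits 8..19, minor split across
 * bits 0..7 and 20..31); kept out of the build with #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int raw = (3u & 0xff) | (8u << 8);	/* sample: dev 8:3 */
	unsigned int major = (raw & 0xfff00) >> 8;
	unsigned int minor = (raw & 0xff) | ((raw >> 12) & 0xfff00);

	printf("dev %u:%u\n", major, minor);	/* prints dev 8:3 */
	return 0;
}
#endif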
5005*4882a593Smuzhiyun 
5006*4882a593Smuzhiyun static int ext4_inode_blocks_set(handle_t *handle,
5007*4882a593Smuzhiyun 				struct ext4_inode *raw_inode,
5008*4882a593Smuzhiyun 				struct ext4_inode_info *ei)
5009*4882a593Smuzhiyun {
5010*4882a593Smuzhiyun 	struct inode *inode = &(ei->vfs_inode);
5011*4882a593Smuzhiyun 	u64 i_blocks = READ_ONCE(inode->i_blocks);
5012*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
5013*4882a593Smuzhiyun 
5014*4882a593Smuzhiyun 	if (i_blocks <= ~0U) {
5015*4882a593Smuzhiyun 		/*
5016*4882a593Smuzhiyun 		 * i_blocks can be represented in a 32-bit variable
5017*4882a593Smuzhiyun 		 * as a multiple of 512 bytes
5018*4882a593Smuzhiyun 		 */
5019*4882a593Smuzhiyun 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5020*4882a593Smuzhiyun 		raw_inode->i_blocks_high = 0;
5021*4882a593Smuzhiyun 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5022*4882a593Smuzhiyun 		return 0;
5023*4882a593Smuzhiyun 	}
5024*4882a593Smuzhiyun 	if (!ext4_has_feature_huge_file(sb))
5025*4882a593Smuzhiyun 		return -EFBIG;
5026*4882a593Smuzhiyun 
5027*4882a593Smuzhiyun 	if (i_blocks <= 0xffffffffffffULL) {
5028*4882a593Smuzhiyun 		/*
5029*4882a593Smuzhiyun 		 * i_blocks can be represented in a 48-bit variable
5030*4882a593Smuzhiyun 		 * as a multiple of 512 bytes
5031*4882a593Smuzhiyun 		 */
5032*4882a593Smuzhiyun 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5033*4882a593Smuzhiyun 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5034*4882a593Smuzhiyun 		ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5035*4882a593Smuzhiyun 	} else {
5036*4882a593Smuzhiyun 		ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
5037*4882a593Smuzhiyun 		/* i_blocks is stored in units of the filesystem block size */
5038*4882a593Smuzhiyun 		i_blocks = i_blocks >> (inode->i_blkbits - 9);
5039*4882a593Smuzhiyun 		raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
5040*4882a593Smuzhiyun 		raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
5041*4882a593Smuzhiyun 	}
5042*4882a593Smuzhiyun 	return 0;
5043*4882a593Smuzhiyun }
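
/*
 * Editor's illustration of the representation ladder above: up to
 * 2^32 - 1 sectors fits the legacy 32-bit field; up to 2^48 - 1 sectors
 * needs the huge_file high:low split; beyond that the count is rescaled
 * to filesystem blocks and HUGE_FILE records the changed unit.  Kept out
 * of the build with #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long i_blocks = 1ULL << 49;	/* sample value */
	unsigned int blkbits = 12;

	if (i_blocks <= 0xffffffffULL)
		printf("32-bit field, 512-byte units\n");
	else if (i_blocks <= 0xffffffffffffULL)
		printf("48-bit split field, 512-byte units\n");
	else
		printf("48-bit split field, fs-block units: %llu\n",
		       i_blocks >> (blkbits - 9));
	return 0;
}
#endif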
5044*4882a593Smuzhiyun 
5045*4882a593Smuzhiyun static void __ext4_update_other_inode_time(struct super_block *sb,
5046*4882a593Smuzhiyun 					   unsigned long orig_ino,
5047*4882a593Smuzhiyun 					   unsigned long ino,
5048*4882a593Smuzhiyun 					   struct ext4_inode *raw_inode)
5049*4882a593Smuzhiyun {
5050*4882a593Smuzhiyun 	struct inode *inode;
5051*4882a593Smuzhiyun 
5052*4882a593Smuzhiyun 	inode = find_inode_by_ino_rcu(sb, ino);
5053*4882a593Smuzhiyun 	if (!inode)
5054*4882a593Smuzhiyun 		return;
5055*4882a593Smuzhiyun 
5056*4882a593Smuzhiyun 	if ((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5057*4882a593Smuzhiyun 			       I_DIRTY_INODE)) ||
5058*4882a593Smuzhiyun 	    ((inode->i_state & I_DIRTY_TIME) == 0))
5059*4882a593Smuzhiyun 		return;
5060*4882a593Smuzhiyun 
5061*4882a593Smuzhiyun 	spin_lock(&inode->i_lock);
5062*4882a593Smuzhiyun 	if (((inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW |
5063*4882a593Smuzhiyun 				I_DIRTY_INODE)) == 0) &&
5064*4882a593Smuzhiyun 	    (inode->i_state & I_DIRTY_TIME)) {
5065*4882a593Smuzhiyun 		struct ext4_inode_info	*ei = EXT4_I(inode);
5066*4882a593Smuzhiyun 
5067*4882a593Smuzhiyun 		inode->i_state &= ~I_DIRTY_TIME;
5068*4882a593Smuzhiyun 		spin_unlock(&inode->i_lock);
5069*4882a593Smuzhiyun 
5070*4882a593Smuzhiyun 		spin_lock(&ei->i_raw_lock);
5071*4882a593Smuzhiyun 		EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5072*4882a593Smuzhiyun 		EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5073*4882a593Smuzhiyun 		EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5074*4882a593Smuzhiyun 		ext4_inode_csum_set(inode, raw_inode, ei);
5075*4882a593Smuzhiyun 		spin_unlock(&ei->i_raw_lock);
5076*4882a593Smuzhiyun 		trace_ext4_other_inode_update_time(inode, orig_ino);
5077*4882a593Smuzhiyun 		return;
5078*4882a593Smuzhiyun 	}
5079*4882a593Smuzhiyun 	spin_unlock(&inode->i_lock);
5080*4882a593Smuzhiyun }
5081*4882a593Smuzhiyun 
5082*4882a593Smuzhiyun /*
5083*4882a593Smuzhiyun  * Opportunistically update the other time fields for other inodes in
5084*4882a593Smuzhiyun  * the same inode table block.
5085*4882a593Smuzhiyun  */
5086*4882a593Smuzhiyun static void ext4_update_other_inodes_time(struct super_block *sb,
5087*4882a593Smuzhiyun 					  unsigned long orig_ino, char *buf)
5088*4882a593Smuzhiyun {
5089*4882a593Smuzhiyun 	unsigned long ino;
5090*4882a593Smuzhiyun 	int i, inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
5091*4882a593Smuzhiyun 	int inode_size = EXT4_INODE_SIZE(sb);
5092*4882a593Smuzhiyun 
5093*4882a593Smuzhiyun 	/*
5094*4882a593Smuzhiyun 	 * Calculate the first inode in the inode table block.  Inode
5095*4882a593Smuzhiyun 	 * numbers are one-based.  That is, the first inode in a block
5096*4882a593Smuzhiyun 	 * (assuming 4k blocks and 256 byte inodes) is (n*16 + 1).
5097*4882a593Smuzhiyun 	 */
5098*4882a593Smuzhiyun 	ino = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;
5099*4882a593Smuzhiyun 	rcu_read_lock();
5100*4882a593Smuzhiyun 	for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
5101*4882a593Smuzhiyun 		if (ino == orig_ino)
5102*4882a593Smuzhiyun 			continue;
5103*4882a593Smuzhiyun 		__ext4_update_other_inode_time(sb, orig_ino, ino,
5104*4882a593Smuzhiyun 					       (struct ext4_inode *)buf);
5105*4882a593Smuzhiyun 	}
5106*4882a593Smuzhiyun 	rcu_read_unlock();
5107*4882a593Smuzhiyun }
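
/*
 * Editor's illustration: since inode numbers are one-based, the first
 * inode sharing orig_ino's table block is found by masking in zero-based
 * space and converting back, as done above.  Kept out of the build with
 * #if 0:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long orig_ino = 23, inodes_per_block = 16;
	unsigned long first = ((orig_ino - 1) & ~(inodes_per_block - 1)) + 1;

	/* prints 17: inodes 17..32 share the itable block */
	printf("block starts at inode %lu\n", first);
	return 0;
}
#endif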
5108*4882a593Smuzhiyun 
5109*4882a593Smuzhiyun /*
5110*4882a593Smuzhiyun  * Post the struct inode info into an on-disk inode location in the
5111*4882a593Smuzhiyun  * buffer-cache.  This gobbles the caller's reference to the
5112*4882a593Smuzhiyun  * buffer_head in the inode location struct.
5113*4882a593Smuzhiyun  *
5114*4882a593Smuzhiyun  * The caller must have write access to iloc->bh.
5115*4882a593Smuzhiyun  */
5116*4882a593Smuzhiyun static int ext4_do_update_inode(handle_t *handle,
5117*4882a593Smuzhiyun 				struct inode *inode,
5118*4882a593Smuzhiyun 				struct ext4_iloc *iloc)
5119*4882a593Smuzhiyun {
5120*4882a593Smuzhiyun 	struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
5121*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
5122*4882a593Smuzhiyun 	struct buffer_head *bh = iloc->bh;
5123*4882a593Smuzhiyun 	struct super_block *sb = inode->i_sb;
5124*4882a593Smuzhiyun 	int err = 0, block;
5125*4882a593Smuzhiyun 	int need_datasync = 0, set_large_file = 0;
5126*4882a593Smuzhiyun 	uid_t i_uid;
5127*4882a593Smuzhiyun 	gid_t i_gid;
5128*4882a593Smuzhiyun 	projid_t i_projid;
5129*4882a593Smuzhiyun 
5130*4882a593Smuzhiyun 	spin_lock(&ei->i_raw_lock);
5131*4882a593Smuzhiyun 
5132*4882a593Smuzhiyun 	/* For fields not tracked in the in-memory inode,
5133*4882a593Smuzhiyun 	 * initialise them to zero for new inodes. */
5134*4882a593Smuzhiyun 	if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
5135*4882a593Smuzhiyun 		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
5136*4882a593Smuzhiyun 
5137*4882a593Smuzhiyun 	err = ext4_inode_blocks_set(handle, raw_inode, ei);
5138*4882a593Smuzhiyun 	if (err) {
5139*4882a593Smuzhiyun 		spin_unlock(&ei->i_raw_lock);
5140*4882a593Smuzhiyun 		goto out_brelse;
5141*4882a593Smuzhiyun 	}
5142*4882a593Smuzhiyun 
5143*4882a593Smuzhiyun 	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
5144*4882a593Smuzhiyun 	i_uid = i_uid_read(inode);
5145*4882a593Smuzhiyun 	i_gid = i_gid_read(inode);
5146*4882a593Smuzhiyun 	i_projid = from_kprojid(&init_user_ns, ei->i_projid);
5147*4882a593Smuzhiyun 	if (!(test_opt(inode->i_sb, NO_UID32))) {
5148*4882a593Smuzhiyun 		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
5149*4882a593Smuzhiyun 		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
5150*4882a593Smuzhiyun /*
5151*4882a593Smuzhiyun  * Fix up interoperability with old kernels. Otherwise, old inodes get
5152*4882a593Smuzhiyun  * re-used with the upper 16 bits of the uid/gid intact.
5153*4882a593Smuzhiyun  */
5154*4882a593Smuzhiyun 		if (ei->i_dtime && list_empty(&ei->i_orphan)) {
5155*4882a593Smuzhiyun 			raw_inode->i_uid_high = 0;
5156*4882a593Smuzhiyun 			raw_inode->i_gid_high = 0;
5157*4882a593Smuzhiyun 		} else {
5158*4882a593Smuzhiyun 			raw_inode->i_uid_high =
5159*4882a593Smuzhiyun 				cpu_to_le16(high_16_bits(i_uid));
5160*4882a593Smuzhiyun 			raw_inode->i_gid_high =
5161*4882a593Smuzhiyun 				cpu_to_le16(high_16_bits(i_gid));
5162*4882a593Smuzhiyun 		}
5163*4882a593Smuzhiyun 	} else {
5164*4882a593Smuzhiyun 		raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
5165*4882a593Smuzhiyun 		raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
5166*4882a593Smuzhiyun 		raw_inode->i_uid_high = 0;
5167*4882a593Smuzhiyun 		raw_inode->i_gid_high = 0;
5168*4882a593Smuzhiyun 	}
5169*4882a593Smuzhiyun 	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
5170*4882a593Smuzhiyun 
5171*4882a593Smuzhiyun 	EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
5172*4882a593Smuzhiyun 	EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
5173*4882a593Smuzhiyun 	EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
5174*4882a593Smuzhiyun 	EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
5175*4882a593Smuzhiyun 
5176*4882a593Smuzhiyun 	raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
5177*4882a593Smuzhiyun 	raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
5178*4882a593Smuzhiyun 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT)))
5179*4882a593Smuzhiyun 		raw_inode->i_file_acl_high =
5180*4882a593Smuzhiyun 			cpu_to_le16(ei->i_file_acl >> 32);
5181*4882a593Smuzhiyun 	raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
5182*4882a593Smuzhiyun 	if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
5183*4882a593Smuzhiyun 		ext4_isize_set(raw_inode, ei->i_disksize);
5184*4882a593Smuzhiyun 		need_datasync = 1;
5185*4882a593Smuzhiyun 	}
5186*4882a593Smuzhiyun 	if (ei->i_disksize > 0x7fffffffULL) {
5187*4882a593Smuzhiyun 		if (!ext4_has_feature_large_file(sb) ||
5188*4882a593Smuzhiyun 				EXT4_SB(sb)->s_es->s_rev_level ==
5189*4882a593Smuzhiyun 		    cpu_to_le32(EXT4_GOOD_OLD_REV))
5190*4882a593Smuzhiyun 			set_large_file = 1;
5191*4882a593Smuzhiyun 	}
5192*4882a593Smuzhiyun 	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
5193*4882a593Smuzhiyun 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
5194*4882a593Smuzhiyun 		if (old_valid_dev(inode->i_rdev)) {
5195*4882a593Smuzhiyun 			raw_inode->i_block[0] =
5196*4882a593Smuzhiyun 				cpu_to_le32(old_encode_dev(inode->i_rdev));
5197*4882a593Smuzhiyun 			raw_inode->i_block[1] = 0;
5198*4882a593Smuzhiyun 		} else {
5199*4882a593Smuzhiyun 			raw_inode->i_block[0] = 0;
5200*4882a593Smuzhiyun 			raw_inode->i_block[1] =
5201*4882a593Smuzhiyun 				cpu_to_le32(new_encode_dev(inode->i_rdev));
5202*4882a593Smuzhiyun 			raw_inode->i_block[2] = 0;
5203*4882a593Smuzhiyun 		}
5204*4882a593Smuzhiyun 	} else if (!ext4_has_inline_data(inode)) {
5205*4882a593Smuzhiyun 		for (block = 0; block < EXT4_N_BLOCKS; block++)
5206*4882a593Smuzhiyun 			raw_inode->i_block[block] = ei->i_data[block];
5207*4882a593Smuzhiyun 	}
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun 	if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
5210*4882a593Smuzhiyun 		u64 ivers = ext4_inode_peek_iversion(inode);
5211*4882a593Smuzhiyun 
5212*4882a593Smuzhiyun 		raw_inode->i_disk_version = cpu_to_le32(ivers);
5213*4882a593Smuzhiyun 		if (ei->i_extra_isize) {
5214*4882a593Smuzhiyun 			if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
5215*4882a593Smuzhiyun 				raw_inode->i_version_hi =
5216*4882a593Smuzhiyun 					cpu_to_le32(ivers >> 32);
5217*4882a593Smuzhiyun 			raw_inode->i_extra_isize =
5218*4882a593Smuzhiyun 				cpu_to_le16(ei->i_extra_isize);
5219*4882a593Smuzhiyun 		}
5220*4882a593Smuzhiyun 	}
5221*4882a593Smuzhiyun 
5222*4882a593Smuzhiyun 	BUG_ON(!ext4_has_feature_project(inode->i_sb) &&
5223*4882a593Smuzhiyun 	       i_projid != EXT4_DEF_PROJID);
5224*4882a593Smuzhiyun 
5225*4882a593Smuzhiyun 	if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
5226*4882a593Smuzhiyun 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_projid))
5227*4882a593Smuzhiyun 		raw_inode->i_projid = cpu_to_le32(i_projid);
5228*4882a593Smuzhiyun 
5229*4882a593Smuzhiyun 	ext4_inode_csum_set(inode, raw_inode, ei);
5230*4882a593Smuzhiyun 	spin_unlock(&ei->i_raw_lock);
5231*4882a593Smuzhiyun 	if (inode->i_sb->s_flags & SB_LAZYTIME)
5232*4882a593Smuzhiyun 		ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
5233*4882a593Smuzhiyun 					      bh->b_data);
5234*4882a593Smuzhiyun 
5235*4882a593Smuzhiyun 	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
5236*4882a593Smuzhiyun 	err = ext4_handle_dirty_metadata(handle, NULL, bh);
5237*4882a593Smuzhiyun 	if (err)
5238*4882a593Smuzhiyun 		goto out_brelse;
5239*4882a593Smuzhiyun 	ext4_clear_inode_state(inode, EXT4_STATE_NEW);
5240*4882a593Smuzhiyun 	if (set_large_file) {
5241*4882a593Smuzhiyun 		BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get write access");
5242*4882a593Smuzhiyun 		err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
5243*4882a593Smuzhiyun 		if (err)
5244*4882a593Smuzhiyun 			goto out_brelse;
5245*4882a593Smuzhiyun 		ext4_set_feature_large_file(sb);
5246*4882a593Smuzhiyun 		ext4_handle_sync(handle);
5247*4882a593Smuzhiyun 		err = ext4_handle_dirty_super(handle, sb);
5248*4882a593Smuzhiyun 	}
5249*4882a593Smuzhiyun 	ext4_update_inode_fsync_trans(handle, inode, need_datasync);
5250*4882a593Smuzhiyun out_brelse:
5251*4882a593Smuzhiyun 	brelse(bh);
5252*4882a593Smuzhiyun 	ext4_std_error(inode->i_sb, err);
5253*4882a593Smuzhiyun 	return err;
5254*4882a593Smuzhiyun }
5255*4882a593Smuzhiyun 
5256*4882a593Smuzhiyun /*
5257*4882a593Smuzhiyun  * ext4_write_inode()
5258*4882a593Smuzhiyun  *
5259*4882a593Smuzhiyun  * We are called from a few places:
5260*4882a593Smuzhiyun  *
5261*4882a593Smuzhiyun  * - Within generic_file_aio_write() -> generic_write_sync() for O_SYNC files.
5262*4882a593Smuzhiyun  *   Here, there will be no transaction running. We wait for any running
5263*4882a593Smuzhiyun  *   transaction to commit.
5264*4882a593Smuzhiyun  *
5265*4882a593Smuzhiyun  * - Within flush work (sys_sync(), kupdate and such).
5266*4882a593Smuzhiyun  *   We wait on commit, if told to.
5267*4882a593Smuzhiyun  *
5268*4882a593Smuzhiyun  * - Within iput_final() -> write_inode_now()
5269*4882a593Smuzhiyun  *   We wait on commit, if told to.
5270*4882a593Smuzhiyun  *
5271*4882a593Smuzhiyun  * In all cases it is actually safe for us to return without doing anything,
5272*4882a593Smuzhiyun  * because the inode has been copied into a raw inode buffer in
5273*4882a593Smuzhiyun  * ext4_mark_inode_dirty().  This is a correctness thing for WB_SYNC_ALL
5274*4882a593Smuzhiyun  * writeback.
5275*4882a593Smuzhiyun  *
5276*4882a593Smuzhiyun  * Note that we are absolutely dependent upon all inode dirtiers doing the
5277*4882a593Smuzhiyun  * right thing: they *must* call mark_inode_dirty() after dirtying info in
5278*4882a593Smuzhiyun  * which we are interested.
5279*4882a593Smuzhiyun  *
5280*4882a593Smuzhiyun  * It would be a bug for them to not do this.  The code:
5281*4882a593Smuzhiyun  *
5282*4882a593Smuzhiyun  *	mark_inode_dirty(inode)
5283*4882a593Smuzhiyun  *	stuff();
5284*4882a593Smuzhiyun  *	inode->i_size = expr;
5285*4882a593Smuzhiyun  *
5286*4882a593Smuzhiyun  * is in error because write_inode() could occur while `stuff()' is running,
5287*4882a593Smuzhiyun  * and the new i_size will be lost.  Plus the inode will no longer be on the
5288*4882a593Smuzhiyun  * superblock's dirty inode list.
5289*4882a593Smuzhiyun  */
5290*4882a593Smuzhiyun int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
5291*4882a593Smuzhiyun {
5292*4882a593Smuzhiyun 	int err;
5293*4882a593Smuzhiyun 
5294*4882a593Smuzhiyun 	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
5295*4882a593Smuzhiyun 	    sb_rdonly(inode->i_sb))
5296*4882a593Smuzhiyun 		return 0;
5297*4882a593Smuzhiyun 
5298*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5299*4882a593Smuzhiyun 		return -EIO;
5300*4882a593Smuzhiyun 
5301*4882a593Smuzhiyun 	if (EXT4_SB(inode->i_sb)->s_journal) {
5302*4882a593Smuzhiyun 		if (ext4_journal_current_handle()) {
5303*4882a593Smuzhiyun 			jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
5304*4882a593Smuzhiyun 			dump_stack();
5305*4882a593Smuzhiyun 			return -EIO;
5306*4882a593Smuzhiyun 		}
5307*4882a593Smuzhiyun 
5308*4882a593Smuzhiyun 		/*
5309*4882a593Smuzhiyun 		 * No need to force transaction in WB_SYNC_NONE mode. Also
5310*4882a593Smuzhiyun 		 * ext4_sync_fs() will force the commit after everything is
5311*4882a593Smuzhiyun 		 * written.
5312*4882a593Smuzhiyun 		 */
5313*4882a593Smuzhiyun 		if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
5314*4882a593Smuzhiyun 			return 0;
5315*4882a593Smuzhiyun 
5316*4882a593Smuzhiyun 		err = ext4_fc_commit(EXT4_SB(inode->i_sb)->s_journal,
5317*4882a593Smuzhiyun 						EXT4_I(inode)->i_sync_tid);
5318*4882a593Smuzhiyun 	} else {
5319*4882a593Smuzhiyun 		struct ext4_iloc iloc;
5320*4882a593Smuzhiyun 
5321*4882a593Smuzhiyun 		err = __ext4_get_inode_loc_noinmem(inode, &iloc);
5322*4882a593Smuzhiyun 		if (err)
5323*4882a593Smuzhiyun 			return err;
5324*4882a593Smuzhiyun 		/*
5325*4882a593Smuzhiyun 		 * sync(2) will flush the whole buffer cache. No need to do
5326*4882a593Smuzhiyun 		 * it here separately for each inode.
5327*4882a593Smuzhiyun 		 */
5328*4882a593Smuzhiyun 		if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
5329*4882a593Smuzhiyun 			sync_dirty_buffer(iloc.bh);
5330*4882a593Smuzhiyun 		if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
5331*4882a593Smuzhiyun 			ext4_error_inode_block(inode, iloc.bh->b_blocknr, EIO,
5332*4882a593Smuzhiyun 					       "IO error syncing inode");
5333*4882a593Smuzhiyun 			err = -EIO;
5334*4882a593Smuzhiyun 		}
5335*4882a593Smuzhiyun 		brelse(iloc.bh);
5336*4882a593Smuzhiyun 	}
5337*4882a593Smuzhiyun 	return err;
5338*4882a593Smuzhiyun }
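/*
 * A minimal userspace sketch of the ordering rule described in the comment
 * above ext4_write_inode().  The demo_* types and helpers are hypothetical
 * stand-ins, not kernel structures.  Because marking the inode dirty
 * snapshots the in-core fields into the raw inode buffer, any field updated
 * after mark_inode_dirty() can be lost if write_inode() runs in between:
 */
#include <stdio.h>

struct demo_inode { long i_size; int dirty; };
struct demo_raw   { long i_size; };		/* models the raw inode buffer */

static void demo_mark_inode_dirty(struct demo_inode *in, struct demo_raw *raw)
{
	raw->i_size = in->i_size;	/* copy in-core state into the buffer */
	in->dirty = 1;
}

static void demo_write_inode(struct demo_inode *in, struct demo_raw *raw)
{
	(void)raw;			/* the buffer goes to disk as-is */
	in->dirty = 0;
}

int main(void)
{
	struct demo_inode in = { 0, 0 };
	struct demo_raw raw = { 0 };

	/* Buggy order from the comment above: dirty first, update after. */
	demo_mark_inode_dirty(&in, &raw);
	in.i_size = 4096;			/* missed by the snapshot */
	demo_write_inode(&in, &raw);
	printf("buggy order:   on-disk i_size = %ld\n", raw.i_size);	/* 0 */

	/* Correct order: update the fields, then mark the inode dirty. */
	in.i_size = 8192;
	demo_mark_inode_dirty(&in, &raw);
	demo_write_inode(&in, &raw);
	printf("correct order: on-disk i_size = %ld\n", raw.i_size);	/* 8192 */
	return 0;
}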
5339*4882a593Smuzhiyun 
5340*4882a593Smuzhiyun /*
5341*4882a593Smuzhiyun  * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
5342*4882a593Smuzhiyun  * buffers that are attached to a page straddling i_size and are undergoing
5343*4882a593Smuzhiyun  * commit. In that case we have to wait for commit to finish and try again.
5344*4882a593Smuzhiyun  */
5345*4882a593Smuzhiyun static void ext4_wait_for_tail_page_commit(struct inode *inode)
5346*4882a593Smuzhiyun {
5347*4882a593Smuzhiyun 	struct page *page;
5348*4882a593Smuzhiyun 	unsigned offset;
5349*4882a593Smuzhiyun 	journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
5350*4882a593Smuzhiyun 	tid_t commit_tid = 0;
5351*4882a593Smuzhiyun 	int ret;
5352*4882a593Smuzhiyun 
5353*4882a593Smuzhiyun 	offset = inode->i_size & (PAGE_SIZE - 1);
5354*4882a593Smuzhiyun 	/*
5355*4882a593Smuzhiyun 	 * If the page is fully truncated, we don't need to wait for any commit
5356*4882a593Smuzhiyun 	 * (and we even should not as __ext4_journalled_invalidatepage() may
5357*4882a593Smuzhiyun 	 * strip all buffers from the page but keep the page dirty which can then
5358*4882a593Smuzhiyun 	 * confuse e.g. concurrent ext4_writepage() seeing dirty page without
5359*4882a593Smuzhiyun 	 * buffers). Also we don't need to wait for any commit if all buffers in
5360*4882a593Smuzhiyun 	 * the page remain valid. This is most beneficial for the common case of
5361*4882a593Smuzhiyun 	 * blocksize == PAGESIZE.
5362*4882a593Smuzhiyun 	 */
5363*4882a593Smuzhiyun 	if (!offset || offset > (PAGE_SIZE - i_blocksize(inode)))
5364*4882a593Smuzhiyun 		return;
5365*4882a593Smuzhiyun 	while (1) {
5366*4882a593Smuzhiyun 		page = find_lock_page(inode->i_mapping,
5367*4882a593Smuzhiyun 				      inode->i_size >> PAGE_SHIFT);
5368*4882a593Smuzhiyun 		if (!page)
5369*4882a593Smuzhiyun 			return;
5370*4882a593Smuzhiyun 		ret = __ext4_journalled_invalidatepage(page, offset,
5371*4882a593Smuzhiyun 						PAGE_SIZE - offset);
5372*4882a593Smuzhiyun 		unlock_page(page);
5373*4882a593Smuzhiyun 		put_page(page);
5374*4882a593Smuzhiyun 		if (ret != -EBUSY)
5375*4882a593Smuzhiyun 			return;
5376*4882a593Smuzhiyun 		commit_tid = 0;
5377*4882a593Smuzhiyun 		read_lock(&journal->j_state_lock);
5378*4882a593Smuzhiyun 		if (journal->j_committing_transaction)
5379*4882a593Smuzhiyun 			commit_tid = journal->j_committing_transaction->t_tid;
5380*4882a593Smuzhiyun 		read_unlock(&journal->j_state_lock);
5381*4882a593Smuzhiyun 		if (commit_tid)
5382*4882a593Smuzhiyun 			jbd2_log_wait_commit(journal, commit_tid);
5383*4882a593Smuzhiyun 	}
5384*4882a593Smuzhiyun }
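/*
 * A standalone sketch of the partial-page check at the top of
 * ext4_wait_for_tail_page_commit() above.  Userspace C with a hypothetical
 * demo_* helper; assumes 4K pages.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096UL

static int demo_tail_page_needs_wait(unsigned long long i_size,
				     unsigned long blocksize)
{
	unsigned long offset = i_size & (DEMO_PAGE_SIZE - 1);

	/* No partial tail page, or every buffer in it stays valid. */
	if (!offset || offset > (DEMO_PAGE_SIZE - blocksize))
		return 0;
	return 1;
}

int main(void)
{
	/* blocksize == PAGE_SIZE: a partial tail page never needs the wait */
	printf("%d\n", demo_tail_page_needs_wait(6000, 4096));	/* 0 */
	/* 1K blocks: offset 1904 <= 3072, some tail buffers go away */
	printf("%d\n", demo_tail_page_needs_wait(6000, 1024));	/* 1 */
	/* page-aligned i_size: nothing to invalidate */
	printf("%d\n", demo_tail_page_needs_wait(8192, 1024));	/* 0 */
	return 0;
}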
5385*4882a593Smuzhiyun 
5386*4882a593Smuzhiyun /*
5387*4882a593Smuzhiyun  * ext4_setattr()
5388*4882a593Smuzhiyun  *
5389*4882a593Smuzhiyun  * Called from notify_change.
5390*4882a593Smuzhiyun  *
5391*4882a593Smuzhiyun  * We want to trap VFS attempts to truncate the file as soon as
5392*4882a593Smuzhiyun  * possible.  In particular, we want to make sure that when the VFS
5393*4882a593Smuzhiyun  * shrinks i_size, we put the inode on the orphan list and modify
5394*4882a593Smuzhiyun  * i_disksize immediately, so that during the subsequent flushing of
5395*4882a593Smuzhiyun  * dirty pages and freeing of disk blocks, we can guarantee that any
5396*4882a593Smuzhiyun  * commit will leave the blocks being flushed in an unused state on
5397*4882a593Smuzhiyun  * disk.  (On recovery, the inode will get truncated and the blocks will
5398*4882a593Smuzhiyun  * be freed, so we have a strong guarantee that no future commit will
5399*4882a593Smuzhiyun  * leave these blocks visible to the user.)
5400*4882a593Smuzhiyun  *
5401*4882a593Smuzhiyun  * Another thing we have to assure is that if we are in ordered mode
5402*4882a593Smuzhiyun  * and inode is still attached to the committing transaction, we must
5403*4882a593Smuzhiyun  * and inode is still attached to the committing transaction, we must
5404*4882a593Smuzhiyun  * start writeout of all the dirty pages which are being truncated.
5405*4882a593Smuzhiyun  * transaction are already on disk (truncate waits for pages under
5406*4882a593Smuzhiyun  * writeback).
5407*4882a593Smuzhiyun  *
5408*4882a593Smuzhiyun  * Called with inode->i_mutex down.
5409*4882a593Smuzhiyun  */
5410*4882a593Smuzhiyun int ext4_setattr(struct dentry *dentry, struct iattr *attr)
5411*4882a593Smuzhiyun {
5412*4882a593Smuzhiyun 	struct inode *inode = d_inode(dentry);
5413*4882a593Smuzhiyun 	int error, rc = 0;
5414*4882a593Smuzhiyun 	int orphan = 0;
5415*4882a593Smuzhiyun 	const unsigned int ia_valid = attr->ia_valid;
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5418*4882a593Smuzhiyun 		return -EIO;
5419*4882a593Smuzhiyun 
5420*4882a593Smuzhiyun 	if (unlikely(IS_IMMUTABLE(inode)))
5421*4882a593Smuzhiyun 		return -EPERM;
5422*4882a593Smuzhiyun 
5423*4882a593Smuzhiyun 	if (unlikely(IS_APPEND(inode) &&
5424*4882a593Smuzhiyun 		     (ia_valid & (ATTR_MODE | ATTR_UID |
5425*4882a593Smuzhiyun 				  ATTR_GID | ATTR_TIMES_SET))))
5426*4882a593Smuzhiyun 		return -EPERM;
5427*4882a593Smuzhiyun 
5428*4882a593Smuzhiyun 	error = setattr_prepare(dentry, attr);
5429*4882a593Smuzhiyun 	if (error)
5430*4882a593Smuzhiyun 		return error;
5431*4882a593Smuzhiyun 
5432*4882a593Smuzhiyun 	error = fscrypt_prepare_setattr(dentry, attr);
5433*4882a593Smuzhiyun 	if (error)
5434*4882a593Smuzhiyun 		return error;
5435*4882a593Smuzhiyun 
5436*4882a593Smuzhiyun 	error = fsverity_prepare_setattr(dentry, attr);
5437*4882a593Smuzhiyun 	if (error)
5438*4882a593Smuzhiyun 		return error;
5439*4882a593Smuzhiyun 
5440*4882a593Smuzhiyun 	if (is_quota_modification(inode, attr)) {
5441*4882a593Smuzhiyun 		error = dquot_initialize(inode);
5442*4882a593Smuzhiyun 		if (error)
5443*4882a593Smuzhiyun 			return error;
5444*4882a593Smuzhiyun 	}
5445*4882a593Smuzhiyun 	ext4_fc_start_update(inode);
5446*4882a593Smuzhiyun 	if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
5447*4882a593Smuzhiyun 	    (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
5448*4882a593Smuzhiyun 		handle_t *handle;
5449*4882a593Smuzhiyun 
5450*4882a593Smuzhiyun 		/* (user+group)*(old+new) structure, inode write (sb,
5451*4882a593Smuzhiyun 		 * inode block, ? - but truncate inode update has it) */
5452*4882a593Smuzhiyun 		handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
5453*4882a593Smuzhiyun 			(EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
5454*4882a593Smuzhiyun 			 EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
5455*4882a593Smuzhiyun 		if (IS_ERR(handle)) {
5456*4882a593Smuzhiyun 			error = PTR_ERR(handle);
5457*4882a593Smuzhiyun 			goto err_out;
5458*4882a593Smuzhiyun 		}
5459*4882a593Smuzhiyun 
5460*4882a593Smuzhiyun 		/* dquot_transfer() calls back ext4_get_inode_usage() which
5461*4882a593Smuzhiyun 		 * counts xattr inode references.
5462*4882a593Smuzhiyun 		 */
5463*4882a593Smuzhiyun 		down_read(&EXT4_I(inode)->xattr_sem);
5464*4882a593Smuzhiyun 		error = dquot_transfer(inode, attr);
5465*4882a593Smuzhiyun 		up_read(&EXT4_I(inode)->xattr_sem);
5466*4882a593Smuzhiyun 
5467*4882a593Smuzhiyun 		if (error) {
5468*4882a593Smuzhiyun 			ext4_journal_stop(handle);
5469*4882a593Smuzhiyun 			ext4_fc_stop_update(inode);
5470*4882a593Smuzhiyun 			return error;
5471*4882a593Smuzhiyun 		}
5472*4882a593Smuzhiyun 		/* Update corresponding info in inode so that everything is in
5473*4882a593Smuzhiyun 		 * one transaction */
5474*4882a593Smuzhiyun 		if (attr->ia_valid & ATTR_UID)
5475*4882a593Smuzhiyun 			inode->i_uid = attr->ia_uid;
5476*4882a593Smuzhiyun 		if (attr->ia_valid & ATTR_GID)
5477*4882a593Smuzhiyun 			inode->i_gid = attr->ia_gid;
5478*4882a593Smuzhiyun 		error = ext4_mark_inode_dirty(handle, inode);
5479*4882a593Smuzhiyun 		ext4_journal_stop(handle);
5480*4882a593Smuzhiyun 		if (unlikely(error)) {
5481*4882a593Smuzhiyun 			ext4_fc_stop_update(inode);
5482*4882a593Smuzhiyun 			return error;
5483*4882a593Smuzhiyun 		}
5484*4882a593Smuzhiyun 	}
5485*4882a593Smuzhiyun 
5486*4882a593Smuzhiyun 	if (attr->ia_valid & ATTR_SIZE) {
5487*4882a593Smuzhiyun 		handle_t *handle;
5488*4882a593Smuzhiyun 		loff_t oldsize = inode->i_size;
5489*4882a593Smuzhiyun 		loff_t old_disksize;
5490*4882a593Smuzhiyun 		int shrink = (attr->ia_size < inode->i_size);
5491*4882a593Smuzhiyun 
5492*4882a593Smuzhiyun 		if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
5493*4882a593Smuzhiyun 			struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
5494*4882a593Smuzhiyun 
5495*4882a593Smuzhiyun 			if (attr->ia_size > sbi->s_bitmap_maxbytes) {
5496*4882a593Smuzhiyun 				ext4_fc_stop_update(inode);
5497*4882a593Smuzhiyun 				return -EFBIG;
5498*4882a593Smuzhiyun 			}
5499*4882a593Smuzhiyun 		}
5500*4882a593Smuzhiyun 		if (!S_ISREG(inode->i_mode)) {
5501*4882a593Smuzhiyun 			ext4_fc_stop_update(inode);
5502*4882a593Smuzhiyun 			return -EINVAL;
5503*4882a593Smuzhiyun 		}
5504*4882a593Smuzhiyun 
5505*4882a593Smuzhiyun 		if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
5506*4882a593Smuzhiyun 			inode_inc_iversion(inode);
5507*4882a593Smuzhiyun 
5508*4882a593Smuzhiyun 		if (shrink) {
5509*4882a593Smuzhiyun 			if (ext4_should_order_data(inode)) {
5510*4882a593Smuzhiyun 				error = ext4_begin_ordered_truncate(inode,
5511*4882a593Smuzhiyun 							    attr->ia_size);
5512*4882a593Smuzhiyun 				if (error)
5513*4882a593Smuzhiyun 					goto err_out;
5514*4882a593Smuzhiyun 			}
5515*4882a593Smuzhiyun 			/*
5516*4882a593Smuzhiyun 			 * Blocks are going to be removed from the inode. Wait
5517*4882a593Smuzhiyun 			 * for dio in flight.
5518*4882a593Smuzhiyun 			 */
5519*4882a593Smuzhiyun 			inode_dio_wait(inode);
5520*4882a593Smuzhiyun 		}
5521*4882a593Smuzhiyun 
5522*4882a593Smuzhiyun 		down_write(&EXT4_I(inode)->i_mmap_sem);
5523*4882a593Smuzhiyun 
5524*4882a593Smuzhiyun 		rc = ext4_break_layouts(inode);
5525*4882a593Smuzhiyun 		if (rc) {
5526*4882a593Smuzhiyun 			up_write(&EXT4_I(inode)->i_mmap_sem);
5527*4882a593Smuzhiyun 			goto err_out;
5528*4882a593Smuzhiyun 		}
5529*4882a593Smuzhiyun 
5530*4882a593Smuzhiyun 		if (attr->ia_size != inode->i_size) {
5531*4882a593Smuzhiyun 			handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
5532*4882a593Smuzhiyun 			if (IS_ERR(handle)) {
5533*4882a593Smuzhiyun 				error = PTR_ERR(handle);
5534*4882a593Smuzhiyun 				goto out_mmap_sem;
5535*4882a593Smuzhiyun 			}
5536*4882a593Smuzhiyun 			if (ext4_handle_valid(handle) && shrink) {
5537*4882a593Smuzhiyun 				error = ext4_orphan_add(handle, inode);
5538*4882a593Smuzhiyun 				orphan = 1;
5539*4882a593Smuzhiyun 			}
5540*4882a593Smuzhiyun 			/*
5541*4882a593Smuzhiyun 			 * Update c/mtime on truncate up, ext4_truncate() will
5542*4882a593Smuzhiyun 			 * update c/mtime in shrink case below
5543*4882a593Smuzhiyun 			 */
5544*4882a593Smuzhiyun 			if (!shrink) {
5545*4882a593Smuzhiyun 				inode->i_mtime = current_time(inode);
5546*4882a593Smuzhiyun 				inode->i_ctime = inode->i_mtime;
5547*4882a593Smuzhiyun 			}
5548*4882a593Smuzhiyun 
5549*4882a593Smuzhiyun 			if (shrink)
5550*4882a593Smuzhiyun 				ext4_fc_track_range(handle, inode,
5551*4882a593Smuzhiyun 					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5552*4882a593Smuzhiyun 					inode->i_sb->s_blocksize_bits,
5553*4882a593Smuzhiyun 					EXT_MAX_BLOCKS - 1);
5554*4882a593Smuzhiyun 			else
5555*4882a593Smuzhiyun 				ext4_fc_track_range(
5556*4882a593Smuzhiyun 					handle, inode,
5557*4882a593Smuzhiyun 					(oldsize > 0 ? oldsize - 1 : oldsize) >>
5558*4882a593Smuzhiyun 					inode->i_sb->s_blocksize_bits,
5559*4882a593Smuzhiyun 					(attr->ia_size > 0 ? attr->ia_size - 1 : 0) >>
5560*4882a593Smuzhiyun 					inode->i_sb->s_blocksize_bits);
5561*4882a593Smuzhiyun 
5562*4882a593Smuzhiyun 			down_write(&EXT4_I(inode)->i_data_sem);
5563*4882a593Smuzhiyun 			old_disksize = EXT4_I(inode)->i_disksize;
5564*4882a593Smuzhiyun 			EXT4_I(inode)->i_disksize = attr->ia_size;
5565*4882a593Smuzhiyun 			rc = ext4_mark_inode_dirty(handle, inode);
5566*4882a593Smuzhiyun 			if (!error)
5567*4882a593Smuzhiyun 				error = rc;
5568*4882a593Smuzhiyun 			/*
5569*4882a593Smuzhiyun 			 * We have to update i_size under i_data_sem together
5570*4882a593Smuzhiyun 			 * with i_disksize to avoid races with writeback code
5571*4882a593Smuzhiyun 			 * running ext4_wb_update_i_disksize().
5572*4882a593Smuzhiyun 			 */
5573*4882a593Smuzhiyun 			if (!error)
5574*4882a593Smuzhiyun 				i_size_write(inode, attr->ia_size);
5575*4882a593Smuzhiyun 			else
5576*4882a593Smuzhiyun 				EXT4_I(inode)->i_disksize = old_disksize;
5577*4882a593Smuzhiyun 			up_write(&EXT4_I(inode)->i_data_sem);
5578*4882a593Smuzhiyun 			ext4_journal_stop(handle);
5579*4882a593Smuzhiyun 			if (error)
5580*4882a593Smuzhiyun 				goto out_mmap_sem;
5581*4882a593Smuzhiyun 			if (!shrink) {
5582*4882a593Smuzhiyun 				pagecache_isize_extended(inode, oldsize,
5583*4882a593Smuzhiyun 							 inode->i_size);
5584*4882a593Smuzhiyun 			} else if (ext4_should_journal_data(inode)) {
5585*4882a593Smuzhiyun 				ext4_wait_for_tail_page_commit(inode);
5586*4882a593Smuzhiyun 			}
5587*4882a593Smuzhiyun 		}
5588*4882a593Smuzhiyun 
5589*4882a593Smuzhiyun 		/*
5590*4882a593Smuzhiyun 		 * Truncate pagecache after we've waited for commit
5591*4882a593Smuzhiyun 		 * in data=journal mode to make pages freeable.
5592*4882a593Smuzhiyun 		 */
5593*4882a593Smuzhiyun 		truncate_pagecache(inode, inode->i_size);
5594*4882a593Smuzhiyun 		/*
5595*4882a593Smuzhiyun 		 * Call ext4_truncate() even if i_size didn't change to
5596*4882a593Smuzhiyun 		 * truncate possible preallocated blocks.
5597*4882a593Smuzhiyun 		 */
5598*4882a593Smuzhiyun 		if (attr->ia_size <= oldsize) {
5599*4882a593Smuzhiyun 			rc = ext4_truncate(inode);
5600*4882a593Smuzhiyun 			if (rc)
5601*4882a593Smuzhiyun 				error = rc;
5602*4882a593Smuzhiyun 		}
5603*4882a593Smuzhiyun out_mmap_sem:
5604*4882a593Smuzhiyun 		up_write(&EXT4_I(inode)->i_mmap_sem);
5605*4882a593Smuzhiyun 	}
5606*4882a593Smuzhiyun 
5607*4882a593Smuzhiyun 	if (!error) {
5608*4882a593Smuzhiyun 		setattr_copy(inode, attr);
5609*4882a593Smuzhiyun 		mark_inode_dirty(inode);
5610*4882a593Smuzhiyun 	}
5611*4882a593Smuzhiyun 
5612*4882a593Smuzhiyun 	/*
5613*4882a593Smuzhiyun 	 * If the call to ext4_truncate failed to get a transaction handle at
5614*4882a593Smuzhiyun 	 * all, we need to clean up the in-core orphan list manually.
5615*4882a593Smuzhiyun 	 */
5616*4882a593Smuzhiyun 	if (orphan && inode->i_nlink)
5617*4882a593Smuzhiyun 		ext4_orphan_del(NULL, inode);
5618*4882a593Smuzhiyun 
5619*4882a593Smuzhiyun 	if (!error && (ia_valid & ATTR_MODE))
5620*4882a593Smuzhiyun 		rc = posix_acl_chmod(inode, inode->i_mode);
5621*4882a593Smuzhiyun 
5622*4882a593Smuzhiyun err_out:
5623*4882a593Smuzhiyun 	if  (error)
5624*4882a593Smuzhiyun 		ext4_std_error(inode->i_sb, error);
5625*4882a593Smuzhiyun 	if (!error)
5626*4882a593Smuzhiyun 		error = rc;
5627*4882a593Smuzhiyun 	ext4_fc_stop_update(inode);
5628*4882a593Smuzhiyun 	return error;
5629*4882a593Smuzhiyun }
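/*
 * A small sketch of the logical-block range arithmetic that ext4_setattr()
 * feeds to ext4_fc_track_range() above.  Hypothetical demo_* names; assumes
 * 4K blocks (s_blocksize_bits == 12).
 */
#include <stdio.h>

#define DEMO_BLKBITS		12
#define DEMO_EXT_MAX_BLOCKS	0xffffffffUL	/* stand-in for EXT_MAX_BLOCKS */

/* Last logical block covered by @size bytes (block 0 for an empty file). */
static unsigned long demo_last_lblk(long long size)
{
	return (unsigned long)((size > 0 ? size - 1 : 0) >> DEMO_BLKBITS);
}

int main(void)
{
	/* Shrink 100KiB -> 4KiB: track from the new EOF block to the end. */
	printf("shrink: [%lu, %lu]\n", demo_last_lblk(4096),
	       DEMO_EXT_MAX_BLOCKS - 1);			/* [0, ...] */

	/* Grow 12KiB -> 64KiB: track between the old and new EOF blocks. */
	printf("grow:   [%lu, %lu]\n", demo_last_lblk(12288),
	       demo_last_lblk(65536));				/* [2, 15] */
	return 0;
}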
5630*4882a593Smuzhiyun 
5631*4882a593Smuzhiyun int ext4_getattr(const struct path *path, struct kstat *stat,
5632*4882a593Smuzhiyun 		 u32 request_mask, unsigned int query_flags)
5633*4882a593Smuzhiyun {
5634*4882a593Smuzhiyun 	struct inode *inode = d_inode(path->dentry);
5635*4882a593Smuzhiyun 	struct ext4_inode *raw_inode;
5636*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
5637*4882a593Smuzhiyun 	unsigned int flags;
5638*4882a593Smuzhiyun 
5639*4882a593Smuzhiyun 	if ((request_mask & STATX_BTIME) &&
5640*4882a593Smuzhiyun 	    EXT4_FITS_IN_INODE(raw_inode, ei, i_crtime)) {
5641*4882a593Smuzhiyun 		stat->result_mask |= STATX_BTIME;
5642*4882a593Smuzhiyun 		stat->btime.tv_sec = ei->i_crtime.tv_sec;
5643*4882a593Smuzhiyun 		stat->btime.tv_nsec = ei->i_crtime.tv_nsec;
5644*4882a593Smuzhiyun 	}
5645*4882a593Smuzhiyun 
5646*4882a593Smuzhiyun 	flags = ei->i_flags & EXT4_FL_USER_VISIBLE;
5647*4882a593Smuzhiyun 	if (flags & EXT4_APPEND_FL)
5648*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_APPEND;
5649*4882a593Smuzhiyun 	if (flags & EXT4_COMPR_FL)
5650*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_COMPRESSED;
5651*4882a593Smuzhiyun 	if (flags & EXT4_ENCRYPT_FL)
5652*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_ENCRYPTED;
5653*4882a593Smuzhiyun 	if (flags & EXT4_IMMUTABLE_FL)
5654*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_IMMUTABLE;
5655*4882a593Smuzhiyun 	if (flags & EXT4_NODUMP_FL)
5656*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_NODUMP;
5657*4882a593Smuzhiyun 	if (flags & EXT4_VERITY_FL)
5658*4882a593Smuzhiyun 		stat->attributes |= STATX_ATTR_VERITY;
5659*4882a593Smuzhiyun 
5660*4882a593Smuzhiyun 	stat->attributes_mask |= (STATX_ATTR_APPEND |
5661*4882a593Smuzhiyun 				  STATX_ATTR_COMPRESSED |
5662*4882a593Smuzhiyun 				  STATX_ATTR_ENCRYPTED |
5663*4882a593Smuzhiyun 				  STATX_ATTR_IMMUTABLE |
5664*4882a593Smuzhiyun 				  STATX_ATTR_NODUMP |
5665*4882a593Smuzhiyun 				  STATX_ATTR_VERITY);
5666*4882a593Smuzhiyun 
5667*4882a593Smuzhiyun 	generic_fillattr(inode, stat);
5668*4882a593Smuzhiyun 	return 0;
5669*4882a593Smuzhiyun }
5670*4882a593Smuzhiyun 
5671*4882a593Smuzhiyun int ext4_file_getattr(const struct path *path, struct kstat *stat,
5672*4882a593Smuzhiyun 		      u32 request_mask, unsigned int query_flags)
5673*4882a593Smuzhiyun {
5674*4882a593Smuzhiyun 	struct inode *inode = d_inode(path->dentry);
5675*4882a593Smuzhiyun 	u64 delalloc_blocks;
5676*4882a593Smuzhiyun 
5677*4882a593Smuzhiyun 	ext4_getattr(path, stat, request_mask, query_flags);
5678*4882a593Smuzhiyun 
5679*4882a593Smuzhiyun 	/*
5680*4882a593Smuzhiyun 	 * If there is inline data in the inode, the inode will normally not
5681*4882a593Smuzhiyun 	 * have data blocks allocated (it may have an external xattr block).
5682*4882a593Smuzhiyun 	 * Report at least one sector for such files, so tools like tar, rsync,
5683*4882a593Smuzhiyun  * Report at least one sector for such files, so tools like tar, rsync,
5684*4882a593Smuzhiyun  * and others don't incorrectly think the file is completely sparse.
5685*4882a593Smuzhiyun 	if (unlikely(ext4_has_inline_data(inode)))
5686*4882a593Smuzhiyun 		stat->blocks += (stat->size + 511) >> 9;
5687*4882a593Smuzhiyun 
5688*4882a593Smuzhiyun 	/*
5689*4882a593Smuzhiyun  * We can't update i_blocks if the block allocation is delayed;
5690*4882a593Smuzhiyun  * otherwise, in the case of a system crash before the real block
5691*4882a593Smuzhiyun 	 * allocation is done, we will have i_blocks inconsistent with
5692*4882a593Smuzhiyun 	 * on-disk file blocks.
5693*4882a593Smuzhiyun 	 * We always keep i_blocks updated together with real
5694*4882a593Smuzhiyun 	 * allocation. But to not confuse with user, stat
5695*4882a593Smuzhiyun  * allocation. But to avoid confusing userspace, stat
5696*4882a593Smuzhiyun  * returns the blocks that include the delayed allocation
5697*4882a593Smuzhiyun 	 */
5698*4882a593Smuzhiyun 	delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
5699*4882a593Smuzhiyun 				   EXT4_I(inode)->i_reserved_data_blocks);
5700*4882a593Smuzhiyun 	stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
5701*4882a593Smuzhiyun 	return 0;
5702*4882a593Smuzhiyun }
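/*
 * A sketch of the two stat->blocks adjustments made above, as standalone
 * userspace C.  demo_* names are hypothetical; assumes 512-byte stat sectors
 * and 4K filesystem blocks (blkbits == 12).
 */
#include <stdio.h>

static unsigned long long demo_stat_blocks(unsigned long long size,
					   unsigned long long i_blocks,
					   unsigned long long delalloc_blocks,
					   int has_inline_data)
{
	unsigned long long blocks = i_blocks;	/* 512-byte units */

	/* Inline data: count the inline bytes as sectors so the file
	 * doesn't look completely sparse to tar, rsync and friends. */
	if (has_inline_data)
		blocks += (size + 511) >> 9;

	/* Fold in reserved delalloc blocks, fs blocks -> sectors. */
	blocks += delalloc_blocks << (12 - 9);
	return blocks;
}

int main(void)
{
	/* 100-byte inline file: reported as one sector, not zero. */
	printf("%llu\n", demo_stat_blocks(100, 0, 0, 1));	/* 1 */
	/* 8KiB buffered write, still delalloc: 2 blocks = 16 sectors. */
	printf("%llu\n", demo_stat_blocks(8192, 0, 2, 0));	/* 16 */
	return 0;
}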
5703*4882a593Smuzhiyun 
5704*4882a593Smuzhiyun static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
5705*4882a593Smuzhiyun 				   int pextents)
5706*4882a593Smuzhiyun {
5707*4882a593Smuzhiyun 	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5708*4882a593Smuzhiyun 		return ext4_ind_trans_blocks(inode, lblocks);
5709*4882a593Smuzhiyun 	return ext4_ext_index_trans_blocks(inode, pextents);
5710*4882a593Smuzhiyun }
5711*4882a593Smuzhiyun 
5712*4882a593Smuzhiyun /*
5713*4882a593Smuzhiyun  * Account for index blocks, block group bitmaps and block group
5714*4882a593Smuzhiyun  * descriptor blocks when modifying data blocks and index blocks;
5715*4882a593Smuzhiyun  * in the worst case, the index blocks spread over different block groups.
5716*4882a593Smuzhiyun  *
5717*4882a593Smuzhiyun  * If data blocks are discontiguous, they may spread over different
5718*4882a593Smuzhiyun  * block groups too. Even if they are contiguous, with flexbg they
5719*4882a593Smuzhiyun  * could still cross a block group boundary.
5720*4882a593Smuzhiyun  *
5721*4882a593Smuzhiyun  * Also account for superblock, inode, quota and xattr blocks
5722*4882a593Smuzhiyun  */
5723*4882a593Smuzhiyun static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
5724*4882a593Smuzhiyun 				  int pextents)
5725*4882a593Smuzhiyun {
5726*4882a593Smuzhiyun 	ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
5727*4882a593Smuzhiyun 	int gdpblocks;
5728*4882a593Smuzhiyun 	int idxblocks;
5729*4882a593Smuzhiyun 	int ret = 0;
5730*4882a593Smuzhiyun 
5731*4882a593Smuzhiyun 	/*
5732*4882a593Smuzhiyun 	 * How many index blocks need to touch to map @lblocks logical blocks
5733*4882a593Smuzhiyun 	 * to @pextents physical extents?
5734*4882a593Smuzhiyun 	 */
5735*4882a593Smuzhiyun 	idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
5736*4882a593Smuzhiyun 
5737*4882a593Smuzhiyun 	ret = idxblocks;
5738*4882a593Smuzhiyun 
5739*4882a593Smuzhiyun 	/*
5740*4882a593Smuzhiyun 	 * Now let's see how many group bitmaps and group descriptors need
5741*4882a593Smuzhiyun 	 * to account
5742*4882a593Smuzhiyun 	 */
5743*4882a593Smuzhiyun 	groups = idxblocks + pextents;
5744*4882a593Smuzhiyun 	gdpblocks = groups;
5745*4882a593Smuzhiyun 	if (groups > ngroups)
5746*4882a593Smuzhiyun 		groups = ngroups;
5747*4882a593Smuzhiyun 	if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
5748*4882a593Smuzhiyun 		gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
5749*4882a593Smuzhiyun 
5750*4882a593Smuzhiyun 	/* bitmaps and block group descriptor blocks */
5751*4882a593Smuzhiyun 	ret += groups + gdpblocks;
5752*4882a593Smuzhiyun 
5753*4882a593Smuzhiyun 	/* Blocks for super block, inode, quota and xattr blocks */
5754*4882a593Smuzhiyun 	ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
5755*4882a593Smuzhiyun 
5756*4882a593Smuzhiyun 	return ret;
5757*4882a593Smuzhiyun }
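/*
 * The credit accounting above, restated as a standalone sketch with
 * hypothetical geometry (the DEMO_* constants stand in for values that ext4
 * reads from the superblock).
 */
#include <stdio.h>

#define DEMO_NGROUPS	 64	/* ext4_get_groups_count() stand-in */
#define DEMO_GDB_COUNT	  2	/* sbi->s_gdb_count stand-in */
#define DEMO_META_BLOCKS 22	/* EXT4_META_TRANS_BLOCKS() stand-in */

static int demo_meta_trans_blocks(int idxblocks, int pextents)
{
	/* Worst case: every touched block lives in its own group. */
	int groups = idxblocks + pextents;
	int gdpblocks = groups;

	if (groups > DEMO_NGROUPS)
		groups = DEMO_NGROUPS;
	if (groups > DEMO_GDB_COUNT)
		gdpblocks = DEMO_GDB_COUNT;

	/* index blocks + bitmaps + descriptors + sb/inode/quota/xattr */
	return idxblocks + groups + gdpblocks + DEMO_META_BLOCKS;
}

int main(void)
{
	/* 2 index blocks touched while mapping 3 physical extents:
	 * 2 + 5 bitmaps + 2 descriptor blocks + 22 = 31 credits. */
	printf("credits = %d\n", demo_meta_trans_blocks(2, 3));
	return 0;
}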
5758*4882a593Smuzhiyun 
5759*4882a593Smuzhiyun /*
5760*4882a593Smuzhiyun  * Calculate the total number of credits to reserve to fit
5761*4882a593Smuzhiyun  * the modification of a single page into a single transaction,
5762*4882a593Smuzhiyun  * which may include multiple chunks of block allocations.
5763*4882a593Smuzhiyun  *
5764*4882a593Smuzhiyun  * This could be called via ext4_write_begin()
5765*4882a593Smuzhiyun  *
5766*4882a593Smuzhiyun  * We need to consider the worst case, when
5767*4882a593Smuzhiyun  * we allocate one new block per extent.
5768*4882a593Smuzhiyun  */
5769*4882a593Smuzhiyun int ext4_writepage_trans_blocks(struct inode *inode)
5770*4882a593Smuzhiyun {
5771*4882a593Smuzhiyun 	int bpp = ext4_journal_blocks_per_page(inode);
5772*4882a593Smuzhiyun 	int ret;
5773*4882a593Smuzhiyun 
5774*4882a593Smuzhiyun 	ret = ext4_meta_trans_blocks(inode, bpp, bpp);
5775*4882a593Smuzhiyun 
5776*4882a593Smuzhiyun 	/* Account for data blocks for journalled mode */
5777*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode))
5778*4882a593Smuzhiyun 		ret += bpp;
5779*4882a593Smuzhiyun 	return ret;
5780*4882a593Smuzhiyun }
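/*
 * A sketch of the per-page credit estimate above.  demo_meta_credits() is a
 * rough stand-in for ext4_meta_trans_blocks(inode, bpp, bpp) (see the sketch
 * after that function); assumes 4K pages.
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096

static int demo_meta_credits(int bpp)
{
	return 3 * bpp + 22;		/* crude stand-in, not the real sum */
}

static int demo_writepage_trans_blocks(int blocksize, int data_journal)
{
	int bpp = DEMO_PAGE_SIZE / blocksize;	/* blocks per page */
	int ret = demo_meta_credits(bpp);

	if (data_journal)	/* journalled data blocks need credits too */
		ret += bpp;
	return ret;
}

int main(void)
{
	printf("4K blocks, ordered:      %d\n",
	       demo_writepage_trans_blocks(4096, 0));	/* 25 */
	printf("1K blocks, data=journal: %d\n",
	       demo_writepage_trans_blocks(1024, 1));	/* 38 */
	return 0;
}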
5781*4882a593Smuzhiyun 
5782*4882a593Smuzhiyun /*
5783*4882a593Smuzhiyun  * Calculate the journal credits for a chunk of data modification.
5784*4882a593Smuzhiyun  *
5785*4882a593Smuzhiyun  * This is called from DIO, fallocate or whoever calls
5786*4882a593Smuzhiyun  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
5787*4882a593Smuzhiyun  *
5788*4882a593Smuzhiyun  * Journal buffers for data blocks are not included here, as DIO
5789*4882a593Smuzhiyun  * and fallocate do not need to journal data buffers.
5790*4882a593Smuzhiyun  */
5791*4882a593Smuzhiyun int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
5792*4882a593Smuzhiyun {
5793*4882a593Smuzhiyun 	return ext4_meta_trans_blocks(inode, nrblocks, 1);
5794*4882a593Smuzhiyun }
5795*4882a593Smuzhiyun 
5796*4882a593Smuzhiyun /*
5797*4882a593Smuzhiyun  * The caller must have previously called ext4_reserve_inode_write().
5798*4882a593Smuzhiyun  * Given this, we know that the caller already has write access to iloc->bh.
5799*4882a593Smuzhiyun  */
5800*4882a593Smuzhiyun int ext4_mark_iloc_dirty(handle_t *handle,
5801*4882a593Smuzhiyun 			 struct inode *inode, struct ext4_iloc *iloc)
5802*4882a593Smuzhiyun {
5803*4882a593Smuzhiyun 	int err = 0;
5804*4882a593Smuzhiyun 
5805*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
5806*4882a593Smuzhiyun 		put_bh(iloc->bh);
5807*4882a593Smuzhiyun 		return -EIO;
5808*4882a593Smuzhiyun 	}
5809*4882a593Smuzhiyun 	ext4_fc_track_inode(handle, inode);
5810*4882a593Smuzhiyun 
5811*4882a593Smuzhiyun 	/*
5812*4882a593Smuzhiyun 	 * ea_inodes are using i_version for storing reference count, don't
5813*4882a593Smuzhiyun 	 * mess with it
5814*4882a593Smuzhiyun 	 */
5815*4882a593Smuzhiyun 	if (IS_I_VERSION(inode) &&
5816*4882a593Smuzhiyun 	    !(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
5817*4882a593Smuzhiyun 		inode_inc_iversion(inode);
5818*4882a593Smuzhiyun 
5819*4882a593Smuzhiyun 	/* the do_update_inode consumes one bh->b_count */
5820*4882a593Smuzhiyun 	get_bh(iloc->bh);
5821*4882a593Smuzhiyun 
5822*4882a593Smuzhiyun 	/* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
5823*4882a593Smuzhiyun 	err = ext4_do_update_inode(handle, inode, iloc);
5824*4882a593Smuzhiyun 	put_bh(iloc->bh);
5825*4882a593Smuzhiyun 	return err;
5826*4882a593Smuzhiyun }
5827*4882a593Smuzhiyun 
5828*4882a593Smuzhiyun /*
5829*4882a593Smuzhiyun  * On success, we end up with an outstanding reference count against
5830*4882a593Smuzhiyun  * iloc->bh.  This _must_ be cleaned up later.
5831*4882a593Smuzhiyun  */
5832*4882a593Smuzhiyun 
5833*4882a593Smuzhiyun int
5834*4882a593Smuzhiyun ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
5835*4882a593Smuzhiyun 			 struct ext4_iloc *iloc)
5836*4882a593Smuzhiyun {
5837*4882a593Smuzhiyun 	int err;
5838*4882a593Smuzhiyun 
5839*4882a593Smuzhiyun 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
5840*4882a593Smuzhiyun 		return -EIO;
5841*4882a593Smuzhiyun 
5842*4882a593Smuzhiyun 	err = ext4_get_inode_loc(inode, iloc);
5843*4882a593Smuzhiyun 	if (!err) {
5844*4882a593Smuzhiyun 		BUFFER_TRACE(iloc->bh, "get_write_access");
5845*4882a593Smuzhiyun 		err = ext4_journal_get_write_access(handle, iloc->bh);
5846*4882a593Smuzhiyun 		if (err) {
5847*4882a593Smuzhiyun 			brelse(iloc->bh);
5848*4882a593Smuzhiyun 			iloc->bh = NULL;
5849*4882a593Smuzhiyun 		}
5850*4882a593Smuzhiyun 	}
5851*4882a593Smuzhiyun 	ext4_std_error(inode->i_sb, err);
5852*4882a593Smuzhiyun 	return err;
5853*4882a593Smuzhiyun }
5854*4882a593Smuzhiyun 
5855*4882a593Smuzhiyun static int __ext4_expand_extra_isize(struct inode *inode,
5856*4882a593Smuzhiyun 				     unsigned int new_extra_isize,
5857*4882a593Smuzhiyun 				     struct ext4_iloc *iloc,
5858*4882a593Smuzhiyun 				     handle_t *handle, int *no_expand)
5859*4882a593Smuzhiyun {
5860*4882a593Smuzhiyun 	struct ext4_inode *raw_inode;
5861*4882a593Smuzhiyun 	struct ext4_xattr_ibody_header *header;
5862*4882a593Smuzhiyun 	unsigned int inode_size = EXT4_INODE_SIZE(inode->i_sb);
5863*4882a593Smuzhiyun 	struct ext4_inode_info *ei = EXT4_I(inode);
5864*4882a593Smuzhiyun 	int error;
5865*4882a593Smuzhiyun 
5866*4882a593Smuzhiyun 	/* this was checked at iget time, but double check for good measure */
5867*4882a593Smuzhiyun 	if ((EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize > inode_size) ||
5868*4882a593Smuzhiyun 	    (ei->i_extra_isize & 3)) {
5869*4882a593Smuzhiyun 		EXT4_ERROR_INODE(inode, "bad extra_isize %u (inode size %u)",
5870*4882a593Smuzhiyun 				 ei->i_extra_isize,
5871*4882a593Smuzhiyun 				 EXT4_INODE_SIZE(inode->i_sb));
5872*4882a593Smuzhiyun 		return -EFSCORRUPTED;
5873*4882a593Smuzhiyun 	}
5874*4882a593Smuzhiyun 	if ((new_extra_isize < ei->i_extra_isize) ||
5875*4882a593Smuzhiyun 	    (new_extra_isize < 4) ||
5876*4882a593Smuzhiyun 	    (new_extra_isize > inode_size - EXT4_GOOD_OLD_INODE_SIZE))
5877*4882a593Smuzhiyun 		return -EINVAL;	/* Should never happen */
5878*4882a593Smuzhiyun 
5879*4882a593Smuzhiyun 	raw_inode = ext4_raw_inode(iloc);
5880*4882a593Smuzhiyun 
5881*4882a593Smuzhiyun 	header = IHDR(inode, raw_inode);
5882*4882a593Smuzhiyun 
5883*4882a593Smuzhiyun 	/* No extended attributes present */
5884*4882a593Smuzhiyun 	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
5885*4882a593Smuzhiyun 	    header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
5886*4882a593Smuzhiyun 		memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
5887*4882a593Smuzhiyun 		       EXT4_I(inode)->i_extra_isize, 0,
5888*4882a593Smuzhiyun 		       new_extra_isize - EXT4_I(inode)->i_extra_isize);
5889*4882a593Smuzhiyun 		EXT4_I(inode)->i_extra_isize = new_extra_isize;
5890*4882a593Smuzhiyun 		return 0;
5891*4882a593Smuzhiyun 	}
5892*4882a593Smuzhiyun 
5893*4882a593Smuzhiyun 	/* try to expand with EAs present */
5894*4882a593Smuzhiyun 	error = ext4_expand_extra_isize_ea(inode, new_extra_isize,
5895*4882a593Smuzhiyun 					   raw_inode, handle);
5896*4882a593Smuzhiyun 	if (error) {
5897*4882a593Smuzhiyun 		/*
5898*4882a593Smuzhiyun 		 * Inode size expansion failed; don't try again
5899*4882a593Smuzhiyun 		 */
5900*4882a593Smuzhiyun 		*no_expand = 1;
5901*4882a593Smuzhiyun 	}
5902*4882a593Smuzhiyun 
5903*4882a593Smuzhiyun 	return error;
5904*4882a593Smuzhiyun }
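/*
 * The sanity checks at the top of __ext4_expand_extra_isize(), as a
 * standalone sketch.  demo_* names are hypothetical; 128 is
 * EXT4_GOOD_OLD_INODE_SIZE.
 */
#include <stdio.h>

#define DEMO_GOOD_OLD_INODE_SIZE 128U

static int demo_extra_isize_ok(unsigned int cur, unsigned int want,
			       unsigned int inode_size)
{
	/* current value must fit and be 4-byte aligned (-EFSCORRUPTED) */
	if (DEMO_GOOD_OLD_INODE_SIZE + cur > inode_size || (cur & 3))
		return 0;
	/* new value may only grow and must fit as well (-EINVAL) */
	if (want < cur || want < 4 ||
	    want > inode_size - DEMO_GOOD_OLD_INODE_SIZE)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", demo_extra_isize_ok(28, 32, 256));	/* 1: grow ok */
	printf("%d\n", demo_extra_isize_ok(32, 28, 256));	/* 0: shrink */
	printf("%d\n", demo_extra_isize_ok(0, 32, 128));	/* 0: no room */
	return 0;
}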
5905*4882a593Smuzhiyun 
5906*4882a593Smuzhiyun /*
5907*4882a593Smuzhiyun  * Expand an inode by new_extra_isize bytes.
5908*4882a593Smuzhiyun  * Returns 0 on success or negative error number on failure.
5909*4882a593Smuzhiyun  */
5910*4882a593Smuzhiyun static int ext4_try_to_expand_extra_isize(struct inode *inode,
5911*4882a593Smuzhiyun 					  unsigned int new_extra_isize,
5912*4882a593Smuzhiyun 					  struct ext4_iloc iloc,
5913*4882a593Smuzhiyun 					  handle_t *handle)
5914*4882a593Smuzhiyun {
5915*4882a593Smuzhiyun 	int no_expand;
5916*4882a593Smuzhiyun 	int error;
5917*4882a593Smuzhiyun 
5918*4882a593Smuzhiyun 	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND))
5919*4882a593Smuzhiyun 		return -EOVERFLOW;
5920*4882a593Smuzhiyun 
5921*4882a593Smuzhiyun 	/*
5922*4882a593Smuzhiyun 	 * In nojournal mode, we can immediately attempt to expand
5923*4882a593Smuzhiyun 	 * the inode.  When journaled, we first need to obtain extra
5924*4882a593Smuzhiyun 	 * buffer credits since we may write into the EA block
5925*4882a593Smuzhiyun 	 * with this same handle. If journal_extend fails, then it will
5926*4882a593Smuzhiyun 	 * only result in a minor loss of functionality for that inode.
5927*4882a593Smuzhiyun 	 * If this is felt to be critical, then e2fsck should be run to
5928*4882a593Smuzhiyun 	 * force a large enough s_min_extra_isize.
5929*4882a593Smuzhiyun 	 */
5930*4882a593Smuzhiyun 	if (ext4_journal_extend(handle,
5931*4882a593Smuzhiyun 				EXT4_DATA_TRANS_BLOCKS(inode->i_sb), 0) != 0)
5932*4882a593Smuzhiyun 		return -ENOSPC;
5933*4882a593Smuzhiyun 
5934*4882a593Smuzhiyun 	if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
5935*4882a593Smuzhiyun 		return -EBUSY;
5936*4882a593Smuzhiyun 
5937*4882a593Smuzhiyun 	error = __ext4_expand_extra_isize(inode, new_extra_isize, &iloc,
5938*4882a593Smuzhiyun 					  handle, &no_expand);
5939*4882a593Smuzhiyun 	ext4_write_unlock_xattr(inode, &no_expand);
5940*4882a593Smuzhiyun 
5941*4882a593Smuzhiyun 	return error;
5942*4882a593Smuzhiyun }
5943*4882a593Smuzhiyun 
5944*4882a593Smuzhiyun int ext4_expand_extra_isize(struct inode *inode,
5945*4882a593Smuzhiyun 			    unsigned int new_extra_isize,
5946*4882a593Smuzhiyun 			    struct ext4_iloc *iloc)
5947*4882a593Smuzhiyun {
5948*4882a593Smuzhiyun 	handle_t *handle;
5949*4882a593Smuzhiyun 	int no_expand;
5950*4882a593Smuzhiyun 	int error, rc;
5951*4882a593Smuzhiyun 
5952*4882a593Smuzhiyun 	if (ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
5953*4882a593Smuzhiyun 		brelse(iloc->bh);
5954*4882a593Smuzhiyun 		return -EOVERFLOW;
5955*4882a593Smuzhiyun 	}
5956*4882a593Smuzhiyun 
5957*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_INODE,
5958*4882a593Smuzhiyun 				    EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
5959*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
5960*4882a593Smuzhiyun 		error = PTR_ERR(handle);
5961*4882a593Smuzhiyun 		brelse(iloc->bh);
5962*4882a593Smuzhiyun 		return error;
5963*4882a593Smuzhiyun 	}
5964*4882a593Smuzhiyun 
5965*4882a593Smuzhiyun 	ext4_write_lock_xattr(inode, &no_expand);
5966*4882a593Smuzhiyun 
5967*4882a593Smuzhiyun 	BUFFER_TRACE(iloc->bh, "get_write_access");
5968*4882a593Smuzhiyun 	error = ext4_journal_get_write_access(handle, iloc->bh);
5969*4882a593Smuzhiyun 	if (error) {
5970*4882a593Smuzhiyun 		brelse(iloc->bh);
5971*4882a593Smuzhiyun 		goto out_unlock;
5972*4882a593Smuzhiyun 	}
5973*4882a593Smuzhiyun 
5974*4882a593Smuzhiyun 	error = __ext4_expand_extra_isize(inode, new_extra_isize, iloc,
5975*4882a593Smuzhiyun 					  handle, &no_expand);
5976*4882a593Smuzhiyun 
5977*4882a593Smuzhiyun 	rc = ext4_mark_iloc_dirty(handle, inode, iloc);
5978*4882a593Smuzhiyun 	if (!error)
5979*4882a593Smuzhiyun 		error = rc;
5980*4882a593Smuzhiyun 
5981*4882a593Smuzhiyun out_unlock:
5982*4882a593Smuzhiyun 	ext4_write_unlock_xattr(inode, &no_expand);
5983*4882a593Smuzhiyun 	ext4_journal_stop(handle);
5984*4882a593Smuzhiyun 	return error;
5985*4882a593Smuzhiyun }
5986*4882a593Smuzhiyun 
5987*4882a593Smuzhiyun /*
5988*4882a593Smuzhiyun  * What we do here is to mark the in-core inode as clean with respect to inode
5989*4882a593Smuzhiyun  * dirtiness (it may still be data-dirty).
5990*4882a593Smuzhiyun  * This means that the in-core inode may be reaped by prune_icache
5991*4882a593Smuzhiyun  * without having to perform any I/O.  This is a very good thing,
5992*4882a593Smuzhiyun  * because *any* task may call prune_icache - even ones which
5993*4882a593Smuzhiyun  * have a transaction open against a different journal.
5994*4882a593Smuzhiyun  *
5995*4882a593Smuzhiyun  * Is this cheating?  Not really.  Sure, we haven't written the
5996*4882a593Smuzhiyun  * inode out, but prune_icache isn't a user-visible syncing function.
5997*4882a593Smuzhiyun  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
5998*4882a593Smuzhiyun  * we start and wait on commits.
5999*4882a593Smuzhiyun  */
6000*4882a593Smuzhiyun int __ext4_mark_inode_dirty(handle_t *handle, struct inode *inode,
6001*4882a593Smuzhiyun 				const char *func, unsigned int line)
6002*4882a593Smuzhiyun {
6003*4882a593Smuzhiyun 	struct ext4_iloc iloc;
6004*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6005*4882a593Smuzhiyun 	int err;
6006*4882a593Smuzhiyun 
6007*4882a593Smuzhiyun 	might_sleep();
6008*4882a593Smuzhiyun 	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
6009*4882a593Smuzhiyun 	err = ext4_reserve_inode_write(handle, inode, &iloc);
6010*4882a593Smuzhiyun 	if (err)
6011*4882a593Smuzhiyun 		goto out;
6012*4882a593Smuzhiyun 
6013*4882a593Smuzhiyun 	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize)
6014*4882a593Smuzhiyun 		ext4_try_to_expand_extra_isize(inode, sbi->s_want_extra_isize,
6015*4882a593Smuzhiyun 					       iloc, handle);
6016*4882a593Smuzhiyun 
6017*4882a593Smuzhiyun 	err = ext4_mark_iloc_dirty(handle, inode, &iloc);
6018*4882a593Smuzhiyun out:
6019*4882a593Smuzhiyun 	if (unlikely(err))
6020*4882a593Smuzhiyun 		ext4_error_inode_err(inode, func, line, 0, err,
6021*4882a593Smuzhiyun 					"mark_inode_dirty error");
6022*4882a593Smuzhiyun 	return err;
6023*4882a593Smuzhiyun }
6024*4882a593Smuzhiyun 
6025*4882a593Smuzhiyun /*
6026*4882a593Smuzhiyun  * ext4_dirty_inode() is called from __mark_inode_dirty()
6027*4882a593Smuzhiyun  *
6028*4882a593Smuzhiyun  * We're really interested in the case where a file is being extended.
6029*4882a593Smuzhiyun  * i_size has been changed by generic_commit_write() and we thus need
6030*4882a593Smuzhiyun  * to include the updated inode in the current transaction.
6031*4882a593Smuzhiyun  *
6032*4882a593Smuzhiyun  * Also, dquot_alloc_block() will always dirty the inode when blocks
6033*4882a593Smuzhiyun  * are allocated to the file.
6034*4882a593Smuzhiyun  *
6035*4882a593Smuzhiyun  * If the inode is marked synchronous, we don't honour that here - doing
6036*4882a593Smuzhiyun  * so would cause a commit on atime updates, which we don't bother doing.
6037*4882a593Smuzhiyun  * We handle synchronous inodes at the highest possible level.
6038*4882a593Smuzhiyun  *
6039*4882a593Smuzhiyun  * If only the I_DIRTY_TIME flag is set, we can skip everything.  If
6040*4882a593Smuzhiyun  * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need
6041*4882a593Smuzhiyun  * I_DIRTY_TIME and I_DIRTY_SYNC are set, the only inode fields we need
6042*4882a593Smuzhiyun  * to copy into the on-disk inode structure are the timestamp fields.
6043*4882a593Smuzhiyun void ext4_dirty_inode(struct inode *inode, int flags)
6044*4882a593Smuzhiyun {
6045*4882a593Smuzhiyun 	handle_t *handle;
6046*4882a593Smuzhiyun 
6047*4882a593Smuzhiyun 	if (flags == I_DIRTY_TIME)
6048*4882a593Smuzhiyun 		return;
6049*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
6050*4882a593Smuzhiyun 	if (IS_ERR(handle))
6051*4882a593Smuzhiyun 		goto out;
6052*4882a593Smuzhiyun 
6053*4882a593Smuzhiyun 	ext4_mark_inode_dirty(handle, inode);
6054*4882a593Smuzhiyun 
6055*4882a593Smuzhiyun 	ext4_journal_stop(handle);
6056*4882a593Smuzhiyun out:
6057*4882a593Smuzhiyun 	return;
6058*4882a593Smuzhiyun }
6059*4882a593Smuzhiyun 
6060*4882a593Smuzhiyun int ext4_change_inode_journal_flag(struct inode *inode, int val)
6061*4882a593Smuzhiyun {
6062*4882a593Smuzhiyun 	journal_t *journal;
6063*4882a593Smuzhiyun 	handle_t *handle;
6064*4882a593Smuzhiyun 	int err;
6065*4882a593Smuzhiyun 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
6066*4882a593Smuzhiyun 
6067*4882a593Smuzhiyun 	/*
6068*4882a593Smuzhiyun 	 * We have to be very careful here: changing a data block's
6069*4882a593Smuzhiyun 	 * journaling status dynamically is dangerous.  If we write a
6070*4882a593Smuzhiyun 	 * data block to the journal, change the status and then delete
6071*4882a593Smuzhiyun 	 * that block, we risk forgetting to revoke the old log record
6072*4882a593Smuzhiyun 	 * from the journal and so a subsequent replay can corrupt data.
6073*4882a593Smuzhiyun 	 * So, first we make sure that the journal is empty and that
6074*4882a593Smuzhiyun 	 * nobody is changing anything.
6075*4882a593Smuzhiyun 	 */
6076*4882a593Smuzhiyun 
6077*4882a593Smuzhiyun 	journal = EXT4_JOURNAL(inode);
6078*4882a593Smuzhiyun 	if (!journal)
6079*4882a593Smuzhiyun 		return 0;
6080*4882a593Smuzhiyun 	if (is_journal_aborted(journal))
6081*4882a593Smuzhiyun 		return -EROFS;
6082*4882a593Smuzhiyun 
6083*4882a593Smuzhiyun 	/* Wait for all existing dio workers */
6084*4882a593Smuzhiyun 	inode_dio_wait(inode);
6085*4882a593Smuzhiyun 
6086*4882a593Smuzhiyun 	/*
6087*4882a593Smuzhiyun 	 * Before flushing the journal and switching inode's aops, we have
6088*4882a593Smuzhiyun 	 * to flush all dirty data the inode has. There can be outstanding
6089*4882a593Smuzhiyun 	 * delayed allocations, there can be unwritten extents created by
6090*4882a593Smuzhiyun 	 * fallocate or buffered writes in dioread_nolock mode covered by
6091*4882a593Smuzhiyun 	 * dirty data which can be converted only after flushing the dirty
6092*4882a593Smuzhiyun 	 * data (and journalled aops don't know how to handle these cases).
6093*4882a593Smuzhiyun 	 */
6094*4882a593Smuzhiyun 	if (val) {
6095*4882a593Smuzhiyun 		down_write(&EXT4_I(inode)->i_mmap_sem);
6096*4882a593Smuzhiyun 		err = filemap_write_and_wait(inode->i_mapping);
6097*4882a593Smuzhiyun 		if (err < 0) {
6098*4882a593Smuzhiyun 			up_write(&EXT4_I(inode)->i_mmap_sem);
6099*4882a593Smuzhiyun 			return err;
6100*4882a593Smuzhiyun 		}
6101*4882a593Smuzhiyun 	}
6102*4882a593Smuzhiyun 
6103*4882a593Smuzhiyun 	percpu_down_write(&sbi->s_writepages_rwsem);
6104*4882a593Smuzhiyun 	jbd2_journal_lock_updates(journal);
6105*4882a593Smuzhiyun 
6106*4882a593Smuzhiyun 	/*
6107*4882a593Smuzhiyun 	 * OK, there are no updates running now, and all cached data is
6108*4882a593Smuzhiyun 	 * synced to disk.  We are now in a completely consistent state
6109*4882a593Smuzhiyun 	 * which doesn't have anything in the journal, and we know that
6110*4882a593Smuzhiyun 	 * no filesystem updates are running, so it is safe to modify
6111*4882a593Smuzhiyun 	 * the inode's in-core data-journaling state flag now.
6112*4882a593Smuzhiyun 	 */
6113*4882a593Smuzhiyun 
6114*4882a593Smuzhiyun 	if (val)
6115*4882a593Smuzhiyun 		ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6116*4882a593Smuzhiyun 	else {
6117*4882a593Smuzhiyun 		err = jbd2_journal_flush(journal);
6118*4882a593Smuzhiyun 		if (err < 0) {
6119*4882a593Smuzhiyun 			jbd2_journal_unlock_updates(journal);
6120*4882a593Smuzhiyun 			percpu_up_write(&sbi->s_writepages_rwsem);
6121*4882a593Smuzhiyun 			return err;
6122*4882a593Smuzhiyun 		}
6123*4882a593Smuzhiyun 		ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
6124*4882a593Smuzhiyun 	}
6125*4882a593Smuzhiyun 	ext4_set_aops(inode);
6126*4882a593Smuzhiyun 
6127*4882a593Smuzhiyun 	jbd2_journal_unlock_updates(journal);
6128*4882a593Smuzhiyun 	percpu_up_write(&sbi->s_writepages_rwsem);
6129*4882a593Smuzhiyun 
6130*4882a593Smuzhiyun 	if (val)
6131*4882a593Smuzhiyun 		up_write(&EXT4_I(inode)->i_mmap_sem);
6132*4882a593Smuzhiyun 
6133*4882a593Smuzhiyun 	/* Finally we can mark the inode as dirty. */
6134*4882a593Smuzhiyun 
6135*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
6136*4882a593Smuzhiyun 	if (IS_ERR(handle))
6137*4882a593Smuzhiyun 		return PTR_ERR(handle);
6138*4882a593Smuzhiyun 
6139*4882a593Smuzhiyun 	ext4_fc_mark_ineligible(inode->i_sb,
6140*4882a593Smuzhiyun 		EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
6141*4882a593Smuzhiyun 	err = ext4_mark_inode_dirty(handle, inode);
6142*4882a593Smuzhiyun 	ext4_handle_sync(handle);
6143*4882a593Smuzhiyun 	ext4_journal_stop(handle);
6144*4882a593Smuzhiyun 	ext4_std_error(inode->i_sb, err);
6145*4882a593Smuzhiyun 
6146*4882a593Smuzhiyun 	return err;
6147*4882a593Smuzhiyun }
6148*4882a593Smuzhiyun 
6149*4882a593Smuzhiyun static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
6150*4882a593Smuzhiyun {
6151*4882a593Smuzhiyun 	return !buffer_mapped(bh);
6152*4882a593Smuzhiyun }
6153*4882a593Smuzhiyun 
6154*4882a593Smuzhiyun vm_fault_t ext4_page_mkwrite(struct vm_fault *vmf)
6155*4882a593Smuzhiyun {
6156*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
6157*4882a593Smuzhiyun 	struct page *page = vmf->page;
6158*4882a593Smuzhiyun 	loff_t size;
6159*4882a593Smuzhiyun 	unsigned long len;
6160*4882a593Smuzhiyun 	int err;
6161*4882a593Smuzhiyun 	vm_fault_t ret;
6162*4882a593Smuzhiyun 	struct file *file = vma->vm_file;
6163*4882a593Smuzhiyun 	struct inode *inode = file_inode(file);
6164*4882a593Smuzhiyun 	struct address_space *mapping = inode->i_mapping;
6165*4882a593Smuzhiyun 	handle_t *handle;
6166*4882a593Smuzhiyun 	get_block_t *get_block;
6167*4882a593Smuzhiyun 	int retries = 0;
6168*4882a593Smuzhiyun 
6169*4882a593Smuzhiyun 	if (unlikely(IS_IMMUTABLE(inode)))
6170*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
6171*4882a593Smuzhiyun 
6172*4882a593Smuzhiyun 	sb_start_pagefault(inode->i_sb);
6173*4882a593Smuzhiyun 	file_update_time(vma->vm_file);
6174*4882a593Smuzhiyun 
6175*4882a593Smuzhiyun 	down_read(&EXT4_I(inode)->i_mmap_sem);
6176*4882a593Smuzhiyun 
6177*4882a593Smuzhiyun 	err = ext4_convert_inline_data(inode);
6178*4882a593Smuzhiyun 	if (err)
6179*4882a593Smuzhiyun 		goto out_ret;
6180*4882a593Smuzhiyun 
6181*4882a593Smuzhiyun 	/*
6182*4882a593Smuzhiyun 	 * On data journalling we skip straight to the transaction handle:
6183*4882a593Smuzhiyun 	 * there's no delalloc; page truncated will be checked later; the
6184*4882a593Smuzhiyun  * there's no delalloc; page truncation will be checked later; the
6185*4882a593Smuzhiyun 	 * be used; and there's no dioread_nolock, so only ext4_get_block.
6186*4882a593Smuzhiyun 	 */
6187*4882a593Smuzhiyun 	if (ext4_should_journal_data(inode))
6188*4882a593Smuzhiyun 		goto retry_alloc;
6189*4882a593Smuzhiyun 
6190*4882a593Smuzhiyun 	/* Delalloc case is easy... */
6191*4882a593Smuzhiyun 	if (test_opt(inode->i_sb, DELALLOC) &&
6192*4882a593Smuzhiyun 	    !ext4_nonda_switch(inode->i_sb)) {
6193*4882a593Smuzhiyun 		do {
6194*4882a593Smuzhiyun 			err = block_page_mkwrite(vma, vmf,
6195*4882a593Smuzhiyun 						   ext4_da_get_block_prep);
6196*4882a593Smuzhiyun 		} while (err == -ENOSPC &&
6197*4882a593Smuzhiyun 		       ext4_should_retry_alloc(inode->i_sb, &retries));
6198*4882a593Smuzhiyun 		goto out_ret;
6199*4882a593Smuzhiyun 	}
6200*4882a593Smuzhiyun 
6201*4882a593Smuzhiyun 	lock_page(page);
6202*4882a593Smuzhiyun 	size = i_size_read(inode);
6203*4882a593Smuzhiyun 	/* Page got truncated from under us? */
6204*4882a593Smuzhiyun 	if (page->mapping != mapping || page_offset(page) > size) {
6205*4882a593Smuzhiyun 		unlock_page(page);
6206*4882a593Smuzhiyun 		ret = VM_FAULT_NOPAGE;
6207*4882a593Smuzhiyun 		goto out;
6208*4882a593Smuzhiyun 	}
6209*4882a593Smuzhiyun 
6210*4882a593Smuzhiyun 	if (page->index == size >> PAGE_SHIFT)
6211*4882a593Smuzhiyun 		len = size & ~PAGE_MASK;
6212*4882a593Smuzhiyun 	else
6213*4882a593Smuzhiyun 		len = PAGE_SIZE;
6214*4882a593Smuzhiyun 	/*
6215*4882a593Smuzhiyun 	 * Return if we have all the buffers mapped. This avoids the need to do
6216*4882a593Smuzhiyun 	 * journal_start/journal_stop which can block and take a long time
6217*4882a593Smuzhiyun 	 *
6218*4882a593Smuzhiyun 	 * This cannot be done for data journalling, as we have to add the
6219*4882a593Smuzhiyun 	 * inode to the transaction's list to writeprotect pages on commit.
6220*4882a593Smuzhiyun 	 */
6221*4882a593Smuzhiyun 	if (page_has_buffers(page)) {
6222*4882a593Smuzhiyun 		if (!ext4_walk_page_buffers(NULL, page_buffers(page),
6223*4882a593Smuzhiyun 					    0, len, NULL,
6224*4882a593Smuzhiyun 					    ext4_bh_unmapped)) {
6225*4882a593Smuzhiyun 			/* Wait so that we don't change page under IO */
6226*4882a593Smuzhiyun 			wait_for_stable_page(page);
6227*4882a593Smuzhiyun 			ret = VM_FAULT_LOCKED;
6228*4882a593Smuzhiyun 			goto out;
6229*4882a593Smuzhiyun 		}
6230*4882a593Smuzhiyun 	}
6231*4882a593Smuzhiyun 	unlock_page(page);
6232*4882a593Smuzhiyun 	/* OK, we need to fill the hole... */
6233*4882a593Smuzhiyun 	if (ext4_should_dioread_nolock(inode))
6234*4882a593Smuzhiyun 		get_block = ext4_get_block_unwritten;
6235*4882a593Smuzhiyun 	else
6236*4882a593Smuzhiyun 		get_block = ext4_get_block;
6237*4882a593Smuzhiyun retry_alloc:
6238*4882a593Smuzhiyun 	handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
6239*4882a593Smuzhiyun 				    ext4_writepage_trans_blocks(inode));
6240*4882a593Smuzhiyun 	if (IS_ERR(handle)) {
6241*4882a593Smuzhiyun 		ret = VM_FAULT_SIGBUS;
6242*4882a593Smuzhiyun 		goto out;
6243*4882a593Smuzhiyun 	}
6244*4882a593Smuzhiyun 	/*
6245*4882a593Smuzhiyun 	 * Data journalling can't use block_page_mkwrite() because it
6246*4882a593Smuzhiyun 	 * will set_buffer_dirty() before do_journal_get_write_access()
6247*4882a593Smuzhiyun 	 * thus might hit warning messages for dirty metadata buffers.
6248*4882a593Smuzhiyun 	 */
6249*4882a593Smuzhiyun 	if (!ext4_should_journal_data(inode)) {
6250*4882a593Smuzhiyun 		err = block_page_mkwrite(vma, vmf, get_block);
6251*4882a593Smuzhiyun 	} else {
6252*4882a593Smuzhiyun 		lock_page(page);
6253*4882a593Smuzhiyun 		size = i_size_read(inode);
6254*4882a593Smuzhiyun 		/* Page got truncated from under us? */
6255*4882a593Smuzhiyun 		if (page->mapping != mapping || page_offset(page) > size) {
6256*4882a593Smuzhiyun 			ret = VM_FAULT_NOPAGE;
6257*4882a593Smuzhiyun 			goto out_error;
6258*4882a593Smuzhiyun 		}
6259*4882a593Smuzhiyun 
6260*4882a593Smuzhiyun 		if (page->index == size >> PAGE_SHIFT)
6261*4882a593Smuzhiyun 			len = size & ~PAGE_MASK;
6262*4882a593Smuzhiyun 		else
6263*4882a593Smuzhiyun 			len = PAGE_SIZE;
6264*4882a593Smuzhiyun 
6265*4882a593Smuzhiyun 		err = __block_write_begin(page, 0, len, ext4_get_block);
6266*4882a593Smuzhiyun 		if (!err) {
6267*4882a593Smuzhiyun 			ret = VM_FAULT_SIGBUS;
6268*4882a593Smuzhiyun 			if (ext4_walk_page_buffers(handle, page_buffers(page),
6269*4882a593Smuzhiyun 					0, len, NULL, do_journal_get_write_access))
6270*4882a593Smuzhiyun 				goto out_error;
6271*4882a593Smuzhiyun 			if (ext4_walk_page_buffers(handle, page_buffers(page),
6272*4882a593Smuzhiyun 					0, len, NULL, write_end_fn))
6273*4882a593Smuzhiyun 				goto out_error;
6274*4882a593Smuzhiyun 			if (ext4_jbd2_inode_add_write(handle, inode,
6275*4882a593Smuzhiyun 						      page_offset(page), len))
6276*4882a593Smuzhiyun 				goto out_error;
6277*4882a593Smuzhiyun 			ext4_set_inode_state(inode, EXT4_STATE_JDATA);
6278*4882a593Smuzhiyun 		} else {
6279*4882a593Smuzhiyun 			unlock_page(page);
6280*4882a593Smuzhiyun 		}
6281*4882a593Smuzhiyun 	}
6282*4882a593Smuzhiyun 	ext4_journal_stop(handle);
6283*4882a593Smuzhiyun 	if (err == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
6284*4882a593Smuzhiyun 		goto retry_alloc;
6285*4882a593Smuzhiyun out_ret:
6286*4882a593Smuzhiyun 	ret = block_page_mkwrite_return(err);
6287*4882a593Smuzhiyun out:
6288*4882a593Smuzhiyun 	up_read(&EXT4_I(inode)->i_mmap_sem);
6289*4882a593Smuzhiyun 	sb_end_pagefault(inode->i_sb);
6290*4882a593Smuzhiyun 	return ret;
6291*4882a593Smuzhiyun out_error:
6292*4882a593Smuzhiyun 	unlock_page(page);
6293*4882a593Smuzhiyun 	ext4_journal_stop(handle);
6294*4882a593Smuzhiyun 	goto out;
6295*4882a593Smuzhiyun }
6296*4882a593Smuzhiyun 
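/*
 * A sketch of the EOF-page length computation used twice in
 * ext4_page_mkwrite() above.  Hypothetical demo_* helper; assumes 4K pages
 * (PAGE_SHIFT == 12).
 */
#include <stdio.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_SHIFT	12

static unsigned long demo_mkwrite_len(unsigned long page_index,
				      unsigned long long i_size)
{
	/* Only the bytes up to i_size are prepared on the EOF page. */
	if (page_index == i_size >> DEMO_PAGE_SHIFT)
		return i_size & (DEMO_PAGE_SIZE - 1);
	return DEMO_PAGE_SIZE;
}

int main(void)
{
	/* i_size 6000: page 0 is fully valid, page 1 holds the EOF. */
	printf("%lu\n", demo_mkwrite_len(0, 6000));	/* 4096 */
	printf("%lu\n", demo_mkwrite_len(1, 6000));	/* 1904 */
	return 0;
}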
6297*4882a593Smuzhiyun vm_fault_t ext4_filemap_fault(struct vm_fault *vmf)
6298*4882a593Smuzhiyun {
6299*4882a593Smuzhiyun 	struct inode *inode = file_inode(vmf->vma->vm_file);
6300*4882a593Smuzhiyun 	vm_fault_t ret;
6301*4882a593Smuzhiyun 
6302*4882a593Smuzhiyun 	down_read(&EXT4_I(inode)->i_mmap_sem);
6303*4882a593Smuzhiyun 	ret = filemap_fault(vmf);
6304*4882a593Smuzhiyun 	up_read(&EXT4_I(inode)->i_mmap_sem);
6305*4882a593Smuzhiyun 
6306*4882a593Smuzhiyun 	return ret;
6307*4882a593Smuzhiyun }