// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/falloc.h>
#include <linux/backing-dev.h>
#include <linux/mman.h>
#include <linux/fadvise.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Decide if the given file range is aligned to the size of the fundamental
 * allocation unit for the file.
 */
static bool
xfs_is_falloc_aligned(
	struct xfs_inode	*ip,
	loff_t			pos,
	long long int		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		mask;

	if (XFS_IS_REALTIME_INODE(ip)) {
		if (!is_power_of_2(mp->m_sb.sb_rextsize)) {
			u64	rextbytes;
			u32	mod;

			rextbytes = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize);
			div_u64_rem(pos, rextbytes, &mod);
			if (mod)
				return false;
			div_u64_rem(len, rextbytes, &mod);
			return mod == 0;
		}
		mask = XFS_FSB_TO_B(mp, mp->m_sb.sb_rextsize) - 1;
	} else {
		mask = mp->m_sb.sb_blocksize - 1;
	}

	return !((pos | len) & mask);
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);

	trace_xfs_dir_fsync(ip);
	return xfs_log_force_inode(ip);
}

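/*
 * Work out the log commit sequence an fsync of this inode needs to force
 * the log to.  Returns 0 when no log force is required: the inode is not
 * pinned, or this is an fdatasync and only the timestamps are dirty.
 */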
static xfs_csn_t
xfs_fsync_seq(
	struct xfs_inode	*ip,
	bool			datasync)
{
	if (!xfs_ipincount(ip))
		return 0;
	if (datasync && !(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		return 0;
	return ip->i_itemp->ili_commit_seq;
}

/*
 * All metadata updates are logged, which means that we just have to flush the
 * log up to the latest LSN that touched the inode.
 *
 * If we have concurrent fsync/fdatasync() calls, we need them to all block on
 * the log force before we clear the ili_fsync_fields field. This ensures that
 * we don't get a racing sync operation that does not wait for the metadata to
 * hit the journal before returning.  If we race with clearing ili_fsync_fields,
 * then all that will happen is the log force will do nothing as the lsn will
 * already be on disk.  We can't race with setting ili_fsync_fields because that
 * is done under XFS_ILOCK_EXCL, and that can't happen because we hold the lock
 * shared until after the ili_fsync_fields is cleared.
 */
static int
xfs_fsync_flush_log(
	struct xfs_inode	*ip,
	bool			datasync,
	int			*log_flushed)
{
	int			error = 0;
	xfs_csn_t		seq;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	seq = xfs_fsync_seq(ip, datasync);
	if (seq) {
		error = xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC,
					  log_flushed);

		spin_lock(&ip->i_itemp->ili_lock);
		ip->i_itemp->ili_fsync_fields = 0;
		spin_unlock(&ip->i_itemp->ili_lock);
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	return error;
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = file_write_and_wait_range(file, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	error = xfs_fsync_flush_log(ip, datasync, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

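/*
 * Direct I/O reads run under the shared iolock so they can proceed in
 * parallel with other reads and direct writes.  IOCB_NOWAIT callers get
 * -EAGAIN instead of blocking on the lock.
 */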
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = iomap_dio_rw(iocb, to, &xfs_read_iomap_ops, NULL,
			is_sync_kiocb(iocb));
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

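/*
 * DAX reads copy data straight from the backing store, bypassing the page
 * cache, but take the same shared iolock as direct I/O reads.
 */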
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	ret = dax_iomap_rw(iocb, to, &xfs_read_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

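/*
 * Buffered reads go through the generic page cache path; we only wrap
 * generic_file_read_iter() with the shared iolock.
 */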
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

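/* Top level ->read_iter: dispatch to the DAX, direct or buffered read path. */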
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;
	loff_t			isize;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, BREAK_WRITE);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	isize = i_size_read(inode);
	if (iocb->ki_pos > isize) {
		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}

		trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
		error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
				NULL, &xfs_buffered_write_iomap_ops);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	return file_modified(file);
}

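/*
 * Direct I/O write completion.  Finish any COW remapping, convert unwritten
 * extents, and update the in-core and on-disk inode sizes if the write
 * extended the file.
 */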
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	int			error,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	unsigned int		nofs_flag;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (error)
		return error;
	if (!size)
		return 0;

	/*
	 * Capture amount written on completion as we can't reliably account
	 * for it on submission.
	 */
	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, size);

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			goto out;
	}

	/*
	 * Unwritten conversion updates the in-core isize after extent
	 * conversion but before updating the on-disk size. Updating isize any
	 * earlier allows a racing dio read to find unwritten extents before
	 * they are converted.
	 */
	if (flags & IOMAP_DIO_UNWRITTEN) {
		error = xfs_iomap_write_unwritten(ip, offset, size, true);
		goto out;
	}

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		spin_unlock(&ip->i_flags_lock);
		error = xfs_setfilesize(ip, offset, size);
	} else {
		spin_unlock(&ip->i_flags_lock);
	}

out:
	memalloc_nofs_restore(nofs_flag);
	return error;
}

static const struct iomap_dio_ops xfs_dio_write_ops = {
	.end_io		= xfs_dio_write_end_io,
};

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;

		/*
		 * We can't properly handle unaligned direct I/O to reflink
		 * files yet, as we can't unshare a partial block.
		 */
		if (xfs_is_cow_inode(ip)) {
			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
			return -ENOTBLK;
		}
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		/* unaligned dio always waits, bail */
		if (unaligned_io)
			return -EAGAIN;
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, we can't allow any other overlapping IO
	 * in-flight at the same time or we risk data corruption. Wait for all
	 * other IO to drain before we submit. If the IO is aligned, demote the
	 * iolock if we had to take the exclusive lock in
	 * xfs_file_aio_write_checks() for other reasons.
	 */
	if (unaligned_io) {
		inode_dio_wait(inode);
	} else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
	/*
	 * If unaligned, this is the only IO in-flight. Wait on it before we
	 * release the iolock to prevent subsequent overlapping IO.
	 */
	ret = iomap_dio_rw(iocb, from, &xfs_direct_write_iomap_ops,
			   &xfs_dio_write_ops,
			   is_sync_kiocb(iocb) || unaligned_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO after short writes for XFS, direct I/O
	 * will either complete fully or return an error.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

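/*
 * DAX writes are always serialised by the exclusive iolock.  If the write
 * extended the file, update the in-core and on-disk inode sizes once the
 * data has reached the backing store.
 */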
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!xfs_ilock_nowait(ip, iolock))
			return -EAGAIN;
	} else {
		xfs_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_direct_write_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	if (error)
		return error;

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

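/*
 * Buffered writes take the exclusive iolock.  On -EDQUOT or -ENOSPC we try
 * to reclaim speculative preallocations and COW blocks (flushing dirty
 * inodes first in the ENOSPC case) and retry the write once.
 */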
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

	if (iocb->ki_flags & IOCB_NOWAIT)
		return -EOPNOTSUPP;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from,
			&xfs_buffered_write_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		xfs_icache_free_cowblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

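/*
 * Top level ->write_iter: dispatch to the DAX, direct or buffered write
 * path.  A direct write falls back to buffered only for reflink CoW,
 * signalled by -ENOTBLK.
 */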
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		return xfs_file_dax_write(iocb, from);

	if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret != -ENOTBLK)
			return ret;
	}

	return xfs_file_buffered_aio_write(iocb, from);
}

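/*
 * Drop the mmap lock while sleeping on a busy DAX page so the holder of
 * the page reference can make progress, then retake the lock.
 */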
static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}

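/*
 * Wait for any busy DAX page in this mapping (one with an elevated
 * refcount, e.g. pinned for DMA) to become idle, and flag the caller to
 * retry the scan afterwards.
 */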
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}

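/*
 * Break any leased layouts, and for BREAK_UNMAP also any busy DAX pages,
 * that conflict with the caller's operation, looping until nothing is
 * left to break or an error occurs.
 */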
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

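/*
 * ->fallocate handler.  Takes the iolock and mmap lock exclusively, breaks
 * layouts and drains AIO/DIO, then dispatches on the mode flags to punch,
 * zero, collapse, insert, unshare or preallocate the requested range.
 */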
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/*
	 * Must wait for all AIO to complete before we continue as AIO can
	 * change the file size on completion without holding any locks we
	 * currently hold. We must do this first because AIO can update both
	 * the on disk and in memory inode sizes, and the operations that follow
	 * require the in-memory size to be fully up-to-date.
	 */
	inode_dio_wait(inode);

	/*
	 * Now AIO and DIO have drained we flush and (if necessary) invalidate
	 * the cached range over the first operation we are about to run.
	 *
	 * We care about zero and collapse here because they both run a hole
	 * punch over the range first. Because that can zero data, and the range
	 * of invalidation for the shift operations is much larger, we still do
	 * the required flush for collapse in xfs_prepare_shift().
	 *
	 * Insert has the same range requirements as collapse, and we extend the
	 * file first which can zero data. Hence insert has the same
	 * flush/invalidate requirements as collapse and so they are both
	 * handled at the right time by xfs_prepare_shift().
	 */
	if (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE |
		    FALLOC_FL_COLLAPSE_RANGE)) {
		error = xfs_flush_unmap_range(ip, offset, len);
		if (error)
			goto out_unlock;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		loff_t		isize = i_size_read(inode);

		if (!xfs_is_falloc_aligned(ip, offset, len)) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * New inode size must not exceed ->s_maxbytes, accounting for
		 * possible signed overflow.
		 */
		if (inode->i_sb->s_maxbytes - isize < len) {
			error = -EFBIG;
			goto out_unlock;
		}
		new_size = isize + len;

		/* Offset should be less than i_size */
		if (offset >= isize) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE) {
			/*
			 * Punch a hole and prealloc the range.  We use a hole
			 * punch rather than unwritten extent conversion for two
			 * reasons:
			 *
			 *   1.) Hole punch handles partial block zeroing for us.
			 *   2.) If prealloc returns ENOSPC, the file range is
			 *       still zero-valued by virtue of the hole punch.
			 */
			unsigned int blksize = i_blocksize(inode);

			trace_xfs_zero_file_space(ip);

			error = xfs_free_file_space(ip, offset, len);
			if (error)
				goto out_unlock;

			len = round_up(offset + len, blksize) -
			      round_down(offset, blksize);
			offset = round_down(offset, blksize);
		} else if (mode & FALLOC_FL_UNSHARE_RANGE) {
			error = xfs_reflink_unshare(ip, offset, len);
			if (error)
				goto out_unlock;
		} else {
			/*
			 * If always_cow mode we can't use preallocations and
			 * thus should not create them.
			 */
			if (xfs_is_always_cow_inode(ip)) {
				error = -EOPNOTSUPP;
				goto out_unlock;
			}
		}

		if (!xfs_is_always_cow_inode(ip)) {
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
			if (error)
				goto out_unlock;
		}
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

STATIC int
xfs_file_fadvise(
	struct file	*file,
	loff_t		start,
	loff_t		end,
	int		advice)
{
	struct xfs_inode *ip = XFS_I(file_inode(file));
	int ret;
	int lockflags = 0;

	/*
	 * Operations creating pages in page cache need protection from hole
	 * punching and similar ops.
	 */
	if (advice == POSIX_FADV_WILLNEED) {
		lockflags = XFS_IOLOCK_SHARED;
		xfs_ilock(ip, lockflags);
	}
	ret = generic_fadvise(file, start, end, advice);
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return ret;
}

/* Does this file, inode, or mount want synchronous writes? */
static inline bool xfs_file_sync_writes(struct file *filp)
{
	struct xfs_inode	*ip = XFS_I(file_inode(filp));

	if (ip->i_mount->m_flags & XFS_MOUNT_WSYNC)
		return true;
	if (filp->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(filp)))
		return true;

	return false;
}

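/*
 * ->remap_file_range handler for reflink clone and dedupe.  On success,
 * propagate the cowextsize hint from source to destination where
 * appropriate and force the log if either file wants synchronous writes.
 */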
STATIC loff_t
xfs_file_remap_range(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	struct xfs_mount	*mp = src->i_mount;
	loff_t			remapped = 0;
	xfs_extlen_t		cowextsize;
	int			ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return -EOPNOTSUPP;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/* Prepare and then clone file data. */
	ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out,
			&len, remap_flags);
	if (ret || len == 0)
		return ret;

	trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out);

	ret = xfs_reflink_remap_blocks(src, pos_in, dest, pos_out, len,
			&remapped);
	if (ret)
		goto out_unlock;

	/*
	 * Carry the cowextsize hint from src to dest if we're sharing the
	 * entire source file to the entire destination file, the source file
	 * has a cowextsize hint, and the destination file does not.
	 */
	cowextsize = 0;
	if (pos_in == 0 && len == i_size_read(inode_in) &&
	    (src->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) &&
	    pos_out == 0 && len >= i_size_read(inode_out) &&
	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
		cowextsize = src->i_d.di_cowextsize;

	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
			remap_flags);
	if (ret)
		goto out_unlock;

	if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
		xfs_log_force_inode(dest);
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	if (ret)
		trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
	return remapped > 0 ? remapped : ret;
}

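/*
 * Opening a file: reject non-LFS opens of large files and advertise
 * support for non-blocking I/O (FMODE_NOWAIT) and async buffered reads
 * (FMODE_BUF_RASYNC).
 */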
1141*4882a593Smuzhiyun STATIC int
1142*4882a593Smuzhiyun xfs_file_open(
1143*4882a593Smuzhiyun 	struct inode	*inode,
1144*4882a593Smuzhiyun 	struct file	*file)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
1147*4882a593Smuzhiyun 		return -EFBIG;
1148*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
1149*4882a593Smuzhiyun 		return -EIO;
1150*4882a593Smuzhiyun 	file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;
1151*4882a593Smuzhiyun 	return 0;
1152*4882a593Smuzhiyun }
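
/*
 * Illustrative userspace sketch (not part of this file): the
 * FMODE_NOWAIT flag set in xfs_file_open() above is what allows a
 * caller to use RWF_NOWAIT, which fails with EAGAIN instead of
 * blocking.  The file name is a placeholder; assumes glibc 2.26+ for
 * the preadv2() wrapper.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("file.dat", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Offset -1 means "use the file position"; never blocks. */
	if (preadv2(fd, &iov, 1, -1, RWF_NOWAIT) < 0 && errno == EAGAIN)
		fprintf(stderr, "data not in cache, would block\n");
	return 0;
}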
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun STATIC int
1155*4882a593Smuzhiyun xfs_dir_open(
1156*4882a593Smuzhiyun 	struct inode	*inode,
1157*4882a593Smuzhiyun 	struct file	*file)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun 	struct xfs_inode *ip = XFS_I(inode);
1160*4882a593Smuzhiyun 	int		mode;
1161*4882a593Smuzhiyun 	int		error;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	error = xfs_file_open(inode, file);
1164*4882a593Smuzhiyun 	if (error)
1165*4882a593Smuzhiyun 		return error;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	/*
1168*4882a593Smuzhiyun 	 * If there are any blocks, read-ahead block 0 as we're almost
1169*4882a593Smuzhiyun 	 * certain to have the next operation be a read there.
1170*4882a593Smuzhiyun 	 */
1171*4882a593Smuzhiyun 	mode = xfs_ilock_data_map_shared(ip);
1172*4882a593Smuzhiyun 	if (ip->i_df.if_nextents > 0)
1173*4882a593Smuzhiyun 		error = xfs_dir3_data_readahead(ip, 0, 0);
1174*4882a593Smuzhiyun 	xfs_iunlock(ip, mode);
1175*4882a593Smuzhiyun 	return error;
1176*4882a593Smuzhiyun }
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun STATIC int
1179*4882a593Smuzhiyun xfs_file_release(
1180*4882a593Smuzhiyun 	struct inode	*inode,
1181*4882a593Smuzhiyun 	struct file	*filp)
1182*4882a593Smuzhiyun {
1183*4882a593Smuzhiyun 	return xfs_release(XFS_I(inode));
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun STATIC int
1187*4882a593Smuzhiyun xfs_file_readdir(
1188*4882a593Smuzhiyun 	struct file	*file,
1189*4882a593Smuzhiyun 	struct dir_context *ctx)
1190*4882a593Smuzhiyun {
1191*4882a593Smuzhiyun 	struct inode	*inode = file_inode(file);
1192*4882a593Smuzhiyun 	xfs_inode_t	*ip = XFS_I(inode);
1193*4882a593Smuzhiyun 	size_t		bufsize;
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	/*
1196*4882a593Smuzhiyun 	 * The Linux API doesn't pass the total size of the buffer
1197*4882a593Smuzhiyun 	 * we read into down to the filesystem.  With the filldir concept
1198*4882a593Smuzhiyun 	 * it's not needed for correctness, but the XFS dir2 leaf
1199*4882a593Smuzhiyun 	 * code wants an estimate of the buffer size to calculate its
1200*4882a593Smuzhiyun 	 * readahead window and size the buffers used for mapping to
1201*4882a593Smuzhiyun 	 * physical blocks.
1202*4882a593Smuzhiyun 	 *
1203*4882a593Smuzhiyun 	 * Try to give it an estimate that's good enough; maybe at some
1204*4882a593Smuzhiyun 	 * point we can change the ->readdir prototype to include the
1205*4882a593Smuzhiyun 	 * buffer size.  For now we use the current glibc buffer size.
1206*4882a593Smuzhiyun 	 */
1207*4882a593Smuzhiyun 	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE, ip->i_d.di_size);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	return xfs_readdir(NULL, ip, ctx, bufsize);
1210*4882a593Smuzhiyun }
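
/*
 * Illustrative userspace sketch (not part of this file): raw
 * getdents64() with a 32k buffer, in the ballpark of the glibc buffer
 * size the comment above refers to; XFS sizes its readahead window
 * from that estimate.
 */
#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	char buf[32 * 1024];	/* roughly glibc's readdir buffer */
	int fd = open(".", O_RDONLY | O_DIRECTORY);
	long n;

	if (fd < 0)
		return 1;
	while ((n = syscall(SYS_getdents64, fd, buf, sizeof(buf))) > 0) {
		for (long off = 0; off < n; ) {
			struct dirent64 *d = (void *)(buf + off);

			printf("%s\n", d->d_name);
			off += d->d_reclen;
		}
	}
	return 0;
}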
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun STATIC loff_t
1213*4882a593Smuzhiyun xfs_file_llseek(
1214*4882a593Smuzhiyun 	struct file	*file,
1215*4882a593Smuzhiyun 	loff_t		offset,
1216*4882a593Smuzhiyun 	int		whence)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	struct inode		*inode = file->f_mapping->host;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(XFS_I(inode)->i_mount))
1221*4882a593Smuzhiyun 		return -EIO;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	switch (whence) {
1224*4882a593Smuzhiyun 	default:
1225*4882a593Smuzhiyun 		return generic_file_llseek(file, offset, whence);
1226*4882a593Smuzhiyun 	case SEEK_HOLE:
1227*4882a593Smuzhiyun 		offset = iomap_seek_hole(inode, offset, &xfs_seek_iomap_ops);
1228*4882a593Smuzhiyun 		break;
1229*4882a593Smuzhiyun 	case SEEK_DATA:
1230*4882a593Smuzhiyun 		offset = iomap_seek_data(inode, offset, &xfs_seek_iomap_ops);
1231*4882a593Smuzhiyun 		break;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	if (offset < 0)
1235*4882a593Smuzhiyun 		return offset;
1236*4882a593Smuzhiyun 	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1237*4882a593Smuzhiyun }
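
/*
 * Illustrative userspace sketch (not part of this file): walking the
 * data extents of a sparse file with the SEEK_DATA/SEEK_HOLE support
 * that xfs_file_llseek() provides via iomap.  The file name is a
 * placeholder.
 */
#define _GNU_SOURCE		/* SEEK_DATA, SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("sparse.dat", O_RDONLY);
	off_t data, hole, pos = 0;

	if (fd < 0)
		return 1;
	/* lseek(SEEK_DATA) fails with ENXIO past the last data byte. */
	while ((data = lseek(fd, pos, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data %lld..%lld\n", (long long)data, (long long)hole);
		pos = hole;
	}
	return 0;
}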
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun /*
1240*4882a593Smuzhiyun  * Locking for serialisation of IO during page faults. This results in a lock
1241*4882a593Smuzhiyun  * ordering of:
1242*4882a593Smuzhiyun  *
1243*4882a593Smuzhiyun  * mmap_lock (MM)
1244*4882a593Smuzhiyun  *   sb_start_pagefault(vfs, freeze)
1245*4882a593Smuzhiyun  *     i_mmaplock (XFS - truncate serialisation)
1246*4882a593Smuzhiyun  *       page_lock (MM)
1247*4882a593Smuzhiyun  *         i_lock (XFS - extent map serialisation)
1248*4882a593Smuzhiyun  */
1249*4882a593Smuzhiyun static vm_fault_t
1250*4882a593Smuzhiyun __xfs_filemap_fault(
1251*4882a593Smuzhiyun 	struct vm_fault		*vmf,
1252*4882a593Smuzhiyun 	enum page_entry_size	pe_size,
1253*4882a593Smuzhiyun 	bool			write_fault)
1254*4882a593Smuzhiyun {
1255*4882a593Smuzhiyun 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1256*4882a593Smuzhiyun 	struct xfs_inode	*ip = XFS_I(inode);
1257*4882a593Smuzhiyun 	vm_fault_t		ret;
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	trace_xfs_filemap_fault(ip, pe_size, write_fault);
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	if (write_fault) {
1262*4882a593Smuzhiyun 		sb_start_pagefault(inode->i_sb);
1263*4882a593Smuzhiyun 		file_update_time(vmf->vma->vm_file);
1264*4882a593Smuzhiyun 	}
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1267*4882a593Smuzhiyun 	if (IS_DAX(inode)) {
1268*4882a593Smuzhiyun 		pfn_t pfn;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
1271*4882a593Smuzhiyun 				(write_fault && !vmf->cow_page) ?
1272*4882a593Smuzhiyun 				 &xfs_direct_write_iomap_ops :
1273*4882a593Smuzhiyun 				 &xfs_read_iomap_ops);
1274*4882a593Smuzhiyun 		if (ret & VM_FAULT_NEEDDSYNC)
1275*4882a593Smuzhiyun 			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
1276*4882a593Smuzhiyun 	} else {
1277*4882a593Smuzhiyun 		if (write_fault)
1278*4882a593Smuzhiyun 			ret = iomap_page_mkwrite(vmf,
1279*4882a593Smuzhiyun 					&xfs_buffered_write_iomap_ops);
1280*4882a593Smuzhiyun 		else
1281*4882a593Smuzhiyun 			ret = filemap_fault(vmf);
1282*4882a593Smuzhiyun 	}
1283*4882a593Smuzhiyun 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	if (write_fault)
1286*4882a593Smuzhiyun 		sb_end_pagefault(inode->i_sb);
1287*4882a593Smuzhiyun 	return ret;
1288*4882a593Smuzhiyun }
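
/*
 * Illustrative userspace sketch (not part of this file): the first
 * store through a writable MAP_SHARED mapping takes the write-fault
 * path above (->page_mkwrite into __xfs_filemap_fault()), which
 * updates the timestamp and runs with MMAPLOCK held against truncate.
 * The file name is a placeholder.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("file.dat", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memcpy(p, "hello", 5);	/* triggers the write-fault path */
	munmap(p, 4096);
	close(fd);
	return 0;
}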
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun static inline bool
1291*4882a593Smuzhiyun xfs_is_write_fault(
1292*4882a593Smuzhiyun 	struct vm_fault		*vmf)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	return (vmf->flags & FAULT_FLAG_WRITE) &&
1295*4882a593Smuzhiyun 	       (vmf->vma->vm_flags & VM_SHARED);
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun static vm_fault_t
1299*4882a593Smuzhiyun xfs_filemap_fault(
1300*4882a593Smuzhiyun 	struct vm_fault		*vmf)
1301*4882a593Smuzhiyun {
1302*4882a593Smuzhiyun 	/* DAX can shortcut the normal fault path on write faults! */
1303*4882a593Smuzhiyun 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
1304*4882a593Smuzhiyun 			IS_DAX(file_inode(vmf->vma->vm_file)) &&
1305*4882a593Smuzhiyun 			xfs_is_write_fault(vmf));
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun static vm_fault_t
1309*4882a593Smuzhiyun xfs_filemap_huge_fault(
1310*4882a593Smuzhiyun 	struct vm_fault		*vmf,
1311*4882a593Smuzhiyun 	enum page_entry_size	pe_size)
1312*4882a593Smuzhiyun {
1313*4882a593Smuzhiyun 	if (!IS_DAX(file_inode(vmf->vma->vm_file)))
1314*4882a593Smuzhiyun 		return VM_FAULT_FALLBACK;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	/* DAX can shortcut the normal fault path on write faults! */
1317*4882a593Smuzhiyun 	return __xfs_filemap_fault(vmf, pe_size,
1318*4882a593Smuzhiyun 			xfs_is_write_fault(vmf));
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun static vm_fault_t
1322*4882a593Smuzhiyun xfs_filemap_page_mkwrite(
1323*4882a593Smuzhiyun 	struct vm_fault		*vmf)
1324*4882a593Smuzhiyun {
1325*4882a593Smuzhiyun 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1326*4882a593Smuzhiyun }
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun /*
1329*4882a593Smuzhiyun  * pfn_mkwrite was originally intended to ensure we capture time stamp updates
1330*4882a593Smuzhiyun  * on write faults. In reality, it needs to serialise against truncate and
1331*4882a593Smuzhiyun  * prepare memory for writing, so handle it as a standard write fault.
1332*4882a593Smuzhiyun  */
1333*4882a593Smuzhiyun static vm_fault_t
1334*4882a593Smuzhiyun xfs_filemap_pfn_mkwrite(
1335*4882a593Smuzhiyun 	struct vm_fault		*vmf)
1336*4882a593Smuzhiyun {
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
1339*4882a593Smuzhiyun }
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun static vm_fault_t
1342*4882a593Smuzhiyun xfs_filemap_map_pages(
1343*4882a593Smuzhiyun 	struct vm_fault		*vmf,
1344*4882a593Smuzhiyun 	pgoff_t			start_pgoff,
1345*4882a593Smuzhiyun 	pgoff_t			end_pgoff)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun 	struct inode		*inode = file_inode(vmf->vma->vm_file);
1348*4882a593Smuzhiyun 	vm_fault_t ret;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1351*4882a593Smuzhiyun 	ret = filemap_map_pages(vmf, start_pgoff, end_pgoff);
1352*4882a593Smuzhiyun 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1353*4882a593Smuzhiyun 	return ret;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun static const struct vm_operations_struct xfs_file_vm_ops = {
1357*4882a593Smuzhiyun 	.fault		= xfs_filemap_fault,
1358*4882a593Smuzhiyun 	.huge_fault	= xfs_filemap_huge_fault,
1359*4882a593Smuzhiyun 	.map_pages	= xfs_filemap_map_pages,
1360*4882a593Smuzhiyun 	.page_mkwrite	= xfs_filemap_page_mkwrite,
1361*4882a593Smuzhiyun 	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
1362*4882a593Smuzhiyun };
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun STATIC int
1365*4882a593Smuzhiyun xfs_file_mmap(
1366*4882a593Smuzhiyun 	struct file		*file,
1367*4882a593Smuzhiyun 	struct vm_area_struct	*vma)
1368*4882a593Smuzhiyun {
1369*4882a593Smuzhiyun 	struct inode		*inode = file_inode(file);
1370*4882a593Smuzhiyun 	struct xfs_buftarg	*target = xfs_inode_buftarg(XFS_I(inode));
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	/*
1373*4882a593Smuzhiyun 	 * We don't support synchronous mappings for non-DAX files and
1374*4882a593Smuzhiyun 	 * for DAX files if the underlying dax_device is not synchronous.
1375*4882a593Smuzhiyun 	 */
1376*4882a593Smuzhiyun 	if (!daxdev_mapping_supported(vma, target->bt_daxdev))
1377*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	file_accessed(file);
1380*4882a593Smuzhiyun 	vma->vm_ops = &xfs_file_vm_ops;
1381*4882a593Smuzhiyun 	if (IS_DAX(inode))
1382*4882a593Smuzhiyun 		vma->vm_flags |= VM_HUGEPAGE;
1383*4882a593Smuzhiyun 	return 0;
1384*4882a593Smuzhiyun }
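
/*
 * Illustrative userspace sketch (not part of this file): requesting a
 * synchronous DAX mapping, the case the daxdev_mapping_supported()
 * check above vets.  MAP_SYNC must be paired with MAP_SHARED_VALIDATE
 * so an unsupporting kernel rejects it rather than silently ignoring
 * the flag.  The path is a placeholder for a file on a DAX-capable
 * device; assumes a libc that exposes both flags.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/pmem/file.dat", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		 MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
	if (p == MAP_FAILED)
		return 1;	/* e.g. EOPNOTSUPP from xfs_file_mmap() */
	munmap(p, 4096);
	close(fd);
	return 0;
}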
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun const struct file_operations xfs_file_operations = {
1387*4882a593Smuzhiyun 	.llseek		= xfs_file_llseek,
1388*4882a593Smuzhiyun 	.read_iter	= xfs_file_read_iter,
1389*4882a593Smuzhiyun 	.write_iter	= xfs_file_write_iter,
1390*4882a593Smuzhiyun 	.splice_read	= generic_file_splice_read,
1391*4882a593Smuzhiyun 	.splice_write	= iter_file_splice_write,
1392*4882a593Smuzhiyun 	.iopoll		= iomap_dio_iopoll,
1393*4882a593Smuzhiyun 	.unlocked_ioctl	= xfs_file_ioctl,
1394*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1395*4882a593Smuzhiyun 	.compat_ioctl	= xfs_file_compat_ioctl,
1396*4882a593Smuzhiyun #endif
1397*4882a593Smuzhiyun 	.mmap		= xfs_file_mmap,
1398*4882a593Smuzhiyun 	.mmap_supported_flags = MAP_SYNC,
1399*4882a593Smuzhiyun 	.open		= xfs_file_open,
1400*4882a593Smuzhiyun 	.release	= xfs_file_release,
1401*4882a593Smuzhiyun 	.fsync		= xfs_file_fsync,
1402*4882a593Smuzhiyun 	.get_unmapped_area = thp_get_unmapped_area,
1403*4882a593Smuzhiyun 	.fallocate	= xfs_file_fallocate,
1404*4882a593Smuzhiyun 	.fadvise	= xfs_file_fadvise,
1405*4882a593Smuzhiyun 	.remap_file_range = xfs_file_remap_range,
1406*4882a593Smuzhiyun };
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun const struct file_operations xfs_dir_file_operations = {
1409*4882a593Smuzhiyun 	.open		= xfs_dir_open,
1410*4882a593Smuzhiyun 	.read		= generic_read_dir,
1411*4882a593Smuzhiyun 	.iterate_shared	= xfs_file_readdir,
1412*4882a593Smuzhiyun 	.llseek		= generic_file_llseek,
1413*4882a593Smuzhiyun 	.unlocked_ioctl	= xfs_file_ioctl,
1414*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1415*4882a593Smuzhiyun 	.compat_ioctl	= xfs_file_compat_ioctl,
1416*4882a593Smuzhiyun #endif
1417*4882a593Smuzhiyun 	.fsync		= xfs_dir_fsync,
1418*4882a593Smuzhiyun };
1419