/* xref: /OK3568_Linux_fs/kernel/fs/xfs/xfs_inode.c (revision 4882a59341e53eb6f0b4789bf948001014eff981) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
		return ip->i_d.di_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_d.di_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

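/*
 * A worked example of the two helpers above (illustrative only, not
 * called anywhere in this file; the numbers are made up):
 *
 *	With XFS_DIFLAG_EXTSIZE set and di_extsize = 16,
 *	xfs_get_extsz_hint() returns 16.  If di_cowextsize = 64 with
 *	XFS_DIFLAG2_COWEXTSIZE set, xfs_get_cowextsz_hint() returns
 *	max(64, 16) = 64.  With neither hint set on a non-realtime,
 *	non-always-COW inode, the results are 0 and
 *	XFS_DEFAULT_COWEXTSZ_HINT respectively.
 */
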
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp &&
	    ip->i_afp->if_format == XFS_DINODE_FMT_BTREE &&
	    (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

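/*
 * Sketch of the intended caller pattern for the two helpers above (the
 * real callers vary; xfs_iread_extents() stands in for any extent
 * reader here).  The returned mode must be passed back to xfs_iunlock()
 * unchanged, since it may be either SHARED or EXCL:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
 *	xfs_iunlock(ip, lock_mode);
 */
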
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_lock
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

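/*
 * Example (a sketch, not part of this file's logic): multiple lock
 * classes may be taken in one call, and the flags passed to
 * xfs_iunlock() must match what was locked:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *
 * Passing both SHARED and EXCL for the same class, or any flag outside
 * XFS_LOCK_MASK/XFS_LOCK_SUBCLASS_MASK, trips the asserts above.
 */
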
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_mmaplock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_mmaplock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

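/*
 * Sketch of the nonblocking pattern (hypothetical caller): on failure
 * nothing is left held, so the caller can simply back off and retry or
 * return -EAGAIN:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;
 *	... critical section ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */
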
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *	 to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrunlock_excl(&ip->i_mmaplock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		mrunlock_shared(&ip->i_mmaplock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		mrdemote(&ip->i_mmaplock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

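/*
 * Sketch of a demote user (hypothetical): take the iolock exclusively
 * for set-up that must be single threaded, then demote so that readers
 * can run concurrently with the long-running remainder:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	... one-time set-up ...
 *	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	... now effectively holds XFS_IOLOCK_SHARED ...
 *	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 */
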
#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		if (!(lock_flags & XFS_MMAPLOCK_SHARED))
			return !!ip->i_mmaplock.mr_writer;
		return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !debug_locks ||
				lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
		return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
	}

	ASSERT(0);
	return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

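/*
 * Worked example of the encoding above:
 * xfs_lock_inumorder(XFS_ILOCK_EXCL, 2) returns XFS_ILOCK_EXCL with
 * (2 << XFS_ILOCK_SHIFT) folded into the subclass bits, so lockdep
 * treats the third inode's ilock as its own subclass instead of
 * reporting a recursive acquisition of the first one.
 */
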
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}

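/*
 * Illustrative caller (a sketch; rename is the real consumer, via
 * xfs_sort_for_rename()): the array must already be sorted by i_ino,
 * with any duplicates adjacent so the "Already locked" check above can
 * skip them:
 *
 *	struct xfs_inode	*ips[4] = { dp1, dp2, ip1, ip2 }; // sorted
 *
 *	xfs_lock_inodes(ips, 4, XFS_ILOCK_EXCL);
 */
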
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the mmaplock or the ilock, but not more than one type at a time. If we lock
 * more than one at a time, lockdep will report false positives saying we have
 * violated locking orders.  The iolock must be double-locked separately since
 * we use i_rwsem for that.  We now support taking one lock EXCL and the other
 * SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	struct xfs_inode	*temp;
	uint			mode_temp;
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
	       !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		temp = ip0;
		ip0 = ip1;
		ip1 = temp;
		mode_temp = ip0_mode;
		ip0_mode = ip1_mode;
		ip1_mode = mode_temp;
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

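/*
 * Sketch of a caller: the function sorts by i_ino itself, so the two
 * inodes may be passed in either order, and one side may be SHARED
 * while the other is EXCL:
 *
 *	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
 */
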
STATIC uint
_xfs_dic2xflags(
	uint16_t		di_flags,
	uint64_t		di_flags2,
	bool			has_attr)
{
	uint			flags = 0;

	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (di_flags2 & XFS_DIFLAG2_ANY) {
		if (di_flags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (has_attr)
		flags |= FS_XFLAG_HASATTR;

	return flags;
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	struct xfs_icdinode	*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	xfs_inode_t		**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

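/*
 * Sketch of a case-insensitive caller (hypothetical): on a CI match,
 * ci_name.name carries the on-disk spelling and the caller owns the
 * allocation; on an exact match it is set to NULL:
 *
 *	struct xfs_name	ci_name;
 *
 *	error = xfs_lookup(dp, &xname, &ip, &ci_name);
 *	if (!error && ci_name.name)
 *		kmem_free(ci_name.name);
 */
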
/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
		if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_sb_version_hasrealtime(&ip->i_mount->m_sb))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_d.di_extsize = pip->i_d.di_extsize;
		}
	}
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_d.di_flags |= di_flags;
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
	}
	if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
		ip->i_d.di_flags2 |= XFS_DIFLAG2_DAX;
}

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode.  Finally, fill in the inode and
 * log its initial contents.  In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
	xfs_trans_t	*tp,
	xfs_inode_t	*pip,
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,
	xfs_buf_t	**ialloc_context,
	xfs_inode_t	**ipp)
{
	struct inode	*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount *mp = tp->t_mountp;
	xfs_ino_t	ino;
	xfs_inode_t	*ip;
	uint		flags;
	int		error;
	struct timespec64 tv;
	struct inode	*inode;

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
			    ialloc_context, &ino);
	if (error)
		return error;
	if (*ialloc_context || ino == NULLFSINO) {
		*ipp = NULL;
		return 0;
	}
	ASSERT(*ialloc_context == NULL);

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_d.di_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) &&
	    (mp->m_flags & XFS_MOUNT_GRPID)) {
		inode->i_uid = current_fsuid();
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) && !in_group_p(inode->i_gid))
		inode->i_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		inode_set_iversion(inode, 1);
		ip->i_d.di_flags2 = mp->m_ino_geo.new_diflags2;
		ip->i_d.di_cowextsize = 0;
		ip->i_d.di_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		/* FALLTHROUGH */
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

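/*
 * Condensed sketch of the two-phase protocol described above, from the
 * caller's side (xfs_dir_ialloc() below is the real implementation;
 * error handling omitted):
 *
 *	error = xfs_ialloc(tp, dp, mode, ..., &ialloc_context, &ip);
 *	if (ialloc_context) {
 *		xfs_trans_bhold(tp, ialloc_context);
 *		error = xfs_trans_roll(&tp);
 *		xfs_trans_bjoin(tp, ialloc_context);
 *		error = xfs_ialloc(tp, dp, mode, ...,
 *				   &ialloc_context, &ip);
 *	}
 */
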
/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
	xfs_trans_t	**tpp,		/* input: current transaction;
					   output: may be a new transaction. */
	xfs_inode_t	*dp,		/* directory within which to allocate
					   the inode. */
	umode_t		mode,
	xfs_nlink_t	nlink,
	dev_t		rdev,
	prid_t		prid,		/* project id */
	xfs_inode_t	**ipp)		/* pointer to inode; it will be
					   locked. */
{
	xfs_trans_t	*tp;
	xfs_inode_t	*ip;
	xfs_buf_t	*ialloc_context = NULL;
	int		code;
	void		*dqinfo;
	uint		tflags;

	tp = *tpp;
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

	/*
	 * xfs_ialloc will return a pointer to an incore inode if
	 * the Space Manager has an available inode on the free
	 * list. Otherwise, it will do an allocation and replenish
	 * the freelist.  Since we can only do one allocation per
	 * transaction without deadlocks, we will need to commit the
	 * current transaction and start a new one.  We will then
	 * need to call xfs_ialloc again to get the inode.
	 *
	 * If xfs_ialloc did an allocation to replenish the freelist,
	 * it returns the bp containing the head of the freelist as
	 * ialloc_context. We will hold a lock on it across the
	 * transaction commit so that no other process can steal
	 * the inode(s) that we've just allocated.
	 */
	code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
			&ip);

	/*
	 * Return an error if we were unable to allocate a new inode.
	 * This should only happen if we run out of space on disk or
	 * encounter a disk error.
	 */
	if (code) {
		*ipp = NULL;
		return code;
	}
	if (!ialloc_context && !ip) {
		*ipp = NULL;
		return -ENOSPC;
	}

	/*
	 * If the AGI buffer is non-NULL, then we were unable to get an
	 * inode in one operation.  We need to commit the current
	 * transaction and call xfs_ialloc() again.  It is guaranteed
	 * to succeed the second time.
	 */
	if (ialloc_context) {
		/*
		 * Normally, xfs_trans_commit releases all the locks.
		 * We call bhold to hang on to the ialloc_context across
		 * the commit.  Holding this buffer prevents any other
		 * processes from doing any allocations in this
		 * allocation group.
		 */
		xfs_trans_bhold(tp, ialloc_context);

		/*
		 * We want the quota changes to be associated with the next
		 * transaction, NOT this one. So, detach the dqinfo from this
		 * and attach it to the next transaction.
		 */
		dqinfo = NULL;
		tflags = 0;
		if (tp->t_dqinfo) {
			dqinfo = (void *)tp->t_dqinfo;
			tp->t_dqinfo = NULL;
			tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
			tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
		}

		code = xfs_trans_roll(&tp);

		/*
		 * Re-attach the quota info that we detached from prev trx.
		 */
		if (dqinfo) {
			tp->t_dqinfo = dqinfo;
			tp->t_flags |= tflags;
		}

		if (code) {
			xfs_buf_relse(ialloc_context);
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		xfs_trans_bjoin(tp, ialloc_context);

		/*
		 * Call ialloc again. Since we've locked out all
		 * other allocations in this allocation group,
		 * this call should always succeed.
		 */
		code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
				  &ialloc_context, &ip);

		/*
		 * If we get an error at this point, return to the caller
		 * so that the current transaction can be aborted.
		 */
		if (code) {
			*tpp = tp;
			*ipp = NULL;
			return code;
		}
		ASSERT(!ialloc_context && ip);
	}

	*ipp = ip;
	*tpp = tp;

	return 0;
}

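/*
 * Sketch of how a create path consumes xfs_dir_ialloc() (simplified;
 * quota handling and error unwinding omitted -- xfs_create() below
 * shows the real sequence):
 *
 *	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
 *			       prid, &ip);
 *	if (error)
 *		goto out_trans_cancel;
 *	error = xfs_dir_createname(tp, dp, name, ip->i_ino, resblks);
 */
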
1074*4882a593Smuzhiyun /*
1075*4882a593Smuzhiyun  * Decrement the link count on an inode & log the change.  If this causes the
1076*4882a593Smuzhiyun  * link count to go to zero, move the inode to AGI unlinked list so that it can
1077*4882a593Smuzhiyun  * be freed when the last active reference goes away via xfs_inactive().
1078*4882a593Smuzhiyun  */
1079*4882a593Smuzhiyun static int			/* error */
xfs_droplink(xfs_trans_t * tp,xfs_inode_t * ip)1080*4882a593Smuzhiyun xfs_droplink(
1081*4882a593Smuzhiyun 	xfs_trans_t *tp,
1082*4882a593Smuzhiyun 	xfs_inode_t *ip)
1083*4882a593Smuzhiyun {
1084*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	drop_nlink(VFS_I(ip));
1087*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	if (VFS_I(ip)->i_nlink)
1090*4882a593Smuzhiyun 		return 0;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	return xfs_iunlink(tp, ip);
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun /*
1096*4882a593Smuzhiyun  * Increment the link count on an inode & log the change.
1097*4882a593Smuzhiyun  */
1098*4882a593Smuzhiyun static void
1099*4882a593Smuzhiyun xfs_bumplink(
1100*4882a593Smuzhiyun 	xfs_trans_t *tp,
1101*4882a593Smuzhiyun 	xfs_inode_t *ip)
1102*4882a593Smuzhiyun {
1103*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	inc_nlink(VFS_I(ip));
1106*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun int
1110*4882a593Smuzhiyun xfs_create(
1111*4882a593Smuzhiyun 	xfs_inode_t		*dp,
1112*4882a593Smuzhiyun 	struct xfs_name		*name,
1113*4882a593Smuzhiyun 	umode_t			mode,
1114*4882a593Smuzhiyun 	dev_t			rdev,
1115*4882a593Smuzhiyun 	xfs_inode_t		**ipp)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun 	int			is_dir = S_ISDIR(mode);
1118*4882a593Smuzhiyun 	struct xfs_mount	*mp = dp->i_mount;
1119*4882a593Smuzhiyun 	struct xfs_inode	*ip = NULL;
1120*4882a593Smuzhiyun 	struct xfs_trans	*tp = NULL;
1121*4882a593Smuzhiyun 	int			error;
1122*4882a593Smuzhiyun 	bool                    unlock_dp_on_error = false;
1123*4882a593Smuzhiyun 	prid_t			prid;
1124*4882a593Smuzhiyun 	struct xfs_dquot	*udqp = NULL;
1125*4882a593Smuzhiyun 	struct xfs_dquot	*gdqp = NULL;
1126*4882a593Smuzhiyun 	struct xfs_dquot	*pdqp = NULL;
1127*4882a593Smuzhiyun 	struct xfs_trans_res	*tres;
1128*4882a593Smuzhiyun 	uint			resblks;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	trace_xfs_create(dp, name);
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(mp))
1133*4882a593Smuzhiyun 		return -EIO;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	prid = xfs_get_initial_prid(dp);
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	/*
1138*4882a593Smuzhiyun 	 * Make sure that we have allocated dquot(s) on disk.
1139*4882a593Smuzhiyun 	 */
1140*4882a593Smuzhiyun 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1141*4882a593Smuzhiyun 					XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1142*4882a593Smuzhiyun 					&udqp, &gdqp, &pdqp);
1143*4882a593Smuzhiyun 	if (error)
1144*4882a593Smuzhiyun 		return error;
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	if (is_dir) {
1147*4882a593Smuzhiyun 		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1148*4882a593Smuzhiyun 		tres = &M_RES(mp)->tr_mkdir;
1149*4882a593Smuzhiyun 	} else {
1150*4882a593Smuzhiyun 		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1151*4882a593Smuzhiyun 		tres = &M_RES(mp)->tr_create;
1152*4882a593Smuzhiyun 	}
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	/*
1155*4882a593Smuzhiyun 	 * Initially assume that the file does not exist and
1156*4882a593Smuzhiyun 	 * reserve the resources for that case.  If that is not
1157*4882a593Smuzhiyun 	 * the case we'll drop the one we have and get a more
1158*4882a593Smuzhiyun 	 * appropriate transaction later.
1159*4882a593Smuzhiyun 	 */
1160*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1161*4882a593Smuzhiyun 	if (error == -ENOSPC) {
1162*4882a593Smuzhiyun 		/* flush outstanding delalloc blocks and retry */
1163*4882a593Smuzhiyun 		xfs_flush_inodes(mp);
1164*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 	if (error)
1167*4882a593Smuzhiyun 		goto out_release_inode;
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1170*4882a593Smuzhiyun 	unlock_dp_on_error = true;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	/*
1173*4882a593Smuzhiyun 	 * Reserve disk quota and the inode.
1174*4882a593Smuzhiyun 	 */
1175*4882a593Smuzhiyun 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1176*4882a593Smuzhiyun 						pdqp, resblks, 1, 0);
1177*4882a593Smuzhiyun 	if (error)
1178*4882a593Smuzhiyun 		goto out_trans_cancel;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	/*
1181*4882a593Smuzhiyun 	 * A newly created regular or special file just has one directory
1182*4882a593Smuzhiyun 	 * entry pointing to it, but a directory also has the "." entry
1183*4882a593Smuzhiyun 	 * pointing to itself.
1184*4882a593Smuzhiyun 	 */
1185*4882a593Smuzhiyun 	error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
1186*4882a593Smuzhiyun 	if (error)
1187*4882a593Smuzhiyun 		goto out_trans_cancel;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 	/*
1190*4882a593Smuzhiyun 	 * Now we join the directory inode to the transaction.  We do not do it
1191*4882a593Smuzhiyun 	 * earlier because xfs_dir_ialloc might commit the previous transaction
1192*4882a593Smuzhiyun 	 * (and release all the locks).  An error from here on will result in
1193*4882a593Smuzhiyun 	 * the transaction cancel unlocking dp so don't do it explicitly in the
1194*4882a593Smuzhiyun 	 * error path.
1195*4882a593Smuzhiyun 	 */
1196*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1197*4882a593Smuzhiyun 	unlock_dp_on_error = false;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
1200*4882a593Smuzhiyun 					resblks - XFS_IALLOC_SPACE_RES(mp));
1201*4882a593Smuzhiyun 	if (error) {
1202*4882a593Smuzhiyun 		ASSERT(error != -ENOSPC);
1203*4882a593Smuzhiyun 		goto out_trans_cancel;
1204*4882a593Smuzhiyun 	}
1205*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1206*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	if (is_dir) {
1209*4882a593Smuzhiyun 		error = xfs_dir_init(tp, ip, dp);
1210*4882a593Smuzhiyun 		if (error)
1211*4882a593Smuzhiyun 			goto out_trans_cancel;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 		xfs_bumplink(tp, dp);
1214*4882a593Smuzhiyun 	}
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	/*
1217*4882a593Smuzhiyun 	 * If this is a synchronous mount, make sure that the
1218*4882a593Smuzhiyun 	 * create transaction goes to disk before returning to
1219*4882a593Smuzhiyun 	 * the user.
1220*4882a593Smuzhiyun 	 */
1221*4882a593Smuzhiyun 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1222*4882a593Smuzhiyun 		xfs_trans_set_sync(tp);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 	/*
1225*4882a593Smuzhiyun 	 * Attach the dquot(s) to the inodes and modify them incore.
1226*4882a593Smuzhiyun 	 * The ids of the inode couldn't have changed since the new
1227*4882a593Smuzhiyun 	 * inode has been locked ever since it was created.
1228*4882a593Smuzhiyun 	 */
1229*4882a593Smuzhiyun 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	error = xfs_trans_commit(tp);
1232*4882a593Smuzhiyun 	if (error)
1233*4882a593Smuzhiyun 		goto out_release_inode;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	xfs_qm_dqrele(udqp);
1236*4882a593Smuzhiyun 	xfs_qm_dqrele(gdqp);
1237*4882a593Smuzhiyun 	xfs_qm_dqrele(pdqp);
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	*ipp = ip;
1240*4882a593Smuzhiyun 	return 0;
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun  out_trans_cancel:
1243*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
1244*4882a593Smuzhiyun  out_release_inode:
1245*4882a593Smuzhiyun 	/*
1246*4882a593Smuzhiyun 	 * Wait until after the current transaction is aborted to finish the
1247*4882a593Smuzhiyun 	 * setup of the inode and release the inode.  This prevents recursive
1248*4882a593Smuzhiyun 	 * transactions and deadlocks from xfs_inactive.
1249*4882a593Smuzhiyun 	 */
1250*4882a593Smuzhiyun 	if (ip) {
1251*4882a593Smuzhiyun 		xfs_finish_inode_setup(ip);
1252*4882a593Smuzhiyun 		xfs_irele(ip);
1253*4882a593Smuzhiyun 	}
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun 	xfs_qm_dqrele(udqp);
1256*4882a593Smuzhiyun 	xfs_qm_dqrele(gdqp);
1257*4882a593Smuzhiyun 	xfs_qm_dqrele(pdqp);
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	if (unlock_dp_on_error)
1260*4882a593Smuzhiyun 		xfs_iunlock(dp, XFS_ILOCK_EXCL);
1261*4882a593Smuzhiyun 	return error;
1262*4882a593Smuzhiyun }
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun int
1265*4882a593Smuzhiyun xfs_create_tmpfile(
1266*4882a593Smuzhiyun 	struct xfs_inode	*dp,
1267*4882a593Smuzhiyun 	umode_t			mode,
1268*4882a593Smuzhiyun 	struct xfs_inode	**ipp)
1269*4882a593Smuzhiyun {
1270*4882a593Smuzhiyun 	struct xfs_mount	*mp = dp->i_mount;
1271*4882a593Smuzhiyun 	struct xfs_inode	*ip = NULL;
1272*4882a593Smuzhiyun 	struct xfs_trans	*tp = NULL;
1273*4882a593Smuzhiyun 	int			error;
1274*4882a593Smuzhiyun 	prid_t                  prid;
1275*4882a593Smuzhiyun 	struct xfs_dquot	*udqp = NULL;
1276*4882a593Smuzhiyun 	struct xfs_dquot	*gdqp = NULL;
1277*4882a593Smuzhiyun 	struct xfs_dquot	*pdqp = NULL;
1278*4882a593Smuzhiyun 	struct xfs_trans_res	*tres;
1279*4882a593Smuzhiyun 	uint			resblks;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(mp))
1282*4882a593Smuzhiyun 		return -EIO;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	prid = xfs_get_initial_prid(dp);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	/*
1287*4882a593Smuzhiyun 	 * Make sure that we have allocated dquot(s) on disk.
1288*4882a593Smuzhiyun 	 */
1289*4882a593Smuzhiyun 	error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1290*4882a593Smuzhiyun 				XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1291*4882a593Smuzhiyun 				&udqp, &gdqp, &pdqp);
1292*4882a593Smuzhiyun 	if (error)
1293*4882a593Smuzhiyun 		return error;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	resblks = XFS_IALLOC_SPACE_RES(mp);
1296*4882a593Smuzhiyun 	tres = &M_RES(mp)->tr_create_tmpfile;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
1299*4882a593Smuzhiyun 	if (error)
1300*4882a593Smuzhiyun 		goto out_release_inode;
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1303*4882a593Smuzhiyun 						pdqp, resblks, 1, 0);
1304*4882a593Smuzhiyun 	if (error)
1305*4882a593Smuzhiyun 		goto out_trans_cancel;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	error = xfs_dir_ialloc(&tp, dp, mode, 0, 0, prid, &ip);
1308*4882a593Smuzhiyun 	if (error)
1309*4882a593Smuzhiyun 		goto out_trans_cancel;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	if (mp->m_flags & XFS_MOUNT_WSYNC)
1312*4882a593Smuzhiyun 		xfs_trans_set_sync(tp);
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	/*
1315*4882a593Smuzhiyun 	 * Attach the dquot(s) to the inodes and modify them incore.
1316*4882a593Smuzhiyun 	 * The ids of the inode couldn't have changed since the new
1317*4882a593Smuzhiyun 	 * inode has been locked ever since it was created.
1318*4882a593Smuzhiyun 	 */
1319*4882a593Smuzhiyun 	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	error = xfs_iunlink(tp, ip);
1322*4882a593Smuzhiyun 	if (error)
1323*4882a593Smuzhiyun 		goto out_trans_cancel;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	error = xfs_trans_commit(tp);
1326*4882a593Smuzhiyun 	if (error)
1327*4882a593Smuzhiyun 		goto out_release_inode;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	xfs_qm_dqrele(udqp);
1330*4882a593Smuzhiyun 	xfs_qm_dqrele(gdqp);
1331*4882a593Smuzhiyun 	xfs_qm_dqrele(pdqp);
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 	*ipp = ip;
1334*4882a593Smuzhiyun 	return 0;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun  out_trans_cancel:
1337*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
1338*4882a593Smuzhiyun  out_release_inode:
1339*4882a593Smuzhiyun 	/*
1340*4882a593Smuzhiyun 	 * Wait until after the current transaction is aborted to finish the
1341*4882a593Smuzhiyun 	 * setup of the inode and release the inode.  This prevents recursive
1342*4882a593Smuzhiyun 	 * transactions and deadlocks from xfs_inactive.
1343*4882a593Smuzhiyun 	 */
1344*4882a593Smuzhiyun 	if (ip) {
1345*4882a593Smuzhiyun 		xfs_finish_inode_setup(ip);
1346*4882a593Smuzhiyun 		xfs_irele(ip);
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	xfs_qm_dqrele(udqp);
1350*4882a593Smuzhiyun 	xfs_qm_dqrele(gdqp);
1351*4882a593Smuzhiyun 	xfs_qm_dqrele(pdqp);
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	return error;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun int
1357*4882a593Smuzhiyun xfs_link(
1358*4882a593Smuzhiyun 	xfs_inode_t		*tdp,
1359*4882a593Smuzhiyun 	xfs_inode_t		*sip,
1360*4882a593Smuzhiyun 	struct xfs_name		*target_name)
1361*4882a593Smuzhiyun {
1362*4882a593Smuzhiyun 	xfs_mount_t		*mp = tdp->i_mount;
1363*4882a593Smuzhiyun 	xfs_trans_t		*tp;
1364*4882a593Smuzhiyun 	int			error;
1365*4882a593Smuzhiyun 	int			resblks;
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 	trace_xfs_link(tdp, target_name);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(mp))
1372*4882a593Smuzhiyun 		return -EIO;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	error = xfs_qm_dqattach(sip);
1375*4882a593Smuzhiyun 	if (error)
1376*4882a593Smuzhiyun 		goto std_return;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	error = xfs_qm_dqattach(tdp);
1379*4882a593Smuzhiyun 	if (error)
1380*4882a593Smuzhiyun 		goto std_return;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1383*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
1384*4882a593Smuzhiyun 	if (error == -ENOSPC) {
1385*4882a593Smuzhiyun 		resblks = 0;
1386*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
1387*4882a593Smuzhiyun 	}
1388*4882a593Smuzhiyun 	if (error)
1389*4882a593Smuzhiyun 		goto std_return;
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 	xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1394*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	/*
1397*4882a593Smuzhiyun 	 * If we are using project inheritance, we only allow hard link
1398*4882a593Smuzhiyun 	 * creation in our tree when the project IDs are the same; else
1399*4882a593Smuzhiyun 	 * the tree quota mechanism could be circumvented.
1400*4882a593Smuzhiyun 	 */
1401*4882a593Smuzhiyun 	if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1402*4882a593Smuzhiyun 		     tdp->i_d.di_projid != sip->i_d.di_projid)) {
1403*4882a593Smuzhiyun 		error = -EXDEV;
1404*4882a593Smuzhiyun 		goto error_return;
1405*4882a593Smuzhiyun 	}
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if (!resblks) {
1408*4882a593Smuzhiyun 		error = xfs_dir_canenter(tp, tdp, target_name);
1409*4882a593Smuzhiyun 		if (error)
1410*4882a593Smuzhiyun 			goto error_return;
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/*
1414*4882a593Smuzhiyun 	 * Handle initial link state of O_TMPFILE inode
1415*4882a593Smuzhiyun 	 */
1416*4882a593Smuzhiyun 	if (VFS_I(sip)->i_nlink == 0) {
1417*4882a593Smuzhiyun 		error = xfs_iunlink_remove(tp, sip);
1418*4882a593Smuzhiyun 		if (error)
1419*4882a593Smuzhiyun 			goto error_return;
1420*4882a593Smuzhiyun 	}
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1423*4882a593Smuzhiyun 				   resblks);
1424*4882a593Smuzhiyun 	if (error)
1425*4882a593Smuzhiyun 		goto error_return;
1426*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1427*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	xfs_bumplink(tp, sip);
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	/*
1432*4882a593Smuzhiyun 	 * If this is a synchronous mount, make sure that the
1433*4882a593Smuzhiyun 	 * link transaction goes to disk before returning to
1434*4882a593Smuzhiyun 	 * the user.
1435*4882a593Smuzhiyun 	 */
1436*4882a593Smuzhiyun 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1437*4882a593Smuzhiyun 		xfs_trans_set_sync(tp);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	return xfs_trans_commit(tp);
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun  error_return:
1442*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
1443*4882a593Smuzhiyun  std_return:
1444*4882a593Smuzhiyun 	return error;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun 
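/*
 * Userspace view of the i_nlink == 0 branch above: an O_TMPFILE inode
 * sits on the AGI unlinked list until linkat() gives it a name, at
 * which point xfs_link() must first take it off that list via
 * xfs_iunlink_remove().  A hedged sketch only; "/mnt/xfs" is a
 * hypothetical mount point and error handling is minimal.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char	proc_path[64];
	int	fd;

	/* Create an anonymous inode: nlink == 0, on the unlinked list. */
	fd = open("/mnt/xfs", O_TMPFILE | O_WRONLY, 0600);
	if (fd < 0)
		return 1;

	snprintf(proc_path, sizeof(proc_path), "/proc/self/fd/%d", fd);

	/* Give it a name: this reaches xfs_link() with i_nlink == 0. */
	if (linkat(AT_FDCWD, proc_path, AT_FDCWD, "/mnt/xfs/file",
		   AT_SYMLINK_FOLLOW) < 0)
		return 1;

	close(fd);
	return 0;
}
#endif
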
1447*4882a593Smuzhiyun /* Clear the reflink flag and the cowblocks tag if possible. */
1448*4882a593Smuzhiyun static void
1449*4882a593Smuzhiyun xfs_itruncate_clear_reflink_flags(
1450*4882a593Smuzhiyun 	struct xfs_inode	*ip)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	struct xfs_ifork	*dfork;
1453*4882a593Smuzhiyun 	struct xfs_ifork	*cfork;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (!xfs_is_reflink_inode(ip))
1456*4882a593Smuzhiyun 		return;
1457*4882a593Smuzhiyun 	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1458*4882a593Smuzhiyun 	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1459*4882a593Smuzhiyun 	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1460*4882a593Smuzhiyun 		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1461*4882a593Smuzhiyun 	if (cfork->if_bytes == 0)
1462*4882a593Smuzhiyun 		xfs_inode_clear_cowblocks_tag(ip);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun /*
1466*4882a593Smuzhiyun  * Free up the underlying blocks past new_size.  The new size must be smaller
1467*4882a593Smuzhiyun  * than the current size.  This routine can be used both for the attribute and
1468*4882a593Smuzhiyun  * data fork, and does not modify the inode size, which is left to the caller.
1469*4882a593Smuzhiyun  *
1470*4882a593Smuzhiyun  * The transaction passed to this routine must have made a permanent log
1471*4882a593Smuzhiyun  * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
1472*4882a593Smuzhiyun  * given transaction and start new ones, so make sure everything involved in
1473*4882a593Smuzhiyun  * the transaction is tidy before calling here.  Some transaction will be
1474*4882a593Smuzhiyun  * returned to the caller to be committed.  The incoming transaction must
1475*4882a593Smuzhiyun  * already include the inode, and both inode locks must be held exclusively.
1476*4882a593Smuzhiyun  * The inode must also be "held" within the transaction.  On return the inode
1477*4882a593Smuzhiyun  * will be "held" within the returned transaction.  This routine does NOT
1478*4882a593Smuzhiyun  * require any disk space to be reserved for it within the transaction.
1479*4882a593Smuzhiyun  *
1480*4882a593Smuzhiyun  * If we get an error, we must return with the inode locked and linked into the
1481*4882a593Smuzhiyun  * current transaction. This keeps things simple for the higher level code,
1482*4882a593Smuzhiyun  * because it always knows that the inode is locked and held in the transaction
1483*4882a593Smuzhiyun  * that returns to it whether errors occur or not.  We don't mark the inode
1484*4882a593Smuzhiyun  * dirty on error so that transactions can be easily aborted if possible.
1485*4882a593Smuzhiyun  */
1486*4882a593Smuzhiyun int
1487*4882a593Smuzhiyun xfs_itruncate_extents_flags(
1488*4882a593Smuzhiyun 	struct xfs_trans	**tpp,
1489*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1490*4882a593Smuzhiyun 	int			whichfork,
1491*4882a593Smuzhiyun 	xfs_fsize_t		new_size,
1492*4882a593Smuzhiyun 	int			flags)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
1495*4882a593Smuzhiyun 	struct xfs_trans	*tp = *tpp;
1496*4882a593Smuzhiyun 	xfs_fileoff_t		first_unmap_block;
1497*4882a593Smuzhiyun 	xfs_filblks_t		unmap_len;
1498*4882a593Smuzhiyun 	int			error = 0;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1501*4882a593Smuzhiyun 	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1502*4882a593Smuzhiyun 	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1503*4882a593Smuzhiyun 	ASSERT(new_size <= XFS_ISIZE(ip));
1504*4882a593Smuzhiyun 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1505*4882a593Smuzhiyun 	ASSERT(ip->i_itemp != NULL);
1506*4882a593Smuzhiyun 	ASSERT(ip->i_itemp->ili_lock_flags == 0);
1507*4882a593Smuzhiyun 	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	trace_xfs_itruncate_extents_start(ip, new_size);
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 	flags |= xfs_bmapi_aflag(whichfork);
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	/*
1514*4882a593Smuzhiyun 	 * Since it is possible for space to become allocated beyond
1515*4882a593Smuzhiyun 	 * the end of the file (in a crash where the space is allocated
1516*4882a593Smuzhiyun 	 * but the inode size is not yet updated), simply remove any
1517*4882a593Smuzhiyun 	 * blocks which show up between the new EOF and the maximum
1518*4882a593Smuzhiyun 	 * possible file size.
1519*4882a593Smuzhiyun 	 *
1520*4882a593Smuzhiyun 	 * We have to free all the blocks to the bmbt maximum offset, even if
1521*4882a593Smuzhiyun 	 * the page cache can't scale that far.
1522*4882a593Smuzhiyun 	 */
1523*4882a593Smuzhiyun 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1524*4882a593Smuzhiyun 	if (first_unmap_block >= XFS_MAX_FILEOFF) {
1525*4882a593Smuzhiyun 		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
1526*4882a593Smuzhiyun 		return 0;
1527*4882a593Smuzhiyun 	}
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
1530*4882a593Smuzhiyun 	while (unmap_len > 0) {
1531*4882a593Smuzhiyun 		ASSERT(tp->t_firstblock == NULLFSBLOCK);
1532*4882a593Smuzhiyun 		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
1533*4882a593Smuzhiyun 				flags, XFS_ITRUNC_MAX_EXTENTS);
1534*4882a593Smuzhiyun 		if (error)
1535*4882a593Smuzhiyun 			goto out;
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 		/* free the just unmapped extents */
1538*4882a593Smuzhiyun 		error = xfs_defer_finish(&tp);
1539*4882a593Smuzhiyun 		if (error)
1540*4882a593Smuzhiyun 			goto out;
1541*4882a593Smuzhiyun 	}
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	if (whichfork == XFS_DATA_FORK) {
1544*4882a593Smuzhiyun 		/* Remove all pending CoW reservations. */
1545*4882a593Smuzhiyun 		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1546*4882a593Smuzhiyun 				first_unmap_block, XFS_MAX_FILEOFF, true);
1547*4882a593Smuzhiyun 		if (error)
1548*4882a593Smuzhiyun 			goto out;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 		xfs_itruncate_clear_reflink_flags(ip);
1551*4882a593Smuzhiyun 	}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/*
1554*4882a593Smuzhiyun 	 * Always re-log the inode so that our permanent transaction can keep
1555*4882a593Smuzhiyun 	 * on rolling it forward in the log.
1556*4882a593Smuzhiyun 	 */
1557*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	trace_xfs_itruncate_extents_end(ip, new_size);
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun out:
1562*4882a593Smuzhiyun 	*tpp = tp;
1563*4882a593Smuzhiyun 	return error;
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun 
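/*
 * A minimal sketch (not part of the original file) of the caller
 * contract described above: allocate a permanent transaction, join the
 * locked inode with lock flags of zero so it stays held, call the
 * truncate helper, then commit or cancel whatever transaction comes
 * back.  xfs_inactive_truncate() below is the real in-tree example;
 * the helper name here is hypothetical and error unwinding is
 * simplified.
 */
#if 0
static int
example_truncate_data_fork(
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);	/* lock flags 0: inode stays held */

	/* May roll @tp internally; on return @tp is the live transaction. */
	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
	if (error)
		xfs_trans_cancel(tp);
	else
		error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
#endif
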
1566*4882a593Smuzhiyun int
1567*4882a593Smuzhiyun xfs_release(
1568*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1569*4882a593Smuzhiyun {
1570*4882a593Smuzhiyun 	xfs_mount_t	*mp = ip->i_mount;
1571*4882a593Smuzhiyun 	int		error;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
1574*4882a593Smuzhiyun 		return 0;
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	/* If this is a read-only mount, don't do this (would generate I/O) */
1577*4882a593Smuzhiyun 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1578*4882a593Smuzhiyun 		return 0;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	if (!XFS_FORCED_SHUTDOWN(mp)) {
1581*4882a593Smuzhiyun 		int truncated;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 		/*
1584*4882a593Smuzhiyun 		 * If we previously truncated this file and removed old data
1585*4882a593Smuzhiyun 		 * in the process, we want to initiate "early" writeout on
1586*4882a593Smuzhiyun 		 * the last close.  This is an attempt to combat the notorious
1587*4882a593Smuzhiyun 		 * NULL files problem which is particularly noticeable from a
1588*4882a593Smuzhiyun 		 * truncate down, buffered (re-)write (delalloc), followed by
1589*4882a593Smuzhiyun 		 * a crash.  What we are effectively doing here is
1590*4882a593Smuzhiyun 		 * significantly reducing the time window where we'd otherwise
1591*4882a593Smuzhiyun 		 * be exposed to that problem.
1592*4882a593Smuzhiyun 		 */
1593*4882a593Smuzhiyun 		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1594*4882a593Smuzhiyun 		if (truncated) {
1595*4882a593Smuzhiyun 			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
1596*4882a593Smuzhiyun 			if (ip->i_delayed_blks > 0) {
1597*4882a593Smuzhiyun 				error = filemap_flush(VFS_I(ip)->i_mapping);
1598*4882a593Smuzhiyun 				if (error)
1599*4882a593Smuzhiyun 					return error;
1600*4882a593Smuzhiyun 			}
1601*4882a593Smuzhiyun 		}
1602*4882a593Smuzhiyun 	}
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (VFS_I(ip)->i_nlink == 0)
1605*4882a593Smuzhiyun 		return 0;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	if (xfs_can_free_eofblocks(ip, false)) {
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 		/*
1610*4882a593Smuzhiyun 		 * If the inode is being opened, written and closed
1611*4882a593Smuzhiyun 		 * frequently and we have delayed allocation blocks outstanding
1612*4882a593Smuzhiyun 		 * (e.g. streaming writes from the NFS server), truncating the
1613*4882a593Smuzhiyun 		 * blocks past EOF will cause fragmentation to occur.
1614*4882a593Smuzhiyun 		 *
1615*4882a593Smuzhiyun 		 * In this case don't do the truncation, but we have to be
1616*4882a593Smuzhiyun 		 * careful how we detect this case. Blocks beyond EOF show up as
1617*4882a593Smuzhiyun 		 * i_delayed_blks even when the inode is clean, so we need to
1618*4882a593Smuzhiyun 		 * truncate them away first before checking for a dirty release.
1619*4882a593Smuzhiyun 		 * Hence on the first dirty close we will still remove the
1620*4882a593Smuzhiyun 		 * speculative allocation, but after that we will leave it in
1621*4882a593Smuzhiyun 		 * place.
1622*4882a593Smuzhiyun 		 */
1623*4882a593Smuzhiyun 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1624*4882a593Smuzhiyun 			return 0;
1625*4882a593Smuzhiyun 		/*
1626*4882a593Smuzhiyun 		 * If we can't get the iolock just skip truncating the blocks
1627*4882a593Smuzhiyun 		 * past EOF because we could deadlock with the mmap_lock
1628*4882a593Smuzhiyun 		 * otherwise. We'll get another chance to drop them once the
1629*4882a593Smuzhiyun 		 * last reference to the inode is dropped, so we'll never leak
1630*4882a593Smuzhiyun 		 * blocks permanently.
1631*4882a593Smuzhiyun 		 */
1632*4882a593Smuzhiyun 		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1633*4882a593Smuzhiyun 			error = xfs_free_eofblocks(ip);
1634*4882a593Smuzhiyun 			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1635*4882a593Smuzhiyun 			if (error)
1636*4882a593Smuzhiyun 				return error;
1637*4882a593Smuzhiyun 		}
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 		/* delalloc blocks after truncation means it really is dirty */
1640*4882a593Smuzhiyun 		if (ip->i_delayed_blks)
1641*4882a593Smuzhiyun 			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1642*4882a593Smuzhiyun 	}
1643*4882a593Smuzhiyun 	return 0;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun /*
1647*4882a593Smuzhiyun  * xfs_inactive_truncate
1648*4882a593Smuzhiyun  *
1649*4882a593Smuzhiyun  * Called to perform a truncate when an inode becomes unlinked.
1650*4882a593Smuzhiyun  */
1651*4882a593Smuzhiyun STATIC int
1652*4882a593Smuzhiyun xfs_inactive_truncate(
1653*4882a593Smuzhiyun 	struct xfs_inode *ip)
1654*4882a593Smuzhiyun {
1655*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
1656*4882a593Smuzhiyun 	struct xfs_trans	*tp;
1657*4882a593Smuzhiyun 	int			error;
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
1660*4882a593Smuzhiyun 	if (error) {
1661*4882a593Smuzhiyun 		ASSERT(XFS_FORCED_SHUTDOWN(mp));
1662*4882a593Smuzhiyun 		return error;
1663*4882a593Smuzhiyun 	}
1664*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1665*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, ip, 0);
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/*
1668*4882a593Smuzhiyun 	 * Log the inode size first to prevent stale data exposure in the event
1669*4882a593Smuzhiyun 	 * of a system crash before the truncate completes. See the related
1670*4882a593Smuzhiyun 	 * comment in xfs_vn_setattr_size() for details.
1671*4882a593Smuzhiyun 	 */
1672*4882a593Smuzhiyun 	ip->i_d.di_size = 0;
1673*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1676*4882a593Smuzhiyun 	if (error)
1677*4882a593Smuzhiyun 		goto error_trans_cancel;
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	ASSERT(ip->i_df.if_nextents == 0);
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	error = xfs_trans_commit(tp);
1682*4882a593Smuzhiyun 	if (error)
1683*4882a593Smuzhiyun 		goto error_unlock;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1686*4882a593Smuzhiyun 	return 0;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun error_trans_cancel:
1689*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
1690*4882a593Smuzhiyun error_unlock:
1691*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1692*4882a593Smuzhiyun 	return error;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun /*
1696*4882a593Smuzhiyun  * xfs_inactive_ifree()
1697*4882a593Smuzhiyun  *
1698*4882a593Smuzhiyun  * Perform the inode free when an inode is unlinked.
1699*4882a593Smuzhiyun  */
1700*4882a593Smuzhiyun STATIC int
1701*4882a593Smuzhiyun xfs_inactive_ifree(
1702*4882a593Smuzhiyun 	struct xfs_inode *ip)
1703*4882a593Smuzhiyun {
1704*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
1705*4882a593Smuzhiyun 	struct xfs_trans	*tp;
1706*4882a593Smuzhiyun 	int			error;
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	/*
1709*4882a593Smuzhiyun 	 * We try to use a per-AG reservation for any block needed by the finobt
1710*4882a593Smuzhiyun 	 * tree, but as the finobt feature predates the per-AG reservation
1711*4882a593Smuzhiyun 	 * support a degraded file system might not have enough space for the
1712*4882a593Smuzhiyun 	 * reservation at mount time.  In that case try to dip into the reserved
1713*4882a593Smuzhiyun 	 * pool and pray.
1714*4882a593Smuzhiyun 	 *
1715*4882a593Smuzhiyun 	 * Send a warning if the reservation does happen to fail, as the inode
1716*4882a593Smuzhiyun 	 * now remains allocated and sits on the unlinked list until the fs is
1717*4882a593Smuzhiyun 	 * repaired.
1718*4882a593Smuzhiyun 	 */
1719*4882a593Smuzhiyun 	if (unlikely(mp->m_finobt_nores)) {
1720*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1721*4882a593Smuzhiyun 				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1722*4882a593Smuzhiyun 				&tp);
1723*4882a593Smuzhiyun 	} else {
1724*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1725*4882a593Smuzhiyun 	}
1726*4882a593Smuzhiyun 	if (error) {
1727*4882a593Smuzhiyun 		if (error == -ENOSPC) {
1728*4882a593Smuzhiyun 			xfs_warn_ratelimited(mp,
1729*4882a593Smuzhiyun 			"Failed to remove inode(s) from unlinked list. "
1730*4882a593Smuzhiyun 			"Please free space, unmount and run xfs_repair.");
1731*4882a593Smuzhiyun 		} else {
1732*4882a593Smuzhiyun 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
1733*4882a593Smuzhiyun 		}
1734*4882a593Smuzhiyun 		return error;
1735*4882a593Smuzhiyun 	}
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun 	/*
1738*4882a593Smuzhiyun 	 * We do not hold the inode locked across the entire rolling transaction
1739*4882a593Smuzhiyun 	 * here. We only need to hold it for the first transaction that
1740*4882a593Smuzhiyun 	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
1741*4882a593Smuzhiyun 	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
1742*4882a593Smuzhiyun 	 * here breaks the relationship between cluster buffer invalidation and
1743*4882a593Smuzhiyun 	 * stale inode invalidation on cluster buffer item journal commit
1744*4882a593Smuzhiyun 	 * completion, and can result in leaving dirty stale inodes hanging
1745*4882a593Smuzhiyun 	 * around in memory.
1746*4882a593Smuzhiyun 	 *
1747*4882a593Smuzhiyun 	 * We have no need for serialising this inode operation against other
1748*4882a593Smuzhiyun 	 * operations - we freed the inode and hence reallocation is required
1749*4882a593Smuzhiyun 	 * and that will serialise on reallocating the space the deferops need
1750*4882a593Smuzhiyun 	 * to free. Hence we can unlock the inode on the first commit of
1751*4882a593Smuzhiyun 	 * the transaction rather than roll it right through the deferops. This
1752*4882a593Smuzhiyun 	 * avoids relogging the XFS_ISTALE inode.
1753*4882a593Smuzhiyun 	 *
1754*4882a593Smuzhiyun 	 * We check that xfs_ifree() hasn't grown an internal transaction roll
1755*4882a593Smuzhiyun 	 * by asserting that the inode is still locked when it returns.
1756*4882a593Smuzhiyun 	 */
1757*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1758*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	error = xfs_ifree(tp, ip);
1761*4882a593Smuzhiyun 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1762*4882a593Smuzhiyun 	if (error) {
1763*4882a593Smuzhiyun 		/*
1764*4882a593Smuzhiyun 		 * If we fail to free the inode, shut down.  The cancel
1765*4882a593Smuzhiyun 		 * might do that, we need to make sure.  Otherwise the
1766*4882a593Smuzhiyun 		 * inode might be lost for a long time or forever.
1767*4882a593Smuzhiyun 		 */
1768*4882a593Smuzhiyun 		if (!XFS_FORCED_SHUTDOWN(mp)) {
1769*4882a593Smuzhiyun 			xfs_notice(mp, "%s: xfs_ifree returned error %d",
1770*4882a593Smuzhiyun 				__func__, error);
1771*4882a593Smuzhiyun 			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1772*4882a593Smuzhiyun 		}
1773*4882a593Smuzhiyun 		xfs_trans_cancel(tp);
1774*4882a593Smuzhiyun 		return error;
1775*4882a593Smuzhiyun 	}
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	/*
1778*4882a593Smuzhiyun 	 * Credit the quota account(s). The inode is gone.
1779*4882a593Smuzhiyun 	 */
1780*4882a593Smuzhiyun 	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun 	/*
1783*4882a593Smuzhiyun 	 * Just ignore errors at this point.  There is nothing we can do except
1784*4882a593Smuzhiyun 	 * to try to keep going. Make sure it's not a silent error.
1785*4882a593Smuzhiyun 	 */
1786*4882a593Smuzhiyun 	error = xfs_trans_commit(tp);
1787*4882a593Smuzhiyun 	if (error)
1788*4882a593Smuzhiyun 		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1789*4882a593Smuzhiyun 			__func__, error);
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	return 0;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun /*
1795*4882a593Smuzhiyun  * xfs_inactive
1796*4882a593Smuzhiyun  *
1797*4882a593Smuzhiyun  * This is called when the vnode reference count for the vnode
1798*4882a593Smuzhiyun  * goes to zero.  If the file has been unlinked, then it must
1799*4882a593Smuzhiyun  * now be truncated.  Also, we clear all of the read-ahead state
1800*4882a593Smuzhiyun  * kept for the inode here since the file is now closed.
1801*4882a593Smuzhiyun  */
1802*4882a593Smuzhiyun void
1803*4882a593Smuzhiyun xfs_inactive(
1804*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1805*4882a593Smuzhiyun {
1806*4882a593Smuzhiyun 	struct xfs_mount	*mp;
1807*4882a593Smuzhiyun 	int			error;
1808*4882a593Smuzhiyun 	int			truncate = 0;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	/*
1811*4882a593Smuzhiyun 	 * If the inode is already free, then there can be nothing
1812*4882a593Smuzhiyun 	 * to clean up here.
1813*4882a593Smuzhiyun 	 */
1814*4882a593Smuzhiyun 	if (VFS_I(ip)->i_mode == 0) {
1815*4882a593Smuzhiyun 		ASSERT(ip->i_df.if_broot_bytes == 0);
1816*4882a593Smuzhiyun 		return;
1817*4882a593Smuzhiyun 	}
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	mp = ip->i_mount;
1820*4882a593Smuzhiyun 	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	/* If this is a read-only mount, don't do this (would generate I/O) */
1823*4882a593Smuzhiyun 	if (mp->m_flags & XFS_MOUNT_RDONLY)
1824*4882a593Smuzhiyun 		return;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	/* Try to clean out the cow blocks if there are any. */
1827*4882a593Smuzhiyun 	if (xfs_inode_has_cow_data(ip))
1828*4882a593Smuzhiyun 		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	if (VFS_I(ip)->i_nlink != 0) {
1831*4882a593Smuzhiyun 		/*
1832*4882a593Smuzhiyun 		 * force is true because we are evicting an inode from the
1833*4882a593Smuzhiyun 		 * cache. Post-eof blocks must be freed, lest we end up with
1834*4882a593Smuzhiyun 		 * broken free space accounting.
1835*4882a593Smuzhiyun 		 *
1836*4882a593Smuzhiyun 		 * Note: don't bother with iolock here since lockdep complains
1837*4882a593Smuzhiyun 		 * about acquiring it in reclaim context. We have the only
1838*4882a593Smuzhiyun 		 * reference to the inode at this point anyways.
1839*4882a593Smuzhiyun 		 */
1840*4882a593Smuzhiyun 		if (xfs_can_free_eofblocks(ip, true))
1841*4882a593Smuzhiyun 			xfs_free_eofblocks(ip);
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 		return;
1844*4882a593Smuzhiyun 	}
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	if (S_ISREG(VFS_I(ip)->i_mode) &&
1847*4882a593Smuzhiyun 	    (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1848*4882a593Smuzhiyun 	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
1849*4882a593Smuzhiyun 		truncate = 1;
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	error = xfs_qm_dqattach(ip);
1852*4882a593Smuzhiyun 	if (error)
1853*4882a593Smuzhiyun 		return;
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 	if (S_ISLNK(VFS_I(ip)->i_mode))
1856*4882a593Smuzhiyun 		error = xfs_inactive_symlink(ip);
1857*4882a593Smuzhiyun 	else if (truncate)
1858*4882a593Smuzhiyun 		error = xfs_inactive_truncate(ip);
1859*4882a593Smuzhiyun 	if (error)
1860*4882a593Smuzhiyun 		return;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	/*
1863*4882a593Smuzhiyun 	 * If there are attributes associated with the file then blow them away
1864*4882a593Smuzhiyun 	 * now.  The code calls a routine that recursively deconstructs the
1865*4882a593Smuzhiyun 	 * attribute fork. It also blows away the in-core attribute fork.
1866*4882a593Smuzhiyun 	 */
1867*4882a593Smuzhiyun 	if (XFS_IFORK_Q(ip)) {
1868*4882a593Smuzhiyun 		error = xfs_attr_inactive(ip);
1869*4882a593Smuzhiyun 		if (error)
1870*4882a593Smuzhiyun 			return;
1871*4882a593Smuzhiyun 	}
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	ASSERT(!ip->i_afp);
1874*4882a593Smuzhiyun 	ASSERT(ip->i_d.di_forkoff == 0);
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	/*
1877*4882a593Smuzhiyun 	 * Free the inode.
1878*4882a593Smuzhiyun 	 */
1879*4882a593Smuzhiyun 	error = xfs_inactive_ifree(ip);
1880*4882a593Smuzhiyun 	if (error)
1881*4882a593Smuzhiyun 		return;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	/*
1884*4882a593Smuzhiyun 	 * Release the dquots held by inode, if any.
1885*4882a593Smuzhiyun 	 */
1886*4882a593Smuzhiyun 	xfs_qm_dqdetach(ip);
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun /*
1890*4882a593Smuzhiyun  * In-Core Unlinked List Lookups
1891*4882a593Smuzhiyun  * =============================
1892*4882a593Smuzhiyun  *
1893*4882a593Smuzhiyun  * Every inode is supposed to be reachable from some other piece of metadata
1894*4882a593Smuzhiyun  * with the exception of the root directory.  Inodes with a connection to a
1895*4882a593Smuzhiyun  * file descriptor but not linked from anywhere in the on-disk directory tree
1896*4882a593Smuzhiyun  * are collectively known as unlinked inodes, though the filesystem itself
1897*4882a593Smuzhiyun  * maintains links to these inodes so that on-disk metadata are consistent.
1898*4882a593Smuzhiyun  *
1899*4882a593Smuzhiyun  * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
1900*4882a593Smuzhiyun  * header contains a number of buckets that point to an inode, and each inode
1901*4882a593Smuzhiyun  * record has a pointer to the next inode in the hash chain.  This
1902*4882a593Smuzhiyun  * singly-linked list causes scaling problems in the iunlink remove function
1903*4882a593Smuzhiyun  * because we must walk that list to find the inode that points to the inode
1904*4882a593Smuzhiyun  * being removed from the unlinked hash bucket list.
1905*4882a593Smuzhiyun  *
1906*4882a593Smuzhiyun  * What if we modelled the unlinked list as a collection of records capturing
1907*4882a593Smuzhiyun  * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
1908*4882a593Smuzhiyun  * have a fast way to look up unlinked list predecessors, which avoids the
1909*4882a593Smuzhiyun  * slow list walk.  That's exactly what we do here (in-core) with a per-AG
1910*4882a593Smuzhiyun  * rhashtable.
1911*4882a593Smuzhiyun  *
1912*4882a593Smuzhiyun  * Because this is a backref cache, we ignore operational failures since the
1913*4882a593Smuzhiyun  * iunlink code can fall back to the slow bucket walk.  The only errors that
1914*4882a593Smuzhiyun  * should bubble out are for obviously incorrect situations.
1915*4882a593Smuzhiyun  *
1916*4882a593Smuzhiyun  * All users of the backref cache MUST hold the AGI buffer lock to serialize
1917*4882a593Smuzhiyun  * access or have otherwise provided for concurrency control.
1918*4882a593Smuzhiyun  */
1919*4882a593Smuzhiyun 
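/*
 * Worked example of the backref records described above, using
 * hypothetical inode numbers.  Suppose bucket 0 holds the chain
 *
 *	agi_unlinked[0] -> 64 -> 128 -> 192 -> NULLAGINO
 *
 * The cache keeps one record per "X.next_unlinked = Y" edge, keyed
 * on Y:
 *
 *	{ .iu_agino =  64, .iu_next_unlinked = 128 }	key 128 gives 64
 *	{ .iu_agino = 128, .iu_next_unlinked = 192 }	key 192 gives 128
 *
 * Removing inode 192 then finds its predecessor 128 with a single
 * hash lookup instead of walking the bucket from its head.
 */
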
1920*4882a593Smuzhiyun /* Capture a "X.next_unlinked = Y" relationship. */
1921*4882a593Smuzhiyun struct xfs_iunlink {
1922*4882a593Smuzhiyun 	struct rhash_head	iu_rhash_head;
1923*4882a593Smuzhiyun 	xfs_agino_t		iu_agino;		/* X */
1924*4882a593Smuzhiyun 	xfs_agino_t		iu_next_unlinked;	/* Y */
1925*4882a593Smuzhiyun };
1926*4882a593Smuzhiyun 
1927*4882a593Smuzhiyun /* Unlinked list predecessor lookup hashtable construction */
1928*4882a593Smuzhiyun static int
1929*4882a593Smuzhiyun xfs_iunlink_obj_cmpfn(
1930*4882a593Smuzhiyun 	struct rhashtable_compare_arg	*arg,
1931*4882a593Smuzhiyun 	const void			*obj)
1932*4882a593Smuzhiyun {
1933*4882a593Smuzhiyun 	const xfs_agino_t		*key = arg->key;
1934*4882a593Smuzhiyun 	const struct xfs_iunlink	*iu = obj;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	if (iu->iu_next_unlinked != *key)
1937*4882a593Smuzhiyun 		return 1;
1938*4882a593Smuzhiyun 	return 0;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun 
1941*4882a593Smuzhiyun static const struct rhashtable_params xfs_iunlink_hash_params = {
1942*4882a593Smuzhiyun 	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
1943*4882a593Smuzhiyun 	.key_len		= sizeof(xfs_agino_t),
1944*4882a593Smuzhiyun 	.key_offset		= offsetof(struct xfs_iunlink,
1945*4882a593Smuzhiyun 					   iu_next_unlinked),
1946*4882a593Smuzhiyun 	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
1947*4882a593Smuzhiyun 	.automatic_shrinking	= true,
1948*4882a593Smuzhiyun 	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
1949*4882a593Smuzhiyun };
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun /*
1952*4882a593Smuzhiyun  * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
1953*4882a593Smuzhiyun  * relation is found.
1954*4882a593Smuzhiyun  */
1955*4882a593Smuzhiyun static xfs_agino_t
1956*4882a593Smuzhiyun xfs_iunlink_lookup_backref(
1957*4882a593Smuzhiyun 	struct xfs_perag	*pag,
1958*4882a593Smuzhiyun 	xfs_agino_t		agino)
1959*4882a593Smuzhiyun {
1960*4882a593Smuzhiyun 	struct xfs_iunlink	*iu;
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
1963*4882a593Smuzhiyun 			xfs_iunlink_hash_params);
1964*4882a593Smuzhiyun 	return iu ? iu->iu_agino : NULLAGINO;
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun /*
1968*4882a593Smuzhiyun  * Take ownership of an iunlink cache entry and insert it into the hash table.
1969*4882a593Smuzhiyun  * If successful, the entry will be owned by the cache; if not, it is freed.
1970*4882a593Smuzhiyun  * Either way, the caller does not own @iu after this call.
1971*4882a593Smuzhiyun  */
1972*4882a593Smuzhiyun static int
1973*4882a593Smuzhiyun xfs_iunlink_insert_backref(
1974*4882a593Smuzhiyun 	struct xfs_perag	*pag,
1975*4882a593Smuzhiyun 	struct xfs_iunlink	*iu)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	int			error;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
1980*4882a593Smuzhiyun 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
1981*4882a593Smuzhiyun 	/*
1982*4882a593Smuzhiyun 	 * Fail loudly if there already was an entry because that's a sign of
1983*4882a593Smuzhiyun 	 * corruption of in-memory data.  Also fail loudly if we see an error
1984*4882a593Smuzhiyun 	 * code we didn't anticipate from the rhashtable code.  Currently we
1985*4882a593Smuzhiyun 	 * only anticipate ENOMEM.
1986*4882a593Smuzhiyun 	 */
1987*4882a593Smuzhiyun 	if (error) {
1988*4882a593Smuzhiyun 		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
1989*4882a593Smuzhiyun 		kmem_free(iu);
1990*4882a593Smuzhiyun 	}
1991*4882a593Smuzhiyun 	/*
1992*4882a593Smuzhiyun 	 * Absorb any runtime errors that aren't a result of corruption because
1993*4882a593Smuzhiyun 	 * this is a cache and we can always fall back to bucket list scanning.
1994*4882a593Smuzhiyun 	 */
1995*4882a593Smuzhiyun 	if (error != 0 && error != -EEXIST)
1996*4882a593Smuzhiyun 		error = 0;
1997*4882a593Smuzhiyun 	return error;
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun /* Remember that @prev_agino.next_unlinked = @this_agino. */
2001*4882a593Smuzhiyun static int
2002*4882a593Smuzhiyun xfs_iunlink_add_backref(
2003*4882a593Smuzhiyun 	struct xfs_perag	*pag,
2004*4882a593Smuzhiyun 	xfs_agino_t		prev_agino,
2005*4882a593Smuzhiyun 	xfs_agino_t		this_agino)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun 	struct xfs_iunlink	*iu;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
2010*4882a593Smuzhiyun 		return 0;
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun 	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
2013*4882a593Smuzhiyun 	iu->iu_agino = prev_agino;
2014*4882a593Smuzhiyun 	iu->iu_next_unlinked = this_agino;
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 	return xfs_iunlink_insert_backref(pag, iu);
2017*4882a593Smuzhiyun }
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun /*
2020*4882a593Smuzhiyun  * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
2021*4882a593Smuzhiyun  * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
2022*4882a593Smuzhiyun  * wasn't any such entry then we don't bother.
2023*4882a593Smuzhiyun  */
2024*4882a593Smuzhiyun static int
2025*4882a593Smuzhiyun xfs_iunlink_change_backref(
2026*4882a593Smuzhiyun 	struct xfs_perag	*pag,
2027*4882a593Smuzhiyun 	xfs_agino_t		agino,
2028*4882a593Smuzhiyun 	xfs_agino_t		next_unlinked)
2029*4882a593Smuzhiyun {
2030*4882a593Smuzhiyun 	struct xfs_iunlink	*iu;
2031*4882a593Smuzhiyun 	int			error;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	/* Look up the old entry; if there wasn't one then exit. */
2034*4882a593Smuzhiyun 	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
2035*4882a593Smuzhiyun 			xfs_iunlink_hash_params);
2036*4882a593Smuzhiyun 	if (!iu)
2037*4882a593Smuzhiyun 		return 0;
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	/*
2040*4882a593Smuzhiyun 	 * Remove the entry.  This shouldn't ever return an error, but if we
2041*4882a593Smuzhiyun 	 * couldn't remove the old entry we don't want to add it again to the
2042*4882a593Smuzhiyun 	 * hash table, and if the entry disappeared on us then someone's
2043*4882a593Smuzhiyun 	 * violated the locking rules and we need to fail loudly.  Either way
2044*4882a593Smuzhiyun 	 * we cannot remove the inode because internal state is or would have
2045*4882a593Smuzhiyun 	 * been corrupt.
2046*4882a593Smuzhiyun 	 */
2047*4882a593Smuzhiyun 	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
2048*4882a593Smuzhiyun 			&iu->iu_rhash_head, xfs_iunlink_hash_params);
2049*4882a593Smuzhiyun 	if (error)
2050*4882a593Smuzhiyun 		return error;
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	/* If there is no new next entry just free our item and return. */
2053*4882a593Smuzhiyun 	if (next_unlinked == NULLAGINO) {
2054*4882a593Smuzhiyun 		kmem_free(iu);
2055*4882a593Smuzhiyun 		return 0;
2056*4882a593Smuzhiyun 	}
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	/* Update the entry and re-add it to the hash table. */
2059*4882a593Smuzhiyun 	iu->iu_next_unlinked = next_unlinked;
2060*4882a593Smuzhiyun 	return xfs_iunlink_insert_backref(pag, iu);
2061*4882a593Smuzhiyun }
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun /* Set up the in-core predecessor structures. */
2064*4882a593Smuzhiyun int
2065*4882a593Smuzhiyun xfs_iunlink_init(
2066*4882a593Smuzhiyun 	struct xfs_perag	*pag)
2067*4882a593Smuzhiyun {
2068*4882a593Smuzhiyun 	return rhashtable_init(&pag->pagi_unlinked_hash,
2069*4882a593Smuzhiyun 			&xfs_iunlink_hash_params);
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun /* Free the in-core predecessor structures. */
2073*4882a593Smuzhiyun static void
2074*4882a593Smuzhiyun xfs_iunlink_free_item(
2075*4882a593Smuzhiyun 	void			*ptr,
2076*4882a593Smuzhiyun 	void			*arg)
2077*4882a593Smuzhiyun {
2078*4882a593Smuzhiyun 	struct xfs_iunlink	*iu = ptr;
2079*4882a593Smuzhiyun 	bool			*freed_anything = arg;
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 	*freed_anything = true;
2082*4882a593Smuzhiyun 	kmem_free(iu);
2083*4882a593Smuzhiyun }
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun void
2086*4882a593Smuzhiyun xfs_iunlink_destroy(
2087*4882a593Smuzhiyun 	struct xfs_perag	*pag)
2088*4882a593Smuzhiyun {
2089*4882a593Smuzhiyun 	bool			freed_anything = false;
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
2092*4882a593Smuzhiyun 			xfs_iunlink_free_item, &freed_anything);
2093*4882a593Smuzhiyun 
2094*4882a593Smuzhiyun 	ASSERT(freed_anything == false || XFS_FORCED_SHUTDOWN(pag->pag_mount));
2095*4882a593Smuzhiyun }
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun /*
2098*4882a593Smuzhiyun  * Point the AGI unlinked bucket at an inode and log the results.  The caller
2099*4882a593Smuzhiyun  * is responsible for validating the old value.
2100*4882a593Smuzhiyun  */
2101*4882a593Smuzhiyun STATIC int
2102*4882a593Smuzhiyun xfs_iunlink_update_bucket(
2103*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2104*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
2105*4882a593Smuzhiyun 	struct xfs_buf		*agibp,
2106*4882a593Smuzhiyun 	unsigned int		bucket_index,
2107*4882a593Smuzhiyun 	xfs_agino_t		new_agino)
2108*4882a593Smuzhiyun {
2109*4882a593Smuzhiyun 	struct xfs_agi		*agi = agibp->b_addr;
2110*4882a593Smuzhiyun 	xfs_agino_t		old_value;
2111*4882a593Smuzhiyun 	int			offset;
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2116*4882a593Smuzhiyun 	trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
2117*4882a593Smuzhiyun 			old_value, new_agino);
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	/*
2120*4882a593Smuzhiyun 	 * We should never find the head of the list already set to the value
2121*4882a593Smuzhiyun 	 * passed in because either we're adding or removing ourselves from the
2122*4882a593Smuzhiyun 	 * head of the list.
2123*4882a593Smuzhiyun 	 */
2124*4882a593Smuzhiyun 	if (old_value == new_agino) {
2125*4882a593Smuzhiyun 		xfs_buf_mark_corrupt(agibp);
2126*4882a593Smuzhiyun 		return -EFSCORRUPTED;
2127*4882a593Smuzhiyun 	}
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
2130*4882a593Smuzhiyun 	offset = offsetof(struct xfs_agi, agi_unlinked) +
2131*4882a593Smuzhiyun 			(sizeof(xfs_agino_t) * bucket_index);
2132*4882a593Smuzhiyun 	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
2133*4882a593Smuzhiyun 	return 0;
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
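/*
 * A small sketch (not part of the original file) of the bucket math
 * used in xfs_iunlink() below: every AG inode hashes into one of the
 * XFS_AGI_UNLINKED_BUCKETS (64) AGI slots, and only the four bytes of
 * the modified slot need to be logged, as done above.  @ino and the
 * helper name are hypothetical.
 */
#if 0
static void
example_bucket_math(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	xfs_agino_t	agino = XFS_INO_TO_AGINO(mp, ino);
	/* Inodes with equal agino % 64 share one unlinked chain. */
	unsigned int	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	/* Byte offset of this slot within the AGI, as logged above. */
	int		offset = offsetof(struct xfs_agi, agi_unlinked) +
				 sizeof(xfs_agino_t) * bucket_index;

	(void)offset;	/* illustration only */
}
#endif
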
2136*4882a593Smuzhiyun /* Set an on-disk inode's next_unlinked pointer. */
2137*4882a593Smuzhiyun STATIC void
2138*4882a593Smuzhiyun xfs_iunlink_update_dinode(
2139*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2140*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
2141*4882a593Smuzhiyun 	xfs_agino_t		agino,
2142*4882a593Smuzhiyun 	struct xfs_buf		*ibp,
2143*4882a593Smuzhiyun 	struct xfs_dinode	*dip,
2144*4882a593Smuzhiyun 	struct xfs_imap		*imap,
2145*4882a593Smuzhiyun 	xfs_agino_t		next_agino)
2146*4882a593Smuzhiyun {
2147*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2148*4882a593Smuzhiyun 	int			offset;
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	trace_xfs_iunlink_update_dinode(mp, agno, agino,
2153*4882a593Smuzhiyun 			be32_to_cpu(dip->di_next_unlinked), next_agino);
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	dip->di_next_unlinked = cpu_to_be32(next_agino);
2156*4882a593Smuzhiyun 	offset = imap->im_boffset +
2157*4882a593Smuzhiyun 			offsetof(struct xfs_dinode, di_next_unlinked);
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun 	/* need to recalc the inode CRC if appropriate */
2160*4882a593Smuzhiyun 	xfs_dinode_calc_crc(mp, dip);
2161*4882a593Smuzhiyun 	xfs_trans_inode_buf(tp, ibp);
2162*4882a593Smuzhiyun 	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun /* Set an in-core inode's unlinked pointer and return the old value. */
2166*4882a593Smuzhiyun STATIC int
2167*4882a593Smuzhiyun xfs_iunlink_update_inode(
2168*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2169*4882a593Smuzhiyun 	struct xfs_inode	*ip,
2170*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
2171*4882a593Smuzhiyun 	xfs_agino_t		next_agino,
2172*4882a593Smuzhiyun 	xfs_agino_t		*old_next_agino)
2173*4882a593Smuzhiyun {
2174*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2175*4882a593Smuzhiyun 	struct xfs_dinode	*dip;
2176*4882a593Smuzhiyun 	struct xfs_buf		*ibp;
2177*4882a593Smuzhiyun 	xfs_agino_t		old_value;
2178*4882a593Smuzhiyun 	int			error;
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0);
2183*4882a593Smuzhiyun 	if (error)
2184*4882a593Smuzhiyun 		return error;
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	/* Make sure the old pointer isn't garbage. */
2187*4882a593Smuzhiyun 	old_value = be32_to_cpu(dip->di_next_unlinked);
2188*4882a593Smuzhiyun 	if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2189*4882a593Smuzhiyun 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
2190*4882a593Smuzhiyun 				sizeof(*dip), __this_address);
2191*4882a593Smuzhiyun 		error = -EFSCORRUPTED;
2192*4882a593Smuzhiyun 		goto out;
2193*4882a593Smuzhiyun 	}
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	/*
2196*4882a593Smuzhiyun 	 * Since we're updating a linked list, we should never find that the
2197*4882a593Smuzhiyun 	 * current pointer is the same as the new value, unless we're
2198*4882a593Smuzhiyun 	 * terminating the list.
2199*4882a593Smuzhiyun 	 */
2200*4882a593Smuzhiyun 	*old_next_agino = old_value;
2201*4882a593Smuzhiyun 	if (old_value == next_agino) {
2202*4882a593Smuzhiyun 		if (next_agino != NULLAGINO) {
2203*4882a593Smuzhiyun 			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
2204*4882a593Smuzhiyun 					dip, sizeof(*dip), __this_address);
2205*4882a593Smuzhiyun 			error = -EFSCORRUPTED;
2206*4882a593Smuzhiyun 		}
2207*4882a593Smuzhiyun 		goto out;
2208*4882a593Smuzhiyun 	}
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	/* Ok, update the new pointer. */
2211*4882a593Smuzhiyun 	xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2212*4882a593Smuzhiyun 			ibp, dip, &ip->i_imap, next_agino);
2213*4882a593Smuzhiyun 	return 0;
2214*4882a593Smuzhiyun out:
2215*4882a593Smuzhiyun 	xfs_trans_brelse(tp, ibp);
2216*4882a593Smuzhiyun 	return error;
2217*4882a593Smuzhiyun }
2218*4882a593Smuzhiyun 
2219*4882a593Smuzhiyun /*
2220*4882a593Smuzhiyun  * This is called when the inode's link count has gone to 0 or we are creating
2221*4882a593Smuzhiyun  * a tmpfile via O_TMPFILE.  The inode @ip must have nlink == 0.
2222*4882a593Smuzhiyun  *
2223*4882a593Smuzhiyun  * We place the on-disk inode on a list in the AGI.  It will be pulled from this
2224*4882a593Smuzhiyun  * list when the inode is freed.
2225*4882a593Smuzhiyun  */
2226*4882a593Smuzhiyun STATIC int
2227*4882a593Smuzhiyun xfs_iunlink(
2228*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2229*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2230*4882a593Smuzhiyun {
2231*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2232*4882a593Smuzhiyun 	struct xfs_agi		*agi;
2233*4882a593Smuzhiyun 	struct xfs_buf		*agibp;
2234*4882a593Smuzhiyun 	xfs_agino_t		next_agino;
2235*4882a593Smuzhiyun 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2236*4882a593Smuzhiyun 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2237*4882a593Smuzhiyun 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2238*4882a593Smuzhiyun 	int			error;
2239*4882a593Smuzhiyun 
2240*4882a593Smuzhiyun 	ASSERT(VFS_I(ip)->i_nlink == 0);
2241*4882a593Smuzhiyun 	ASSERT(VFS_I(ip)->i_mode != 0);
2242*4882a593Smuzhiyun 	trace_xfs_iunlink(ip);
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2245*4882a593Smuzhiyun 	error = xfs_read_agi(mp, tp, agno, &agibp);
2246*4882a593Smuzhiyun 	if (error)
2247*4882a593Smuzhiyun 		return error;
2248*4882a593Smuzhiyun 	agi = agibp->b_addr;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	/*
2251*4882a593Smuzhiyun 	 * Get the index into the agi hash table for the list this inode will
2252*4882a593Smuzhiyun 	 * go on.  Make sure the pointer isn't garbage and that this inode
2253*4882a593Smuzhiyun 	 * isn't already on the list.
2254*4882a593Smuzhiyun 	 */
2255*4882a593Smuzhiyun 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2256*4882a593Smuzhiyun 	if (next_agino == agino ||
2257*4882a593Smuzhiyun 	    !xfs_verify_agino_or_null(mp, agno, next_agino)) {
2258*4882a593Smuzhiyun 		xfs_buf_mark_corrupt(agibp);
2259*4882a593Smuzhiyun 		return -EFSCORRUPTED;
2260*4882a593Smuzhiyun 	}
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	if (next_agino != NULLAGINO) {
2263*4882a593Smuzhiyun 		xfs_agino_t		old_agino;
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 		/*
2266*4882a593Smuzhiyun 		 * There is already another inode in the bucket, so point this
2267*4882a593Smuzhiyun 		 * inode to the current head of the list.
2268*4882a593Smuzhiyun 		 */
2269*4882a593Smuzhiyun 		error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2270*4882a593Smuzhiyun 				&old_agino);
2271*4882a593Smuzhiyun 		if (error)
2272*4882a593Smuzhiyun 			return error;
2273*4882a593Smuzhiyun 		ASSERT(old_agino == NULLAGINO);
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 		/*
2276*4882a593Smuzhiyun 		 * agino has been unlinked, add a backref from the next inode
2277*4882a593Smuzhiyun 		 * back to agino.
2278*4882a593Smuzhiyun 		 */
2279*4882a593Smuzhiyun 		error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino);
2280*4882a593Smuzhiyun 		if (error)
2281*4882a593Smuzhiyun 			return error;
2282*4882a593Smuzhiyun 	}
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	/* Point the head of the list at this inode. */
2285*4882a593Smuzhiyun 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
2286*4882a593Smuzhiyun }
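
/*
 * Illustrative sketch (not part of the build): xfs_iunlink() is a push
 * onto a singly linked list whose head lives in an AGI bucket.  Starting
 * from
 *
 *	bucket -> B -> C -> NULLAGINO
 *
 * unlinking inode A first points A at the old head B via
 * xfs_iunlink_update_inode(), then points the bucket at A via
 * xfs_iunlink_update_bucket():
 *
 *	bucket -> A -> B -> C -> NULLAGINO
 *
 * The backref added above records that A now precedes B, so a later
 * removal of B can find its predecessor without walking from the head.
 */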
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun /* Return the imap, dinode pointer, and buffer for an inode. */
2289*4882a593Smuzhiyun STATIC int
2290*4882a593Smuzhiyun xfs_iunlink_map_ino(
2291*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2292*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
2293*4882a593Smuzhiyun 	xfs_agino_t		agino,
2294*4882a593Smuzhiyun 	struct xfs_imap		*imap,
2295*4882a593Smuzhiyun 	struct xfs_dinode	**dipp,
2296*4882a593Smuzhiyun 	struct xfs_buf		**bpp)
2297*4882a593Smuzhiyun {
2298*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2299*4882a593Smuzhiyun 	int			error;
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 	imap->im_blkno = 0;
2302*4882a593Smuzhiyun 	error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2303*4882a593Smuzhiyun 	if (error) {
2304*4882a593Smuzhiyun 		xfs_warn(mp, "%s: xfs_imap returned error %d.",
2305*4882a593Smuzhiyun 				__func__, error);
2306*4882a593Smuzhiyun 		return error;
2307*4882a593Smuzhiyun 	}
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0);
2310*4882a593Smuzhiyun 	if (error) {
2311*4882a593Smuzhiyun 		xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2312*4882a593Smuzhiyun 				__func__, error);
2313*4882a593Smuzhiyun 		return error;
2314*4882a593Smuzhiyun 	}
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	return 0;
2317*4882a593Smuzhiyun }
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun /*
2320*4882a593Smuzhiyun  * Walk the unlinked chain from @head_agino until we find the inode that
2321*4882a593Smuzhiyun  * points to @target_agino.  Return the inode number, map, dinode pointer,
2322*4882a593Smuzhiyun  * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2323*4882a593Smuzhiyun  *
2324*4882a593Smuzhiyun  * @tp, @pag, @head_agino, and @target_agino are input parameters.
2325*4882a593Smuzhiyun  * @agino, @imap, @dipp, and @bpp are all output parameters.
2326*4882a593Smuzhiyun  *
2327*4882a593Smuzhiyun  * Do not call this function if @target_agino is the head of the list.
2328*4882a593Smuzhiyun  */
2329*4882a593Smuzhiyun STATIC int
2330*4882a593Smuzhiyun xfs_iunlink_map_prev(
2331*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2332*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
2333*4882a593Smuzhiyun 	xfs_agino_t		head_agino,
2334*4882a593Smuzhiyun 	xfs_agino_t		target_agino,
2335*4882a593Smuzhiyun 	xfs_agino_t		*agino,
2336*4882a593Smuzhiyun 	struct xfs_imap		*imap,
2337*4882a593Smuzhiyun 	struct xfs_dinode	**dipp,
2338*4882a593Smuzhiyun 	struct xfs_buf		**bpp,
2339*4882a593Smuzhiyun 	struct xfs_perag	*pag)
2340*4882a593Smuzhiyun {
2341*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2342*4882a593Smuzhiyun 	xfs_agino_t		next_agino;
2343*4882a593Smuzhiyun 	int			error;
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 	ASSERT(head_agino != target_agino);
2346*4882a593Smuzhiyun 	*bpp = NULL;
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	/* See if our backref cache can find it faster. */
2349*4882a593Smuzhiyun 	*agino = xfs_iunlink_lookup_backref(pag, target_agino);
2350*4882a593Smuzhiyun 	if (*agino != NULLAGINO) {
2351*4882a593Smuzhiyun 		error = xfs_iunlink_map_ino(tp, agno, *agino, imap, dipp, bpp);
2352*4882a593Smuzhiyun 		if (error)
2353*4882a593Smuzhiyun 			return error;
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 		if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2356*4882a593Smuzhiyun 			return 0;
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 		/*
2359*4882a593Smuzhiyun 		 * If we get here the cache contents were corrupt, so drop the
2360*4882a593Smuzhiyun 		 * buffer and fall back to walking the bucket list.
2361*4882a593Smuzhiyun 		 */
2362*4882a593Smuzhiyun 		xfs_trans_brelse(tp, *bpp);
2363*4882a593Smuzhiyun 		*bpp = NULL;
2364*4882a593Smuzhiyun 		WARN_ON_ONCE(1);
2365*4882a593Smuzhiyun 	}
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	trace_xfs_iunlink_map_prev_fallback(mp, agno);
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 	/* Otherwise, walk the entire bucket until we find it. */
2370*4882a593Smuzhiyun 	next_agino = head_agino;
2371*4882a593Smuzhiyun 	while (next_agino != target_agino) {
2372*4882a593Smuzhiyun 		xfs_agino_t	unlinked_agino;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 		if (*bpp)
2375*4882a593Smuzhiyun 			xfs_trans_brelse(tp, *bpp);
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 		*agino = next_agino;
2378*4882a593Smuzhiyun 		error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2379*4882a593Smuzhiyun 				bpp);
2380*4882a593Smuzhiyun 		if (error)
2381*4882a593Smuzhiyun 			return error;
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 		unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2384*4882a593Smuzhiyun 		/*
2385*4882a593Smuzhiyun 		 * Make sure this pointer is valid and isn't an obvious
2386*4882a593Smuzhiyun 		 * infinite loop.
2387*4882a593Smuzhiyun 		 */
2388*4882a593Smuzhiyun 		if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2389*4882a593Smuzhiyun 		    next_agino == unlinked_agino) {
2390*4882a593Smuzhiyun 			XFS_CORRUPTION_ERROR(__func__,
2391*4882a593Smuzhiyun 					XFS_ERRLEVEL_LOW, mp,
2392*4882a593Smuzhiyun 					*dipp, sizeof(**dipp));
2393*4882a593Smuzhiyun 			error = -EFSCORRUPTED;
2394*4882a593Smuzhiyun 			return error;
2395*4882a593Smuzhiyun 		}
2396*4882a593Smuzhiyun 		next_agino = unlinked_agino;
2397*4882a593Smuzhiyun 	}
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	return 0;
2400*4882a593Smuzhiyun }
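
/*
 * Illustrative sketch (not part of the build): without the backref cache,
 * finding the predecessor of C in
 *
 *	head -> A -> B -> C -> NULLAGINO
 *
 * means mapping and reading A's cluster buffer, releasing it, then mapping
 * B, whose di_next_unlinked equals C; the walk then stops with *agino = B
 * and B's buffer still held.  A valid cache hit at the top of the function
 * jumps straight to B and skips the walk entirely.
 */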
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun /*
2403*4882a593Smuzhiyun  * Pull the on-disk inode from the AGI unlinked list.
2404*4882a593Smuzhiyun  */
2405*4882a593Smuzhiyun STATIC int
2406*4882a593Smuzhiyun xfs_iunlink_remove(
2407*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2408*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2409*4882a593Smuzhiyun {
2410*4882a593Smuzhiyun 	struct xfs_mount	*mp = tp->t_mountp;
2411*4882a593Smuzhiyun 	struct xfs_agi		*agi;
2412*4882a593Smuzhiyun 	struct xfs_buf		*agibp;
2413*4882a593Smuzhiyun 	struct xfs_buf		*last_ibp;
2414*4882a593Smuzhiyun 	struct xfs_dinode	*last_dip = NULL;
2415*4882a593Smuzhiyun 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2416*4882a593Smuzhiyun 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2417*4882a593Smuzhiyun 	xfs_agino_t		next_agino;
2418*4882a593Smuzhiyun 	xfs_agino_t		head_agino;
2419*4882a593Smuzhiyun 	short			bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2420*4882a593Smuzhiyun 	int			error;
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	trace_xfs_iunlink_remove(ip);
2423*4882a593Smuzhiyun 
2424*4882a593Smuzhiyun 	/* Get the agi buffer first.  It ensures lock ordering on the list. */
2425*4882a593Smuzhiyun 	error = xfs_read_agi(mp, tp, agno, &agibp);
2426*4882a593Smuzhiyun 	if (error)
2427*4882a593Smuzhiyun 		return error;
2428*4882a593Smuzhiyun 	agi = agibp->b_addr;
2429*4882a593Smuzhiyun 
2430*4882a593Smuzhiyun 	/*
2431*4882a593Smuzhiyun 	 * Get the index into the agi hash table for the list this inode is
2432*4882a593Smuzhiyun 	 * on.  Make sure the head pointer isn't garbage.
2433*4882a593Smuzhiyun 	 */
2434*4882a593Smuzhiyun 	head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2435*4882a593Smuzhiyun 	if (!xfs_verify_agino(mp, agno, head_agino)) {
2436*4882a593Smuzhiyun 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2437*4882a593Smuzhiyun 				agi, sizeof(*agi));
2438*4882a593Smuzhiyun 		return -EFSCORRUPTED;
2439*4882a593Smuzhiyun 	}
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	/*
2442*4882a593Smuzhiyun 	 * Set our inode's next_unlinked pointer to NULLAGINO and then return
2443*4882a593Smuzhiyun 	 * the old pointer value so that we can update whatever was previous
2444*4882a593Smuzhiyun 	 * to us in the list to point to whatever was next in the list.
2445*4882a593Smuzhiyun 	 */
2446*4882a593Smuzhiyun 	error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO, &next_agino);
2447*4882a593Smuzhiyun 	if (error)
2448*4882a593Smuzhiyun 		return error;
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 	/*
2451*4882a593Smuzhiyun 	 * If there was a backref pointing from the next inode back to this
2452*4882a593Smuzhiyun 	 * one, remove it because we've removed this inode from the list.
2453*4882a593Smuzhiyun 	 *
2454*4882a593Smuzhiyun 	 * Later, if this inode was in the middle of the list we'll update
2455*4882a593Smuzhiyun 	 * this inode's backref to point from the next inode.
2456*4882a593Smuzhiyun 	 */
2457*4882a593Smuzhiyun 	if (next_agino != NULLAGINO) {
2458*4882a593Smuzhiyun 		error = xfs_iunlink_change_backref(agibp->b_pag, next_agino,
2459*4882a593Smuzhiyun 				NULLAGINO);
2460*4882a593Smuzhiyun 		if (error)
2461*4882a593Smuzhiyun 			return error;
2462*4882a593Smuzhiyun 	}
2463*4882a593Smuzhiyun 
2464*4882a593Smuzhiyun 	if (head_agino != agino) {
2465*4882a593Smuzhiyun 		struct xfs_imap	imap;
2466*4882a593Smuzhiyun 		xfs_agino_t	prev_agino;
2467*4882a593Smuzhiyun 
2468*4882a593Smuzhiyun 		/* We need to search the list for the inode being freed. */
2469*4882a593Smuzhiyun 		error = xfs_iunlink_map_prev(tp, agno, head_agino, agino,
2470*4882a593Smuzhiyun 				&prev_agino, &imap, &last_dip, &last_ibp,
2471*4882a593Smuzhiyun 				agibp->b_pag);
2472*4882a593Smuzhiyun 		if (error)
2473*4882a593Smuzhiyun 			return error;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 		/* Point the previous inode on the list to the next inode. */
2476*4882a593Smuzhiyun 		xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2477*4882a593Smuzhiyun 				last_dip, &imap, next_agino);
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 		/*
2480*4882a593Smuzhiyun 		 * Now we deal with the backref for this inode.  If this inode
2481*4882a593Smuzhiyun 		 * pointed at a real inode, change the backref that pointed to
2482*4882a593Smuzhiyun 		 * us to point to our old next.  If this inode was the end of
2483*4882a593Smuzhiyun 		 * the list, delete the backref that pointed to us.  Note that
2484*4882a593Smuzhiyun 		 * change_backref takes care of deleting the backref if
2485*4882a593Smuzhiyun 		 * next_agino is NULLAGINO.
2486*4882a593Smuzhiyun 		 */
2487*4882a593Smuzhiyun 		return xfs_iunlink_change_backref(agibp->b_pag, agino,
2488*4882a593Smuzhiyun 				next_agino);
2489*4882a593Smuzhiyun 	}
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	/* Point the head of the list to the next unlinked inode. */
2492*4882a593Smuzhiyun 	return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2493*4882a593Smuzhiyun 			next_agino);
2494*4882a593Smuzhiyun }
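
/*
 * Illustrative sketch (not part of the build): removal has two shapes.
 * Removing the list head is a bucket update:
 *
 *	bucket -> B -> C    becomes    bucket -> C
 *
 * while removing from the middle splices the predecessor across us:
 *
 *	bucket -> A -> B -> C    becomes    bucket -> A -> C
 *
 * In both cases the removed inode's own di_next_unlinked has already been
 * set to NULLAGINO by the xfs_iunlink_update_inode() call above.
 */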
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun /*
2497*4882a593Smuzhiyun  * Look up the inode number specified and, if it is not already marked
2498*4882a593Smuzhiyun  * XFS_ISTALE, mark it stale.  We should only find clean inodes in this
2499*4882a593Smuzhiyun  * lookup that aren't already stale.
2500*4882a593Smuzhiyun  */
2501*4882a593Smuzhiyun static void
2502*4882a593Smuzhiyun xfs_ifree_mark_inode_stale(
2503*4882a593Smuzhiyun 	struct xfs_buf		*bp,
2504*4882a593Smuzhiyun 	struct xfs_inode	*free_ip,
2505*4882a593Smuzhiyun 	xfs_ino_t		inum)
2506*4882a593Smuzhiyun {
2507*4882a593Smuzhiyun 	struct xfs_mount	*mp = bp->b_mount;
2508*4882a593Smuzhiyun 	struct xfs_perag	*pag = bp->b_pag;
2509*4882a593Smuzhiyun 	struct xfs_inode_log_item *iip;
2510*4882a593Smuzhiyun 	struct xfs_inode	*ip;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun retry:
2513*4882a593Smuzhiyun 	rcu_read_lock();
2514*4882a593Smuzhiyun 	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2515*4882a593Smuzhiyun 
2516*4882a593Smuzhiyun 	/* Inode not in memory, nothing to do */
2517*4882a593Smuzhiyun 	if (!ip) {
2518*4882a593Smuzhiyun 		rcu_read_unlock();
2519*4882a593Smuzhiyun 		return;
2520*4882a593Smuzhiyun 	}
2521*4882a593Smuzhiyun 
2522*4882a593Smuzhiyun 	/*
2523*4882a593Smuzhiyun 	 * Because this is an RCU protected lookup, we could find a recently
2524*4882a593Smuzhiyun 	 * freed or even reallocated inode during the lookup. We need to check
2525*4882a593Smuzhiyun 	 * under the i_flags_lock for a valid inode here. Skip it if it is not
2526*4882a593Smuzhiyun 	 * valid, is the wrong inode, or is stale.
2527*4882a593Smuzhiyun 	 */
2528*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
2529*4882a593Smuzhiyun 	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2530*4882a593Smuzhiyun 		goto out_iflags_unlock;
2531*4882a593Smuzhiyun 
2532*4882a593Smuzhiyun 	/*
2533*4882a593Smuzhiyun 	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2534*4882a593Smuzhiyun 	 * other inodes that we did not find in the list attached to the buffer
2535*4882a593Smuzhiyun 	 * and are not already marked stale. If we can't lock it, back off and
2536*4882a593Smuzhiyun 	 * retry.
2537*4882a593Smuzhiyun 	 */
2538*4882a593Smuzhiyun 	if (ip != free_ip) {
2539*4882a593Smuzhiyun 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2540*4882a593Smuzhiyun 			spin_unlock(&ip->i_flags_lock);
2541*4882a593Smuzhiyun 			rcu_read_unlock();
2542*4882a593Smuzhiyun 			delay(1);
2543*4882a593Smuzhiyun 			goto retry;
2544*4882a593Smuzhiyun 		}
2545*4882a593Smuzhiyun 	}
2546*4882a593Smuzhiyun 	ip->i_flags |= XFS_ISTALE;
2547*4882a593Smuzhiyun 
2548*4882a593Smuzhiyun 	/*
2549*4882a593Smuzhiyun 	 * If the inode is flushing, it is already attached to the buffer.  All
2550*4882a593Smuzhiyun 	 * we need to do here is mark the inode stale so buffer IO completion
2551*4882a593Smuzhiyun 	 * will remove it from the AIL.
2552*4882a593Smuzhiyun 	 */
2553*4882a593Smuzhiyun 	iip = ip->i_itemp;
2554*4882a593Smuzhiyun 	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2555*4882a593Smuzhiyun 		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2556*4882a593Smuzhiyun 		ASSERT(iip->ili_last_fields);
2557*4882a593Smuzhiyun 		goto out_iunlock;
2558*4882a593Smuzhiyun 	}
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	/*
2561*4882a593Smuzhiyun 	 * Inodes not attached to the buffer can be released immediately.
2562*4882a593Smuzhiyun 	 * Everything else has to go through xfs_iflush_abort() on journal
2563*4882a593Smuzhiyun 	 * commit as the flock synchronises removal of the inode from the
2564*4882a593Smuzhiyun 	 * cluster buffer against inode reclaim.
2565*4882a593Smuzhiyun 	 */
2566*4882a593Smuzhiyun 	if (!iip || list_empty(&iip->ili_item.li_bio_list))
2567*4882a593Smuzhiyun 		goto out_iunlock;
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	__xfs_iflags_set(ip, XFS_IFLUSHING);
2570*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
2571*4882a593Smuzhiyun 	rcu_read_unlock();
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 	/* we have a dirty inode in memory that has not yet been flushed. */
2574*4882a593Smuzhiyun 	spin_lock(&iip->ili_lock);
2575*4882a593Smuzhiyun 	iip->ili_last_fields = iip->ili_fields;
2576*4882a593Smuzhiyun 	iip->ili_fields = 0;
2577*4882a593Smuzhiyun 	iip->ili_fsync_fields = 0;
2578*4882a593Smuzhiyun 	spin_unlock(&iip->ili_lock);
2579*4882a593Smuzhiyun 	ASSERT(iip->ili_last_fields);
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	if (ip != free_ip)
2582*4882a593Smuzhiyun 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2583*4882a593Smuzhiyun 	return;
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun out_iunlock:
2586*4882a593Smuzhiyun 	if (ip != free_ip)
2587*4882a593Smuzhiyun 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
2588*4882a593Smuzhiyun out_iflags_unlock:
2589*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
2590*4882a593Smuzhiyun 	rcu_read_unlock();
2591*4882a593Smuzhiyun }
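
/*
 * Illustrative sketch (not part of the build): the function above uses the
 * standard RCU lookup/revalidate pattern.  The hypothetical reduction
 * below shows the core of it; only the re-check of ip->i_ino under
 * i_flags_lock makes the RCU lookup result trustworthy.
 */
#if 0
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
	if (ip) {
		spin_lock(&ip->i_flags_lock);
		if (ip->i_ino == inum && !__xfs_iflags_test(ip, XFS_ISTALE)) {
			/* act on ip while i_flags_lock pins its identity */
		}
		spin_unlock(&ip->i_flags_lock);
	}
	rcu_read_unlock();
#endif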
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun /*
2594*4882a593Smuzhiyun  * A big issue when freeing the inode cluster is that we _cannot_ skip any
2595*4882a593Smuzhiyun  * inodes that are in memory - they all must be marked stale and attached to
2596*4882a593Smuzhiyun  * the cluster buffer.
2597*4882a593Smuzhiyun  */
2598*4882a593Smuzhiyun STATIC int
2599*4882a593Smuzhiyun xfs_ifree_cluster(
2600*4882a593Smuzhiyun 	struct xfs_inode	*free_ip,
2601*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2602*4882a593Smuzhiyun 	struct xfs_icluster	*xic)
2603*4882a593Smuzhiyun {
2604*4882a593Smuzhiyun 	struct xfs_mount	*mp = free_ip->i_mount;
2605*4882a593Smuzhiyun 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
2606*4882a593Smuzhiyun 	struct xfs_buf		*bp;
2607*4882a593Smuzhiyun 	xfs_daddr_t		blkno;
2608*4882a593Smuzhiyun 	xfs_ino_t		inum = xic->first_ino;
2609*4882a593Smuzhiyun 	int			nbufs;
2610*4882a593Smuzhiyun 	int			i, j;
2611*4882a593Smuzhiyun 	int			ioffset;
2612*4882a593Smuzhiyun 	int			error;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2617*4882a593Smuzhiyun 		/*
2618*4882a593Smuzhiyun 		 * The allocation bitmap tells us which inodes of the chunk were
2619*4882a593Smuzhiyun 		 * physically allocated. Skip the cluster if an inode falls into
2620*4882a593Smuzhiyun 		 * a sparse region.
2621*4882a593Smuzhiyun 		 */
2622*4882a593Smuzhiyun 		ioffset = inum - xic->first_ino;
2623*4882a593Smuzhiyun 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2624*4882a593Smuzhiyun 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2625*4882a593Smuzhiyun 			continue;
2626*4882a593Smuzhiyun 		}
2627*4882a593Smuzhiyun 
2628*4882a593Smuzhiyun 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2629*4882a593Smuzhiyun 					 XFS_INO_TO_AGBNO(mp, inum));
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 		/*
2632*4882a593Smuzhiyun 		 * We obtain and lock the backing buffer first in the process
2633*4882a593Smuzhiyun 		 * here to ensure dirty inodes attached to the buffer remain in
2634*4882a593Smuzhiyun 		 * the flushing state while we mark them stale.
2635*4882a593Smuzhiyun 		 *
2636*4882a593Smuzhiyun 		 * If we scan the in-memory inodes first, then buffer IO can
2637*4882a593Smuzhiyun 		 * complete before we get a lock on it, and hence we may fail
2638*4882a593Smuzhiyun 		 * to mark all the active inodes on the buffer stale.
2639*4882a593Smuzhiyun 		 */
2640*4882a593Smuzhiyun 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2641*4882a593Smuzhiyun 				mp->m_bsize * igeo->blocks_per_cluster,
2642*4882a593Smuzhiyun 				XBF_UNMAPPED, &bp);
2643*4882a593Smuzhiyun 		if (error)
2644*4882a593Smuzhiyun 			return error;
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun 		/*
2647*4882a593Smuzhiyun 		 * This buffer may not have been correctly initialised as we
2648*4882a593Smuzhiyun 		 * didn't read it from disk. That's not important because we are
2649*4882a593Smuzhiyun 		 * only using it to mark the buffer as stale in the log, and to
2650*4882a593Smuzhiyun 		 * attach stale cached inodes on it. That means it will never be
2651*4882a593Smuzhiyun 		 * dispatched for IO. If it is, we want to know about it, and we
2652*4882a593Smuzhiyun 		 * want it to fail. We can achieve this by adding a write
2653*4882a593Smuzhiyun 		 * verifier to the buffer.
2654*4882a593Smuzhiyun 		 */
2655*4882a593Smuzhiyun 		bp->b_ops = &xfs_inode_buf_ops;
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 		/*
2658*4882a593Smuzhiyun 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2659*4882a593Smuzhiyun 		 * too. This requires lookups, and will skip inodes that we've
2660*4882a593Smuzhiyun 		 * already marked XFS_ISTALE.
2661*4882a593Smuzhiyun 		 */
2662*4882a593Smuzhiyun 		for (i = 0; i < igeo->inodes_per_cluster; i++)
2663*4882a593Smuzhiyun 			xfs_ifree_mark_inode_stale(bp, free_ip, inum + i);
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 		xfs_trans_stale_inode_buf(tp, bp);
2666*4882a593Smuzhiyun 		xfs_trans_binval(tp, bp);
2667*4882a593Smuzhiyun 	}
2668*4882a593Smuzhiyun 	return 0;
2669*4882a593Smuzhiyun }
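
/*
 * Worked example (hypothetical geometry, shown for illustration): with
 * igeo->ialloc_blks = 8, igeo->blocks_per_cluster = 2 and
 * igeo->inodes_per_cluster = 16, the loop above issues nbufs = 8 / 2 = 4
 * cluster buffers and advances inum by 16 on each pass, so every inode of
 * the 64-inode chunk is visited exactly once.
 */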
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun /*
2672*4882a593Smuzhiyun  * This is called to return an inode to the inode free list.  The inode should
2673*4882a593Smuzhiyun  * already be truncated to 0 length and have no pages associated with it.  This
2674*4882a593Smuzhiyun  * routine also assumes that the inode is already a part of the transaction.
2675*4882a593Smuzhiyun  *
2676*4882a593Smuzhiyun  * The on-disk copy of the inode will have been added to the list of unlinked
2677*4882a593Smuzhiyun  * inodes in the AGI. We need to remove the inode from that list atomically with
2678*4882a593Smuzhiyun  * respect to freeing it here.
2679*4882a593Smuzhiyun  */
2680*4882a593Smuzhiyun int
2681*4882a593Smuzhiyun xfs_ifree(
2682*4882a593Smuzhiyun 	struct xfs_trans	*tp,
2683*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2684*4882a593Smuzhiyun {
2685*4882a593Smuzhiyun 	int			error;
2686*4882a593Smuzhiyun 	struct xfs_icluster	xic = { 0 };
2687*4882a593Smuzhiyun 	struct xfs_inode_log_item *iip = ip->i_itemp;
2688*4882a593Smuzhiyun 
2689*4882a593Smuzhiyun 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2690*4882a593Smuzhiyun 	ASSERT(VFS_I(ip)->i_nlink == 0);
2691*4882a593Smuzhiyun 	ASSERT(ip->i_df.if_nextents == 0);
2692*4882a593Smuzhiyun 	ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2693*4882a593Smuzhiyun 	ASSERT(ip->i_d.di_nblocks == 0);
2694*4882a593Smuzhiyun 
2695*4882a593Smuzhiyun 	/*
2696*4882a593Smuzhiyun 	 * Free the inode first so that we guarantee that the AGI lock is going
2697*4882a593Smuzhiyun 	 * to be taken before we remove the inode from the unlinked list. This
2698*4882a593Smuzhiyun 	 * makes the AGI lock -> unlinked list modification order the same as
2699*4882a593Smuzhiyun 	 * used in O_TMPFILE creation.
2700*4882a593Smuzhiyun 	 */
2701*4882a593Smuzhiyun 	error = xfs_difree(tp, ip->i_ino, &xic);
2702*4882a593Smuzhiyun 	if (error)
2703*4882a593Smuzhiyun 		return error;
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	error = xfs_iunlink_remove(tp, ip);
2706*4882a593Smuzhiyun 	if (error)
2707*4882a593Smuzhiyun 		return error;
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	/*
2710*4882a593Smuzhiyun 	 * Free any local-format data sitting around before we reset the
2711*4882a593Smuzhiyun 	 * data fork to extents format.  Note that the attr fork data has
2712*4882a593Smuzhiyun 	 * already been freed by xfs_attr_inactive.
2713*4882a593Smuzhiyun 	 */
2714*4882a593Smuzhiyun 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2715*4882a593Smuzhiyun 		kmem_free(ip->i_df.if_u1.if_data);
2716*4882a593Smuzhiyun 		ip->i_df.if_u1.if_data = NULL;
2717*4882a593Smuzhiyun 		ip->i_df.if_bytes = 0;
2718*4882a593Smuzhiyun 	}
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	VFS_I(ip)->i_mode = 0;		/* mark incore inode as free */
2721*4882a593Smuzhiyun 	ip->i_d.di_flags = 0;
2722*4882a593Smuzhiyun 	ip->i_d.di_flags2 = ip->i_mount->m_ino_geo.new_diflags2;
2723*4882a593Smuzhiyun 	ip->i_d.di_dmevmask = 0;
2724*4882a593Smuzhiyun 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2725*4882a593Smuzhiyun 	ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	/* Don't attempt to replay owner changes for a deleted inode */
2728*4882a593Smuzhiyun 	spin_lock(&iip->ili_lock);
2729*4882a593Smuzhiyun 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2730*4882a593Smuzhiyun 	spin_unlock(&iip->ili_lock);
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 	/*
2733*4882a593Smuzhiyun 	 * Bump the generation count so no one will be confused
2734*4882a593Smuzhiyun 	 * by reincarnations of this inode.
2735*4882a593Smuzhiyun 	 */
2736*4882a593Smuzhiyun 	VFS_I(ip)->i_generation++;
2737*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 	if (xic.deleted)
2740*4882a593Smuzhiyun 		error = xfs_ifree_cluster(ip, tp, &xic);
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	return error;
2743*4882a593Smuzhiyun }
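
/*
 * A minimal caller sketch (illustrative only, modelled on the
 * xfs_inactive_ifree() path; error handling trimmed):
 */
#if 0
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_ifree(tp, ip);
	if (error)
		xfs_trans_cancel(tp);
	else
		error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
#endif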
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun /*
2746*4882a593Smuzhiyun  * This is called to unpin an inode.  The caller must have the inode locked
2747*4882a593Smuzhiyun  * in at least shared mode so that the buffer cannot be subsequently pinned
2748*4882a593Smuzhiyun  * once someone is waiting for it to be unpinned.
2749*4882a593Smuzhiyun  */
2750*4882a593Smuzhiyun static void
2751*4882a593Smuzhiyun xfs_iunpin(
2752*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2753*4882a593Smuzhiyun {
2754*4882a593Smuzhiyun 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 	/* Give the log a push to start the unpinning I/O */
2759*4882a593Smuzhiyun 	xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun }
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun static void
2764*4882a593Smuzhiyun __xfs_iunpin_wait(
2765*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2766*4882a593Smuzhiyun {
2767*4882a593Smuzhiyun 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2768*4882a593Smuzhiyun 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2769*4882a593Smuzhiyun 
2770*4882a593Smuzhiyun 	xfs_iunpin(ip);
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun 	do {
2773*4882a593Smuzhiyun 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2774*4882a593Smuzhiyun 		if (xfs_ipincount(ip))
2775*4882a593Smuzhiyun 			io_schedule();
2776*4882a593Smuzhiyun 	} while (xfs_ipincount(ip));
2777*4882a593Smuzhiyun 	finish_wait(wq, &wait.wq_entry);
2778*4882a593Smuzhiyun }
2779*4882a593Smuzhiyun 
2780*4882a593Smuzhiyun void
2781*4882a593Smuzhiyun xfs_iunpin_wait(
2782*4882a593Smuzhiyun 	struct xfs_inode	*ip)
2783*4882a593Smuzhiyun {
2784*4882a593Smuzhiyun 	if (xfs_ipincount(ip))
2785*4882a593Smuzhiyun 		__xfs_iunpin_wait(ip);
2786*4882a593Smuzhiyun }
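
/*
 * Illustrative note (not part of the build): __xfs_iunpin_wait() is the
 * standard bit-waitqueue pattern.  xfs_iunpin() only *starts* the log
 * force, so the pin count drops asynchronously; calling prepare_to_wait()
 * before re-testing xfs_ipincount() closes the race where the final unpin
 * (and its wakeup) lands between the test and the sleep.
 */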
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun /*
2789*4882a593Smuzhiyun  * Removing an inode from the namespace involves removing the directory entry
2790*4882a593Smuzhiyun  * and dropping the link count on the inode. Removing the directory entry can
2791*4882a593Smuzhiyun  * result in locking an AGF (directory blocks were freed) and removing a link
2792*4882a593Smuzhiyun  * count can result in placing the inode on an unlinked list which results in
2793*4882a593Smuzhiyun  * locking an AGI.
2794*4882a593Smuzhiyun  *
2795*4882a593Smuzhiyun  * The big problem here is that we have an ordering constraint on AGF and AGI
2796*4882a593Smuzhiyun  * locking - inode allocation locks the AGI, then can allocate a new extent for
2797*4882a593Smuzhiyun  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2798*4882a593Smuzhiyun  * removes the inode from the unlinked list, requiring that we lock the AGI
2799*4882a593Smuzhiyun  * first, and then freeing the inode can result in an inode chunk being freed
2800*4882a593Smuzhiyun  * and hence freeing disk space requiring that we lock an AGF.
2801*4882a593Smuzhiyun  *
2802*4882a593Smuzhiyun  * Hence the ordering that is imposed by other parts of the code is AGI before
2803*4882a593Smuzhiyun  * AGF. This means we cannot remove the directory entry before we drop the inode
2804*4882a593Smuzhiyun  * reference count and put it on the unlinked list as this results in a lock
2805*4882a593Smuzhiyun  * order of AGF then AGI, and this can deadlock against inode allocation and
2806*4882a593Smuzhiyun  * freeing. Therefore we must drop the link counts before we remove the
2807*4882a593Smuzhiyun  * directory entry.
2808*4882a593Smuzhiyun  *
2809*4882a593Smuzhiyun  * This is still safe from a transactional point of view - it is not until we
2810*4882a593Smuzhiyun  * get to xfs_defer_finish() that we have the possibility of multiple
2811*4882a593Smuzhiyun  * transactions in this operation. Hence as long as we remove the directory
2812*4882a593Smuzhiyun  * entry and drop the link count in the first transaction of the remove
2813*4882a593Smuzhiyun  * operation, there are no transactional constraints on the ordering here.
2814*4882a593Smuzhiyun  */
2815*4882a593Smuzhiyun int
2816*4882a593Smuzhiyun xfs_remove(
2817*4882a593Smuzhiyun 	xfs_inode_t             *dp,
2818*4882a593Smuzhiyun 	struct xfs_name		*name,
2819*4882a593Smuzhiyun 	xfs_inode_t		*ip)
2820*4882a593Smuzhiyun {
2821*4882a593Smuzhiyun 	xfs_mount_t		*mp = dp->i_mount;
2822*4882a593Smuzhiyun 	xfs_trans_t             *tp = NULL;
2823*4882a593Smuzhiyun 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2824*4882a593Smuzhiyun 	int                     error = 0;
2825*4882a593Smuzhiyun 	uint			resblks;
2826*4882a593Smuzhiyun 
2827*4882a593Smuzhiyun 	trace_xfs_remove(dp, name);
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(mp))
2830*4882a593Smuzhiyun 		return -EIO;
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun 	error = xfs_qm_dqattach(dp);
2833*4882a593Smuzhiyun 	if (error)
2834*4882a593Smuzhiyun 		goto std_return;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	error = xfs_qm_dqattach(ip);
2837*4882a593Smuzhiyun 	if (error)
2838*4882a593Smuzhiyun 		goto std_return;
2839*4882a593Smuzhiyun 
2840*4882a593Smuzhiyun 	/*
2841*4882a593Smuzhiyun 	 * We try to get the real space reservation first, allowing
2842*4882a593Smuzhiyun 	 * for directory btree deletion(s) implying possible bmap
2843*4882a593Smuzhiyun 	 * insert(s).  If we can't get the space reservation then we
2844*4882a593Smuzhiyun 	 * use 0 instead, and the directory code avoids the bmap btree
2845*4882a593Smuzhiyun 	 * insert(s) by trimming the LAST block from the directory if
2846*4882a593Smuzhiyun 	 * such an insert would otherwise be required.
2848*4882a593Smuzhiyun 	 */
2849*4882a593Smuzhiyun 	resblks = XFS_REMOVE_SPACE_RES(mp);
2850*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2851*4882a593Smuzhiyun 	if (error == -ENOSPC) {
2852*4882a593Smuzhiyun 		resblks = 0;
2853*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2854*4882a593Smuzhiyun 				&tp);
2855*4882a593Smuzhiyun 	}
2856*4882a593Smuzhiyun 	if (error) {
2857*4882a593Smuzhiyun 		ASSERT(error != -ENOSPC);
2858*4882a593Smuzhiyun 		goto std_return;
2859*4882a593Smuzhiyun 	}
2860*4882a593Smuzhiyun 
2861*4882a593Smuzhiyun 	xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
2864*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2865*4882a593Smuzhiyun 
2866*4882a593Smuzhiyun 	/*
2867*4882a593Smuzhiyun 	 * If we're removing a directory perform some additional validation.
2868*4882a593Smuzhiyun 	 */
2869*4882a593Smuzhiyun 	if (is_dir) {
2870*4882a593Smuzhiyun 		ASSERT(VFS_I(ip)->i_nlink >= 2);
2871*4882a593Smuzhiyun 		if (VFS_I(ip)->i_nlink != 2) {
2872*4882a593Smuzhiyun 			error = -ENOTEMPTY;
2873*4882a593Smuzhiyun 			goto out_trans_cancel;
2874*4882a593Smuzhiyun 		}
2875*4882a593Smuzhiyun 		if (!xfs_dir_isempty(ip)) {
2876*4882a593Smuzhiyun 			error = -ENOTEMPTY;
2877*4882a593Smuzhiyun 			goto out_trans_cancel;
2878*4882a593Smuzhiyun 		}
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun 		/* Drop the link from ip's "..".  */
2881*4882a593Smuzhiyun 		error = xfs_droplink(tp, dp);
2882*4882a593Smuzhiyun 		if (error)
2883*4882a593Smuzhiyun 			goto out_trans_cancel;
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 		/* Drop the "." link from ip to self.  */
2886*4882a593Smuzhiyun 		error = xfs_droplink(tp, ip);
2887*4882a593Smuzhiyun 		if (error)
2888*4882a593Smuzhiyun 			goto out_trans_cancel;
2889*4882a593Smuzhiyun 	} else {
2890*4882a593Smuzhiyun 		/*
2891*4882a593Smuzhiyun 		 * When removing a non-directory we need to log the parent
2892*4882a593Smuzhiyun 		 * inode here.  For a directory this is done implicitly
2893*4882a593Smuzhiyun 		 * by the xfs_droplink call for the ".." entry.
2894*4882a593Smuzhiyun 		 */
2895*4882a593Smuzhiyun 		xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2896*4882a593Smuzhiyun 	}
2897*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 	/* Drop the link from dp to ip. */
2900*4882a593Smuzhiyun 	error = xfs_droplink(tp, ip);
2901*4882a593Smuzhiyun 	if (error)
2902*4882a593Smuzhiyun 		goto out_trans_cancel;
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun 	error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2905*4882a593Smuzhiyun 	if (error) {
2906*4882a593Smuzhiyun 		ASSERT(error != -ENOENT);
2907*4882a593Smuzhiyun 		goto out_trans_cancel;
2908*4882a593Smuzhiyun 	}
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 	/*
2911*4882a593Smuzhiyun 	 * If this is a synchronous mount, make sure that the
2912*4882a593Smuzhiyun 	 * remove transaction goes to disk before returning to
2913*4882a593Smuzhiyun 	 * the user.
2914*4882a593Smuzhiyun 	 */
2915*4882a593Smuzhiyun 	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2916*4882a593Smuzhiyun 		xfs_trans_set_sync(tp);
2917*4882a593Smuzhiyun 
2918*4882a593Smuzhiyun 	error = xfs_trans_commit(tp);
2919*4882a593Smuzhiyun 	if (error)
2920*4882a593Smuzhiyun 		goto std_return;
2921*4882a593Smuzhiyun 
2922*4882a593Smuzhiyun 	if (is_dir && xfs_inode_is_filestream(ip))
2923*4882a593Smuzhiyun 		xfs_filestream_deassociate(ip);
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 	return 0;
2926*4882a593Smuzhiyun 
2927*4882a593Smuzhiyun  out_trans_cancel:
2928*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
2929*4882a593Smuzhiyun  std_return:
2930*4882a593Smuzhiyun 	return error;
2931*4882a593Smuzhiyun }
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun /*
2934*4882a593Smuzhiyun  * Enter all inodes for a rename transaction into a sorted array.
2935*4882a593Smuzhiyun  */
2936*4882a593Smuzhiyun #define __XFS_SORT_INODES	5
2937*4882a593Smuzhiyun STATIC void
2938*4882a593Smuzhiyun xfs_sort_for_rename(
2939*4882a593Smuzhiyun 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2940*4882a593Smuzhiyun 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2941*4882a593Smuzhiyun 	struct xfs_inode	*ip1,	/* in: inode of old entry */
2942*4882a593Smuzhiyun 	struct xfs_inode	*ip2,	/* in: inode of new entry */
2943*4882a593Smuzhiyun 	struct xfs_inode	*wip,	/* in: whiteout inode */
2944*4882a593Smuzhiyun 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2945*4882a593Smuzhiyun 	int			*num_inodes)  /* in/out: inodes in array */
2946*4882a593Smuzhiyun {
2947*4882a593Smuzhiyun 	int			i, j;
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	ASSERT(*num_inodes == __XFS_SORT_INODES);
2950*4882a593Smuzhiyun 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	/*
2953*4882a593Smuzhiyun 	 * i_tab contains a list of pointers to inodes.  We initialize
2954*4882a593Smuzhiyun 	 * the table here & we'll sort it.  We will then use it to
2955*4882a593Smuzhiyun 	 * order the acquisition of the inode locks.
2956*4882a593Smuzhiyun 	 *
2957*4882a593Smuzhiyun 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2958*4882a593Smuzhiyun 	 */
2959*4882a593Smuzhiyun 	i = 0;
2960*4882a593Smuzhiyun 	i_tab[i++] = dp1;
2961*4882a593Smuzhiyun 	i_tab[i++] = dp2;
2962*4882a593Smuzhiyun 	i_tab[i++] = ip1;
2963*4882a593Smuzhiyun 	if (ip2)
2964*4882a593Smuzhiyun 		i_tab[i++] = ip2;
2965*4882a593Smuzhiyun 	if (wip)
2966*4882a593Smuzhiyun 		i_tab[i++] = wip;
2967*4882a593Smuzhiyun 	*num_inodes = i;
2968*4882a593Smuzhiyun 
2969*4882a593Smuzhiyun 	/*
2970*4882a593Smuzhiyun 	 * Sort the elements via bubble sort.  (Remember, there are at
2971*4882a593Smuzhiyun 	 * most 5 elements to sort, so this is adequate.)
2972*4882a593Smuzhiyun 	 */
2973*4882a593Smuzhiyun 	for (i = 0; i < *num_inodes; i++) {
2974*4882a593Smuzhiyun 		for (j = 1; j < *num_inodes; j++) {
2975*4882a593Smuzhiyun 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2976*4882a593Smuzhiyun 				struct xfs_inode *temp = i_tab[j];
2977*4882a593Smuzhiyun 				i_tab[j] = i_tab[j-1];
2978*4882a593Smuzhiyun 				i_tab[j-1] = temp;
2979*4882a593Smuzhiyun 			}
2980*4882a593Smuzhiyun 		}
2981*4882a593Smuzhiyun 	}
2982*4882a593Smuzhiyun }
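
/*
 * Worked example (hypothetical inode numbers): for a cross-directory
 * rename with dp1->i_ino = 131, dp2->i_ino = 17, ip1->i_ino = 260 and no
 * ip2 or wip, the table sorts to {17, 131, 260}.  xfs_lock_inodes() can
 * then take the ILOCKs in ascending inode number order, which is what
 * prevents ABBA deadlocks between concurrent rename callers.
 */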
2983*4882a593Smuzhiyun 
2984*4882a593Smuzhiyun static int
2985*4882a593Smuzhiyun xfs_finish_rename(
2986*4882a593Smuzhiyun 	struct xfs_trans	*tp)
2987*4882a593Smuzhiyun {
2988*4882a593Smuzhiyun 	/*
2989*4882a593Smuzhiyun 	 * If this is a synchronous mount, make sure that the rename transaction
2990*4882a593Smuzhiyun 	 * goes to disk before returning to the user.
2991*4882a593Smuzhiyun 	 */
2992*4882a593Smuzhiyun 	if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2993*4882a593Smuzhiyun 		xfs_trans_set_sync(tp);
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	return xfs_trans_commit(tp);
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun /*
2999*4882a593Smuzhiyun  * xfs_cross_rename()
3000*4882a593Smuzhiyun  *
3001*4882a593Smuzhiyun  * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
3002*4882a593Smuzhiyun  */
3003*4882a593Smuzhiyun STATIC int
3004*4882a593Smuzhiyun xfs_cross_rename(
3005*4882a593Smuzhiyun 	struct xfs_trans	*tp,
3006*4882a593Smuzhiyun 	struct xfs_inode	*dp1,
3007*4882a593Smuzhiyun 	struct xfs_name		*name1,
3008*4882a593Smuzhiyun 	struct xfs_inode	*ip1,
3009*4882a593Smuzhiyun 	struct xfs_inode	*dp2,
3010*4882a593Smuzhiyun 	struct xfs_name		*name2,
3011*4882a593Smuzhiyun 	struct xfs_inode	*ip2,
3012*4882a593Smuzhiyun 	int			spaceres)
3013*4882a593Smuzhiyun {
3014*4882a593Smuzhiyun 	int		error = 0;
3015*4882a593Smuzhiyun 	int		ip1_flags = 0;
3016*4882a593Smuzhiyun 	int		ip2_flags = 0;
3017*4882a593Smuzhiyun 	int		dp2_flags = 0;
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	/* Swap inode number for dirent in first parent */
3020*4882a593Smuzhiyun 	error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
3021*4882a593Smuzhiyun 	if (error)
3022*4882a593Smuzhiyun 		goto out_trans_abort;
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun 	/* Swap inode number for dirent in second parent */
3025*4882a593Smuzhiyun 	error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
3026*4882a593Smuzhiyun 	if (error)
3027*4882a593Smuzhiyun 		goto out_trans_abort;
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	/*
3030*4882a593Smuzhiyun 	 * If we're renaming one or more directories across different parents,
3031*4882a593Smuzhiyun 	 * update the respective ".." entries (and link counts) to match the new
3032*4882a593Smuzhiyun 	 * parents.
3033*4882a593Smuzhiyun 	 */
3034*4882a593Smuzhiyun 	if (dp1 != dp2) {
3035*4882a593Smuzhiyun 		dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun 		if (S_ISDIR(VFS_I(ip2)->i_mode)) {
3038*4882a593Smuzhiyun 			error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
3039*4882a593Smuzhiyun 						dp1->i_ino, spaceres);
3040*4882a593Smuzhiyun 			if (error)
3041*4882a593Smuzhiyun 				goto out_trans_abort;
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 			/* transfer ip2 ".." reference to dp1 */
3044*4882a593Smuzhiyun 			if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
3045*4882a593Smuzhiyun 				error = xfs_droplink(tp, dp2);
3046*4882a593Smuzhiyun 				if (error)
3047*4882a593Smuzhiyun 					goto out_trans_abort;
3048*4882a593Smuzhiyun 				xfs_bumplink(tp, dp1);
3049*4882a593Smuzhiyun 			}
3050*4882a593Smuzhiyun 
3051*4882a593Smuzhiyun 			/*
3052*4882a593Smuzhiyun 			 * Although ip1 isn't changed here, userspace needs
3053*4882a593Smuzhiyun 			 * to be warned about the change, so that applications
3054*4882a593Smuzhiyun 			 * relying on it (like backup ones) will properly
3055*4882a593Smuzhiyun 			 * notice the change.
3056*4882a593Smuzhiyun 			 */
3057*4882a593Smuzhiyun 			ip1_flags |= XFS_ICHGTIME_CHG;
3058*4882a593Smuzhiyun 			ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3059*4882a593Smuzhiyun 		}
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 		if (S_ISDIR(VFS_I(ip1)->i_mode)) {
3062*4882a593Smuzhiyun 			error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
3063*4882a593Smuzhiyun 						dp2->i_ino, spaceres);
3064*4882a593Smuzhiyun 			if (error)
3065*4882a593Smuzhiyun 				goto out_trans_abort;
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 			/* transfer ip1 ".." reference to dp2 */
3068*4882a593Smuzhiyun 			if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3069*4882a593Smuzhiyun 				error = xfs_droplink(tp, dp1);
3070*4882a593Smuzhiyun 				if (error)
3071*4882a593Smuzhiyun 					goto out_trans_abort;
3072*4882a593Smuzhiyun 				xfs_bumplink(tp, dp2);
3073*4882a593Smuzhiyun 			}
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 			/*
3076*4882a593Smuzhiyun 			 * Although ip2 isn't changed here, userspace needs
3077*4882a593Smuzhiyun 			 * to be warned about the change, so that applications
3078*4882a593Smuzhiyun 			 * relying on it (like backup ones) will properly
3079*4882a593Smuzhiyun 			 * notice the change.
3080*4882a593Smuzhiyun 			 */
3081*4882a593Smuzhiyun 			ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3082*4882a593Smuzhiyun 			ip2_flags |= XFS_ICHGTIME_CHG;
3083*4882a593Smuzhiyun 		}
3084*4882a593Smuzhiyun 	}
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun 	if (ip1_flags) {
3087*4882a593Smuzhiyun 		xfs_trans_ichgtime(tp, ip1, ip1_flags);
3088*4882a593Smuzhiyun 		xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3089*4882a593Smuzhiyun 	}
3090*4882a593Smuzhiyun 	if (ip2_flags) {
3091*4882a593Smuzhiyun 		xfs_trans_ichgtime(tp, ip2, ip2_flags);
3092*4882a593Smuzhiyun 		xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3093*4882a593Smuzhiyun 	}
3094*4882a593Smuzhiyun 	if (dp2_flags) {
3095*4882a593Smuzhiyun 		xfs_trans_ichgtime(tp, dp2, dp2_flags);
3096*4882a593Smuzhiyun 		xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3097*4882a593Smuzhiyun 	}
3098*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3099*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3100*4882a593Smuzhiyun 	return xfs_finish_rename(tp);
3101*4882a593Smuzhiyun 
3102*4882a593Smuzhiyun out_trans_abort:
3103*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
3104*4882a593Smuzhiyun 	return error;
3105*4882a593Smuzhiyun }
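
/*
 * Illustrative sketch (not part of the build): RENAME_EXCHANGE swaps the
 * two directory entries in place:
 *
 *	dp1/name1 -> ip1                dp1/name1 -> ip2
 *	dp2/name2 -> ip2    becomes     dp2/name2 -> ip1
 *
 * and, when dp1 != dp2 and either inode is a directory, the ".." entries
 * and parent link counts are patched up to match the new parents.
 */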
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun /*
3108*4882a593Smuzhiyun  * xfs_rename_alloc_whiteout()
3109*4882a593Smuzhiyun  *
3110*4882a593Smuzhiyun  * Return a referenced, unlinked, unlocked inode that can be used as a
3111*4882a593Smuzhiyun  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3112*4882a593Smuzhiyun  * crash between allocating the inode and linking it into the rename transaction
3113*4882a593Smuzhiyun  * crash between allocating the inode and linking it into the rename transaction,
3114*4882a593Smuzhiyun  */
3115*4882a593Smuzhiyun static int
3116*4882a593Smuzhiyun xfs_rename_alloc_whiteout(
3117*4882a593Smuzhiyun 	struct xfs_inode	*dp,
3118*4882a593Smuzhiyun 	struct xfs_inode	**wip)
3119*4882a593Smuzhiyun {
3120*4882a593Smuzhiyun 	struct xfs_inode	*tmpfile;
3121*4882a593Smuzhiyun 	int			error;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
3124*4882a593Smuzhiyun 	if (error)
3125*4882a593Smuzhiyun 		return error;
3126*4882a593Smuzhiyun 
3127*4882a593Smuzhiyun 	/*
3128*4882a593Smuzhiyun 	 * Prepare the tmpfile inode as if it were created through the VFS.
3129*4882a593Smuzhiyun 	 * Complete the inode setup and flag it as linkable.  nlink is already
3130*4882a593Smuzhiyun 	 * zero, so we can skip the drop_nlink.
3131*4882a593Smuzhiyun 	 */
3132*4882a593Smuzhiyun 	xfs_setup_iops(tmpfile);
3133*4882a593Smuzhiyun 	xfs_finish_inode_setup(tmpfile);
3134*4882a593Smuzhiyun 	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3135*4882a593Smuzhiyun 
3136*4882a593Smuzhiyun 	*wip = tmpfile;
3137*4882a593Smuzhiyun 	return 0;
3138*4882a593Smuzhiyun }
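
/*
 * Illustrative note (not part of the build): WHITEOUT_MODE and
 * WHITEOUT_DEV are both defined as 0 by the VFS, so the tmpfile created
 * above is a character device with device number 0 - the representation
 * the VFS and overlayfs expect for a whiteout directory entry.
 */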
3139*4882a593Smuzhiyun 
3140*4882a593Smuzhiyun /*
3141*4882a593Smuzhiyun  * xfs_rename
3142*4882a593Smuzhiyun  */
3143*4882a593Smuzhiyun int
3144*4882a593Smuzhiyun xfs_rename(
3145*4882a593Smuzhiyun 	struct xfs_inode	*src_dp,
3146*4882a593Smuzhiyun 	struct xfs_name		*src_name,
3147*4882a593Smuzhiyun 	struct xfs_inode	*src_ip,
3148*4882a593Smuzhiyun 	struct xfs_inode	*target_dp,
3149*4882a593Smuzhiyun 	struct xfs_name		*target_name,
3150*4882a593Smuzhiyun 	struct xfs_inode	*target_ip,
3151*4882a593Smuzhiyun 	unsigned int		flags)
3152*4882a593Smuzhiyun {
3153*4882a593Smuzhiyun 	struct xfs_mount	*mp = src_dp->i_mount;
3154*4882a593Smuzhiyun 	struct xfs_trans	*tp;
3155*4882a593Smuzhiyun 	struct xfs_inode	*wip = NULL;		/* whiteout inode */
3156*4882a593Smuzhiyun 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
3157*4882a593Smuzhiyun 	int			i;
3158*4882a593Smuzhiyun 	int			num_inodes = __XFS_SORT_INODES;
3159*4882a593Smuzhiyun 	bool			new_parent = (src_dp != target_dp);
3160*4882a593Smuzhiyun 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3161*4882a593Smuzhiyun 	int			spaceres;
3162*4882a593Smuzhiyun 	int			error;
3163*4882a593Smuzhiyun 
3164*4882a593Smuzhiyun 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3165*4882a593Smuzhiyun 
3166*4882a593Smuzhiyun 	if ((flags & RENAME_EXCHANGE) && !target_ip)
3167*4882a593Smuzhiyun 		return -EINVAL;
3168*4882a593Smuzhiyun 
3169*4882a593Smuzhiyun 	/*
3170*4882a593Smuzhiyun 	 * If we are doing a whiteout operation, allocate the whiteout inode
3171*4882a593Smuzhiyun 	 * we will be placing at the target and ensure the type is set
3172*4882a593Smuzhiyun 	 * appropriately.
3173*4882a593Smuzhiyun 	 */
3174*4882a593Smuzhiyun 	if (flags & RENAME_WHITEOUT) {
3175*4882a593Smuzhiyun 		error = xfs_rename_alloc_whiteout(target_dp, &wip);
3176*4882a593Smuzhiyun 		if (error)
3177*4882a593Smuzhiyun 			return error;
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun 		/* setup target dirent info as whiteout */
3180*4882a593Smuzhiyun 		src_name->type = XFS_DIR3_FT_CHRDEV;
3181*4882a593Smuzhiyun 	}
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3184*4882a593Smuzhiyun 				inodes, &num_inodes);
3185*4882a593Smuzhiyun 
3186*4882a593Smuzhiyun 	spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3187*4882a593Smuzhiyun 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3188*4882a593Smuzhiyun 	if (error == -ENOSPC) {
3189*4882a593Smuzhiyun 		spaceres = 0;
3190*4882a593Smuzhiyun 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3191*4882a593Smuzhiyun 				&tp);
3192*4882a593Smuzhiyun 	}
3193*4882a593Smuzhiyun 	if (error)
3194*4882a593Smuzhiyun 		goto out_release_wip;
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 	/*
3197*4882a593Smuzhiyun 	 * Attach the dquots to the inodes
3198*4882a593Smuzhiyun 	 */
3199*4882a593Smuzhiyun 	error = xfs_qm_vop_rename_dqattach(inodes);
3200*4882a593Smuzhiyun 	if (error)
3201*4882a593Smuzhiyun 		goto out_trans_cancel;
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 	/*
3204*4882a593Smuzhiyun 	 * Lock all the participating inodes. Depending upon whether
3205*4882a593Smuzhiyun 	 * the target_name exists in the target directory, and
3206*4882a593Smuzhiyun 	 * whether the target directory is the same as the source
3207*4882a593Smuzhiyun 	 * directory, we can lock from 2 to 4 inodes.
3208*4882a593Smuzhiyun 	 */
3209*4882a593Smuzhiyun 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3210*4882a593Smuzhiyun 
3211*4882a593Smuzhiyun 	/*
3212*4882a593Smuzhiyun 	 * Join all the inodes to the transaction. From this point on,
3213*4882a593Smuzhiyun 	 * we can rely on either trans_commit or trans_cancel to unlock
3214*4882a593Smuzhiyun 	 * them.
3215*4882a593Smuzhiyun 	 */
3216*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3217*4882a593Smuzhiyun 	if (new_parent)
3218*4882a593Smuzhiyun 		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3219*4882a593Smuzhiyun 	xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3220*4882a593Smuzhiyun 	if (target_ip)
3221*4882a593Smuzhiyun 		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3222*4882a593Smuzhiyun 	if (wip)
3223*4882a593Smuzhiyun 		xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 	/*
3226*4882a593Smuzhiyun 	 * If we are using project inheritance, we only allow renames
3227*4882a593Smuzhiyun 	 * into our tree when the project IDs are the same; else the
3228*4882a593Smuzhiyun 	 * tree quota mechanism would be circumvented.
3229*4882a593Smuzhiyun 	 */
3230*4882a593Smuzhiyun 	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3231*4882a593Smuzhiyun 		     target_dp->i_d.di_projid != src_ip->i_d.di_projid)) {
3232*4882a593Smuzhiyun 		error = -EXDEV;
3233*4882a593Smuzhiyun 		goto out_trans_cancel;
3234*4882a593Smuzhiyun 	}
3235*4882a593Smuzhiyun 
3236*4882a593Smuzhiyun 	/* RENAME_EXCHANGE is unique from here on. */
3237*4882a593Smuzhiyun 	if (flags & RENAME_EXCHANGE)
3238*4882a593Smuzhiyun 		return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3239*4882a593Smuzhiyun 					target_dp, target_name, target_ip,
3240*4882a593Smuzhiyun 					spaceres);
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	/*
3243*4882a593Smuzhiyun 	 * Check for expected errors before we dirty the transaction
3244*4882a593Smuzhiyun 	 * so we can return an error without a transaction abort.
3245*4882a593Smuzhiyun 	 */
3246*4882a593Smuzhiyun 	if (target_ip == NULL) {
3247*4882a593Smuzhiyun 		/*
3248*4882a593Smuzhiyun 		 * If there's no space reservation, check the entry will
3249*4882a593Smuzhiyun 		 * fit before actually inserting it.
3250*4882a593Smuzhiyun 		 */
3251*4882a593Smuzhiyun 		if (!spaceres) {
3252*4882a593Smuzhiyun 			error = xfs_dir_canenter(tp, target_dp, target_name);
3253*4882a593Smuzhiyun 			if (error)
3254*4882a593Smuzhiyun 				goto out_trans_cancel;
3255*4882a593Smuzhiyun 		}
3256*4882a593Smuzhiyun 	} else {
3257*4882a593Smuzhiyun 		/*
3258*4882a593Smuzhiyun 		 * If target exists and it's a directory, check whether
3259*4882a593Smuzhiyun 		 * it can be destroyed.
3260*4882a593Smuzhiyun 		 */
3261*4882a593Smuzhiyun 		if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3262*4882a593Smuzhiyun 		    (!xfs_dir_isempty(target_ip) ||
3263*4882a593Smuzhiyun 		     (VFS_I(target_ip)->i_nlink > 2))) {
3264*4882a593Smuzhiyun 			error = -EEXIST;
3265*4882a593Smuzhiyun 			goto out_trans_cancel;
3266*4882a593Smuzhiyun 		}
3267*4882a593Smuzhiyun 	}
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun 	/*
3270*4882a593Smuzhiyun 	 * Lock the AGI buffers we need to handle bumping the nlink of the
3271*4882a593Smuzhiyun 	 * whiteout inode off the unlinked list and to handle dropping the
3272*4882a593Smuzhiyun 	 * nlink of the target inode.  Per locking order rules, do this in
3273*4882a593Smuzhiyun 	 * increasing AG order and before directory block allocation tries to
3274*4882a593Smuzhiyun 	 * grab AGFs because we grab AGIs before AGFs.
3275*4882a593Smuzhiyun 	 *
3276*4882a593Smuzhiyun 	 * The (vfs) caller must ensure that if src is a directory then
3277*4882a593Smuzhiyun 	 * target_ip is either null or an empty directory.
3278*4882a593Smuzhiyun 	 */
3279*4882a593Smuzhiyun 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3280*4882a593Smuzhiyun 		if (inodes[i] == wip ||
3281*4882a593Smuzhiyun 		    (inodes[i] == target_ip &&
3282*4882a593Smuzhiyun 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3283*4882a593Smuzhiyun 			struct xfs_buf	*bp;
3284*4882a593Smuzhiyun 			xfs_agnumber_t	agno;
3285*4882a593Smuzhiyun 
3286*4882a593Smuzhiyun 			agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
3287*4882a593Smuzhiyun 			error = xfs_read_agi(mp, tp, agno, &bp);
3288*4882a593Smuzhiyun 			if (error)
3289*4882a593Smuzhiyun 				goto out_trans_cancel;
3290*4882a593Smuzhiyun 		}
3291*4882a593Smuzhiyun 	}
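	/*
	 * Illustrative note (an assumption about context outside this
	 * excerpt): the loop above only takes AGI buffers in increasing AG
	 * order because inodes[] was sorted by inode number earlier (e.g.
	 * by xfs_sort_for_rename()), and the AG number obtained from
	 * XFS_INO_TO_AGNO() increases monotonically with the inode number.
	 * Acquiring the AGIs in one canonical order is what prevents ABBA
	 * deadlocks between concurrent renames.
	 */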
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun 	/*
3294*4882a593Smuzhiyun 	 * Directory entry creation below may acquire the AGF. Remove
3295*4882a593Smuzhiyun 	 * the whiteout from the unlinked list first to preserve correct
3296*4882a593Smuzhiyun 	 * AGI/AGF locking order. This dirties the transaction so failures
3297*4882a593Smuzhiyun 	 * after this point will abort and log recovery will clean up the
3298*4882a593Smuzhiyun 	 * mess.
3299*4882a593Smuzhiyun 	 *
3300*4882a593Smuzhiyun 	 * For whiteouts, we need to bump the link count on the whiteout
3301*4882a593Smuzhiyun 	 * inode. After this point we have a real link, so clear the tmpfile
3302*4882a593Smuzhiyun 	 * state flag from the inode so that it doesn't accidentally get
3303*4882a593Smuzhiyun 	 * misused in the future.
3304*4882a593Smuzhiyun 	 */
3305*4882a593Smuzhiyun 	if (wip) {
3306*4882a593Smuzhiyun 		ASSERT(VFS_I(wip)->i_nlink == 0);
3307*4882a593Smuzhiyun 		error = xfs_iunlink_remove(tp, wip);
3308*4882a593Smuzhiyun 		if (error)
3309*4882a593Smuzhiyun 			goto out_trans_cancel;
3310*4882a593Smuzhiyun 
3311*4882a593Smuzhiyun 		xfs_bumplink(tp, wip);
3312*4882a593Smuzhiyun 		VFS_I(wip)->i_state &= ~I_LINKABLE;
3313*4882a593Smuzhiyun 	}
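	/*
	 * Summary sketch of the whiteout transition performed above (a
	 * restatement of the steps in this block, not a new mechanism):
	 *
	 *	xfs_iunlink_remove(tp, wip);	take the nlink-0 tmpfile off
	 *					the AGI unlinked list
	 *	xfs_bumplink(tp, wip);		nlink: 0 -> 1, a real link
	 *	i_state &= ~I_LINKABLE;		no longer a linkable tmpfile
	 *
	 * Doing this before any directory entry creation below preserves
	 * the AGI-before-AGF locking order described above.
	 */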
3314*4882a593Smuzhiyun 
3315*4882a593Smuzhiyun 	/*
3316*4882a593Smuzhiyun 	 * Set up the target.
3317*4882a593Smuzhiyun 	 */
3318*4882a593Smuzhiyun 	if (target_ip == NULL) {
3319*4882a593Smuzhiyun 		/*
3320*4882a593Smuzhiyun 		 * If target does not exist and the rename crosses
3321*4882a593Smuzhiyun 		 * directories, adjust the target directory link count
3322*4882a593Smuzhiyun 		 * to account for the ".." reference from the new entry.
3323*4882a593Smuzhiyun 		 */
3324*4882a593Smuzhiyun 		error = xfs_dir_createname(tp, target_dp, target_name,
3325*4882a593Smuzhiyun 					   src_ip->i_ino, spaceres);
3326*4882a593Smuzhiyun 		if (error)
3327*4882a593Smuzhiyun 			goto out_trans_cancel;
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun 		xfs_trans_ichgtime(tp, target_dp,
3330*4882a593Smuzhiyun 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3331*4882a593Smuzhiyun 
3332*4882a593Smuzhiyun 		if (new_parent && src_is_directory) {
3333*4882a593Smuzhiyun 			xfs_bumplink(tp, target_dp);
3334*4882a593Smuzhiyun 		}
3335*4882a593Smuzhiyun 	} else { /* target_ip != NULL */
3336*4882a593Smuzhiyun 		/*
3337*4882a593Smuzhiyun 		 * Link the source inode under the target name.
3338*4882a593Smuzhiyun 		 * If the source inode is a directory and we are moving
3339*4882a593Smuzhiyun 		 * it across directories, its ".." entry will be
3340*4882a593Smuzhiyun 		 * inconsistent until we replace that down below.
3341*4882a593Smuzhiyun 		 *
3342*4882a593Smuzhiyun 		 * In case there is already an entry with the same
3343*4882a593Smuzhiyun 		 * name at the destination directory, remove it first.
3344*4882a593Smuzhiyun 		 */
3345*4882a593Smuzhiyun 		error = xfs_dir_replace(tp, target_dp, target_name,
3346*4882a593Smuzhiyun 					src_ip->i_ino, spaceres);
3347*4882a593Smuzhiyun 		if (error)
3348*4882a593Smuzhiyun 			goto out_trans_cancel;
3349*4882a593Smuzhiyun 
3350*4882a593Smuzhiyun 		xfs_trans_ichgtime(tp, target_dp,
3351*4882a593Smuzhiyun 					XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun 		/*
3354*4882a593Smuzhiyun 		 * Decrement the link count on the target since the target
3355*4882a593Smuzhiyun 		 * dir no longer points to it.
3356*4882a593Smuzhiyun 		 */
3357*4882a593Smuzhiyun 		error = xfs_droplink(tp, target_ip);
3358*4882a593Smuzhiyun 		if (error)
3359*4882a593Smuzhiyun 			goto out_trans_cancel;
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 		if (src_is_directory) {
3362*4882a593Smuzhiyun 			/*
3363*4882a593Smuzhiyun 			 * Drop the link from the old "." entry.
3364*4882a593Smuzhiyun 			 */
3365*4882a593Smuzhiyun 			error = xfs_droplink(tp, target_ip);
3366*4882a593Smuzhiyun 			if (error)
3367*4882a593Smuzhiyun 				goto out_trans_cancel;
3368*4882a593Smuzhiyun 		}
3369*4882a593Smuzhiyun 	} /* target_ip != NULL */
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 	/*
3372*4882a593Smuzhiyun 	 * Remove the source.
3373*4882a593Smuzhiyun 	 */
3374*4882a593Smuzhiyun 	if (new_parent && src_is_directory) {
3375*4882a593Smuzhiyun 		/*
3376*4882a593Smuzhiyun 		 * Rewrite the ".." entry to point to the new
3377*4882a593Smuzhiyun 		 * directory.
3378*4882a593Smuzhiyun 		 */
3379*4882a593Smuzhiyun 		error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3380*4882a593Smuzhiyun 					target_dp->i_ino, spaceres);
3381*4882a593Smuzhiyun 		ASSERT(error != -EEXIST);
3382*4882a593Smuzhiyun 		if (error)
3383*4882a593Smuzhiyun 			goto out_trans_cancel;
3384*4882a593Smuzhiyun 	}
3385*4882a593Smuzhiyun 
3386*4882a593Smuzhiyun 	/*
3387*4882a593Smuzhiyun 	 * We always want to hit the ctime on the source inode.
3388*4882a593Smuzhiyun 	 *
3389*4882a593Smuzhiyun 	 * This isn't strictly required by the standards since the source
3390*4882a593Smuzhiyun 	 * inode isn't really being changed, but old unix file systems did
3391*4882a593Smuzhiyun 	 * it and some incremental backup programs won't work without it.
3392*4882a593Smuzhiyun 	 */
3393*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3394*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3395*4882a593Smuzhiyun 
3396*4882a593Smuzhiyun 	/*
3397*4882a593Smuzhiyun 	 * Adjust the link count on src_dp.  This is necessary when
3398*4882a593Smuzhiyun 	 * renaming a directory, either within one parent when
3399*4882a593Smuzhiyun 	 * the target existed, or across two parent directories.
3400*4882a593Smuzhiyun 	 */
3401*4882a593Smuzhiyun 	if (src_is_directory && (new_parent || target_ip != NULL)) {
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 		/*
3404*4882a593Smuzhiyun 		 * Decrement link count on src_directory since the
3405*4882a593Smuzhiyun 		 * entry that's moved no longer points to it.
3406*4882a593Smuzhiyun 		 */
3407*4882a593Smuzhiyun 		error = xfs_droplink(tp, src_dp);
3408*4882a593Smuzhiyun 		if (error)
3409*4882a593Smuzhiyun 			goto out_trans_cancel;
3410*4882a593Smuzhiyun 	}
3411*4882a593Smuzhiyun 
3412*4882a593Smuzhiyun 	/*
3413*4882a593Smuzhiyun 	 * For whiteouts, we only need to update the source dirent with the
3414*4882a593Smuzhiyun 	 * inode number of the whiteout inode rather than removing it
3415*4882a593Smuzhiyun 	 * altogether.
3416*4882a593Smuzhiyun 	 */
3417*4882a593Smuzhiyun 	if (wip) {
3418*4882a593Smuzhiyun 		error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3419*4882a593Smuzhiyun 					spaceres);
3420*4882a593Smuzhiyun 	} else
3421*4882a593Smuzhiyun 		error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3422*4882a593Smuzhiyun 					   spaceres);
3423*4882a593Smuzhiyun 	if (error)
3424*4882a593Smuzhiyun 		goto out_trans_cancel;
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun 	xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3427*4882a593Smuzhiyun 	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3428*4882a593Smuzhiyun 	if (new_parent)
3429*4882a593Smuzhiyun 		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3430*4882a593Smuzhiyun 
3431*4882a593Smuzhiyun 	error = xfs_finish_rename(tp);
3432*4882a593Smuzhiyun 	if (wip)
3433*4882a593Smuzhiyun 		xfs_irele(wip);
3434*4882a593Smuzhiyun 	return error;
3435*4882a593Smuzhiyun 
3436*4882a593Smuzhiyun out_trans_cancel:
3437*4882a593Smuzhiyun 	xfs_trans_cancel(tp);
3438*4882a593Smuzhiyun out_release_wip:
3439*4882a593Smuzhiyun 	if (wip)
3440*4882a593Smuzhiyun 		xfs_irele(wip);
3441*4882a593Smuzhiyun 	return error;
3442*4882a593Smuzhiyun }
3443*4882a593Smuzhiyun 
3444*4882a593Smuzhiyun static int
3445*4882a593Smuzhiyun xfs_iflush(
3446*4882a593Smuzhiyun 	struct xfs_inode	*ip,
3447*4882a593Smuzhiyun 	struct xfs_buf		*bp)
3448*4882a593Smuzhiyun {
3449*4882a593Smuzhiyun 	struct xfs_inode_log_item *iip = ip->i_itemp;
3450*4882a593Smuzhiyun 	struct xfs_dinode	*dip;
3451*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
3452*4882a593Smuzhiyun 	int			error;
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3455*4882a593Smuzhiyun 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3456*4882a593Smuzhiyun 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3457*4882a593Smuzhiyun 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3458*4882a593Smuzhiyun 	ASSERT(iip->ili_item.li_buf == bp);
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3461*4882a593Smuzhiyun 
3462*4882a593Smuzhiyun 	/*
3463*4882a593Smuzhiyun 	 * We don't flush the inode if any of the following checks fail, but we
3464*4882a593Smuzhiyun 	 * do still update the log item and attach to the backing buffer as if
3465*4882a593Smuzhiyun 	 * the flush happened. This is a formality to facilitate predictable
3466*4882a593Smuzhiyun 	 * error handling as the caller will shut down and fail the buffer.
3467*4882a593Smuzhiyun 	 */
3468*4882a593Smuzhiyun 	error = -EFSCORRUPTED;
3469*4882a593Smuzhiyun 	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3470*4882a593Smuzhiyun 			       mp, XFS_ERRTAG_IFLUSH_1)) {
3471*4882a593Smuzhiyun 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3472*4882a593Smuzhiyun 			"%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3473*4882a593Smuzhiyun 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3474*4882a593Smuzhiyun 		goto flush_out;
3475*4882a593Smuzhiyun 	}
3476*4882a593Smuzhiyun 	if (S_ISREG(VFS_I(ip)->i_mode)) {
3477*4882a593Smuzhiyun 		if (XFS_TEST_ERROR(
3478*4882a593Smuzhiyun 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3479*4882a593Smuzhiyun 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3480*4882a593Smuzhiyun 		    mp, XFS_ERRTAG_IFLUSH_3)) {
3481*4882a593Smuzhiyun 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3482*4882a593Smuzhiyun 				"%s: Bad regular inode %Lu, ptr "PTR_FMT,
3483*4882a593Smuzhiyun 				__func__, ip->i_ino, ip);
3484*4882a593Smuzhiyun 			goto flush_out;
3485*4882a593Smuzhiyun 		}
3486*4882a593Smuzhiyun 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3487*4882a593Smuzhiyun 		if (XFS_TEST_ERROR(
3488*4882a593Smuzhiyun 		    ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3489*4882a593Smuzhiyun 		    ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3490*4882a593Smuzhiyun 		    ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3491*4882a593Smuzhiyun 		    mp, XFS_ERRTAG_IFLUSH_4)) {
3492*4882a593Smuzhiyun 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3493*4882a593Smuzhiyun 				"%s: Bad directory inode %Lu, ptr "PTR_FMT,
3494*4882a593Smuzhiyun 				__func__, ip->i_ino, ip);
3495*4882a593Smuzhiyun 			goto flush_out;
3496*4882a593Smuzhiyun 		}
3497*4882a593Smuzhiyun 	}
3498*4882a593Smuzhiyun 	if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3499*4882a593Smuzhiyun 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3500*4882a593Smuzhiyun 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3501*4882a593Smuzhiyun 			"%s: detected corrupt incore inode %Lu, "
3502*4882a593Smuzhiyun 			"total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3503*4882a593Smuzhiyun 			__func__, ip->i_ino,
3504*4882a593Smuzhiyun 			ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3505*4882a593Smuzhiyun 			ip->i_d.di_nblocks, ip);
3506*4882a593Smuzhiyun 		goto flush_out;
3507*4882a593Smuzhiyun 	}
3508*4882a593Smuzhiyun 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3509*4882a593Smuzhiyun 				mp, XFS_ERRTAG_IFLUSH_6)) {
3510*4882a593Smuzhiyun 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3511*4882a593Smuzhiyun 			"%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3512*4882a593Smuzhiyun 			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3513*4882a593Smuzhiyun 		goto flush_out;
3514*4882a593Smuzhiyun 	}
3515*4882a593Smuzhiyun 
3516*4882a593Smuzhiyun 	/*
3517*4882a593Smuzhiyun 	 * Inode item log recovery for v2 inodes is dependent on the
3518*4882a593Smuzhiyun 	 * di_flushiter count for correct sequencing. We bump the flush
3519*4882a593Smuzhiyun 	 * iteration count so we can detect flushes which postdate a log record
3520*4882a593Smuzhiyun 	 * during recovery. This is redundant as we now log every change and
3521*4882a593Smuzhiyun 	 * hence this can't happen, but we still need to do it to ensure
3522*4882a593Smuzhiyun 	 * backwards compatibility with old kernels that predate logging all
3523*4882a593Smuzhiyun 	 * inode changes.
3524*4882a593Smuzhiyun 	 */
3525*4882a593Smuzhiyun 	if (!xfs_sb_version_has_v3inode(&mp->m_sb))
3526*4882a593Smuzhiyun 		ip->i_d.di_flushiter++;
3527*4882a593Smuzhiyun 
3528*4882a593Smuzhiyun 	/*
3529*4882a593Smuzhiyun 	 * If there are inline format data / attr forks attached to this inode,
3530*4882a593Smuzhiyun 	 * make sure they are not corrupt.
3531*4882a593Smuzhiyun 	 */
3532*4882a593Smuzhiyun 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3533*4882a593Smuzhiyun 	    xfs_ifork_verify_local_data(ip))
3534*4882a593Smuzhiyun 		goto flush_out;
3535*4882a593Smuzhiyun 	if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3536*4882a593Smuzhiyun 	    xfs_ifork_verify_local_attr(ip))
3537*4882a593Smuzhiyun 		goto flush_out;
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun 	/*
3540*4882a593Smuzhiyun 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
3541*4882a593Smuzhiyun 	 * copy out the core of the inode, because if the inode is dirty at all
3542*4882a593Smuzhiyun 	 * the core must be.
3543*4882a593Smuzhiyun 	 */
3544*4882a593Smuzhiyun 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	/* Wrap: we never let the log put out DI_MAX_FLUSH */
3547*4882a593Smuzhiyun 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3548*4882a593Smuzhiyun 		ip->i_d.di_flushiter = 0;
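	/*
	 * Illustrative note (the recovery behaviour summarized here lives
	 * outside this file and is stated as an assumption): because the
	 * in-core counter is reset after the copy to the dinode above, the
	 * value logged for the next change never reaches DI_MAX_FLUSH.
	 * Recovery can therefore treat an on-disk di_flushiter of
	 * DI_MAX_FLUSH paired with a much smaller logged counter as "the
	 * counter wrapped, replay anyway" rather than as a stale record.
	 */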
3549*4882a593Smuzhiyun 
3550*4882a593Smuzhiyun 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3551*4882a593Smuzhiyun 	if (XFS_IFORK_Q(ip))
3552*4882a593Smuzhiyun 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3553*4882a593Smuzhiyun 
3554*4882a593Smuzhiyun 	/*
3555*4882a593Smuzhiyun 	 * We've recorded everything logged in the inode, so we'd like to clear
3556*4882a593Smuzhiyun 	 * the ili_fields bits so we don't log and flush things unnecessarily.
3557*4882a593Smuzhiyun 	 * However, we can't stop logging all this information until the data
3558*4882a593Smuzhiyun 	 * we've copied into the disk buffer is written to disk.  If we did we
3559*4882a593Smuzhiyun 	 * might overwrite the copy of the inode in the log with all the data
3560*4882a593Smuzhiyun 	 * after re-logging only part of it, and in the face of a crash we
3561*4882a593Smuzhiyun 	 * wouldn't have all the data we need to recover.
3562*4882a593Smuzhiyun 	 *
3563*4882a593Smuzhiyun 	 * What we do is move the bits to the ili_last_fields field.  When
3564*4882a593Smuzhiyun 	 * logging the inode, these bits are moved back to the ili_fields field.
3565*4882a593Smuzhiyun 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3566*4882a593Smuzhiyun 	 * we know that the information those bits represent is permanently on
3567*4882a593Smuzhiyun 	 * disk.  As long as the flush completes before the inode is logged
3568*4882a593Smuzhiyun 	 * again, then both ili_fields and ili_last_fields will be cleared.
3569*4882a593Smuzhiyun 	 */
3570*4882a593Smuzhiyun 	error = 0;
3571*4882a593Smuzhiyun flush_out:
3572*4882a593Smuzhiyun 	spin_lock(&iip->ili_lock);
3573*4882a593Smuzhiyun 	iip->ili_last_fields = iip->ili_fields;
3574*4882a593Smuzhiyun 	iip->ili_fields = 0;
3575*4882a593Smuzhiyun 	iip->ili_fsync_fields = 0;
3576*4882a593Smuzhiyun 	spin_unlock(&iip->ili_lock);
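	/*
	 * The handoff above, restated as a timeline (a summary of the
	 * preceding comment, not a new mechanism):
	 *
	 *	log a change:		ili_fields |= XFS_ILOG_...
	 *	flush (here):		ili_last_fields = ili_fields;
	 *				ili_fields = 0;
	 *	buffer I/O completes:	xfs_buf_inode_iodone() clears
	 *				ili_last_fields
	 *
	 * If the inode is logged again before the I/O completes, the
	 * ili_last_fields bits are moved back into ili_fields so the log
	 * keeps covering everything not yet known to be on disk.
	 */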
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun 	/*
3579*4882a593Smuzhiyun 	 * Store the current LSN of the inode so that we can tell whether the
3580*4882a593Smuzhiyun 	 * item has moved in the AIL from xfs_buf_inode_iodone().
3581*4882a593Smuzhiyun 	 */
3582*4882a593Smuzhiyun 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3583*4882a593Smuzhiyun 				&iip->ili_item.li_lsn);
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun 	/* generate the checksum. */
3586*4882a593Smuzhiyun 	xfs_dinode_calc_crc(mp, dip);
3587*4882a593Smuzhiyun 	return error;
3588*4882a593Smuzhiyun }
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun /*
3591*4882a593Smuzhiyun  * Non-blocking flush of dirty inode metadata into the backing buffer.
3592*4882a593Smuzhiyun  *
3593*4882a593Smuzhiyun  * The caller must have a reference to the inode and hold the cluster buffer
3594*4882a593Smuzhiyun  * locked. The function will walk all the inodes attached to the cluster
3595*4882a593Smuzhiyun  * buffer, flushing to the buffer each inode it can lock without blocking.
3596*4882a593Smuzhiyun  *
3597*4882a593Smuzhiyun  * On successful flushing of at least one inode, the caller must write out the
3598*4882a593Smuzhiyun  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3599*4882a593Smuzhiyun  * the caller needs to release the buffer. On failure, the filesystem will be
3600*4882a593Smuzhiyun  * shut down, the buffer will have been unlocked and released, and -EFSCORRUPTED
3601*4882a593Smuzhiyun  * will be returned.
3602*4882a593Smuzhiyun  */
3603*4882a593Smuzhiyun int
3604*4882a593Smuzhiyun xfs_iflush_cluster(
3605*4882a593Smuzhiyun 	struct xfs_buf		*bp)
3606*4882a593Smuzhiyun {
3607*4882a593Smuzhiyun 	struct xfs_mount	*mp = bp->b_mount;
3608*4882a593Smuzhiyun 	struct xfs_log_item	*lip, *n;
3609*4882a593Smuzhiyun 	struct xfs_inode	*ip;
3610*4882a593Smuzhiyun 	struct xfs_inode_log_item *iip;
3611*4882a593Smuzhiyun 	int			clcount = 0;
3612*4882a593Smuzhiyun 	int			error = 0;
3613*4882a593Smuzhiyun 
3614*4882a593Smuzhiyun 	/*
3615*4882a593Smuzhiyun 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3616*4882a593Smuzhiyun 	 * can remove itself from the list.
3617*4882a593Smuzhiyun 	 */
3618*4882a593Smuzhiyun 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3619*4882a593Smuzhiyun 		iip = (struct xfs_inode_log_item *)lip;
3620*4882a593Smuzhiyun 		ip = iip->ili_inode;
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun 		/*
3623*4882a593Smuzhiyun 		 * Quick and dirty check to avoid locks if possible.
3624*4882a593Smuzhiyun 		 */
3625*4882a593Smuzhiyun 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3626*4882a593Smuzhiyun 			continue;
3627*4882a593Smuzhiyun 		if (xfs_ipincount(ip))
3628*4882a593Smuzhiyun 			continue;
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 		/*
3631*4882a593Smuzhiyun 		 * The inode is still attached to the buffer, which means it is
3632*4882a593Smuzhiyun 		 * dirty but reclaim might try to grab it. Check carefully for
3633*4882a593Smuzhiyun 		 * that, and grab the ilock while still holding the i_flags_lock
3634*4882a593Smuzhiyun 		 * to guarantee reclaim will not be able to reclaim this inode
3635*4882a593Smuzhiyun 		 * once we drop the i_flags_lock.
3636*4882a593Smuzhiyun 		 */
3637*4882a593Smuzhiyun 		spin_lock(&ip->i_flags_lock);
3638*4882a593Smuzhiyun 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3639*4882a593Smuzhiyun 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3640*4882a593Smuzhiyun 			spin_unlock(&ip->i_flags_lock);
3641*4882a593Smuzhiyun 			continue;
3642*4882a593Smuzhiyun 		}
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun 		/*
3645*4882a593Smuzhiyun 		 * ILOCK will pin the inode against reclaim and prevent
3646*4882a593Smuzhiyun 		 * concurrent transactions modifying the inode while we are
3647*4882a593Smuzhiyun 		 * flushing the inode. If we get the lock, set the flushing
3648*4882a593Smuzhiyun 		 * state before we drop the i_flags_lock.
3649*4882a593Smuzhiyun 		 */
3650*4882a593Smuzhiyun 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3651*4882a593Smuzhiyun 			spin_unlock(&ip->i_flags_lock);
3652*4882a593Smuzhiyun 			continue;
3653*4882a593Smuzhiyun 		}
3654*4882a593Smuzhiyun 		__xfs_iflags_set(ip, XFS_IFLUSHING);
3655*4882a593Smuzhiyun 		spin_unlock(&ip->i_flags_lock);
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 		/*
3658*4882a593Smuzhiyun 		 * Abort flushing this inode if we are shut down because the
3659*4882a593Smuzhiyun 		 * inode may not currently be in the AIL. This can occur when
3660*4882a593Smuzhiyun 		 * log I/O failure unpins the inode without inserting into the
3661*4882a593Smuzhiyun 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
3662*4882a593Smuzhiyun 		 * that otherwise looks like it should be flushed.
3663*4882a593Smuzhiyun 		 */
3664*4882a593Smuzhiyun 		if (XFS_FORCED_SHUTDOWN(mp)) {
3665*4882a593Smuzhiyun 			xfs_iunpin_wait(ip);
3666*4882a593Smuzhiyun 			xfs_iflush_abort(ip);
3667*4882a593Smuzhiyun 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3668*4882a593Smuzhiyun 			error = -EIO;
3669*4882a593Smuzhiyun 			continue;
3670*4882a593Smuzhiyun 		}
3671*4882a593Smuzhiyun 
3672*4882a593Smuzhiyun 		/* don't block waiting on a log force to unpin dirty inodes */
3673*4882a593Smuzhiyun 		if (xfs_ipincount(ip)) {
3674*4882a593Smuzhiyun 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3675*4882a593Smuzhiyun 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
3676*4882a593Smuzhiyun 			continue;
3677*4882a593Smuzhiyun 		}
3678*4882a593Smuzhiyun 
3679*4882a593Smuzhiyun 		if (!xfs_inode_clean(ip))
3680*4882a593Smuzhiyun 			error = xfs_iflush(ip, bp);
3681*4882a593Smuzhiyun 		else
3682*4882a593Smuzhiyun 			xfs_iflags_clear(ip, XFS_IFLUSHING);
3683*4882a593Smuzhiyun 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
3684*4882a593Smuzhiyun 		if (error)
3685*4882a593Smuzhiyun 			break;
3686*4882a593Smuzhiyun 		clcount++;
3687*4882a593Smuzhiyun 	}
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 	if (error) {
3690*4882a593Smuzhiyun 		bp->b_flags |= XBF_ASYNC;
3691*4882a593Smuzhiyun 		xfs_buf_ioend_fail(bp);
3692*4882a593Smuzhiyun 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3693*4882a593Smuzhiyun 		return error;
3694*4882a593Smuzhiyun 	}
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun 	if (!clcount)
3697*4882a593Smuzhiyun 		return -EAGAIN;
3698*4882a593Smuzhiyun 
3699*4882a593Smuzhiyun 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
3700*4882a593Smuzhiyun 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3701*4882a593Smuzhiyun 	return 0;
3703*4882a593Smuzhiyun }
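/*
 * Hypothetical caller sketch (the function name, structure and write-out
 * mechanism are assumptions, not taken from this file) showing the
 * return-value contract documented above xfs_iflush_cluster():
 */
#if 0
static void example_flush_cluster(struct xfs_buf *bp,
				  struct list_head *buffer_list)
{
	int	error = xfs_iflush_cluster(bp);

	if (error == -EAGAIN) {
		/* nothing was flushed: we still own the buffer, release it */
		xfs_buf_relse(bp);
		return;
	}
	if (error)
		return;	/* fs shut down; buffer already unlocked/released */

	/* at least one inode flushed: queue the buffer for write-out */
	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
}
#endif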
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun /* Release an inode. */
3706*4882a593Smuzhiyun void
3707*4882a593Smuzhiyun xfs_irele(
3708*4882a593Smuzhiyun 	struct xfs_inode	*ip)
3709*4882a593Smuzhiyun {
3710*4882a593Smuzhiyun 	trace_xfs_irele(ip, _RET_IP_);
3711*4882a593Smuzhiyun 	iput(VFS_I(ip));
3712*4882a593Smuzhiyun }
3713*4882a593Smuzhiyun 
3714*4882a593Smuzhiyun /*
3715*4882a593Smuzhiyun  * Ensure all committed transactions touching the inode are written to the log.
3716*4882a593Smuzhiyun  */
3717*4882a593Smuzhiyun int
3718*4882a593Smuzhiyun xfs_log_force_inode(
3719*4882a593Smuzhiyun 	struct xfs_inode	*ip)
3720*4882a593Smuzhiyun {
3721*4882a593Smuzhiyun 	xfs_csn_t		seq = 0;
3722*4882a593Smuzhiyun 
3723*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_ILOCK_SHARED);
3724*4882a593Smuzhiyun 	if (xfs_ipincount(ip))
3725*4882a593Smuzhiyun 		seq = ip->i_itemp->ili_commit_seq;
3726*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
3727*4882a593Smuzhiyun 
3728*4882a593Smuzhiyun 	if (!seq)
3729*4882a593Smuzhiyun 		return 0;
3730*4882a593Smuzhiyun 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3731*4882a593Smuzhiyun }
3732*4882a593Smuzhiyun 
3733*4882a593Smuzhiyun /*
3734*4882a593Smuzhiyun  * Grab the exclusive iolock for a data copy from src to dest, making sure to
3735*4882a593Smuzhiyun  * abide by the VFS locking order (lowest pointer value goes first) and to break
3736*4882a593Smuzhiyun  * the layout leases before proceeding.  The loop is needed because we cannot call
3737*4882a593Smuzhiyun  * the blocking break_layout() with the iolocks held, and therefore have to
3738*4882a593Smuzhiyun  * back out both locks.
3739*4882a593Smuzhiyun  */
3740*4882a593Smuzhiyun static int
3741*4882a593Smuzhiyun xfs_iolock_two_inodes_and_break_layout(
3742*4882a593Smuzhiyun 	struct inode		*src,
3743*4882a593Smuzhiyun 	struct inode		*dest)
3744*4882a593Smuzhiyun {
3745*4882a593Smuzhiyun 	int			error;
3746*4882a593Smuzhiyun 
3747*4882a593Smuzhiyun 	if (src > dest)
3748*4882a593Smuzhiyun 		swap(src, dest);
3749*4882a593Smuzhiyun 
3750*4882a593Smuzhiyun retry:
3751*4882a593Smuzhiyun 	/* Wait to break both inodes' layouts before we start locking. */
3752*4882a593Smuzhiyun 	error = break_layout(src, true);
3753*4882a593Smuzhiyun 	if (error)
3754*4882a593Smuzhiyun 		return error;
3755*4882a593Smuzhiyun 	if (src != dest) {
3756*4882a593Smuzhiyun 		error = break_layout(dest, true);
3757*4882a593Smuzhiyun 		if (error)
3758*4882a593Smuzhiyun 			return error;
3759*4882a593Smuzhiyun 	}
3760*4882a593Smuzhiyun 
3761*4882a593Smuzhiyun 	/* Lock one inode and make sure nobody got in and leased it. */
3762*4882a593Smuzhiyun 	inode_lock(src);
3763*4882a593Smuzhiyun 	error = break_layout(src, false);
3764*4882a593Smuzhiyun 	if (error) {
3765*4882a593Smuzhiyun 		inode_unlock(src);
3766*4882a593Smuzhiyun 		if (error == -EWOULDBLOCK)
3767*4882a593Smuzhiyun 			goto retry;
3768*4882a593Smuzhiyun 		return error;
3769*4882a593Smuzhiyun 	}
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun 	if (src == dest)
3772*4882a593Smuzhiyun 		return 0;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun 	/* Lock the other inode and make sure nobody got in and leased it. */
3775*4882a593Smuzhiyun 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
3776*4882a593Smuzhiyun 	error = break_layout(dest, false);
3777*4882a593Smuzhiyun 	if (error) {
3778*4882a593Smuzhiyun 		inode_unlock(src);
3779*4882a593Smuzhiyun 		inode_unlock(dest);
3780*4882a593Smuzhiyun 		if (error == -EWOULDBLOCK)
3781*4882a593Smuzhiyun 			goto retry;
3782*4882a593Smuzhiyun 		return error;
3783*4882a593Smuzhiyun 	}
3784*4882a593Smuzhiyun 
3785*4882a593Smuzhiyun 	return 0;
3786*4882a593Smuzhiyun }
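/*
 * Minimal standalone sketch (not part of the original file) of the
 * address-ordered locking idiom used above, with the break_layout()
 * retry loop elided:
 */
#if 0
static void lock_two_in_address_order(struct inode *a, struct inode *b)
{
	if (a > b)
		swap(a, b);		/* lowest pointer value locks first */
	inode_lock(a);
	if (b != a)
		inode_lock_nested(b, I_MUTEX_NONDIR2);
}
#endif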
3787*4882a593Smuzhiyun 
3788*4882a593Smuzhiyun /*
3789*4882a593Smuzhiyun  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
3790*4882a593Smuzhiyun  * mmap activity.
3791*4882a593Smuzhiyun  */
3792*4882a593Smuzhiyun int
3793*4882a593Smuzhiyun xfs_ilock2_io_mmap(
3794*4882a593Smuzhiyun 	struct xfs_inode	*ip1,
3795*4882a593Smuzhiyun 	struct xfs_inode	*ip2)
3796*4882a593Smuzhiyun {
3797*4882a593Smuzhiyun 	int			ret;
3798*4882a593Smuzhiyun 
3799*4882a593Smuzhiyun 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3800*4882a593Smuzhiyun 	if (ret)
3801*4882a593Smuzhiyun 		return ret;
3802*4882a593Smuzhiyun 	if (ip1 == ip2)
3803*4882a593Smuzhiyun 		xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
3804*4882a593Smuzhiyun 	else
3805*4882a593Smuzhiyun 		xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL,
3806*4882a593Smuzhiyun 				    ip2, XFS_MMAPLOCK_EXCL);
3807*4882a593Smuzhiyun 	return 0;
3808*4882a593Smuzhiyun }
3809*4882a593Smuzhiyun 
3810*4882a593Smuzhiyun /* Unlock both inodes to allow IO and mmap activity. */
3811*4882a593Smuzhiyun void
3812*4882a593Smuzhiyun xfs_iunlock2_io_mmap(
3813*4882a593Smuzhiyun 	struct xfs_inode	*ip1,
3814*4882a593Smuzhiyun 	struct xfs_inode	*ip2)
3815*4882a593Smuzhiyun {
3816*4882a593Smuzhiyun 	bool			same_inode = (ip1 == ip2);
3817*4882a593Smuzhiyun 
3818*4882a593Smuzhiyun 	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
3819*4882a593Smuzhiyun 	if (!same_inode)
3820*4882a593Smuzhiyun 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
3821*4882a593Smuzhiyun 	inode_unlock(VFS_I(ip2));
3822*4882a593Smuzhiyun 	if (!same_inode)
3823*4882a593Smuzhiyun 		inode_unlock(VFS_I(ip1));
3824*4882a593Smuzhiyun }
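/*
 * Hypothetical pairing sketch for the two helpers above, roughly how a
 * two-file operation might use them (the function name and body are
 * illustrative assumptions):
 */
#if 0
static int example_two_file_op(struct xfs_inode *ip1, struct xfs_inode *ip2)
{
	int	error = xfs_ilock2_io_mmap(ip1, ip2);

	if (error)
		return error;
	/* ... operate on both files with I/O and mmap paths locked out ... */
	xfs_iunlock2_io_mmap(ip1, ip2);
	return 0;
}
#endif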
3825