xref: /OK3568_Linux_fs/kernel/fs/xfs/xfs_icache.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4*4882a593Smuzhiyun  * All Rights Reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include "xfs.h"
7*4882a593Smuzhiyun #include "xfs_fs.h"
8*4882a593Smuzhiyun #include "xfs_shared.h"
9*4882a593Smuzhiyun #include "xfs_format.h"
10*4882a593Smuzhiyun #include "xfs_log_format.h"
11*4882a593Smuzhiyun #include "xfs_trans_resv.h"
12*4882a593Smuzhiyun #include "xfs_sb.h"
13*4882a593Smuzhiyun #include "xfs_mount.h"
14*4882a593Smuzhiyun #include "xfs_inode.h"
15*4882a593Smuzhiyun #include "xfs_trans.h"
16*4882a593Smuzhiyun #include "xfs_trans_priv.h"
17*4882a593Smuzhiyun #include "xfs_inode_item.h"
18*4882a593Smuzhiyun #include "xfs_quota.h"
19*4882a593Smuzhiyun #include "xfs_trace.h"
20*4882a593Smuzhiyun #include "xfs_icache.h"
21*4882a593Smuzhiyun #include "xfs_bmap_util.h"
22*4882a593Smuzhiyun #include "xfs_dquot_item.h"
23*4882a593Smuzhiyun #include "xfs_dquot.h"
24*4882a593Smuzhiyun #include "xfs_reflink.h"
25*4882a593Smuzhiyun #include "xfs_ialloc.h"
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include <linux/iversion.h>
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /*
30*4882a593Smuzhiyun  * Allocate and initialise an xfs_inode.
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun struct xfs_inode *
33*4882a593Smuzhiyun xfs_inode_alloc(
34*4882a593Smuzhiyun 	struct xfs_mount	*mp,
35*4882a593Smuzhiyun 	xfs_ino_t		ino)
36*4882a593Smuzhiyun {
37*4882a593Smuzhiyun 	struct xfs_inode	*ip;
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	/*
40*4882a593Smuzhiyun 	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
41*4882a593Smuzhiyun 	 * and return NULL here on ENOMEM.
42*4882a593Smuzhiyun 	 */
43*4882a593Smuzhiyun 	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	if (inode_init_always(mp->m_super, VFS_I(ip))) {
46*4882a593Smuzhiyun 		kmem_cache_free(xfs_inode_zone, ip);
47*4882a593Smuzhiyun 		return NULL;
48*4882a593Smuzhiyun 	}
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	/* VFS doesn't initialise i_mode or i_state! */
51*4882a593Smuzhiyun 	VFS_I(ip)->i_mode = 0;
52*4882a593Smuzhiyun 	VFS_I(ip)->i_state = 0;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	XFS_STATS_INC(mp, vn_active);
55*4882a593Smuzhiyun 	ASSERT(atomic_read(&ip->i_pincount) == 0);
56*4882a593Smuzhiyun 	ASSERT(ip->i_ino == 0);
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun 	/* initialise the xfs inode */
59*4882a593Smuzhiyun 	ip->i_ino = ino;
60*4882a593Smuzhiyun 	ip->i_mount = mp;
61*4882a593Smuzhiyun 	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
62*4882a593Smuzhiyun 	ip->i_afp = NULL;
63*4882a593Smuzhiyun 	ip->i_cowfp = NULL;
64*4882a593Smuzhiyun 	memset(&ip->i_df, 0, sizeof(ip->i_df));
65*4882a593Smuzhiyun 	ip->i_flags = 0;
66*4882a593Smuzhiyun 	ip->i_delayed_blks = 0;
67*4882a593Smuzhiyun 	memset(&ip->i_d, 0, sizeof(ip->i_d));
68*4882a593Smuzhiyun 	ip->i_sick = 0;
69*4882a593Smuzhiyun 	ip->i_checked = 0;
70*4882a593Smuzhiyun 	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
71*4882a593Smuzhiyun 	INIT_LIST_HEAD(&ip->i_ioend_list);
72*4882a593Smuzhiyun 	spin_lock_init(&ip->i_ioend_lock);
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	return ip;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun STATIC void
78*4882a593Smuzhiyun xfs_inode_free_callback(
79*4882a593Smuzhiyun 	struct rcu_head		*head)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	struct inode		*inode = container_of(head, struct inode, i_rcu);
82*4882a593Smuzhiyun 	struct xfs_inode	*ip = XFS_I(inode);
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun 	switch (VFS_I(ip)->i_mode & S_IFMT) {
85*4882a593Smuzhiyun 	case S_IFREG:
86*4882a593Smuzhiyun 	case S_IFDIR:
87*4882a593Smuzhiyun 	case S_IFLNK:
88*4882a593Smuzhiyun 		xfs_idestroy_fork(&ip->i_df);
89*4882a593Smuzhiyun 		break;
90*4882a593Smuzhiyun 	}
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	if (ip->i_afp) {
93*4882a593Smuzhiyun 		xfs_idestroy_fork(ip->i_afp);
94*4882a593Smuzhiyun 		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
95*4882a593Smuzhiyun 	}
96*4882a593Smuzhiyun 	if (ip->i_cowfp) {
97*4882a593Smuzhiyun 		xfs_idestroy_fork(ip->i_cowfp);
98*4882a593Smuzhiyun 		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
99*4882a593Smuzhiyun 	}
100*4882a593Smuzhiyun 	if (ip->i_itemp) {
101*4882a593Smuzhiyun 		ASSERT(!test_bit(XFS_LI_IN_AIL,
102*4882a593Smuzhiyun 				 &ip->i_itemp->ili_item.li_flags));
103*4882a593Smuzhiyun 		xfs_inode_item_destroy(ip);
104*4882a593Smuzhiyun 		ip->i_itemp = NULL;
105*4882a593Smuzhiyun 	}
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	kmem_cache_free(xfs_inode_zone, ip);
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun static void
111*4882a593Smuzhiyun __xfs_inode_free(
112*4882a593Smuzhiyun 	struct xfs_inode	*ip)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	/* asserts to verify all state is correct here */
115*4882a593Smuzhiyun 	ASSERT(atomic_read(&ip->i_pincount) == 0);
116*4882a593Smuzhiyun 	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
117*4882a593Smuzhiyun 	XFS_STATS_DEC(ip->i_mount, vn_active);
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun 	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
120*4882a593Smuzhiyun }
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun void
123*4882a593Smuzhiyun xfs_inode_free(
124*4882a593Smuzhiyun 	struct xfs_inode	*ip)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 	/*
129*4882a593Smuzhiyun 	 * Because we use RCU freeing we need to ensure the inode always
130*4882a593Smuzhiyun 	 * appears to be reclaimed with an invalid inode number when in the
131*4882a593Smuzhiyun 	 * free state. The ip->i_flags_lock provides the barrier against lookup
132*4882a593Smuzhiyun 	 * races.
133*4882a593Smuzhiyun 	 */
134*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
135*4882a593Smuzhiyun 	ip->i_flags = XFS_IRECLAIM;
136*4882a593Smuzhiyun 	ip->i_ino = 0;
137*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun 	__xfs_inode_free(ip);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun /*
143*4882a593Smuzhiyun  * Queue background inode reclaim work if there are reclaimable inodes and there
144*4882a593Smuzhiyun  * isn't reclaim work already scheduled or in progress.
145*4882a593Smuzhiyun  */
146*4882a593Smuzhiyun static void
147*4882a593Smuzhiyun xfs_reclaim_work_queue(
148*4882a593Smuzhiyun 	struct xfs_mount        *mp)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	rcu_read_lock();
152*4882a593Smuzhiyun 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
153*4882a593Smuzhiyun 		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
154*4882a593Smuzhiyun 			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
155*4882a593Smuzhiyun 	}
156*4882a593Smuzhiyun 	rcu_read_unlock();
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
159*4882a593Smuzhiyun static void
160*4882a593Smuzhiyun xfs_perag_set_reclaim_tag(
161*4882a593Smuzhiyun 	struct xfs_perag	*pag)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun 	struct xfs_mount	*mp = pag->pag_mount;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	lockdep_assert_held(&pag->pag_ici_lock);
166*4882a593Smuzhiyun 	if (pag->pag_ici_reclaimable++)
167*4882a593Smuzhiyun 		return;
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun 	/* propagate the reclaim tag up into the perag radix tree */
170*4882a593Smuzhiyun 	spin_lock(&mp->m_perag_lock);
171*4882a593Smuzhiyun 	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
172*4882a593Smuzhiyun 			   XFS_ICI_RECLAIM_TAG);
173*4882a593Smuzhiyun 	spin_unlock(&mp->m_perag_lock);
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	/* schedule periodic background inode reclaim */
176*4882a593Smuzhiyun 	xfs_reclaim_work_queue(mp);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun static void
182*4882a593Smuzhiyun xfs_perag_clear_reclaim_tag(
183*4882a593Smuzhiyun 	struct xfs_perag	*pag)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun 	struct xfs_mount	*mp = pag->pag_mount;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	lockdep_assert_held(&pag->pag_ici_lock);
188*4882a593Smuzhiyun 	if (--pag->pag_ici_reclaimable)
189*4882a593Smuzhiyun 		return;
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun 	/* clear the reclaim tag from the perag radix tree */
192*4882a593Smuzhiyun 	spin_lock(&mp->m_perag_lock);
193*4882a593Smuzhiyun 	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
194*4882a593Smuzhiyun 			     XFS_ICI_RECLAIM_TAG);
195*4882a593Smuzhiyun 	spin_unlock(&mp->m_perag_lock);
196*4882a593Smuzhiyun 	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun /*
201*4882a593Smuzhiyun  * We set the inode flag atomically with the radix tree tag.
202*4882a593Smuzhiyun  * Once we get tag lookups on the radix tree, this inode flag
203*4882a593Smuzhiyun  * can go away.
204*4882a593Smuzhiyun  */
205*4882a593Smuzhiyun void
206*4882a593Smuzhiyun xfs_inode_set_reclaim_tag(
207*4882a593Smuzhiyun 	struct xfs_inode	*ip)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
210*4882a593Smuzhiyun 	struct xfs_perag	*pag;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
213*4882a593Smuzhiyun 	spin_lock(&pag->pag_ici_lock);
214*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
217*4882a593Smuzhiyun 			   XFS_ICI_RECLAIM_TAG);
218*4882a593Smuzhiyun 	xfs_perag_set_reclaim_tag(pag);
219*4882a593Smuzhiyun 	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
222*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
223*4882a593Smuzhiyun 	xfs_perag_put(pag);
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun STATIC void
227*4882a593Smuzhiyun xfs_inode_clear_reclaim_tag(
228*4882a593Smuzhiyun 	struct xfs_perag	*pag,
229*4882a593Smuzhiyun 	xfs_ino_t		ino)
230*4882a593Smuzhiyun {
231*4882a593Smuzhiyun 	radix_tree_tag_clear(&pag->pag_ici_root,
232*4882a593Smuzhiyun 			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
233*4882a593Smuzhiyun 			     XFS_ICI_RECLAIM_TAG);
234*4882a593Smuzhiyun 	xfs_perag_clear_reclaim_tag(pag);
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun static void
238*4882a593Smuzhiyun xfs_inew_wait(
239*4882a593Smuzhiyun 	struct xfs_inode	*ip)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
242*4882a593Smuzhiyun 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	do {
245*4882a593Smuzhiyun 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
246*4882a593Smuzhiyun 		if (!xfs_iflags_test(ip, XFS_INEW))
247*4882a593Smuzhiyun 			break;
248*4882a593Smuzhiyun 		schedule();
249*4882a593Smuzhiyun 	} while (true);
250*4882a593Smuzhiyun 	finish_wait(wq, &wait.wq_entry);
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun /*
254*4882a593Smuzhiyun  * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
255*4882a593Smuzhiyun  * part of the structure. This is made more complex by the fact we store
256*4882a593Smuzhiyun  * information about the on-disk values in the VFS inode and so we can't just
257*4882a593Smuzhiyun  * overwrite the values unconditionally. Hence we save the parameters we
258*4882a593Smuzhiyun  * need to retain across reinitialisation, and rewrite them into the VFS inode
259*4882a593Smuzhiyun  * after reinitialisation even if it fails.
260*4882a593Smuzhiyun  */
261*4882a593Smuzhiyun static int
262*4882a593Smuzhiyun xfs_reinit_inode(
263*4882a593Smuzhiyun 	struct xfs_mount	*mp,
264*4882a593Smuzhiyun 	struct inode		*inode)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun 	int		error;
267*4882a593Smuzhiyun 	uint32_t	nlink = inode->i_nlink;
268*4882a593Smuzhiyun 	uint32_t	generation = inode->i_generation;
269*4882a593Smuzhiyun 	uint64_t	version = inode_peek_iversion(inode);
270*4882a593Smuzhiyun 	umode_t		mode = inode->i_mode;
271*4882a593Smuzhiyun 	dev_t		dev = inode->i_rdev;
272*4882a593Smuzhiyun 	kuid_t		uid = inode->i_uid;
273*4882a593Smuzhiyun 	kgid_t		gid = inode->i_gid;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	error = inode_init_always(mp->m_super, inode);
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 	set_nlink(inode, nlink);
278*4882a593Smuzhiyun 	inode->i_generation = generation;
279*4882a593Smuzhiyun 	inode_set_iversion_queried(inode, version);
280*4882a593Smuzhiyun 	inode->i_mode = mode;
281*4882a593Smuzhiyun 	inode->i_rdev = dev;
282*4882a593Smuzhiyun 	inode->i_uid = uid;
283*4882a593Smuzhiyun 	inode->i_gid = gid;
284*4882a593Smuzhiyun 	return error;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun /*
288*4882a593Smuzhiyun  * If we are allocating a new inode, then check what was returned is
289*4882a593Smuzhiyun  * actually a free, empty inode. If we are not allocating an inode,
290*4882a593Smuzhiyun  * then check we didn't find a free inode.
291*4882a593Smuzhiyun  *
292*4882a593Smuzhiyun  * Returns:
293*4882a593Smuzhiyun  *	0		if the inode free state matches the lookup context
294*4882a593Smuzhiyun  *	-ENOENT		if the inode is free and we are not allocating
295*4882a593Smuzhiyun  *	-EFSCORRUPTED	if there is any state mismatch at all
296*4882a593Smuzhiyun  */
297*4882a593Smuzhiyun static int
298*4882a593Smuzhiyun xfs_iget_check_free_state(
299*4882a593Smuzhiyun 	struct xfs_inode	*ip,
300*4882a593Smuzhiyun 	int			flags)
301*4882a593Smuzhiyun {
302*4882a593Smuzhiyun 	if (flags & XFS_IGET_CREATE) {
303*4882a593Smuzhiyun 		/* should be a free inode */
304*4882a593Smuzhiyun 		if (VFS_I(ip)->i_mode != 0) {
305*4882a593Smuzhiyun 			xfs_warn(ip->i_mount,
306*4882a593Smuzhiyun "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
307*4882a593Smuzhiyun 				ip->i_ino, VFS_I(ip)->i_mode);
308*4882a593Smuzhiyun 			return -EFSCORRUPTED;
309*4882a593Smuzhiyun 		}
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 		if (ip->i_d.di_nblocks != 0) {
312*4882a593Smuzhiyun 			xfs_warn(ip->i_mount,
313*4882a593Smuzhiyun "Corruption detected! Free inode 0x%llx has blocks allocated!",
314*4882a593Smuzhiyun 				ip->i_ino);
315*4882a593Smuzhiyun 			return -EFSCORRUPTED;
316*4882a593Smuzhiyun 		}
317*4882a593Smuzhiyun 		return 0;
318*4882a593Smuzhiyun 	}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	/* should be an allocated inode */
321*4882a593Smuzhiyun 	if (VFS_I(ip)->i_mode == 0)
322*4882a593Smuzhiyun 		return -ENOENT;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	return 0;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun /*
328*4882a593Smuzhiyun  * Check the validity of the inode we just found in the cache
329*4882a593Smuzhiyun  */
330*4882a593Smuzhiyun static int
331*4882a593Smuzhiyun xfs_iget_cache_hit(
332*4882a593Smuzhiyun 	struct xfs_perag	*pag,
333*4882a593Smuzhiyun 	struct xfs_inode	*ip,
334*4882a593Smuzhiyun 	xfs_ino_t		ino,
335*4882a593Smuzhiyun 	int			flags,
336*4882a593Smuzhiyun 	int			lock_flags) __releases(RCU)
337*4882a593Smuzhiyun {
338*4882a593Smuzhiyun 	struct inode		*inode = VFS_I(ip);
339*4882a593Smuzhiyun 	struct xfs_mount	*mp = ip->i_mount;
340*4882a593Smuzhiyun 	int			error;
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	/*
343*4882a593Smuzhiyun 	 * check for re-use of an inode within an RCU grace period due to the
344*4882a593Smuzhiyun 	 * radix tree nodes not being updated yet. We monitor for this by
345*4882a593Smuzhiyun 	 * setting the inode number to zero before freeing the inode structure.
346*4882a593Smuzhiyun 	 * If the inode has been reallocated and set up, then the inode number
347*4882a593Smuzhiyun 	 * will not match, so check for that, too.
348*4882a593Smuzhiyun 	 */
349*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
350*4882a593Smuzhiyun 	if (ip->i_ino != ino) {
351*4882a593Smuzhiyun 		trace_xfs_iget_skip(ip);
352*4882a593Smuzhiyun 		XFS_STATS_INC(mp, xs_ig_frecycle);
353*4882a593Smuzhiyun 		error = -EAGAIN;
354*4882a593Smuzhiyun 		goto out_error;
355*4882a593Smuzhiyun 	}
356*4882a593Smuzhiyun 
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	/*
359*4882a593Smuzhiyun 	 * If we are racing with another cache hit that is currently
360*4882a593Smuzhiyun 	 * instantiating this inode or currently recycling it out of
361*4882a593Smuzhiyun 	 * reclaimable state, wait for the initialisation to complete
362*4882a593Smuzhiyun 	 * before continuing.
363*4882a593Smuzhiyun 	 *
364*4882a593Smuzhiyun 	 * XXX(hch): eventually we should do something equivalent to
365*4882a593Smuzhiyun 	 *	     wait_on_inode to wait for these flags to be cleared
366*4882a593Smuzhiyun 	 *	     instead of polling for it.
367*4882a593Smuzhiyun 	 */
368*4882a593Smuzhiyun 	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
369*4882a593Smuzhiyun 		trace_xfs_iget_skip(ip);
370*4882a593Smuzhiyun 		XFS_STATS_INC(mp, xs_ig_frecycle);
371*4882a593Smuzhiyun 		error = -EAGAIN;
372*4882a593Smuzhiyun 		goto out_error;
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	/*
376*4882a593Smuzhiyun 	 * Check the inode free state is valid. This also detects lookup
377*4882a593Smuzhiyun 	 * racing with unlinks.
378*4882a593Smuzhiyun 	 */
379*4882a593Smuzhiyun 	error = xfs_iget_check_free_state(ip, flags);
380*4882a593Smuzhiyun 	if (error)
381*4882a593Smuzhiyun 		goto out_error;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	/*
384*4882a593Smuzhiyun 	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
385*4882a593Smuzhiyun 	 * Need to carefully get it back into a usable state.
386*4882a593Smuzhiyun 	 */
387*4882a593Smuzhiyun 	if (ip->i_flags & XFS_IRECLAIMABLE) {
388*4882a593Smuzhiyun 		trace_xfs_iget_reclaim(ip);
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 		if (flags & XFS_IGET_INCORE) {
391*4882a593Smuzhiyun 			error = -EAGAIN;
392*4882a593Smuzhiyun 			goto out_error;
393*4882a593Smuzhiyun 		}
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 		/*
396*4882a593Smuzhiyun 		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
397*4882a593Smuzhiyun 		 * from stomping over us while we recycle the inode.  We can't
398*4882a593Smuzhiyun 		 * clear the radix tree reclaimable tag yet as it requires
399*4882a593Smuzhiyun 		 * pag_ici_lock to be held exclusive.
400*4882a593Smuzhiyun 		 */
401*4882a593Smuzhiyun 		ip->i_flags |= XFS_IRECLAIM;
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 		spin_unlock(&ip->i_flags_lock);
404*4882a593Smuzhiyun 		rcu_read_unlock();
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
407*4882a593Smuzhiyun 		error = xfs_reinit_inode(mp, inode);
408*4882a593Smuzhiyun 		if (error) {
409*4882a593Smuzhiyun 			bool wake;
410*4882a593Smuzhiyun 			/*
411*4882a593Smuzhiyun 			 * Re-initializing the inode failed, and we are in deep
412*4882a593Smuzhiyun 			 * trouble.  Try to re-add it to the reclaim list.
413*4882a593Smuzhiyun 			 */
414*4882a593Smuzhiyun 			rcu_read_lock();
415*4882a593Smuzhiyun 			spin_lock(&ip->i_flags_lock);
416*4882a593Smuzhiyun 			wake = !!__xfs_iflags_test(ip, XFS_INEW);
417*4882a593Smuzhiyun 			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
418*4882a593Smuzhiyun 			if (wake)
419*4882a593Smuzhiyun 				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
420*4882a593Smuzhiyun 			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
421*4882a593Smuzhiyun 			trace_xfs_iget_reclaim_fail(ip);
422*4882a593Smuzhiyun 			goto out_error;
423*4882a593Smuzhiyun 		}
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 		spin_lock(&pag->pag_ici_lock);
426*4882a593Smuzhiyun 		spin_lock(&ip->i_flags_lock);
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun 		/*
429*4882a593Smuzhiyun 		 * Clear the per-lifetime state in the inode as we are now
430*4882a593Smuzhiyun 		 * effectively a new inode and need to return to the initial
431*4882a593Smuzhiyun 		 * state before reuse occurs.
432*4882a593Smuzhiyun 		 */
433*4882a593Smuzhiyun 		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
434*4882a593Smuzhiyun 		ip->i_flags |= XFS_INEW;
435*4882a593Smuzhiyun 		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
436*4882a593Smuzhiyun 		inode->i_state = I_NEW;
437*4882a593Smuzhiyun 		ip->i_sick = 0;
438*4882a593Smuzhiyun 		ip->i_checked = 0;
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun 		spin_unlock(&ip->i_flags_lock);
441*4882a593Smuzhiyun 		spin_unlock(&pag->pag_ici_lock);
442*4882a593Smuzhiyun 	} else {
443*4882a593Smuzhiyun 		/* If the VFS inode is being torn down, pause and try again. */
444*4882a593Smuzhiyun 		if (!igrab(inode)) {
445*4882a593Smuzhiyun 			trace_xfs_iget_skip(ip);
446*4882a593Smuzhiyun 			error = -EAGAIN;
447*4882a593Smuzhiyun 			goto out_error;
448*4882a593Smuzhiyun 		}
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 		/* We've got a live one. */
451*4882a593Smuzhiyun 		spin_unlock(&ip->i_flags_lock);
452*4882a593Smuzhiyun 		rcu_read_unlock();
453*4882a593Smuzhiyun 		trace_xfs_iget_hit(ip);
454*4882a593Smuzhiyun 	}
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	if (lock_flags != 0)
457*4882a593Smuzhiyun 		xfs_ilock(ip, lock_flags);
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	if (!(flags & XFS_IGET_INCORE))
460*4882a593Smuzhiyun 		xfs_iflags_clear(ip, XFS_ISTALE);
461*4882a593Smuzhiyun 	XFS_STATS_INC(mp, xs_ig_found);
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	return 0;
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun out_error:
466*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
467*4882a593Smuzhiyun 	rcu_read_unlock();
468*4882a593Smuzhiyun 	return error;
469*4882a593Smuzhiyun }
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun static int
473*4882a593Smuzhiyun xfs_iget_cache_miss(
474*4882a593Smuzhiyun 	struct xfs_mount	*mp,
475*4882a593Smuzhiyun 	struct xfs_perag	*pag,
476*4882a593Smuzhiyun 	xfs_trans_t		*tp,
477*4882a593Smuzhiyun 	xfs_ino_t		ino,
478*4882a593Smuzhiyun 	struct xfs_inode	**ipp,
479*4882a593Smuzhiyun 	int			flags,
480*4882a593Smuzhiyun 	int			lock_flags)
481*4882a593Smuzhiyun {
482*4882a593Smuzhiyun 	struct xfs_inode	*ip;
483*4882a593Smuzhiyun 	int			error;
484*4882a593Smuzhiyun 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
485*4882a593Smuzhiyun 	int			iflags;
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	ip = xfs_inode_alloc(mp, ino);
488*4882a593Smuzhiyun 	if (!ip)
489*4882a593Smuzhiyun 		return -ENOMEM;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
492*4882a593Smuzhiyun 	if (error)
493*4882a593Smuzhiyun 		goto out_destroy;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	/*
496*4882a593Smuzhiyun 	 * For version 5 superblocks, if we are initialising a new inode and we
497*4882a593Smuzhiyun 	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
498*4882a593Smuzhiyun 	 * simply build the new inode core with a random generation number.
499*4882a593Smuzhiyun 	 *
500*4882a593Smuzhiyun 	 * For version 4 (and older) superblocks, log recovery is dependent on
501*4882a593Smuzhiyun 	 * the di_flushiter field being initialised from the current on-disk
502*4882a593Smuzhiyun 	 * value and hence we must also read the inode off disk even when
503*4882a593Smuzhiyun 	 * initializing new inodes.
504*4882a593Smuzhiyun 	 */
505*4882a593Smuzhiyun 	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
506*4882a593Smuzhiyun 	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
507*4882a593Smuzhiyun 		VFS_I(ip)->i_generation = prandom_u32();
508*4882a593Smuzhiyun 	} else {
509*4882a593Smuzhiyun 		struct xfs_dinode	*dip;
510*4882a593Smuzhiyun 		struct xfs_buf		*bp;
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun 		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
513*4882a593Smuzhiyun 		if (error)
514*4882a593Smuzhiyun 			goto out_destroy;
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 		error = xfs_inode_from_disk(ip, dip);
517*4882a593Smuzhiyun 		if (!error)
518*4882a593Smuzhiyun 			xfs_buf_set_ref(bp, XFS_INO_REF);
519*4882a593Smuzhiyun 		xfs_trans_brelse(tp, bp);
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 		if (error)
522*4882a593Smuzhiyun 			goto out_destroy;
523*4882a593Smuzhiyun 	}
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	trace_xfs_iget_miss(ip);
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	/*
528*4882a593Smuzhiyun 	 * Check the inode free state is valid. This also detects lookup
529*4882a593Smuzhiyun 	 * racing with unlinks.
530*4882a593Smuzhiyun 	 */
531*4882a593Smuzhiyun 	error = xfs_iget_check_free_state(ip, flags);
532*4882a593Smuzhiyun 	if (error)
533*4882a593Smuzhiyun 		goto out_destroy;
534*4882a593Smuzhiyun 
535*4882a593Smuzhiyun 	/*
536*4882a593Smuzhiyun 	 * Preload the radix tree so we can insert safely under the
537*4882a593Smuzhiyun 	 * write spinlock. Note that we cannot sleep inside the preload
538*4882a593Smuzhiyun 	 * region. Since we can be called from transaction context, don't
539*4882a593Smuzhiyun 	 * recurse into the file system.
540*4882a593Smuzhiyun 	 */
541*4882a593Smuzhiyun 	if (radix_tree_preload(GFP_NOFS)) {
542*4882a593Smuzhiyun 		error = -EAGAIN;
543*4882a593Smuzhiyun 		goto out_destroy;
544*4882a593Smuzhiyun 	}
545*4882a593Smuzhiyun 
546*4882a593Smuzhiyun 	/*
547*4882a593Smuzhiyun 	 * Because the inode hasn't been added to the radix-tree yet it can't
548*4882a593Smuzhiyun 	 * be found by another thread, so we can do the non-sleeping lock here.
549*4882a593Smuzhiyun 	 */
550*4882a593Smuzhiyun 	if (lock_flags) {
551*4882a593Smuzhiyun 		if (!xfs_ilock_nowait(ip, lock_flags))
552*4882a593Smuzhiyun 			BUG();
553*4882a593Smuzhiyun 	}
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	/*
556*4882a593Smuzhiyun 	 * These values must be set before inserting the inode into the radix
557*4882a593Smuzhiyun 	 * tree as the moment it is inserted a concurrent lookup (allowed by the
558*4882a593Smuzhiyun 	 * RCU locking mechanism) can find it and that lookup must see that this
559*4882a593Smuzhiyun 	 * is an inode currently under construction (i.e. that XFS_INEW is set).
560*4882a593Smuzhiyun 	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
561*4882a593Smuzhiyun 	 * memory barrier that ensures this detection works correctly at lookup
562*4882a593Smuzhiyun 	 * time.
563*4882a593Smuzhiyun 	 */
564*4882a593Smuzhiyun 	iflags = XFS_INEW;
565*4882a593Smuzhiyun 	if (flags & XFS_IGET_DONTCACHE)
566*4882a593Smuzhiyun 		d_mark_dontcache(VFS_I(ip));
567*4882a593Smuzhiyun 	ip->i_udquot = NULL;
568*4882a593Smuzhiyun 	ip->i_gdquot = NULL;
569*4882a593Smuzhiyun 	ip->i_pdquot = NULL;
570*4882a593Smuzhiyun 	xfs_iflags_set(ip, iflags);
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	/* insert the new inode */
573*4882a593Smuzhiyun 	spin_lock(&pag->pag_ici_lock);
574*4882a593Smuzhiyun 	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
575*4882a593Smuzhiyun 	if (unlikely(error)) {
576*4882a593Smuzhiyun 		WARN_ON(error != -EEXIST);
577*4882a593Smuzhiyun 		XFS_STATS_INC(mp, xs_ig_dup);
578*4882a593Smuzhiyun 		error = -EAGAIN;
579*4882a593Smuzhiyun 		goto out_preload_end;
580*4882a593Smuzhiyun 	}
581*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
582*4882a593Smuzhiyun 	radix_tree_preload_end();
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	*ipp = ip;
585*4882a593Smuzhiyun 	return 0;
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun out_preload_end:
588*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
589*4882a593Smuzhiyun 	radix_tree_preload_end();
590*4882a593Smuzhiyun 	if (lock_flags)
591*4882a593Smuzhiyun 		xfs_iunlock(ip, lock_flags);
592*4882a593Smuzhiyun out_destroy:
593*4882a593Smuzhiyun 	__destroy_inode(VFS_I(ip));
594*4882a593Smuzhiyun 	xfs_inode_free(ip);
595*4882a593Smuzhiyun 	return error;
596*4882a593Smuzhiyun }
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun /*
599*4882a593Smuzhiyun  * Look up an inode by number in the given file system.  The inode is looked up
600*4882a593Smuzhiyun  * in the cache held in each AG.  If the inode is found in the cache, initialise
601*4882a593Smuzhiyun  * the vfs inode if necessary.
602*4882a593Smuzhiyun  *
603*4882a593Smuzhiyun  * If it is not in core, read it in from the file system's device, add it to the
604*4882a593Smuzhiyun  * cache and initialise the vfs inode.
605*4882a593Smuzhiyun  *
606*4882a593Smuzhiyun  * The inode is locked according to the value of the lock_flags parameter.
607*4882a593Smuzhiyun  * Inode lookup is only done during metadata operations and not as part of the
608*4882a593Smuzhiyun  * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
609*4882a593Smuzhiyun  */
610*4882a593Smuzhiyun int
611*4882a593Smuzhiyun xfs_iget(
612*4882a593Smuzhiyun 	struct xfs_mount	*mp,
613*4882a593Smuzhiyun 	struct xfs_trans	*tp,
614*4882a593Smuzhiyun 	xfs_ino_t		ino,
615*4882a593Smuzhiyun 	uint			flags,
616*4882a593Smuzhiyun 	uint			lock_flags,
617*4882a593Smuzhiyun 	struct xfs_inode	**ipp)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun 	struct xfs_inode	*ip;
620*4882a593Smuzhiyun 	struct xfs_perag	*pag;
621*4882a593Smuzhiyun 	xfs_agino_t		agino;
622*4882a593Smuzhiyun 	int			error;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 	/* reject inode numbers outside existing AGs */
627*4882a593Smuzhiyun 	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
628*4882a593Smuzhiyun 		return -EINVAL;
629*4882a593Smuzhiyun 
630*4882a593Smuzhiyun 	XFS_STATS_INC(mp, xs_ig_attempts);
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	/* get the perag structure and ensure that it's inode capable */
633*4882a593Smuzhiyun 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
634*4882a593Smuzhiyun 	agino = XFS_INO_TO_AGINO(mp, ino);
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun again:
637*4882a593Smuzhiyun 	error = 0;
638*4882a593Smuzhiyun 	rcu_read_lock();
639*4882a593Smuzhiyun 	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	if (ip) {
642*4882a593Smuzhiyun 		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
643*4882a593Smuzhiyun 		if (error)
644*4882a593Smuzhiyun 			goto out_error_or_again;
645*4882a593Smuzhiyun 	} else {
646*4882a593Smuzhiyun 		rcu_read_unlock();
647*4882a593Smuzhiyun 		if (flags & XFS_IGET_INCORE) {
648*4882a593Smuzhiyun 			error = -ENODATA;
649*4882a593Smuzhiyun 			goto out_error_or_again;
650*4882a593Smuzhiyun 		}
651*4882a593Smuzhiyun 		XFS_STATS_INC(mp, xs_ig_missed);
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
654*4882a593Smuzhiyun 							flags, lock_flags);
655*4882a593Smuzhiyun 		if (error)
656*4882a593Smuzhiyun 			goto out_error_or_again;
657*4882a593Smuzhiyun 	}
658*4882a593Smuzhiyun 	xfs_perag_put(pag);
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	*ipp = ip;
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	/*
663*4882a593Smuzhiyun 	 * If we have a real type for an on-disk inode, we can setup the inode
664*4882a593Smuzhiyun 	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
665*4882a593Smuzhiyun 	 */
666*4882a593Smuzhiyun 	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
667*4882a593Smuzhiyun 		xfs_setup_existing_inode(ip);
668*4882a593Smuzhiyun 	return 0;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun out_error_or_again:
671*4882a593Smuzhiyun 	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
672*4882a593Smuzhiyun 		delay(1);
673*4882a593Smuzhiyun 		goto again;
674*4882a593Smuzhiyun 	}
675*4882a593Smuzhiyun 	xfs_perag_put(pag);
676*4882a593Smuzhiyun 	return error;
677*4882a593Smuzhiyun }
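
/*
 * Illustrative use of xfs_iget() (a sketch, not a caller in this file; the
 * transaction, lock mode and error handling depend on the call site):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... operate on the inode with the ILOCK held shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	xfs_irele(ip);
 */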
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun /*
680*4882a593Smuzhiyun  * "Is this a cached inode that's also allocated?"
681*4882a593Smuzhiyun  *
682*4882a593Smuzhiyun  * Look up an inode by number in the given file system.  If the inode is
683*4882a593Smuzhiyun  * in cache and isn't in purgatory, return 1 if the inode is allocated
684*4882a593Smuzhiyun  * and 0 if it is not.  For all other cases (not in cache, being torn
685*4882a593Smuzhiyun  * down, etc.), return a negative error code.
686*4882a593Smuzhiyun  *
687*4882a593Smuzhiyun  * The caller has to prevent inode allocation and freeing activity,
688*4882a593Smuzhiyun  * presumably by locking the AGI buffer.   This is to ensure that an
689*4882a593Smuzhiyun  * inode cannot transition from allocated to freed until the caller is
690*4882a593Smuzhiyun  * ready to allow that.  If the inode is in an intermediate state (new,
691*4882a593Smuzhiyun  * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
692*4882a593Smuzhiyun  * inode is not in the cache, -ENOENT will be returned.  The caller must
693*4882a593Smuzhiyun  * deal with these scenarios appropriately.
694*4882a593Smuzhiyun  *
695*4882a593Smuzhiyun  * This is a specialized use case for the online scrubber; if you're
696*4882a593Smuzhiyun  * reading this, you probably want xfs_iget.
697*4882a593Smuzhiyun  */
698*4882a593Smuzhiyun int
699*4882a593Smuzhiyun xfs_icache_inode_is_allocated(
700*4882a593Smuzhiyun 	struct xfs_mount	*mp,
701*4882a593Smuzhiyun 	struct xfs_trans	*tp,
702*4882a593Smuzhiyun 	xfs_ino_t		ino,
703*4882a593Smuzhiyun 	bool			*inuse)
704*4882a593Smuzhiyun {
705*4882a593Smuzhiyun 	struct xfs_inode	*ip;
706*4882a593Smuzhiyun 	int			error;
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
709*4882a593Smuzhiyun 	if (error)
710*4882a593Smuzhiyun 		return error;
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 	*inuse = !!(VFS_I(ip)->i_mode);
713*4882a593Smuzhiyun 	xfs_irele(ip);
714*4882a593Smuzhiyun 	return 0;
715*4882a593Smuzhiyun }
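
/*
 * Sketch of a scrub-style caller (illustrative only; the AGI locking that
 * pins the inode's allocation state is the caller's job and is omitted):
 *
 *	bool	inuse;
 *	int	error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (!error)
 *		... trust inuse ...
 *	else if (error == -ENOENT || error == -EAGAIN)
 *		... not answerable from the cache; check the on-disk inode ...
 */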
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun /*
718*4882a593Smuzhiyun  * The inode lookup is done in batches to keep the amount of lock traffic and
719*4882a593Smuzhiyun  * radix tree lookups to a minimum. The batch size is a trade off between
720*4882a593Smuzhiyun  * lookup reduction and stack usage. This is in the reclaim path, so we can't
721*4882a593Smuzhiyun  * be too greedy.
722*4882a593Smuzhiyun  */
723*4882a593Smuzhiyun #define XFS_LOOKUP_BATCH	32
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun /*
726*4882a593Smuzhiyun  * Decide if the given @ip is eligible to be a part of the inode walk, and
727*4882a593Smuzhiyun  * grab it if so.  Returns true if it's ready to go or false if we should just
728*4882a593Smuzhiyun  * ignore it.
729*4882a593Smuzhiyun  */
730*4882a593Smuzhiyun STATIC bool
731*4882a593Smuzhiyun xfs_inode_walk_ag_grab(
732*4882a593Smuzhiyun 	struct xfs_inode	*ip,
733*4882a593Smuzhiyun 	int			flags)
734*4882a593Smuzhiyun {
735*4882a593Smuzhiyun 	struct inode		*inode = VFS_I(ip);
736*4882a593Smuzhiyun 	bool			newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	ASSERT(rcu_read_lock_held());
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	/* Check for stale RCU freed inode */
741*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
742*4882a593Smuzhiyun 	if (!ip->i_ino)
743*4882a593Smuzhiyun 		goto out_unlock_noent;
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
746*4882a593Smuzhiyun 	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
747*4882a593Smuzhiyun 	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
748*4882a593Smuzhiyun 		goto out_unlock_noent;
749*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	/* nothing to sync during shutdown */
752*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
753*4882a593Smuzhiyun 		return false;
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun 	/* If we can't grab the inode, it must be on its way to reclaim. */
756*4882a593Smuzhiyun 	if (!igrab(inode))
757*4882a593Smuzhiyun 		return false;
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	/* inode is valid */
760*4882a593Smuzhiyun 	return true;
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun out_unlock_noent:
763*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
764*4882a593Smuzhiyun 	return false;
765*4882a593Smuzhiyun }
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun /*
768*4882a593Smuzhiyun  * For a given per-AG structure @pag, grab, @execute, and rele all incore
769*4882a593Smuzhiyun  * inodes with the given radix tree @tag.
770*4882a593Smuzhiyun  */
771*4882a593Smuzhiyun STATIC int
772*4882a593Smuzhiyun xfs_inode_walk_ag(
773*4882a593Smuzhiyun 	struct xfs_perag	*pag,
774*4882a593Smuzhiyun 	int			iter_flags,
775*4882a593Smuzhiyun 	int			(*execute)(struct xfs_inode *ip, void *args),
776*4882a593Smuzhiyun 	void			*args,
777*4882a593Smuzhiyun 	int			tag)
778*4882a593Smuzhiyun {
779*4882a593Smuzhiyun 	struct xfs_mount	*mp = pag->pag_mount;
780*4882a593Smuzhiyun 	uint32_t		first_index;
781*4882a593Smuzhiyun 	int			last_error = 0;
782*4882a593Smuzhiyun 	int			skipped;
783*4882a593Smuzhiyun 	bool			done;
784*4882a593Smuzhiyun 	int			nr_found;
785*4882a593Smuzhiyun 
786*4882a593Smuzhiyun restart:
787*4882a593Smuzhiyun 	done = false;
788*4882a593Smuzhiyun 	skipped = 0;
789*4882a593Smuzhiyun 	first_index = 0;
790*4882a593Smuzhiyun 	nr_found = 0;
791*4882a593Smuzhiyun 	do {
792*4882a593Smuzhiyun 		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
793*4882a593Smuzhiyun 		int		error = 0;
794*4882a593Smuzhiyun 		int		i;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 		rcu_read_lock();
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun 		if (tag == XFS_ICI_NO_TAG)
799*4882a593Smuzhiyun 			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
800*4882a593Smuzhiyun 					(void **)batch, first_index,
801*4882a593Smuzhiyun 					XFS_LOOKUP_BATCH);
802*4882a593Smuzhiyun 		else
803*4882a593Smuzhiyun 			nr_found = radix_tree_gang_lookup_tag(
804*4882a593Smuzhiyun 					&pag->pag_ici_root,
805*4882a593Smuzhiyun 					(void **) batch, first_index,
806*4882a593Smuzhiyun 					XFS_LOOKUP_BATCH, tag);
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 		if (!nr_found) {
809*4882a593Smuzhiyun 			rcu_read_unlock();
810*4882a593Smuzhiyun 			break;
811*4882a593Smuzhiyun 		}
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun 		/*
814*4882a593Smuzhiyun 		 * Grab the inodes before we drop the lock. If we found
815*4882a593Smuzhiyun 		 * nothing, nr == 0 and the loop will be skipped.
816*4882a593Smuzhiyun 		 */
817*4882a593Smuzhiyun 		for (i = 0; i < nr_found; i++) {
818*4882a593Smuzhiyun 			struct xfs_inode *ip = batch[i];
819*4882a593Smuzhiyun 
820*4882a593Smuzhiyun 			if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
821*4882a593Smuzhiyun 				batch[i] = NULL;
822*4882a593Smuzhiyun 
823*4882a593Smuzhiyun 			/*
824*4882a593Smuzhiyun 			 * Update the index for the next lookup. Catch
825*4882a593Smuzhiyun 			 * overflows into the next AG range which can occur if
826*4882a593Smuzhiyun 			 * we have inodes in the last block of the AG and we
827*4882a593Smuzhiyun 			 * are currently pointing to the last inode.
828*4882a593Smuzhiyun 			 *
829*4882a593Smuzhiyun 			 * Because we may see inodes that are from the wrong AG
830*4882a593Smuzhiyun 			 * due to RCU freeing and reallocation, only update the
831*4882a593Smuzhiyun 			 * index if it lies in this AG. It was a race that led
832*4882a593Smuzhiyun 			 * us to see this inode, so another lookup from the
833*4882a593Smuzhiyun 			 * same index will not find it again.
834*4882a593Smuzhiyun 			 */
835*4882a593Smuzhiyun 			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
836*4882a593Smuzhiyun 				continue;
837*4882a593Smuzhiyun 			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
838*4882a593Smuzhiyun 			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
839*4882a593Smuzhiyun 				done = true;
840*4882a593Smuzhiyun 		}
841*4882a593Smuzhiyun 
842*4882a593Smuzhiyun 		/* unlock now we've grabbed the inodes. */
843*4882a593Smuzhiyun 		rcu_read_unlock();
844*4882a593Smuzhiyun 
845*4882a593Smuzhiyun 		for (i = 0; i < nr_found; i++) {
846*4882a593Smuzhiyun 			if (!batch[i])
847*4882a593Smuzhiyun 				continue;
848*4882a593Smuzhiyun 			if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
849*4882a593Smuzhiyun 			    xfs_iflags_test(batch[i], XFS_INEW))
850*4882a593Smuzhiyun 				xfs_inew_wait(batch[i]);
851*4882a593Smuzhiyun 			error = execute(batch[i], args);
852*4882a593Smuzhiyun 			xfs_irele(batch[i]);
853*4882a593Smuzhiyun 			if (error == -EAGAIN) {
854*4882a593Smuzhiyun 				skipped++;
855*4882a593Smuzhiyun 				continue;
856*4882a593Smuzhiyun 			}
857*4882a593Smuzhiyun 			if (error && last_error != -EFSCORRUPTED)
858*4882a593Smuzhiyun 				last_error = error;
859*4882a593Smuzhiyun 		}
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 		/* bail out if the filesystem is corrupted.  */
862*4882a593Smuzhiyun 		if (error == -EFSCORRUPTED)
863*4882a593Smuzhiyun 			break;
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 		cond_resched();
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	} while (nr_found && !done);
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	if (skipped) {
870*4882a593Smuzhiyun 		delay(1);
871*4882a593Smuzhiyun 		goto restart;
872*4882a593Smuzhiyun 	}
873*4882a593Smuzhiyun 	return last_error;
874*4882a593Smuzhiyun }
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun /* Fetch the next (possibly tagged) per-AG structure. */
877*4882a593Smuzhiyun static inline struct xfs_perag *
878*4882a593Smuzhiyun xfs_inode_walk_get_perag(
879*4882a593Smuzhiyun 	struct xfs_mount	*mp,
880*4882a593Smuzhiyun 	xfs_agnumber_t		agno,
881*4882a593Smuzhiyun 	int			tag)
882*4882a593Smuzhiyun {
883*4882a593Smuzhiyun 	if (tag == XFS_ICI_NO_TAG)
884*4882a593Smuzhiyun 		return xfs_perag_get(mp, agno);
885*4882a593Smuzhiyun 	return xfs_perag_get_tag(mp, agno, tag);
886*4882a593Smuzhiyun }
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun /*
889*4882a593Smuzhiyun  * Call the @execute function on all incore inodes matching the radix tree
890*4882a593Smuzhiyun  * @tag.
891*4882a593Smuzhiyun  */
892*4882a593Smuzhiyun int
893*4882a593Smuzhiyun xfs_inode_walk(
894*4882a593Smuzhiyun 	struct xfs_mount	*mp,
895*4882a593Smuzhiyun 	int			iter_flags,
896*4882a593Smuzhiyun 	int			(*execute)(struct xfs_inode *ip, void *args),
897*4882a593Smuzhiyun 	void			*args,
898*4882a593Smuzhiyun 	int			tag)
899*4882a593Smuzhiyun {
900*4882a593Smuzhiyun 	struct xfs_perag	*pag;
901*4882a593Smuzhiyun 	int			error = 0;
902*4882a593Smuzhiyun 	int			last_error = 0;
903*4882a593Smuzhiyun 	xfs_agnumber_t		ag;
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	ag = 0;
906*4882a593Smuzhiyun 	while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
907*4882a593Smuzhiyun 		ag = pag->pag_agno + 1;
908*4882a593Smuzhiyun 		error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
909*4882a593Smuzhiyun 		xfs_perag_put(pag);
910*4882a593Smuzhiyun 		if (error) {
911*4882a593Smuzhiyun 			last_error = error;
912*4882a593Smuzhiyun 			if (error == -EFSCORRUPTED)
913*4882a593Smuzhiyun 				break;
914*4882a593Smuzhiyun 		}
915*4882a593Smuzhiyun 	}
916*4882a593Smuzhiyun 	return last_error;
917*4882a593Smuzhiyun }
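
/*
 * Sketch of an @execute callback and a full-filesystem walk (the callback
 * name and body are illustrative, not part of this file). The callback
 * returns 0, an error, or -EAGAIN to have the walk retried:
 *
 *	static int
 *	xfs_example_visit(
 *		struct xfs_inode	*ip,
 *		void			*args)
 *	{
 *		return 0;
 *	}
 *
 *	error = xfs_inode_walk(mp, 0, xfs_example_visit, NULL, XFS_ICI_NO_TAG);
 */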
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun /*
920*4882a593Smuzhiyun  * Background scanning to trim post-EOF preallocated space. This is queued
921*4882a593Smuzhiyun  * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
922*4882a593Smuzhiyun  */
923*4882a593Smuzhiyun void
924*4882a593Smuzhiyun xfs_queue_eofblocks(
925*4882a593Smuzhiyun 	struct xfs_mount *mp)
926*4882a593Smuzhiyun {
927*4882a593Smuzhiyun 	rcu_read_lock();
928*4882a593Smuzhiyun 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
929*4882a593Smuzhiyun 		queue_delayed_work(mp->m_eofblocks_workqueue,
930*4882a593Smuzhiyun 				   &mp->m_eofblocks_work,
931*4882a593Smuzhiyun 				   msecs_to_jiffies(xfs_eofb_secs * 1000));
932*4882a593Smuzhiyun 	rcu_read_unlock();
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun void
936*4882a593Smuzhiyun xfs_eofblocks_worker(
937*4882a593Smuzhiyun 	struct work_struct *work)
938*4882a593Smuzhiyun {
939*4882a593Smuzhiyun 	struct xfs_mount *mp = container_of(to_delayed_work(work),
940*4882a593Smuzhiyun 				struct xfs_mount, m_eofblocks_work);
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 	if (!sb_start_write_trylock(mp->m_super))
943*4882a593Smuzhiyun 		return;
944*4882a593Smuzhiyun 	xfs_icache_free_eofblocks(mp, NULL);
945*4882a593Smuzhiyun 	sb_end_write(mp->m_super);
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	xfs_queue_eofblocks(mp);
948*4882a593Smuzhiyun }
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun /*
951*4882a593Smuzhiyun  * Background scanning to trim preallocated CoW space. This is queued
952*4882a593Smuzhiyun  * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
953*4882a593Smuzhiyun  * (We'll just piggyback on the post-EOF prealloc space workqueue.)
954*4882a593Smuzhiyun  */
955*4882a593Smuzhiyun void
956*4882a593Smuzhiyun xfs_queue_cowblocks(
957*4882a593Smuzhiyun 	struct xfs_mount *mp)
958*4882a593Smuzhiyun {
959*4882a593Smuzhiyun 	rcu_read_lock();
960*4882a593Smuzhiyun 	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
961*4882a593Smuzhiyun 		queue_delayed_work(mp->m_eofblocks_workqueue,
962*4882a593Smuzhiyun 				   &mp->m_cowblocks_work,
963*4882a593Smuzhiyun 				   msecs_to_jiffies(xfs_cowb_secs * 1000));
964*4882a593Smuzhiyun 	rcu_read_unlock();
965*4882a593Smuzhiyun }
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun void
968*4882a593Smuzhiyun xfs_cowblocks_worker(
969*4882a593Smuzhiyun 	struct work_struct *work)
970*4882a593Smuzhiyun {
971*4882a593Smuzhiyun 	struct xfs_mount *mp = container_of(to_delayed_work(work),
972*4882a593Smuzhiyun 				struct xfs_mount, m_cowblocks_work);
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	if (!sb_start_write_trylock(mp->m_super))
975*4882a593Smuzhiyun 		return;
976*4882a593Smuzhiyun 	xfs_icache_free_cowblocks(mp, NULL);
977*4882a593Smuzhiyun 	sb_end_write(mp->m_super);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	xfs_queue_cowblocks(mp);
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun 
982*4882a593Smuzhiyun /*
983*4882a593Smuzhiyun  * Grab the inode for reclaim exclusively.
984*4882a593Smuzhiyun  *
985*4882a593Smuzhiyun  * We have found this inode via a lookup under RCU, so the inode may have
986*4882a593Smuzhiyun  * already been freed, or it may be in the process of being recycled by
987*4882a593Smuzhiyun  * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
988*4882a593Smuzhiyun  * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
989*4882a593Smuzhiyun  * will not be set. Hence we need to check for both these flag conditions to
990*4882a593Smuzhiyun  * avoid inodes that are no longer reclaim candidates.
991*4882a593Smuzhiyun  *
992*4882a593Smuzhiyun  * Note: checking for other state flags here, under the i_flags_lock or not, is
993*4882a593Smuzhiyun  * racy and should be avoided. Those races should be resolved only after we have
994*4882a593Smuzhiyun  * ensured that we are able to reclaim this inode and the world can see that we
995*4882a593Smuzhiyun  * are going to reclaim it.
996*4882a593Smuzhiyun  *
997*4882a593Smuzhiyun  * Return true if we grabbed it, false otherwise.
998*4882a593Smuzhiyun  */
999*4882a593Smuzhiyun static bool
1000*4882a593Smuzhiyun xfs_reclaim_inode_grab(
1001*4882a593Smuzhiyun 	struct xfs_inode	*ip)
1002*4882a593Smuzhiyun {
1003*4882a593Smuzhiyun 	ASSERT(rcu_read_lock_held());
1004*4882a593Smuzhiyun 
1005*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
1006*4882a593Smuzhiyun 	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1007*4882a593Smuzhiyun 	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1008*4882a593Smuzhiyun 		/* not a reclaim candidate. */
1009*4882a593Smuzhiyun 		spin_unlock(&ip->i_flags_lock);
1010*4882a593Smuzhiyun 		return false;
1011*4882a593Smuzhiyun 	}
1012*4882a593Smuzhiyun 	__xfs_iflags_set(ip, XFS_IRECLAIM);
1013*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
1014*4882a593Smuzhiyun 	return true;
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun /*
1018*4882a593Smuzhiyun  * Inode reclaim is non-blocking, so the default action if progress cannot be
1019*4882a593Smuzhiyun  * made is to "requeue" the inode for reclaim by unlocking it and clearing the
1020*4882a593Smuzhiyun  * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
1021*4882a593Smuzhiyun  * blocking anymore and hence we can wait for the inode to be able to reclaim
1022*4882a593Smuzhiyun  * it.
1023*4882a593Smuzhiyun  *
1024*4882a593Smuzhiyun  * We do no IO here - if callers require inodes to be cleaned they must push the
1025*4882a593Smuzhiyun  * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
1026*4882a593Smuzhiyun  * done in the background in a non-blocking manner, and enables memory reclaim
1027*4882a593Smuzhiyun  * to make progress without blocking.
1028*4882a593Smuzhiyun  */
1029*4882a593Smuzhiyun static void
1030*4882a593Smuzhiyun xfs_reclaim_inode(
1031*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1032*4882a593Smuzhiyun 	struct xfs_perag	*pag)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun 	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
1037*4882a593Smuzhiyun 		goto out;
1038*4882a593Smuzhiyun 	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
1039*4882a593Smuzhiyun 		goto out_iunlock;
1040*4882a593Smuzhiyun 
1041*4882a593Smuzhiyun 	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1042*4882a593Smuzhiyun 		xfs_iunpin_wait(ip);
1043*4882a593Smuzhiyun 		xfs_iflush_abort(ip);
1044*4882a593Smuzhiyun 		goto reclaim;
1045*4882a593Smuzhiyun 	}
1046*4882a593Smuzhiyun 	if (xfs_ipincount(ip))
1047*4882a593Smuzhiyun 		goto out_clear_flush;
1048*4882a593Smuzhiyun 	if (!xfs_inode_clean(ip))
1049*4882a593Smuzhiyun 		goto out_clear_flush;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	xfs_iflags_clear(ip, XFS_IFLUSHING);
1052*4882a593Smuzhiyun reclaim:
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	/*
1055*4882a593Smuzhiyun 	 * Because we use RCU freeing we need to ensure the inode always appears
1056*4882a593Smuzhiyun 	 * to be reclaimed with an invalid inode number when in the free state.
1057*4882a593Smuzhiyun 	 * We do this as early as possible under the ILOCK so that
1058*4882a593Smuzhiyun 	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1059*4882a593Smuzhiyun 	 * detect races with us here. By doing this, we guarantee that once
1060*4882a593Smuzhiyun 	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1061*4882a593Smuzhiyun 	 * it will see either a valid inode that will serialise correctly, or it
1062*4882a593Smuzhiyun 	 * will see an invalid inode that it can skip.
1063*4882a593Smuzhiyun 	 */
1064*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
1065*4882a593Smuzhiyun 	ip->i_flags = XFS_IRECLAIM;
1066*4882a593Smuzhiyun 	ip->i_ino = 0;
1067*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1072*4882a593Smuzhiyun 	/*
1073*4882a593Smuzhiyun 	 * Remove the inode from the per-AG radix tree.
1074*4882a593Smuzhiyun 	 *
1075*4882a593Smuzhiyun 	 * Because radix_tree_delete won't complain even if the item was never
1076*4882a593Smuzhiyun 	 * added to the tree assert that it's been there before to catch
1077*4882a593Smuzhiyun 	 * problems with the inode life time early on.
1078*4882a593Smuzhiyun 	 */
1079*4882a593Smuzhiyun 	spin_lock(&pag->pag_ici_lock);
1080*4882a593Smuzhiyun 	if (!radix_tree_delete(&pag->pag_ici_root,
1081*4882a593Smuzhiyun 				XFS_INO_TO_AGINO(ip->i_mount, ino)))
1082*4882a593Smuzhiyun 		ASSERT(0);
1083*4882a593Smuzhiyun 	xfs_perag_clear_reclaim_tag(pag);
1084*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	/*
1087*4882a593Smuzhiyun 	 * Here we do an (almost) spurious inode lock in order to coordinate
1088*4882a593Smuzhiyun 	 * with inode cache radix tree lookups.  This is because the lookup
1089*4882a593Smuzhiyun 	 * can reference the inodes in the cache without taking references.
1090*4882a593Smuzhiyun 	 *
1091*4882a593Smuzhiyun 	 * We make that OK here by ensuring that we wait until the inode is
1092*4882a593Smuzhiyun 	 * unlocked after the lookup before we go ahead and free it.
1093*4882a593Smuzhiyun 	 */
1094*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1095*4882a593Smuzhiyun 	xfs_qm_dqdetach(ip);
1096*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1097*4882a593Smuzhiyun 	ASSERT(xfs_inode_clean(ip));
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	__xfs_inode_free(ip);
1100*4882a593Smuzhiyun 	return;
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun out_clear_flush:
1103*4882a593Smuzhiyun 	xfs_iflags_clear(ip, XFS_IFLUSHING);
1104*4882a593Smuzhiyun out_iunlock:
1105*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1106*4882a593Smuzhiyun out:
1107*4882a593Smuzhiyun 	xfs_iflags_clear(ip, XFS_IRECLAIM);
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun /*
1111*4882a593Smuzhiyun  * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1112*4882a593Smuzhiyun  * corrupted, we still want to try to reclaim all the inodes. If we don't,
1113*4882a593Smuzhiyun  * then a shutdown during the filesystem unmount reclaim walk will leak all the
1114*4882a593Smuzhiyun  * unreclaimed inodes.
1115*4882a593Smuzhiyun  *
1116*4882a593Smuzhiyun  * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
1117*4882a593Smuzhiyun  * so that callers that want to block until all dirty inodes are written back
1118*4882a593Smuzhiyun  * and reclaimed can sanely loop.
1119*4882a593Smuzhiyun  */
1120*4882a593Smuzhiyun static void
1121*4882a593Smuzhiyun xfs_reclaim_inodes_ag(
1122*4882a593Smuzhiyun 	struct xfs_mount	*mp,
1123*4882a593Smuzhiyun 	int			*nr_to_scan)
1124*4882a593Smuzhiyun {
1125*4882a593Smuzhiyun 	struct xfs_perag	*pag;
1126*4882a593Smuzhiyun 	xfs_agnumber_t		ag = 0;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1129*4882a593Smuzhiyun 		unsigned long	first_index = 0;
1130*4882a593Smuzhiyun 		int		done = 0;
1131*4882a593Smuzhiyun 		int		nr_found = 0;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 		ag = pag->pag_agno + 1;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1136*4882a593Smuzhiyun 		do {
1137*4882a593Smuzhiyun 			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1138*4882a593Smuzhiyun 			int	i;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 			rcu_read_lock();
1141*4882a593Smuzhiyun 			nr_found = radix_tree_gang_lookup_tag(
1142*4882a593Smuzhiyun 					&pag->pag_ici_root,
1143*4882a593Smuzhiyun 					(void **)batch, first_index,
1144*4882a593Smuzhiyun 					XFS_LOOKUP_BATCH,
1145*4882a593Smuzhiyun 					XFS_ICI_RECLAIM_TAG);
1146*4882a593Smuzhiyun 			if (!nr_found) {
1147*4882a593Smuzhiyun 				done = 1;
1148*4882a593Smuzhiyun 				rcu_read_unlock();
1149*4882a593Smuzhiyun 				break;
1150*4882a593Smuzhiyun 			}
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 			/*
1153*4882a593Smuzhiyun 			 * Grab the inodes before we drop the lock. If we found
1154*4882a593Smuzhiyun 			 * nothing, nr_found == 0 and the loop will be skipped.
1155*4882a593Smuzhiyun 			 */
1156*4882a593Smuzhiyun 			for (i = 0; i < nr_found; i++) {
1157*4882a593Smuzhiyun 				struct xfs_inode *ip = batch[i];
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 				if (done || !xfs_reclaim_inode_grab(ip))
1160*4882a593Smuzhiyun 					batch[i] = NULL;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 				/*
1163*4882a593Smuzhiyun 				 * Update the index for the next lookup. Catch
1164*4882a593Smuzhiyun 				 * overflows into the next AG range which can
1165*4882a593Smuzhiyun 				 * occur if we have inodes in the last block of
1166*4882a593Smuzhiyun 				 * the AG and we are currently pointing to the
1167*4882a593Smuzhiyun 				 * last inode.
1168*4882a593Smuzhiyun 				 *
1169*4882a593Smuzhiyun 				 * Because we may see inodes that are from the
1170*4882a593Smuzhiyun 				 * wrong AG due to RCU freeing and
1171*4882a593Smuzhiyun 				 * reallocation, only update the index if it
1172*4882a593Smuzhiyun 				 * lies in this AG. It was a race that led us
1173*4882a593Smuzhiyun 				 * to see this inode, so another lookup from
1174*4882a593Smuzhiyun 				 * the same index will not find it again.
1175*4882a593Smuzhiyun 				 */
1176*4882a593Smuzhiyun 				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1177*4882a593Smuzhiyun 								pag->pag_agno)
1178*4882a593Smuzhiyun 					continue;
1179*4882a593Smuzhiyun 				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1180*4882a593Smuzhiyun 				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1181*4882a593Smuzhiyun 					done = 1;
1182*4882a593Smuzhiyun 			}
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 			/* unlock now that we've grabbed the inodes. */
1185*4882a593Smuzhiyun 			rcu_read_unlock();
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 			for (i = 0; i < nr_found; i++) {
1188*4882a593Smuzhiyun 				if (batch[i])
1189*4882a593Smuzhiyun 					xfs_reclaim_inode(batch[i], pag);
1190*4882a593Smuzhiyun 			}
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 			*nr_to_scan -= XFS_LOOKUP_BATCH;
1193*4882a593Smuzhiyun 			cond_resched();
1194*4882a593Smuzhiyun 		} while (nr_found && !done && *nr_to_scan > 0);
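		/*
		 * Remember how far this pass got so the next pass can resume
		 * from the same point; if we walked off the end of the AG,
		 * reset the cursor so the next pass starts from the beginning.
		 */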
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 		if (done)
1197*4882a593Smuzhiyun 			first_index = 0;
1198*4882a593Smuzhiyun 		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1199*4882a593Smuzhiyun 		xfs_perag_put(pag);
1200*4882a593Smuzhiyun 	}
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun 
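/*
 * Reclaim every reclaimable inode in the filesystem. Pushing the AIL first
 * forces dirty inode metadata out to disk so that inodes become clean and can
 * actually be freed; repeat until no AG carries the reclaim tag any more.
 */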
1203*4882a593Smuzhiyun void
1204*4882a593Smuzhiyun xfs_reclaim_inodes(
1205*4882a593Smuzhiyun 	struct xfs_mount	*mp)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun 	int		nr_to_scan = INT_MAX;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
1210*4882a593Smuzhiyun 		xfs_ail_push_all_sync(mp->m_ail);
1211*4882a593Smuzhiyun 		xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun /*
1216*4882a593Smuzhiyun  * The shrinker infrastructure determines how many inodes we should scan for
1217*4882a593Smuzhiyun  * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1218*4882a593Smuzhiyun  * push the AIL here. We also want to proactively free up memory if we can to
1219*4882a593Smuzhiyun  * minimise the amount of work memory reclaim has to do, so we kick the
1220*4882a593Smuzhiyun  * background reclaim if it isn't already scheduled.
1221*4882a593Smuzhiyun  */
1222*4882a593Smuzhiyun long
1223*4882a593Smuzhiyun xfs_reclaim_inodes_nr(
1224*4882a593Smuzhiyun 	struct xfs_mount	*mp,
1225*4882a593Smuzhiyun 	int			nr_to_scan)
1226*4882a593Smuzhiyun {
1227*4882a593Smuzhiyun 	/* kick background reclaimer and push the AIL */
1228*4882a593Smuzhiyun 	xfs_reclaim_work_queue(mp);
1229*4882a593Smuzhiyun 	xfs_ail_push_all(mp->m_ail);
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1232*4882a593Smuzhiyun 	return 0;
1233*4882a593Smuzhiyun }
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun /*
1236*4882a593Smuzhiyun  * Return the number of reclaimable inodes in the filesystem for
1237*4882a593Smuzhiyun  * the shrinker to determine how much to reclaim.
1238*4882a593Smuzhiyun  */
1239*4882a593Smuzhiyun int
1240*4882a593Smuzhiyun xfs_reclaim_inodes_count(
1241*4882a593Smuzhiyun 	struct xfs_mount	*mp)
1242*4882a593Smuzhiyun {
1243*4882a593Smuzhiyun 	struct xfs_perag	*pag;
1244*4882a593Smuzhiyun 	xfs_agnumber_t		ag = 0;
1245*4882a593Smuzhiyun 	int			reclaimable = 0;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1248*4882a593Smuzhiyun 		ag = pag->pag_agno + 1;
1249*4882a593Smuzhiyun 		reclaimable += pag->pag_ici_reclaimable;
1250*4882a593Smuzhiyun 		xfs_perag_put(pag);
1251*4882a593Smuzhiyun 	}
1252*4882a593Smuzhiyun 	return reclaimable;
1253*4882a593Smuzhiyun }
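
/*
 * A minimal sketch of how xfs_reclaim_inodes_count() and
 * xfs_reclaim_inodes_nr() above are typically wired into the superblock
 * cache shrinker hooks. The authoritative definitions live elsewhere
 * (fs/xfs/xfs_super.c) and may differ in detail, so treat the bodies below
 * as illustrative only.
 */
#if 0	/* illustrative sketch, not built as part of this file */
static long
xfs_fs_nr_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Report how many inodes currently carry the reclaim tag. */
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long
xfs_fs_free_cached_objects(
	struct super_block	*sb,
	struct shrink_control	*sc)
{
	/* Feed the shrinker's scan target into the per-AG reclaim walk. */
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
#endif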
1254*4882a593Smuzhiyun 
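/*
 * An intersection-based inode filtering algorithm. Process the inode only if
 * every ID criterion present in @eofb matches.
 */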
1255*4882a593Smuzhiyun STATIC bool
1256*4882a593Smuzhiyun xfs_inode_match_id(
1257*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1258*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1261*4882a593Smuzhiyun 	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1262*4882a593Smuzhiyun 		return false;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1265*4882a593Smuzhiyun 	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1266*4882a593Smuzhiyun 		return false;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1269*4882a593Smuzhiyun 	    ip->i_d.di_projid != eofb->eof_prid)
1270*4882a593Smuzhiyun 		return false;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	return true;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun /*
1276*4882a593Smuzhiyun  * A union-based inode filtering algorithm. Process the inode if any of the
1277*4882a593Smuzhiyun  * criteria match. This is for global/internal scans only.
1278*4882a593Smuzhiyun  */
1279*4882a593Smuzhiyun STATIC bool
1280*4882a593Smuzhiyun xfs_inode_match_id_union(
1281*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1282*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1285*4882a593Smuzhiyun 	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1286*4882a593Smuzhiyun 		return true;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1289*4882a593Smuzhiyun 	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1290*4882a593Smuzhiyun 		return true;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1293*4882a593Smuzhiyun 	    ip->i_d.di_projid == eofb->eof_prid)
1294*4882a593Smuzhiyun 		return true;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 	return false;
1297*4882a593Smuzhiyun }
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun /*
1300*4882a593Smuzhiyun  * Is this inode @ip eligible for eof/cow block reclamation, given some
1301*4882a593Smuzhiyun  * filtering parameters @eofb?  The inode is eligible if @eofb is null or
1302*4882a593Smuzhiyun  * if the predicate functions match.
1303*4882a593Smuzhiyun  */
1304*4882a593Smuzhiyun static bool
1305*4882a593Smuzhiyun xfs_inode_matches_eofb(
1306*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1307*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb)
1308*4882a593Smuzhiyun {
1309*4882a593Smuzhiyun 	bool			match;
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	if (!eofb)
1312*4882a593Smuzhiyun 		return true;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1315*4882a593Smuzhiyun 		match = xfs_inode_match_id_union(ip, eofb);
1316*4882a593Smuzhiyun 	else
1317*4882a593Smuzhiyun 		match = xfs_inode_match_id(ip, eofb);
1318*4882a593Smuzhiyun 	if (!match)
1319*4882a593Smuzhiyun 		return false;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	/* skip the inode if the file size is too small */
1322*4882a593Smuzhiyun 	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
1323*4882a593Smuzhiyun 	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
1324*4882a593Smuzhiyun 		return false;
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	return true;
1327*4882a593Smuzhiyun }
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun /*
1330*4882a593Smuzhiyun  * This is a fast pass over the inode cache to try to get reclaim moving on as
1331*4882a593Smuzhiyun  * many inodes as possible in a short period of time. It kicks itself every few
1332*4882a593Smuzhiyun  * seconds, as well as being kicked by the inode cache shrinker when memory
1333*4882a593Smuzhiyun  * goes low.
1334*4882a593Smuzhiyun  */
1335*4882a593Smuzhiyun void
1336*4882a593Smuzhiyun xfs_reclaim_worker(
1337*4882a593Smuzhiyun 	struct work_struct *work)
1338*4882a593Smuzhiyun {
1339*4882a593Smuzhiyun 	struct xfs_mount *mp = container_of(to_delayed_work(work),
1340*4882a593Smuzhiyun 					struct xfs_mount, m_reclaim_work);
1341*4882a593Smuzhiyun 	int		nr_to_scan = INT_MAX;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1344*4882a593Smuzhiyun 	xfs_reclaim_work_queue(mp);
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun 
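/*
 * Trim speculative preallocation beyond EOF from a single inode on behalf of
 * a background or quota-driven scan. Inodes with dirty mappings are skipped
 * unless the scan is synchronous, and a failed IOLOCK trylock in a
 * synchronous scan returns -EAGAIN so the caller revisits the inode later.
 */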
1347*4882a593Smuzhiyun STATIC int
1348*4882a593Smuzhiyun xfs_inode_free_eofblocks(
1349*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1350*4882a593Smuzhiyun 	void			*args)
1351*4882a593Smuzhiyun {
1352*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb = args;
1353*4882a593Smuzhiyun 	bool			wait;
1354*4882a593Smuzhiyun 	int			ret;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun 	if (!xfs_can_free_eofblocks(ip, false)) {
1359*4882a593Smuzhiyun 		/* inode could be preallocated or append-only */
1360*4882a593Smuzhiyun 		trace_xfs_inode_free_eofblocks_invalid(ip);
1361*4882a593Smuzhiyun 		xfs_inode_clear_eofblocks_tag(ip);
1362*4882a593Smuzhiyun 		return 0;
1363*4882a593Smuzhiyun 	}
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	/*
1366*4882a593Smuzhiyun 	 * If the mapping is dirty the operation can block and wait for some
1367*4882a593Smuzhiyun 	 * time. Unless we are waiting, skip it.
1368*4882a593Smuzhiyun 	 */
1369*4882a593Smuzhiyun 	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1370*4882a593Smuzhiyun 		return 0;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	if (!xfs_inode_matches_eofb(ip, eofb))
1373*4882a593Smuzhiyun 		return 0;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	/*
1376*4882a593Smuzhiyun 	 * If the caller is waiting, return -EAGAIN to keep the background
1377*4882a593Smuzhiyun 	 * scanner moving and revisit the inode in a subsequent pass.
1378*4882a593Smuzhiyun 	 */
1379*4882a593Smuzhiyun 	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1380*4882a593Smuzhiyun 		if (wait)
1381*4882a593Smuzhiyun 			return -EAGAIN;
1382*4882a593Smuzhiyun 		return 0;
1383*4882a593Smuzhiyun 	}
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	ret = xfs_free_eofblocks(ip);
1386*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	return ret;
1389*4882a593Smuzhiyun }
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun int
1392*4882a593Smuzhiyun xfs_icache_free_eofblocks(
1393*4882a593Smuzhiyun 	struct xfs_mount	*mp,
1394*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb)
1395*4882a593Smuzhiyun {
1396*4882a593Smuzhiyun 	return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
1397*4882a593Smuzhiyun 			XFS_ICI_EOFBLOCKS_TAG);
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun 
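/*
 * A minimal usage sketch for the tagged-walk/filter machinery above, assuming
 * a caller that wants a synchronous scan restricted to one UID and to files
 * of at least 64k. The helper name and its policy are hypothetical; only the
 * flag and field names are taken from xfs_inode_matches_eofb() and friends.
 */
#if 0	/* illustrative sketch, not built as part of this file */
static int
example_free_eofblocks_for_uid(
	struct xfs_mount	*mp,
	uid_t			uid)
{
	struct xfs_eofblocks	eofb = {
		.eof_flags		= XFS_EOF_FLAGS_SYNC |
					  XFS_EOF_FLAGS_UID |
					  XFS_EOF_FLAGS_MINFILESIZE,
		.eof_uid		= make_kuid(&init_user_ns, uid),
		.eof_min_file_size	= 64 * 1024,
	};

	/* Visit every EOFBLOCKS-tagged inode and apply the filter above. */
	return xfs_icache_free_eofblocks(mp, &eofb);
}
#endif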
1400*4882a593Smuzhiyun /*
1401*4882a593Smuzhiyun  * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1402*4882a593Smuzhiyun  * multiple quotas, we don't know exactly which quota caused an allocation
1403*4882a593Smuzhiyun  * failure. We make a best effort by including each quota under low free space
1404*4882a593Smuzhiyun  * conditions (less than 1% free space) in the scan.
1405*4882a593Smuzhiyun  */
1406*4882a593Smuzhiyun static int
1407*4882a593Smuzhiyun __xfs_inode_free_quota_eofblocks(
1408*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1409*4882a593Smuzhiyun 	int			(*execute)(struct xfs_mount *mp,
1410*4882a593Smuzhiyun 					   struct xfs_eofblocks	*eofb))
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun 	int scan = 0;
1413*4882a593Smuzhiyun 	struct xfs_eofblocks eofb = {0};
1414*4882a593Smuzhiyun 	struct xfs_dquot *dq;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	/*
1417*4882a593Smuzhiyun 	 * Run a sync scan to increase effectiveness and use the union filter to
1418*4882a593Smuzhiyun 	 * cover all applicable quotas in a single scan.
1419*4882a593Smuzhiyun 	 */
1420*4882a593Smuzhiyun 	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1423*4882a593Smuzhiyun 		dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER);
1424*4882a593Smuzhiyun 		if (dq && xfs_dquot_lowsp(dq)) {
1425*4882a593Smuzhiyun 			eofb.eof_uid = VFS_I(ip)->i_uid;
1426*4882a593Smuzhiyun 			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1427*4882a593Smuzhiyun 			scan = 1;
1428*4882a593Smuzhiyun 		}
1429*4882a593Smuzhiyun 	}
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1432*4882a593Smuzhiyun 		dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP);
1433*4882a593Smuzhiyun 		if (dq && xfs_dquot_lowsp(dq)) {
1434*4882a593Smuzhiyun 			eofb.eof_gid = VFS_I(ip)->i_gid;
1435*4882a593Smuzhiyun 			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1436*4882a593Smuzhiyun 			scan = 1;
1437*4882a593Smuzhiyun 		}
1438*4882a593Smuzhiyun 	}
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 	if (scan)
1441*4882a593Smuzhiyun 		execute(ip->i_mount, &eofb);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 	return scan;
1444*4882a593Smuzhiyun }
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun int
1447*4882a593Smuzhiyun xfs_inode_free_quota_eofblocks(
1448*4882a593Smuzhiyun 	struct xfs_inode *ip)
1449*4882a593Smuzhiyun {
1450*4882a593Smuzhiyun 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun static inline unsigned long
1454*4882a593Smuzhiyun xfs_iflag_for_tag(
1455*4882a593Smuzhiyun 	int		tag)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun 	switch (tag) {
1458*4882a593Smuzhiyun 	case XFS_ICI_EOFBLOCKS_TAG:
1459*4882a593Smuzhiyun 		return XFS_IEOFBLOCKS;
1460*4882a593Smuzhiyun 	case XFS_ICI_COWBLOCKS_TAG:
1461*4882a593Smuzhiyun 		return XFS_ICOWBLOCKS;
1462*4882a593Smuzhiyun 	default:
1463*4882a593Smuzhiyun 		ASSERT(0);
1464*4882a593Smuzhiyun 		return 0;
1465*4882a593Smuzhiyun 	}
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun 
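/*
 * Tag an inode as having speculative post-EOF or CoW blocks. The tag is set
 * in two places: on the inode in the per-AG radix tree and, if this is the
 * first tagged inode in the AG, on the AG itself in the per-mount tree so
 * background workers can find work without walking untagged AGs. Setting a
 * previously clear AG tag also kicks the relevant background worker.
 */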
1468*4882a593Smuzhiyun static void
1469*4882a593Smuzhiyun __xfs_inode_set_blocks_tag(
1470*4882a593Smuzhiyun 	xfs_inode_t	*ip,
1471*4882a593Smuzhiyun 	void		(*execute)(struct xfs_mount *mp),
1472*4882a593Smuzhiyun 	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1473*4882a593Smuzhiyun 				  int error, unsigned long caller_ip),
1474*4882a593Smuzhiyun 	int		tag)
1475*4882a593Smuzhiyun {
1476*4882a593Smuzhiyun 	struct xfs_mount *mp = ip->i_mount;
1477*4882a593Smuzhiyun 	struct xfs_perag *pag;
1478*4882a593Smuzhiyun 	int tagged;
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	/*
1481*4882a593Smuzhiyun 	 * Don't bother locking the AG and looking up in the radix trees
1482*4882a593Smuzhiyun 	 * if we already know that we have the tag set.
1483*4882a593Smuzhiyun 	 */
1484*4882a593Smuzhiyun 	if (ip->i_flags & xfs_iflag_for_tag(tag))
1485*4882a593Smuzhiyun 		return;
1486*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
1487*4882a593Smuzhiyun 	ip->i_flags |= xfs_iflag_for_tag(tag);
1488*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1491*4882a593Smuzhiyun 	spin_lock(&pag->pag_ici_lock);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1494*4882a593Smuzhiyun 	radix_tree_tag_set(&pag->pag_ici_root,
1495*4882a593Smuzhiyun 			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1496*4882a593Smuzhiyun 	if (!tagged) {
1497*4882a593Smuzhiyun 		/* propagate the blocks tag up into the perag radix tree */
1498*4882a593Smuzhiyun 		spin_lock(&ip->i_mount->m_perag_lock);
1499*4882a593Smuzhiyun 		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1500*4882a593Smuzhiyun 				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1501*4882a593Smuzhiyun 				   tag);
1502*4882a593Smuzhiyun 		spin_unlock(&ip->i_mount->m_perag_lock);
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 		/* kick off background trimming */
1505*4882a593Smuzhiyun 		execute(ip->i_mount);
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1508*4882a593Smuzhiyun 	}
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
1511*4882a593Smuzhiyun 	xfs_perag_put(pag);
1512*4882a593Smuzhiyun }
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun void
1515*4882a593Smuzhiyun xfs_inode_set_eofblocks_tag(
1516*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1517*4882a593Smuzhiyun {
1518*4882a593Smuzhiyun 	trace_xfs_inode_set_eofblocks_tag(ip);
1519*4882a593Smuzhiyun 	return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1520*4882a593Smuzhiyun 			trace_xfs_perag_set_eofblocks,
1521*4882a593Smuzhiyun 			XFS_ICI_EOFBLOCKS_TAG);
1522*4882a593Smuzhiyun }
1523*4882a593Smuzhiyun 
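/*
 * Clear the speculative block tag from an inode and, if it was the last
 * tagged inode in the AG, clear the corresponding tag from the per-mount
 * perag tree as well.
 */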
1524*4882a593Smuzhiyun static void
1525*4882a593Smuzhiyun __xfs_inode_clear_blocks_tag(
1526*4882a593Smuzhiyun 	xfs_inode_t	*ip,
1527*4882a593Smuzhiyun 	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1528*4882a593Smuzhiyun 				    int error, unsigned long caller_ip),
1529*4882a593Smuzhiyun 	int		tag)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	struct xfs_mount *mp = ip->i_mount;
1532*4882a593Smuzhiyun 	struct xfs_perag *pag;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	spin_lock(&ip->i_flags_lock);
1535*4882a593Smuzhiyun 	ip->i_flags &= ~xfs_iflag_for_tag(tag);
1536*4882a593Smuzhiyun 	spin_unlock(&ip->i_flags_lock);
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1539*4882a593Smuzhiyun 	spin_lock(&pag->pag_ici_lock);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	radix_tree_tag_clear(&pag->pag_ici_root,
1542*4882a593Smuzhiyun 			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1543*4882a593Smuzhiyun 	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1544*4882a593Smuzhiyun 		/* clear the blocks tag from the perag radix tree */
1545*4882a593Smuzhiyun 		spin_lock(&ip->i_mount->m_perag_lock);
1546*4882a593Smuzhiyun 		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1547*4882a593Smuzhiyun 				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1548*4882a593Smuzhiyun 				     tag);
1549*4882a593Smuzhiyun 		spin_unlock(&ip->i_mount->m_perag_lock);
1550*4882a593Smuzhiyun 		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1551*4882a593Smuzhiyun 	}
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	spin_unlock(&pag->pag_ici_lock);
1554*4882a593Smuzhiyun 	xfs_perag_put(pag);
1555*4882a593Smuzhiyun }
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun void
1558*4882a593Smuzhiyun xfs_inode_clear_eofblocks_tag(
1559*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1560*4882a593Smuzhiyun {
1561*4882a593Smuzhiyun 	trace_xfs_inode_clear_eofblocks_tag(ip);
1562*4882a593Smuzhiyun 	return __xfs_inode_clear_blocks_tag(ip,
1563*4882a593Smuzhiyun 			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun /*
1567*4882a593Smuzhiyun  * Set ourselves up to free CoW blocks from this file.  If it's already clean
1568*4882a593Smuzhiyun  * then we can bail out quickly, but otherwise we must back off if the file
1569*4882a593Smuzhiyun  * is undergoing some kind of write.
1570*4882a593Smuzhiyun  */
1571*4882a593Smuzhiyun static bool
1572*4882a593Smuzhiyun xfs_prep_free_cowblocks(
1573*4882a593Smuzhiyun 	struct xfs_inode	*ip)
1574*4882a593Smuzhiyun {
1575*4882a593Smuzhiyun 	/*
1576*4882a593Smuzhiyun 	 * Just clear the tag if we have an empty cow fork or none at all. It's
1577*4882a593Smuzhiyun 	 * possible the inode was fully unshared since it was originally tagged.
1578*4882a593Smuzhiyun 	 */
1579*4882a593Smuzhiyun 	if (!xfs_inode_has_cow_data(ip)) {
1580*4882a593Smuzhiyun 		trace_xfs_inode_free_cowblocks_invalid(ip);
1581*4882a593Smuzhiyun 		xfs_inode_clear_cowblocks_tag(ip);
1582*4882a593Smuzhiyun 		return false;
1583*4882a593Smuzhiyun 	}
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	/*
1586*4882a593Smuzhiyun 	 * If the mapping is dirty or under writeback we cannot touch the
1587*4882a593Smuzhiyun 	 * CoW fork.  Leave it alone if we're in the midst of a directio.
1588*4882a593Smuzhiyun 	 */
1589*4882a593Smuzhiyun 	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1590*4882a593Smuzhiyun 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1591*4882a593Smuzhiyun 	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1592*4882a593Smuzhiyun 	    atomic_read(&VFS_I(ip)->i_dio_count))
1593*4882a593Smuzhiyun 		return false;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	return true;
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun /*
1599*4882a593Smuzhiyun  * Automatic CoW Reservation Freeing
1600*4882a593Smuzhiyun  *
1601*4882a593Smuzhiyun  * These functions automatically garbage collect leftover CoW reservations
1602*4882a593Smuzhiyun  * that were made on behalf of a cowextsize hint when we start to run out
1603*4882a593Smuzhiyun  * of quota or when the reservations sit around for too long.  If the file
1604*4882a593Smuzhiyun  * has dirty pages or is undergoing writeback, its CoW reservations will
1605*4882a593Smuzhiyun  * be retained.
1606*4882a593Smuzhiyun  *
1607*4882a593Smuzhiyun  * The actual garbage collection piggybacks off the same code that runs
1608*4882a593Smuzhiyun  * the speculative EOF preallocation garbage collector.
1609*4882a593Smuzhiyun  */
1610*4882a593Smuzhiyun STATIC int
1611*4882a593Smuzhiyun xfs_inode_free_cowblocks(
1612*4882a593Smuzhiyun 	struct xfs_inode	*ip,
1613*4882a593Smuzhiyun 	void			*args)
1614*4882a593Smuzhiyun {
1615*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb = args;
1616*4882a593Smuzhiyun 	int			ret = 0;
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun 	if (!xfs_prep_free_cowblocks(ip))
1619*4882a593Smuzhiyun 		return 0;
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	if (!xfs_inode_matches_eofb(ip, eofb))
1622*4882a593Smuzhiyun 		return 0;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	/* Free the CoW blocks */
1625*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_IOLOCK_EXCL);
1626*4882a593Smuzhiyun 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	/*
1629*4882a593Smuzhiyun 	 * Check again, nobody else should be able to dirty blocks or change
1630*4882a593Smuzhiyun 	 * the reflink iflag now that we have the first two locks held.
1631*4882a593Smuzhiyun 	 */
1632*4882a593Smuzhiyun 	if (xfs_prep_free_cowblocks(ip))
1633*4882a593Smuzhiyun 		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1636*4882a593Smuzhiyun 	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1637*4882a593Smuzhiyun 
1638*4882a593Smuzhiyun 	return ret;
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun int
1642*4882a593Smuzhiyun xfs_icache_free_cowblocks(
1643*4882a593Smuzhiyun 	struct xfs_mount	*mp,
1644*4882a593Smuzhiyun 	struct xfs_eofblocks	*eofb)
1645*4882a593Smuzhiyun {
1646*4882a593Smuzhiyun 	return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
1647*4882a593Smuzhiyun 			XFS_ICI_COWBLOCKS_TAG);
1648*4882a593Smuzhiyun }
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun int
1651*4882a593Smuzhiyun xfs_inode_free_quota_cowblocks(
1652*4882a593Smuzhiyun 	struct xfs_inode *ip)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun 	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1655*4882a593Smuzhiyun }
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun void
1658*4882a593Smuzhiyun xfs_inode_set_cowblocks_tag(
1659*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1660*4882a593Smuzhiyun {
1661*4882a593Smuzhiyun 	trace_xfs_inode_set_cowblocks_tag(ip);
1662*4882a593Smuzhiyun 	return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1663*4882a593Smuzhiyun 			trace_xfs_perag_set_cowblocks,
1664*4882a593Smuzhiyun 			XFS_ICI_COWBLOCKS_TAG);
1665*4882a593Smuzhiyun }
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun void
1668*4882a593Smuzhiyun xfs_inode_clear_cowblocks_tag(
1669*4882a593Smuzhiyun 	xfs_inode_t	*ip)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	trace_xfs_inode_clear_cowblocks_tag(ip);
1672*4882a593Smuzhiyun 	return __xfs_inode_clear_blocks_tag(ip,
1673*4882a593Smuzhiyun 			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun 
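/*
 * The stop/start helpers below cancel and re-arm the delayed work items that
 * drive the background EOF and CoW block scans. Callers are generally
 * expected to use them in pairs, for example around freeze or quiesce, so
 * that no trim work runs while the filesystem cannot modify metadata.
 */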
1676*4882a593Smuzhiyun /* Disable post-EOF and CoW block auto-reclamation. */
1677*4882a593Smuzhiyun void
1678*4882a593Smuzhiyun xfs_stop_block_reaping(
1679*4882a593Smuzhiyun 	struct xfs_mount	*mp)
1680*4882a593Smuzhiyun {
1681*4882a593Smuzhiyun 	cancel_delayed_work_sync(&mp->m_eofblocks_work);
1682*4882a593Smuzhiyun 	cancel_delayed_work_sync(&mp->m_cowblocks_work);
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun /* Enable post-EOF and CoW block auto-reclamation. */
1686*4882a593Smuzhiyun void
1687*4882a593Smuzhiyun xfs_start_block_reaping(
1688*4882a593Smuzhiyun 	struct xfs_mount	*mp)
1689*4882a593Smuzhiyun {
1690*4882a593Smuzhiyun 	xfs_queue_eofblocks(mp);
1691*4882a593Smuzhiyun 	xfs_queue_cowblocks(mp);
1692*4882a593Smuzhiyun }
1693