// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_health.h"
#include "xfs_trans.h"
#include "xfs_pwork.h"

/*
 * Walking Inodes in the Filesystem
 * ================================
 *
 * This iterator function walks a subset of filesystem inodes in increasing
 * order from @startino until there are no more inodes.  For each allocated
 * inode it finds, it calls a walk function with the relevant inode number and
 * a pointer to caller-provided data.  The walk function can return 0 to
 * continue the iteration, or the usual negative error code to stop it; by
 * convention, -ECANCELED stops the iteration early without signifying an
 * error condition.  Whichever nonzero value stopped the iteration is passed
 * back to the caller.
 *
 * Internally, we allow the walk function to do anything, which means that we
 * cannot maintain the inobt cursor or our lock on the AGI buffer.  We
 * therefore cache the inobt records in kernel memory and only call the walk
 * function when our memory buffer is full.  @nr_recs is the number of records
 * that we've cached, and @sz_recs is the size of our cache.
 *
 * It is the responsibility of the walk function to ensure it accesses
 * allocated inodes, as the inobt records may be stale by the time they are
 * acted upon.
 */
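
/*
 * Example usage (an illustrative sketch, not called anywhere in this file;
 * the function name and the counter are hypothetical): count every allocated
 * inode in the filesystem with xfs_iwalk().
 *
 *	STATIC int
 *	xfs_iwalk_count_one(
 *		struct xfs_mount	*mp,
 *		struct xfs_trans	*tp,
 *		xfs_ino_t		ino,
 *		void			*data)
 *	{
 *		uint64_t		*count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	uint64_t	count = 0;
 *	int		error;
 *
 *	error = xfs_iwalk(mp, NULL, 0, 0, xfs_iwalk_count_one, 0, &count);
 */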

struct xfs_iwalk_ag {
	/* parallel work control data; will be null if single threaded */
	struct xfs_pwork		pwork;

	struct xfs_mount		*mp;
	struct xfs_trans		*tp;

	/* Where do we start the traversal? */
	xfs_ino_t			startino;

	/* What was the last inode number we saw when iterating the inobt? */
	xfs_ino_t			lastino;

	/* Array of inobt records we cache. */
	struct xfs_inobt_rec_incore	*recs;

	/* Number of entries allocated for the @recs array. */
	unsigned int			sz_recs;

	/* Number of entries in the @recs array that are in use. */
	unsigned int			nr_recs;

	/* Inode walk function and data pointer. */
	xfs_iwalk_fn			iwalk_fn;
	xfs_inobt_walk_fn		inobt_walk_fn;
	void				*data;

	/*
	 * Make it look like the inodes up to startino are free so that
	 * bulkstat can start its inode iteration at the correct place without
	 * needing to special case everywhere.
	 */
	unsigned int			trim_start:1;

	/* Skip empty inobt records? */
	unsigned int			skip_empty:1;
};

/*
 * Loop over all clusters in a chunk for a given incore inode allocation btree
 * record.  Do a readahead if there are any allocated inodes in that cluster.
 */
STATIC void
xfs_iwalk_ichunk_ra(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno,
	struct xfs_inobt_rec_incore	*irec)
{
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
	xfs_agblock_t			agbno;
	struct blk_plug			plug;
	int				i;	/* inode chunk index */

	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino);

	blk_start_plug(&plug);
	for (i = 0; i < XFS_INODES_PER_CHUNK; i += igeo->inodes_per_cluster) {
		xfs_inofree_t	imask;

		imask = xfs_inobt_maskn(i, igeo->inodes_per_cluster);
		if (imask & ~irec->ir_free) {
			xfs_btree_reada_bufs(mp, agno, agbno,
					igeo->blocks_per_cluster,
					&xfs_inode_buf_ops);
		}
		agbno += igeo->blocks_per_cluster;
	}
	blk_finish_plug(&plug);
}
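
/*
 * A worked example of the readahead decision above (illustrative numbers):
 * with 64 inodes per chunk and, say, 32 inodes per cluster, the loop makes
 * two passes.  On the first pass, xfs_inobt_maskn(0, 32) covers bits 0-31 of
 * the chunk's free mask; if any of those bits are clear in ir_free (i.e. at
 * least one inode in that cluster is allocated), imask & ~ir_free is nonzero
 * and we issue readahead for that cluster's blocks.
 */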

/*
 * Set the bits in @irec's free mask that correspond to the inodes before
 * @agino so that we skip them.  This is how we restart an inode walk that was
 * interrupted in the middle of an inode record.
 */
STATIC void
xfs_iwalk_adjust_start(
	xfs_agino_t			agino,	/* starting inode of chunk */
	struct xfs_inobt_rec_incore	*irec)	/* btree record */
{
	int				idx;	/* index into inode chunk */
	int				i;

	idx = agino - irec->ir_startino;

	/*
	 * We found the chunk record that covers @agino, but the walk must not
	 * revisit the inodes before it.  Mark all the inodes below @agino
	 * free because they're uninteresting; for each one that was actually
	 * allocated, bump ir_freecount so the record stays self-consistent.
	 */
	for (i = 0; i < idx; i++) {
		if (XFS_INOBT_MASK(i) & ~irec->ir_free)
			irec->ir_freecount++;
	}

	irec->ir_free |= xfs_inobt_maskn(0, idx);
}
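
/*
 * A worked example of the adjustment above (illustrative numbers): if @agino
 * is ir_startino + 3, then idx is 3, xfs_inobt_maskn(0, 3) sets bits 0-2 in
 * ir_free, and ir_freecount is bumped once for each of those three inodes
 * that had been allocated.  The record therefore keeps the invariant that
 * ir_freecount matches the number of set bits in ir_free.
 */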

/* Allocate memory for a walk. */
STATIC int
xfs_iwalk_alloc(
	struct xfs_iwalk_ag	*iwag)
{
	size_t			size;

	ASSERT(iwag->recs == NULL);
	iwag->nr_recs = 0;

	/* Allocate a prefetch buffer for inobt records. */
	size = iwag->sz_recs * sizeof(struct xfs_inobt_rec_incore);
	iwag->recs = kmem_alloc(size, KM_MAYFAIL);
	if (iwag->recs == NULL)
		return -ENOMEM;

	return 0;
}

/* Free memory we allocated for a walk. */
STATIC void
xfs_iwalk_free(
	struct xfs_iwalk_ag	*iwag)
{
	kmem_free(iwag->recs);
	iwag->recs = NULL;
}

/* For each inuse inode in each cached inobt record, call our function. */
STATIC int
xfs_iwalk_ag_recs(
	struct xfs_iwalk_ag		*iwag)
{
	struct xfs_mount		*mp = iwag->mp;
	struct xfs_trans		*tp = iwag->tp;
	xfs_ino_t			ino;
	unsigned int			i, j;
	xfs_agnumber_t			agno;
	int				error;

	agno = XFS_INO_TO_AGNO(mp, iwag->startino);
	for (i = 0; i < iwag->nr_recs; i++) {
		struct xfs_inobt_rec_incore	*irec = &iwag->recs[i];

		trace_xfs_iwalk_ag_rec(mp, agno, irec);

		if (xfs_pwork_want_abort(&iwag->pwork))
			return 0;

		if (iwag->inobt_walk_fn) {
			error = iwag->inobt_walk_fn(mp, tp, agno, irec,
					iwag->data);
			if (error)
				return error;
		}

		if (!iwag->iwalk_fn)
			continue;

		for (j = 0; j < XFS_INODES_PER_CHUNK; j++) {
			if (xfs_pwork_want_abort(&iwag->pwork))
				return 0;

			/* Skip if this inode is free */
			if (XFS_INOBT_MASK(j) & irec->ir_free)
				continue;

			/* Otherwise call our function. */
			ino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino + j);
			error = iwag->iwalk_fn(mp, tp, ino, iwag->data);
			if (error)
				return error;
		}
	}

	return 0;
}

/* Delete cursor and let go of AGI. */
static inline void
xfs_iwalk_del_inobt(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp,
	int			error)
{
	if (*curpp) {
		xfs_btree_del_cursor(*curpp, error);
		*curpp = NULL;
	}
	if (*agi_bpp) {
		xfs_trans_brelse(tp, *agi_bpp);
		*agi_bpp = NULL;
	}
}

/*
 * Set ourselves up for walking inobt records starting from a given point in
 * the filesystem.
 *
 * If caller passed in a nonzero start inode number, load the record from the
 * inobt and make the record look like all the inodes before agino are free so
 * that we skip them, and then move the cursor to the next inobt record.  This
 * is how we support starting an iwalk in the middle of an inode chunk.
 *
 * If the caller passed in a start number of zero, move the cursor to the first
 * inobt record.
 *
 * The caller is responsible for cleaning up the cursor and buffer pointer
 * regardless of the error status.
 */
STATIC int
xfs_iwalk_ag_start(
	struct xfs_iwalk_ag	*iwag,
	xfs_agnumber_t		agno,
	xfs_agino_t		agino,
	struct xfs_btree_cur	**curpp,
	struct xfs_buf		**agi_bpp,
	int			*has_more)
{
	struct xfs_mount	*mp = iwag->mp;
	struct xfs_trans	*tp = iwag->tp;
	struct xfs_inobt_rec_incore *irec;
	int			error;

	/* Set up a fresh cursor and empty the inobt cache. */
	iwag->nr_recs = 0;
	error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
	if (error)
		return error;

	/* Starting at the beginning of the AG?  That's easy! */
	if (agino == 0)
		return xfs_inobt_lookup(*curpp, 0, XFS_LOOKUP_GE, has_more);

	/*
	 * Otherwise, we have to grab the inobt record where we left off, stuff
	 * the record into our cache, and then see if there are more records.
	 * We require a lookup cache of at least two elements so that the
	 * caller doesn't have to deal with tearing down the cursor to walk the
	 * records.
	 */
	error = xfs_inobt_lookup(*curpp, agino, XFS_LOOKUP_LE, has_more);
	if (error)
		return error;

	/*
	 * If the LE lookup at @agino yields no records, jump ahead to the
	 * inobt cursor increment to see if there are more records to process.
	 */
	if (!*has_more)
		goto out_advance;

	/* Get the record, should always work */
	irec = &iwag->recs[iwag->nr_recs];
	error = xfs_inobt_get_rec(*curpp, irec, has_more);
	if (error)
		return error;
	if (XFS_IS_CORRUPT(mp, *has_more != 1))
		return -EFSCORRUPTED;

	iwag->lastino = XFS_AGINO_TO_INO(mp, agno,
				irec->ir_startino + XFS_INODES_PER_CHUNK - 1);

	/*
	 * If the LE lookup yielded an inobt record before the cursor position,
	 * skip it and see if there's another one after it.
	 */
	if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino)
		goto out_advance;

	/*
	 * If agino fell in the middle of the inode record, make it look like
	 * the inodes up to agino are free so that we don't return them again.
	 */
	if (iwag->trim_start)
		xfs_iwalk_adjust_start(agino, irec);

	/*
	 * The prefetch calculation is supposed to give us a large enough inobt
	 * record cache that the startup code above can stage a partial first
	 * record and the loop body can cache a record without having to check
	 * for cache space until after it reads an inobt record.
	 */
	iwag->nr_recs++;
	ASSERT(iwag->nr_recs < iwag->sz_recs);

out_advance:
	return xfs_btree_increment(*curpp, 0, has_more);
}

/*
 * The inobt record cache is full, so preserve the inobt cursor state and
 * run callbacks on the cached inobt records.  When we're done, restore the
 * cursor state to wherever the cursor would have been had the cache not been
 * full (and therefore we could've just incremented the cursor) if *@has_more
 * is true.  On exit, *@has_more will indicate whether or not the caller should
 * try for more inode records.
 */
STATIC int
xfs_iwalk_run_callbacks(
	struct xfs_iwalk_ag		*iwag,
	xfs_agnumber_t			agno,
	struct xfs_btree_cur		**curpp,
	struct xfs_buf			**agi_bpp,
	int				*has_more)
{
	struct xfs_mount		*mp = iwag->mp;
	struct xfs_trans		*tp = iwag->tp;
	struct xfs_inobt_rec_incore	*irec;
	xfs_agino_t			next_agino;
	int				error;

	next_agino = XFS_INO_TO_AGINO(mp, iwag->lastino) + 1;

	ASSERT(iwag->nr_recs > 0);

	/* Delete cursor but remember the last record we cached... */
	xfs_iwalk_del_inobt(tp, curpp, agi_bpp, 0);
	irec = &iwag->recs[iwag->nr_recs - 1];
	ASSERT(next_agino >= irec->ir_startino + XFS_INODES_PER_CHUNK);

	error = xfs_iwalk_ag_recs(iwag);
	if (error)
		return error;

	/* ...empty the cache... */
	iwag->nr_recs = 0;

	if (!has_more)
		return 0;

	/* ...and recreate the cursor just past where we left off. */
	error = xfs_inobt_cur(mp, tp, agno, XFS_BTNUM_INO, curpp, agi_bpp);
	if (error)
		return error;

	return xfs_inobt_lookup(*curpp, next_agino, XFS_LOOKUP_GE, has_more);
}

/* Walk all inodes in a single AG, from @iwag->startino to the end of the AG. */
STATIC int
xfs_iwalk_ag(
	struct xfs_iwalk_ag		*iwag)
{
	struct xfs_mount		*mp = iwag->mp;
	struct xfs_trans		*tp = iwag->tp;
	struct xfs_buf			*agi_bp = NULL;
	struct xfs_btree_cur		*cur = NULL;
	xfs_agnumber_t			agno;
	xfs_agino_t			agino;
	int				has_more;
	int				error = 0;

	/* Set up our cursor at the right place in the inode btree. */
	agno = XFS_INO_TO_AGNO(mp, iwag->startino);
	agino = XFS_INO_TO_AGINO(mp, iwag->startino);
	error = xfs_iwalk_ag_start(iwag, agno, agino, &cur, &agi_bp, &has_more);

	while (!error && has_more) {
		struct xfs_inobt_rec_incore	*irec;
		xfs_ino_t			rec_fsino;

		cond_resched();
		if (xfs_pwork_want_abort(&iwag->pwork))
			goto out;

		/* Fetch the inobt record. */
		irec = &iwag->recs[iwag->nr_recs];
		error = xfs_inobt_get_rec(cur, irec, &has_more);
		if (error || !has_more)
			break;

		/* Make sure that we always move forward. */
		rec_fsino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino);
		if (iwag->lastino != NULLFSINO &&
		    XFS_IS_CORRUPT(mp, iwag->lastino >= rec_fsino)) {
			error = -EFSCORRUPTED;
			goto out;
		}
		iwag->lastino = rec_fsino + XFS_INODES_PER_CHUNK - 1;

		/* No allocated inodes in this chunk; skip it. */
		if (iwag->skip_empty && irec->ir_freecount == irec->ir_count) {
			error = xfs_btree_increment(cur, 0, &has_more);
			if (error)
				break;
			continue;
		}

		/*
		 * Start readahead for this inode chunk in anticipation of
		 * walking the inodes.
		 */
		if (iwag->iwalk_fn)
			xfs_iwalk_ichunk_ra(mp, agno, irec);

		/*
		 * If there's space in the buffer for more records, increment
		 * the btree cursor and grab more.
		 */
		if (++iwag->nr_recs < iwag->sz_recs) {
			error = xfs_btree_increment(cur, 0, &has_more);
			if (error || !has_more)
				break;
			continue;
		}

		/*
		 * Otherwise, we need to save cursor state and run the callback
		 * function on the cached records.  The run_callbacks function
		 * is supposed to return a cursor pointing to the record where
		 * we would be if we had been able to increment like above.
		 */
		ASSERT(has_more);
		error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp,
				&has_more);
	}

	if (iwag->nr_recs == 0 || error)
		goto out;

	/* Walk the unprocessed records in the cache. */
	error = xfs_iwalk_run_callbacks(iwag, agno, &cur, &agi_bp, &has_more);

out:
	xfs_iwalk_del_inobt(tp, &cur, &agi_bp, error);
	return error;
}

/*
 * We experimentally determined that the reduction in ioctl call overhead
 * diminishes when userspace asks for more than 2048 inodes, so we'll cap
 * prefetch at this point.
 */
#define IWALK_MAX_INODE_PREFETCH	(2048U)

/*
 * Given the number of inodes to prefetch, set the number of inobt records that
 * we cache in memory, which controls the number of inodes we try to read
 * ahead.  Set the maximum if @inodes == 0.
 */
static inline unsigned int
xfs_iwalk_prefetch(
	unsigned int		inodes)
{
	unsigned int		inobt_records;

	/*
	 * If the caller didn't tell us the number of inodes they wanted,
	 * assume the maximum prefetch possible for best performance.
	 * Otherwise, cap prefetch at that maximum so that we don't start an
	 * absurd amount of prefetch.
	 */
	if (inodes == 0)
		inodes = IWALK_MAX_INODE_PREFETCH;
	inodes = min(inodes, IWALK_MAX_INODE_PREFETCH);

	/* Round the inode count up to a full chunk. */
	inodes = round_up(inodes, XFS_INODES_PER_CHUNK);

	/*
	 * In order to convert the number of inodes to prefetch into an
	 * estimate of the number of inobt records to cache, we require a
	 * conversion factor that reflects our expectations of the average
	 * loading factor of an inode chunk.  Based on data gathered, most
	 * (but not all) filesystems manage to keep the inode chunks totally
	 * full, so we'll underestimate slightly so that our readahead will
	 * still deliver the performance we want on aging filesystems:
	 *
	 * inobt = inodes / (INODES_PER_CHUNK * (4 / 5));
	 *
	 * The funny math is to avoid integer division.
	 */
	inobt_records = (inodes * 5) / (4 * XFS_INODES_PER_CHUNK);

	/*
	 * Allocate enough space to prefetch at least two inobt records so that
	 * we can cache both the record where the iwalk started and the next
	 * record.  This simplifies the AG inode walk loop setup code.
	 */
	return max(inobt_records, 2U);
}
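
/*
 * A worked example of the conversion above (illustrative numbers): a caller
 * asking to prefetch 1000 inodes is rounded up to 1024, and (1024 * 5) /
 * (4 * 64) yields 20 cached inobt records; in other words, we budget as if
 * inode chunks average four-fifths full.
 */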

/*
 * Walk all inodes in the filesystem starting from @startino.  The @iwalk_fn
 * will be called for each allocated inode, being passed the inode's number
 * and @data.  @inode_records controls how many inobt records' worth of
 * inodes we try to read ahead.
 */
int
xfs_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_iwalk_fn		iwalk_fn,
	unsigned int		inode_records,
	void			*data)
{
	struct xfs_iwalk_ag	iwag = {
		.mp		= mp,
		.tp		= tp,
		.iwalk_fn	= iwalk_fn,
		.data		= data,
		.startino	= startino,
		.sz_recs	= xfs_iwalk_prefetch(inode_records),
		.trim_start	= 1,
		.skip_empty	= 1,
		.pwork		= XFS_PWORK_SINGLE_THREADED,
		.lastino	= NULLFSINO,
	};
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

	error = xfs_iwalk_alloc(&iwag);
	if (error)
		return error;

	for (; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_iwalk_ag(&iwag);
		if (error)
			break;
		iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
		if (flags & XFS_IWALK_SAME_AG)
			break;
	}

	xfs_iwalk_free(&iwag);
	return error;
}

/* Run per-thread iwalk work. */
static int
xfs_iwalk_ag_work(
	struct xfs_mount	*mp,
	struct xfs_pwork	*pwork)
{
	struct xfs_iwalk_ag	*iwag;
	int			error = 0;

	iwag = container_of(pwork, struct xfs_iwalk_ag, pwork);
	if (xfs_pwork_want_abort(pwork))
		goto out;

	error = xfs_iwalk_alloc(iwag);
	if (error)
		goto out;

	error = xfs_iwalk_ag(iwag);
	xfs_iwalk_free(iwag);
out:
	kmem_free(iwag);
	return error;
}

/*
 * Walk all the inodes in the filesystem using multiple threads to process each
 * AG.
 */
int
xfs_iwalk_threaded(
	struct xfs_mount	*mp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_iwalk_fn		iwalk_fn,
	unsigned int		inode_records,
	bool			polled,
	void			*data)
{
	struct xfs_pwork_ctl	pctl;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	unsigned int		nr_threads;
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_IWALK_FLAGS_ALL));

	nr_threads = xfs_pwork_guess_datadev_parallelism(mp);
	error = xfs_pwork_init(mp, &pctl, xfs_iwalk_ag_work, "xfs_iwalk",
			nr_threads);
	if (error)
		return error;

	for (; agno < mp->m_sb.sb_agcount; agno++) {
		struct xfs_iwalk_ag	*iwag;

		if (xfs_pwork_ctl_want_abort(&pctl))
			break;

		iwag = kmem_zalloc(sizeof(struct xfs_iwalk_ag), 0);
		iwag->mp = mp;
		iwag->iwalk_fn = iwalk_fn;
		iwag->data = data;
		iwag->startino = startino;
		iwag->sz_recs = xfs_iwalk_prefetch(inode_records);
		iwag->lastino = NULLFSINO;
		xfs_pwork_queue(&pctl, &iwag->pwork);
		startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
		if (flags & XFS_IWALK_SAME_AG)
			break;
	}

	if (polled)
		xfs_pwork_poll(&pctl);
	return xfs_pwork_destroy(&pctl);
}
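
/*
 * Example usage (an illustrative sketch with hypothetical names): kick off a
 * concurrent walk of every AG.  Because each AG is processed by its own
 * worker, anything reached through @data must be safe for concurrent access,
 * e.g. an atomic counter:
 *
 *	STATIC int
 *	xfs_iwalk_count_atomic(
 *		struct xfs_mount	*mp,
 *		struct xfs_trans	*tp,
 *		xfs_ino_t		ino,
 *		void			*data)
 *	{
 *		atomic64_inc(data);
 *		return 0;
 *	}
 *
 *	atomic64_t	count = ATOMIC64_INIT(0);
 *
 *	error = xfs_iwalk_threaded(mp, 0, 0, xfs_iwalk_count_atomic, 0,
 *			true, &count);
 */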

/*
 * Allow callers to cache up to a page's worth of inobt records.  This reflects
 * the existing inumbers prefetching behavior.  Since the inobt walk does not
 * itself do anything with the inobt records, we can set a fairly high limit
 * here.
 */
#define MAX_INOBT_WALK_PREFETCH	\
	(PAGE_SIZE / sizeof(struct xfs_inobt_rec_incore))

/*
 * Given the number of records that the user wanted, set the number of inobt
 * records that we buffer in memory.  Set the maximum if @inobt_records == 0.
 */
static inline unsigned int
xfs_inobt_walk_prefetch(
	unsigned int		inobt_records)
{
	/*
	 * If the caller didn't tell us the number of inobt records they
	 * wanted, assume the maximum prefetch possible for best performance.
	 */
	if (inobt_records == 0)
		inobt_records = MAX_INOBT_WALK_PREFETCH;

	/*
	 * Allocate enough space to prefetch at least two inobt records so that
	 * we can cache both the record where the iwalk started and the next
	 * record.  This simplifies the AG inode walk loop setup code.
	 */
	inobt_records = max(inobt_records, 2U);

	/*
	 * Cap prefetch at that maximum so that we don't use an absurd amount
	 * of memory.
	 */
	return min_t(unsigned int, inobt_records, MAX_INOBT_WALK_PREFETCH);
}

/*
 * Walk all inode btree records in the filesystem starting from @startino.
 * The @inobt_walk_fn will be called for each btree record, being passed the
 * incore record and @data.  @inobt_records controls how many inobt records
 * we try to cache ahead of time.
 */
int
xfs_inobt_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		startino,
	unsigned int		flags,
	xfs_inobt_walk_fn	inobt_walk_fn,
	unsigned int		inobt_records,
	void			*data)
{
	struct xfs_iwalk_ag	iwag = {
		.mp		= mp,
		.tp		= tp,
		.inobt_walk_fn	= inobt_walk_fn,
		.data		= data,
		.startino	= startino,
		.sz_recs	= xfs_inobt_walk_prefetch(inobt_records),
		.pwork		= XFS_PWORK_SINGLE_THREADED,
		.lastino	= NULLFSINO,
	};
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	int			error;

	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(!(flags & ~XFS_INOBT_WALK_FLAGS_ALL));

	error = xfs_iwalk_alloc(&iwag);
	if (error)
		return error;

	for (; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_iwalk_ag(&iwag);
		if (error)
			break;
		iwag.startino = XFS_AGINO_TO_INO(mp, agno + 1, 0);
		if (flags & XFS_INOBT_WALK_SAME_AG)
			break;
	}

	xfs_iwalk_free(&iwag);
	return error;
}
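
/*
 * Example usage (an illustrative sketch with hypothetical names): total up
 * the free inodes recorded in every inobt record, using a walk function that
 * matches the xfs_inobt_walk_fn typedef declared in xfs_iwalk.h.
 *
 *	STATIC int
 *	xfs_count_free_fn(
 *		struct xfs_mount		*mp,
 *		struct xfs_trans		*tp,
 *		xfs_agnumber_t			agno,
 *		const struct xfs_inobt_rec_incore *irec,
 *		void				*data)
 *	{
 *		uint64_t		*freecount = data;
 *
 *		*freecount += irec->ir_freecount;
 *		return 0;
 *	}
 *
 *	uint64_t	freecount = 0;
 *
 *	error = xfs_inobt_walk(mp, NULL, 0, 0, xfs_count_free_fn, 0,
 *			&freecount);
 */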