// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_iwalk.h"
#include "xfs_itable.h"
#include "xfs_error.h"
#include "xfs_icache.h"
#include "xfs_health.h"

/*
 * Bulk Stat
 * =========
 *
 * Use the inode walking functions to fill out struct xfs_bulkstat for every
 * allocated inode, then pass the stat information to some externally provided
 * iteration function.
 */
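/*
 * The iteration function (bulkstat_one_fmt_pf) is supplied by the caller
 * and decides where each struct xfs_bulkstat goes.  As a hedged sketch
 * only -- modelled on the copy-to-user formatter in xfs_ioctl.c, with a
 * hypothetical name; verify the helpers against this tree -- a formatter
 * might look like:
 *
 *	STATIC int
 *	xfs_bulkstat_fmt_example(
 *		struct xfs_ibulk		*breq,
 *		const struct xfs_bulkstat	*bstat)
 *	{
 *		if (copy_to_user(breq->ubuffer, bstat, sizeof(*bstat)))
 *			return -EFAULT;
 *		return xfs_ibulk_advance(breq, sizeof(*bstat));
 *	}
 *
 * xfs_ibulk_advance() moves breq->ubuffer past the record just written and
 * bumps breq->ocount, returning -ECANCELED once breq->icount records have
 * been emitted so that the walk terminates cleanly.
 */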

struct xfs_bstat_chunk {
	bulkstat_one_fmt_pf	formatter;
	struct xfs_ibulk	*breq;
	struct xfs_bulkstat	*buf;
};

/*
 * Fill out the bulkstat info for a single inode and report it somewhere.
 *
 * bc->breq->startino is effectively the inode cursor as we walk through the
 * filesystem.  Therefore, we update it any time we need to move the cursor
 * forward, regardless of whether or not we're sending any bstat information
 * back to userspace.  If the inode is internal metadata or has been freed
 * out from under us, we simply keep going.
 *
 * However, if any other type of error happens we want to stop right where we
 * are so that userspace will call back with the exact number of the bad inode
 * and we can send back an error code.
 *
 * Note that if the formatter tells us there's no space left in the buffer we
 * move the cursor forward and abort the walk.
 */
STATIC int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_bstat_chunk	*bc)
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct inode		*inode;
	struct xfs_bulkstat	*buf = bc->buf;
	int			error = -EINVAL;

	if (xfs_internal_inum(mp, ino))
		goto out_advance;

	error = xfs_iget(mp, tp, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error == -ENOENT || error == -EINVAL)
		goto out_advance;
	if (error)
		goto out;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);
	inode = VFS_I(ip);

	dic = &ip->i_d;

	/*
	 * xfs_iget returns the following without needing further change.
	 */
	buf->bs_projectid = ip->i_d.di_projid;
	buf->bs_ino = ino;
	buf->bs_uid = i_uid_read(inode);
	buf->bs_gid = i_gid_read(inode);
	buf->bs_size = dic->di_size;

	buf->bs_nlink = inode->i_nlink;
	buf->bs_atime = inode->i_atime.tv_sec;
	buf->bs_atime_nsec = inode->i_atime.tv_nsec;
	buf->bs_mtime = inode->i_mtime.tv_sec;
	buf->bs_mtime_nsec = inode->i_mtime.tv_nsec;
	buf->bs_ctime = inode->i_ctime.tv_sec;
	buf->bs_ctime_nsec = inode->i_ctime.tv_nsec;
	buf->bs_btime = dic->di_crtime.tv_sec;
	buf->bs_btime_nsec = dic->di_crtime.tv_nsec;
	buf->bs_gen = inode->i_generation;
	buf->bs_mode = inode->i_mode;

	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize_blks = dic->di_extsize;
	buf->bs_extents = xfs_ifork_nextents(&ip->i_df);
	xfs_bulkstat_health(ip, buf);
	buf->bs_aextents = xfs_ifork_nextents(ip->i_afp);
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);
	buf->bs_version = XFS_BULKSTAT_VERSION_V5;

	if (xfs_sb_version_has_v3inode(&mp->m_sb)) {
		if (dic->di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
			buf->bs_cowextsize_blks = dic->di_cowextsize;
	}

	switch (ip->i_df.if_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = sysv_encode_dev(inode->i_rdev);
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);

	error = bc->formatter(bc->breq, buf);
	if (error == -ECANCELED)
		goto out_advance;
	if (error)
		goto out;

out_advance:
	/*
	 * Advance the cursor to the inode that comes after the one we just
	 * looked at.  We want the caller to move along if the bulkstat
	 * information was copied successfully; if we tried to grab the inode
	 * but it's no longer allocated; or if it's internal metadata.
	 */
	bc->breq->startino = ino + 1;
out:
	return error;
}

/* Bulkstat a single inode. */
int
xfs_bulkstat_one(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	int			error;

	ASSERT(breq->icount == 1);

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	error = xfs_bulkstat_one_int(breq->mp, NULL, breq->startino, &bc);

	kmem_free(bc.buf);

	/*
	 * If we reported one inode to userspace then we abort because we hit
	 * the end of the buffer.  Don't leak that back to userspace.
	 */
	if (error == -ECANCELED)
		error = 0;

	return error;
}

static int
xfs_bulkstat_iwalk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	void			*data)
{
	int			error;

	error = xfs_bulkstat_one_int(mp, tp, ino, data);
	/* bulkstat just skips over missing inodes */
	if (error == -ENOENT || error == -EINVAL)
		return 0;
	return error;
}

/*
 * Check the incoming startino parameter.
 *
 * We allow any inode value that could map to physical space inside the
 * filesystem because if there are no inodes there, bulkstat moves on to the
 * next chunk.  In other words, the magic agino value of zero takes us to the
 * first chunk in the AG, and an agino value past the end of the AG takes us to
 * the first chunk in the next AG.
 *
 * Therefore we can end early if the requested inode is beyond the end of the
 * filesystem or doesn't map properly.
 */
static inline bool
xfs_bulkstat_already_done(
	struct xfs_mount	*mp,
	xfs_ino_t		startino)
{
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, startino);
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, startino);

	return agno >= mp->m_sb.sb_agcount ||
	       startino != XFS_AGINO_TO_INO(mp, agno, agino);
}
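
/*
 * Worked example (hypothetical geometry): on a filesystem with
 * sb_agcount == 4, a startino whose AG number decodes to 7 lies beyond
 * the last AG, and a startino whose (agno, agino) pair does not
 * reassemble into the same 64-bit inode number cannot name real inode
 * space; in both cases the walk is already done before it starts.
 */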

/* Return stat information in bulk (by-inode) for the filesystem. */
int
xfs_bulkstat(
	struct xfs_ibulk	*breq,
	bulkstat_one_fmt_pf	formatter)
{
	struct xfs_bstat_chunk	bc = {
		.formatter	= formatter,
		.breq		= breq,
	};
	int			error;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	bc.buf = kmem_zalloc(sizeof(struct xfs_bulkstat),
			KM_MAYFAIL);
	if (!bc.buf)
		return -ENOMEM;

	error = xfs_iwalk(breq->mp, NULL, breq->startino, breq->flags,
			xfs_bulkstat_iwalk, breq->icount, &bc);

	kmem_free(bc.buf);

	/*
	 * We found some inodes, so clear the error status and return them.
	 * The lastino pointer will point directly at the inode that triggered
	 * any error that occurred, so on the next call the error will be
	 * triggered again and propagated to userspace as there will be no
	 * formatted inodes in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

/* Convert bulkstat (v5) to bstat (v1). */
void
xfs_bulkstat_to_bstat(
	struct xfs_mount		*mp,
	struct xfs_bstat		*bs1,
	const struct xfs_bulkstat	*bstat)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(bs1, 0, sizeof(struct xfs_bstat));
	bs1->bs_ino = bstat->bs_ino;
	bs1->bs_mode = bstat->bs_mode;
	bs1->bs_nlink = bstat->bs_nlink;
	bs1->bs_uid = bstat->bs_uid;
	bs1->bs_gid = bstat->bs_gid;
	bs1->bs_rdev = bstat->bs_rdev;
	bs1->bs_blksize = bstat->bs_blksize;
	bs1->bs_size = bstat->bs_size;
	bs1->bs_atime.tv_sec = bstat->bs_atime;
	bs1->bs_mtime.tv_sec = bstat->bs_mtime;
	bs1->bs_ctime.tv_sec = bstat->bs_ctime;
	bs1->bs_atime.tv_nsec = bstat->bs_atime_nsec;
	bs1->bs_mtime.tv_nsec = bstat->bs_mtime_nsec;
	bs1->bs_ctime.tv_nsec = bstat->bs_ctime_nsec;
	bs1->bs_blocks = bstat->bs_blocks;
	bs1->bs_xflags = bstat->bs_xflags;
	bs1->bs_extsize = XFS_FSB_TO_B(mp, bstat->bs_extsize_blks);
	bs1->bs_extents = bstat->bs_extents;
	bs1->bs_gen = bstat->bs_gen;
	bs1->bs_projid_lo = bstat->bs_projectid & 0xFFFF;
	bs1->bs_forkoff = bstat->bs_forkoff;
	bs1->bs_projid_hi = bstat->bs_projectid >> 16;
	bs1->bs_sick = bstat->bs_sick;
	bs1->bs_checked = bstat->bs_checked;
	bs1->bs_cowextsize = XFS_FSB_TO_B(mp, bstat->bs_cowextsize_blks);
	bs1->bs_dmevmask = 0;
	bs1->bs_dmstate = 0;
	bs1->bs_aextents = bstat->bs_aextents;
}

struct xfs_inumbers_chunk {
	inumbers_fmt_pf		formatter;
	struct xfs_ibulk	*breq;
};

/*
 * INUMBERS
 * ========
 * This is how we export inode btree records to userspace, so that XFS tools
 * can figure out where inodes are allocated.
 */
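
/*
 * As with bulkstat, the caller supplies the formatter.  A minimal sketch,
 * assuming the same copy-to-user pattern as the ioctl layer (hypothetical
 * name; check the real formatter in xfs_ioctl.c):
 *
 *	STATIC int
 *	xfs_inumbers_fmt_example(
 *		struct xfs_ibulk		*breq,
 *		const struct xfs_inumbers	*igrp)
 *	{
 *		if (copy_to_user(breq->ubuffer, igrp, sizeof(*igrp)))
 *			return -EFAULT;
 *		return xfs_ibulk_advance(breq, sizeof(*igrp));
 *	}
 */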

/*
 * Format the inode group structure and report it somewhere.
 *
 * Similar to xfs_bulkstat_one_int, startino is the inode cursor as we walk
 * through the filesystem, so we move it forward unless there was a runtime
 * error.  If the formatter tells us the buffer is now full we also move the
 * cursor forward and abort the walk.
 */
STATIC int
xfs_inumbers_walk(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	const struct xfs_inobt_rec_incore *irec,
	void			*data)
{
	struct xfs_inumbers	inogrp = {
		.xi_startino	= XFS_AGINO_TO_INO(mp, agno, irec->ir_startino),
		.xi_alloccount	= irec->ir_count - irec->ir_freecount,
		.xi_allocmask	= ~irec->ir_free,
		.xi_version	= XFS_INUMBERS_VERSION_V5,
	};
	struct xfs_inumbers_chunk *ic = data;
	int			error;

	error = ic->formatter(ic->breq, &inogrp);
	if (error && error != -ECANCELED)
		return error;

	ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) +
			XFS_INODES_PER_CHUNK;
	return error;
}
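
/*
 * A consumer decodes these records by walking xi_allocmask: bit i set
 * means inode (xi_startino + i) is allocated.  A hedged userspace-side
 * sketch (visit() is a hypothetical callback):
 *
 *	for (i = 0; i < XFS_INODES_PER_CHUNK; i++)
 *		if (ig->xi_allocmask & (1ULL << i))
 *			visit(ig->xi_startino + i);
 */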

/*
 * Return inode number table for the filesystem.
 */
int
xfs_inumbers(
	struct xfs_ibulk	*breq,
	inumbers_fmt_pf		formatter)
{
	struct xfs_inumbers_chunk ic = {
		.formatter	= formatter,
		.breq		= breq,
	};
	int			error = 0;

	if (xfs_bulkstat_already_done(breq->mp, breq->startino))
		return 0;

	error = xfs_inobt_walk(breq->mp, NULL, breq->startino, breq->flags,
			xfs_inumbers_walk, breq->icount, &ic);

	/*
	 * We found some inode groups, so clear the error status and return
	 * them.  The lastino pointer will point directly at the inode that
	 * triggered any error that occurred, so on the next call the error
	 * will be triggered again and propagated to userspace as there will be
	 * no formatted inode groups in the buffer.
	 */
	if (breq->ocount > 0)
		error = 0;

	return error;
}

/* Convert an inumbers (v5) struct to an inogrp (v1) struct. */
void
xfs_inumbers_to_inogrp(
	struct xfs_inogrp		*ig1,
	const struct xfs_inumbers	*ig)
{
	/* memset is needed here because of padding holes in the structure. */
	memset(ig1, 0, sizeof(struct xfs_inogrp));
	ig1->xi_startino = ig->xi_startino;
	ig1->xi_alloccount = ig->xi_alloccount;
	ig1->xi_allocmask = ig->xi_allocmask;
}
398