xref: /OK3568_Linux_fs/kernel/fs/xfs/scrub/agheader.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2017 Oracle.  All Rights Reserved.
4*4882a593Smuzhiyun  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include "xfs.h"
7*4882a593Smuzhiyun #include "xfs_fs.h"
8*4882a593Smuzhiyun #include "xfs_shared.h"
9*4882a593Smuzhiyun #include "xfs_format.h"
10*4882a593Smuzhiyun #include "xfs_trans_resv.h"
11*4882a593Smuzhiyun #include "xfs_mount.h"
12*4882a593Smuzhiyun #include "xfs_btree.h"
13*4882a593Smuzhiyun #include "xfs_sb.h"
14*4882a593Smuzhiyun #include "xfs_alloc.h"
15*4882a593Smuzhiyun #include "xfs_ialloc.h"
16*4882a593Smuzhiyun #include "xfs_rmap.h"
17*4882a593Smuzhiyun #include "scrub/scrub.h"
18*4882a593Smuzhiyun #include "scrub/common.h"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* Superblock */
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun /* Cross-reference with the other btrees. */
23*4882a593Smuzhiyun STATIC void
xchk_superblock_xref(struct xfs_scrub * sc,struct xfs_buf * bp)24*4882a593Smuzhiyun xchk_superblock_xref(
25*4882a593Smuzhiyun 	struct xfs_scrub	*sc,
26*4882a593Smuzhiyun 	struct xfs_buf		*bp)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	struct xfs_mount	*mp = sc->mp;
29*4882a593Smuzhiyun 	xfs_agnumber_t		agno = sc->sm->sm_agno;
30*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
31*4882a593Smuzhiyun 	int			error;
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
34*4882a593Smuzhiyun 		return;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	agbno = XFS_SB_BLOCK(mp);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	error = xchk_ag_init(sc, agno, &sc->sa);
39*4882a593Smuzhiyun 	if (!xchk_xref_process_error(sc, agno, agbno, &error))
40*4882a593Smuzhiyun 		return;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	xchk_xref_is_used_space(sc, agbno, 1);
43*4882a593Smuzhiyun 	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
44*4882a593Smuzhiyun 	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
45*4882a593Smuzhiyun 	xchk_xref_is_not_shared(sc, agbno, 1);
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun 	/* scrub teardown will take care of sc->sa for us */
48*4882a593Smuzhiyun }
49*4882a593Smuzhiyun 
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 *
 * Returns 0 (corruption is reported via sc->sm->sm_flags) or a
 * negative errno if the secondary sb could not even be read.
 */
int
xchk_superblock(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_buf		*bp;
	struct xfs_dsb		*sb;
	xfs_agnumber_t		agno;
	uint32_t		v2_ok;
	__be32			features_mask;
	int			error;
	__be16			vernum_mask;

	/* sb 0 was validated at mount time; nothing to check here. */
	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
		/* fall through */
	default:
		break;
	}
	if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = bp->b_addr;

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xchk_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xchk_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xchk_block_set_preen(sc, bp);

	/* The log2 fields below are single bytes, so no endian swab needed. */
	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xchk_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xchk_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xchk_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xchk_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xchk_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xchk_block_set_corrupt(sc, bp);

		/* sb_bad_features2 should mirror sb_features2 exactly. */
		if (sb->sb_features2 != sb->sb_bad_features2)
			xchk_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xchk_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xchk_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xchk_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xchk_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xchk_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xchk_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xchk_block_set_corrupt(sc, bp);

	xchk_superblock_xref(sc, bp);

	return error;
}
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun /* AGF */
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun /* Tally freespace record lengths. */
344*4882a593Smuzhiyun STATIC int
xchk_agf_record_bno_lengths(struct xfs_btree_cur * cur,struct xfs_alloc_rec_incore * rec,void * priv)345*4882a593Smuzhiyun xchk_agf_record_bno_lengths(
346*4882a593Smuzhiyun 	struct xfs_btree_cur		*cur,
347*4882a593Smuzhiyun 	struct xfs_alloc_rec_incore	*rec,
348*4882a593Smuzhiyun 	void				*priv)
349*4882a593Smuzhiyun {
350*4882a593Smuzhiyun 	xfs_extlen_t			*blocks = priv;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	(*blocks) += rec->ar_blockcount;
353*4882a593Smuzhiyun 	return 0;
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun /* Check agf_freeblks */
357*4882a593Smuzhiyun static inline void
xchk_agf_xref_freeblks(struct xfs_scrub * sc)358*4882a593Smuzhiyun xchk_agf_xref_freeblks(
359*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
360*4882a593Smuzhiyun {
361*4882a593Smuzhiyun 	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
362*4882a593Smuzhiyun 	xfs_extlen_t		blocks = 0;
363*4882a593Smuzhiyun 	int			error;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	if (!sc->sa.bno_cur)
366*4882a593Smuzhiyun 		return;
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	error = xfs_alloc_query_all(sc->sa.bno_cur,
369*4882a593Smuzhiyun 			xchk_agf_record_bno_lengths, &blocks);
370*4882a593Smuzhiyun 	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
371*4882a593Smuzhiyun 		return;
372*4882a593Smuzhiyun 	if (blocks != be32_to_cpu(agf->agf_freeblks))
373*4882a593Smuzhiyun 		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun /* Cross reference the AGF with the cntbt (freespace by length btree) */
377*4882a593Smuzhiyun static inline void
xchk_agf_xref_cntbt(struct xfs_scrub * sc)378*4882a593Smuzhiyun xchk_agf_xref_cntbt(
379*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
380*4882a593Smuzhiyun {
381*4882a593Smuzhiyun 	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
382*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
383*4882a593Smuzhiyun 	xfs_extlen_t		blocks;
384*4882a593Smuzhiyun 	int			have;
385*4882a593Smuzhiyun 	int			error;
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	if (!sc->sa.cnt_cur)
388*4882a593Smuzhiyun 		return;
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	/* Any freespace at all? */
391*4882a593Smuzhiyun 	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
392*4882a593Smuzhiyun 	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
393*4882a593Smuzhiyun 		return;
394*4882a593Smuzhiyun 	if (!have) {
395*4882a593Smuzhiyun 		if (agf->agf_freeblks != cpu_to_be32(0))
396*4882a593Smuzhiyun 			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
397*4882a593Smuzhiyun 		return;
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun 
400*4882a593Smuzhiyun 	/* Check agf_longest */
401*4882a593Smuzhiyun 	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
402*4882a593Smuzhiyun 	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
403*4882a593Smuzhiyun 		return;
404*4882a593Smuzhiyun 	if (!have || blocks != be32_to_cpu(agf->agf_longest))
405*4882a593Smuzhiyun 		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun 
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;	/* running total, roots excluded */
	int			error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		/* agf_btreeblks does not count the root block, hence -1. */
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun /* Check agf_refcount_blocks against tree size */
455*4882a593Smuzhiyun static inline void
xchk_agf_xref_refcblks(struct xfs_scrub * sc)456*4882a593Smuzhiyun xchk_agf_xref_refcblks(
457*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
458*4882a593Smuzhiyun {
459*4882a593Smuzhiyun 	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
460*4882a593Smuzhiyun 	xfs_agblock_t		blocks;
461*4882a593Smuzhiyun 	int			error;
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	if (!sc->sa.refc_cur)
464*4882a593Smuzhiyun 		return;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
467*4882a593Smuzhiyun 	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
468*4882a593Smuzhiyun 		return;
469*4882a593Smuzhiyun 	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
470*4882a593Smuzhiyun 		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
471*4882a593Smuzhiyun }
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun /* Cross-reference with the other btrees. */
474*4882a593Smuzhiyun STATIC void
xchk_agf_xref(struct xfs_scrub * sc)475*4882a593Smuzhiyun xchk_agf_xref(
476*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
477*4882a593Smuzhiyun {
478*4882a593Smuzhiyun 	struct xfs_mount	*mp = sc->mp;
479*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
480*4882a593Smuzhiyun 	int			error;
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
483*4882a593Smuzhiyun 		return;
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	agbno = XFS_AGF_BLOCK(mp);
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	error = xchk_ag_btcur_init(sc, &sc->sa);
488*4882a593Smuzhiyun 	if (error)
489*4882a593Smuzhiyun 		return;
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	xchk_xref_is_used_space(sc, agbno, 1);
492*4882a593Smuzhiyun 	xchk_agf_xref_freeblks(sc);
493*4882a593Smuzhiyun 	xchk_agf_xref_cntbt(sc);
494*4882a593Smuzhiyun 	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
495*4882a593Smuzhiyun 	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
496*4882a593Smuzhiyun 	xchk_agf_xref_btreeblks(sc);
497*4882a593Smuzhiyun 	xchk_xref_is_not_shared(sc, agbno, 1);
498*4882a593Smuzhiyun 	xchk_agf_xref_refcblks(sc);
499*4882a593Smuzhiyun 
500*4882a593Smuzhiyun 	/* scrub teardown will take care of sc->sa for us */
501*4882a593Smuzhiyun }
502*4882a593Smuzhiyun 
/*
 * Scrub the AGF.
 *
 * Validates the on-disk AGF header against the mount geometry, the
 * in-core perag counters, and (via xchk_agf_xref) the AG btrees.
 * Returns 0 or a negative errno; corruption is reported through
 * sc->sm->sm_flags.
 */
int
xchk_agf(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_agf		*agf;
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	xfs_agblock_t		eoag;
	xfs_agblock_t		agfl_first;
	xfs_agblock_t		agfl_last;
	xfs_agblock_t		agfl_count;
	xfs_agblock_t		fl_count;
	int			level;
	int			error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xchk_buffer_recheck(sc, sc->sa.agf_bp);

	agf = sc->sa.agf_bp->b_addr;

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* rmapbt root/level only exist when the feature is enabled. */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Likewise for the refcount btree (reflink feature). */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	/* The AGFL is a circular buffer, so the active range may wrap. */
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	/* With zero entries, first/last carry no usable information. */
	if (agfl_count != 0 && fl_count != agfl_count)
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Do the incore counters match? */
	pag = xfs_perag_get(mp, agno);
	if (pag->pagf_freeblks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_flcount != be32_to_cpu(agf->agf_flcount))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	if (pag->pagf_btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
	xfs_perag_put(pag);

	xchk_agf_xref(sc);
out:
	return error;
}
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun /* AGFL */
599*4882a593Smuzhiyun 
/* In-core state carried through the AGFL walk. */
struct xchk_agfl_info {
	unsigned int		sz_entries;	/* capacity of @entries */
	unsigned int		nr_entries;	/* entries recorded so far */
	xfs_agblock_t		*entries;	/* AGFL block numbers collected */
	struct xfs_scrub	*sc;		/* scrub context */
};
606*4882a593Smuzhiyun 
/* Cross-reference one AGFL block with the other btrees. */
STATIC void
xchk_agfl_block_xref(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	/* Skip cross-referencing once corruption has been flagged. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	/* Each AGFL block is one in-use, AG-owned, unshared block. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
	xchk_xref_is_not_shared(sc, agbno, 1);
}
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun /* Scrub an AGFL block. */
623*4882a593Smuzhiyun STATIC int
xchk_agfl_block(struct xfs_mount * mp,xfs_agblock_t agbno,void * priv)624*4882a593Smuzhiyun xchk_agfl_block(
625*4882a593Smuzhiyun 	struct xfs_mount	*mp,
626*4882a593Smuzhiyun 	xfs_agblock_t		agbno,
627*4882a593Smuzhiyun 	void			*priv)
628*4882a593Smuzhiyun {
629*4882a593Smuzhiyun 	struct xchk_agfl_info	*sai = priv;
630*4882a593Smuzhiyun 	struct xfs_scrub	*sc = sai->sc;
631*4882a593Smuzhiyun 	xfs_agnumber_t		agno = sc->sa.agno;
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 	if (xfs_verify_agbno(mp, agno, agbno) &&
634*4882a593Smuzhiyun 	    sai->nr_entries < sai->sz_entries)
635*4882a593Smuzhiyun 		sai->entries[sai->nr_entries++] = agbno;
636*4882a593Smuzhiyun 	else
637*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	xchk_agfl_block_xref(sc, agbno);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
642*4882a593Smuzhiyun 		return -ECANCELED;
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 	return 0;
645*4882a593Smuzhiyun }
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun static int
xchk_agblock_cmp(const void * pa,const void * pb)648*4882a593Smuzhiyun xchk_agblock_cmp(
649*4882a593Smuzhiyun 	const void		*pa,
650*4882a593Smuzhiyun 	const void		*pb)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun 	const xfs_agblock_t	*a = pa;
653*4882a593Smuzhiyun 	const xfs_agblock_t	*b = pb;
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	return (int)*a - (int)*b;
656*4882a593Smuzhiyun }
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun /* Cross-reference with the other btrees. */
659*4882a593Smuzhiyun STATIC void
xchk_agfl_xref(struct xfs_scrub * sc)660*4882a593Smuzhiyun xchk_agfl_xref(
661*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
662*4882a593Smuzhiyun {
663*4882a593Smuzhiyun 	struct xfs_mount	*mp = sc->mp;
664*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
665*4882a593Smuzhiyun 	int			error;
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
668*4882a593Smuzhiyun 		return;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 	agbno = XFS_AGFL_BLOCK(mp);
671*4882a593Smuzhiyun 
672*4882a593Smuzhiyun 	error = xchk_ag_btcur_init(sc, &sc->sa);
673*4882a593Smuzhiyun 	if (error)
674*4882a593Smuzhiyun 		return;
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	xchk_xref_is_used_space(sc, agbno, 1);
677*4882a593Smuzhiyun 	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
678*4882a593Smuzhiyun 	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
679*4882a593Smuzhiyun 	xchk_xref_is_not_shared(sc, agbno, 1);
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	/*
682*4882a593Smuzhiyun 	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
683*4882a593Smuzhiyun 	 * active so that the agfl block xref can use it too.
684*4882a593Smuzhiyun 	 */
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun /* Scrub the AGFL. */
688*4882a593Smuzhiyun int
xchk_agfl(struct xfs_scrub * sc)689*4882a593Smuzhiyun xchk_agfl(
690*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	struct xchk_agfl_info	sai;
693*4882a593Smuzhiyun 	struct xfs_agf		*agf;
694*4882a593Smuzhiyun 	xfs_agnumber_t		agno;
695*4882a593Smuzhiyun 	unsigned int		agflcount;
696*4882a593Smuzhiyun 	unsigned int		i;
697*4882a593Smuzhiyun 	int			error;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	agno = sc->sa.agno = sc->sm->sm_agno;
700*4882a593Smuzhiyun 	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
701*4882a593Smuzhiyun 			&sc->sa.agf_bp, &sc->sa.agfl_bp);
702*4882a593Smuzhiyun 	if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
703*4882a593Smuzhiyun 		goto out;
704*4882a593Smuzhiyun 	if (!sc->sa.agf_bp)
705*4882a593Smuzhiyun 		return -EFSCORRUPTED;
706*4882a593Smuzhiyun 	xchk_buffer_recheck(sc, sc->sa.agfl_bp);
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	xchk_agfl_xref(sc);
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
711*4882a593Smuzhiyun 		goto out;
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	/* Allocate buffer to ensure uniqueness of AGFL entries. */
714*4882a593Smuzhiyun 	agf = sc->sa.agf_bp->b_addr;
715*4882a593Smuzhiyun 	agflcount = be32_to_cpu(agf->agf_flcount);
716*4882a593Smuzhiyun 	if (agflcount > xfs_agfl_size(sc->mp)) {
717*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
718*4882a593Smuzhiyun 		goto out;
719*4882a593Smuzhiyun 	}
720*4882a593Smuzhiyun 	memset(&sai, 0, sizeof(sai));
721*4882a593Smuzhiyun 	sai.sc = sc;
722*4882a593Smuzhiyun 	sai.sz_entries = agflcount;
723*4882a593Smuzhiyun 	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
724*4882a593Smuzhiyun 			KM_MAYFAIL);
725*4882a593Smuzhiyun 	if (!sai.entries) {
726*4882a593Smuzhiyun 		error = -ENOMEM;
727*4882a593Smuzhiyun 		goto out;
728*4882a593Smuzhiyun 	}
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/* Check the blocks in the AGFL. */
731*4882a593Smuzhiyun 	error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
732*4882a593Smuzhiyun 			sc->sa.agfl_bp, xchk_agfl_block, &sai);
733*4882a593Smuzhiyun 	if (error == -ECANCELED) {
734*4882a593Smuzhiyun 		error = 0;
735*4882a593Smuzhiyun 		goto out_free;
736*4882a593Smuzhiyun 	}
737*4882a593Smuzhiyun 	if (error)
738*4882a593Smuzhiyun 		goto out_free;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	if (agflcount != sai.nr_entries) {
741*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agf_bp);
742*4882a593Smuzhiyun 		goto out_free;
743*4882a593Smuzhiyun 	}
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	/* Sort entries, check for duplicates. */
746*4882a593Smuzhiyun 	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
747*4882a593Smuzhiyun 			xchk_agblock_cmp, NULL);
748*4882a593Smuzhiyun 	for (i = 1; i < sai.nr_entries; i++) {
749*4882a593Smuzhiyun 		if (sai.entries[i] == sai.entries[i - 1]) {
750*4882a593Smuzhiyun 			xchk_block_set_corrupt(sc, sc->sa.agf_bp);
751*4882a593Smuzhiyun 			break;
752*4882a593Smuzhiyun 		}
753*4882a593Smuzhiyun 	}
754*4882a593Smuzhiyun 
755*4882a593Smuzhiyun out_free:
756*4882a593Smuzhiyun 	kmem_free(sai.entries);
757*4882a593Smuzhiyun out:
758*4882a593Smuzhiyun 	return error;
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun /* AGI */
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun /* Check agi_count/agi_freecount */
764*4882a593Smuzhiyun static inline void
xchk_agi_xref_icounts(struct xfs_scrub * sc)765*4882a593Smuzhiyun xchk_agi_xref_icounts(
766*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
767*4882a593Smuzhiyun {
768*4882a593Smuzhiyun 	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
769*4882a593Smuzhiyun 	xfs_agino_t		icount;
770*4882a593Smuzhiyun 	xfs_agino_t		freecount;
771*4882a593Smuzhiyun 	int			error;
772*4882a593Smuzhiyun 
773*4882a593Smuzhiyun 	if (!sc->sa.ino_cur)
774*4882a593Smuzhiyun 		return;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
777*4882a593Smuzhiyun 	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
778*4882a593Smuzhiyun 		return;
779*4882a593Smuzhiyun 	if (be32_to_cpu(agi->agi_count) != icount ||
780*4882a593Smuzhiyun 	    be32_to_cpu(agi->agi_freecount) != freecount)
781*4882a593Smuzhiyun 		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
782*4882a593Smuzhiyun }
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun /* Check agi_[fi]blocks against tree size */
785*4882a593Smuzhiyun static inline void
xchk_agi_xref_fiblocks(struct xfs_scrub * sc)786*4882a593Smuzhiyun xchk_agi_xref_fiblocks(
787*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
788*4882a593Smuzhiyun {
789*4882a593Smuzhiyun 	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
790*4882a593Smuzhiyun 	xfs_agblock_t		blocks;
791*4882a593Smuzhiyun 	int			error = 0;
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 	if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb))
794*4882a593Smuzhiyun 		return;
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	if (sc->sa.ino_cur) {
797*4882a593Smuzhiyun 		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
798*4882a593Smuzhiyun 		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
799*4882a593Smuzhiyun 			return;
800*4882a593Smuzhiyun 		if (blocks != be32_to_cpu(agi->agi_iblocks))
801*4882a593Smuzhiyun 			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
802*4882a593Smuzhiyun 	}
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	if (sc->sa.fino_cur) {
805*4882a593Smuzhiyun 		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
806*4882a593Smuzhiyun 		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
807*4882a593Smuzhiyun 			return;
808*4882a593Smuzhiyun 		if (blocks != be32_to_cpu(agi->agi_fblocks))
809*4882a593Smuzhiyun 			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
810*4882a593Smuzhiyun 	}
811*4882a593Smuzhiyun }
812*4882a593Smuzhiyun 
813*4882a593Smuzhiyun /* Cross-reference with the other btrees. */
814*4882a593Smuzhiyun STATIC void
xchk_agi_xref(struct xfs_scrub * sc)815*4882a593Smuzhiyun xchk_agi_xref(
816*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
817*4882a593Smuzhiyun {
818*4882a593Smuzhiyun 	struct xfs_mount	*mp = sc->mp;
819*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
820*4882a593Smuzhiyun 	int			error;
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
823*4882a593Smuzhiyun 		return;
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	agbno = XFS_AGI_BLOCK(mp);
826*4882a593Smuzhiyun 
827*4882a593Smuzhiyun 	error = xchk_ag_btcur_init(sc, &sc->sa);
828*4882a593Smuzhiyun 	if (error)
829*4882a593Smuzhiyun 		return;
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	xchk_xref_is_used_space(sc, agbno, 1);
832*4882a593Smuzhiyun 	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
833*4882a593Smuzhiyun 	xchk_agi_xref_icounts(sc);
834*4882a593Smuzhiyun 	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
835*4882a593Smuzhiyun 	xchk_xref_is_not_shared(sc, agbno, 1);
836*4882a593Smuzhiyun 	xchk_agi_xref_fiblocks(sc);
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 	/* scrub teardown will take care of sc->sa for us */
839*4882a593Smuzhiyun }
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun /* Scrub the AGI. */
842*4882a593Smuzhiyun int
xchk_agi(struct xfs_scrub * sc)843*4882a593Smuzhiyun xchk_agi(
844*4882a593Smuzhiyun 	struct xfs_scrub	*sc)
845*4882a593Smuzhiyun {
846*4882a593Smuzhiyun 	struct xfs_mount	*mp = sc->mp;
847*4882a593Smuzhiyun 	struct xfs_agi		*agi;
848*4882a593Smuzhiyun 	struct xfs_perag	*pag;
849*4882a593Smuzhiyun 	xfs_agnumber_t		agno;
850*4882a593Smuzhiyun 	xfs_agblock_t		agbno;
851*4882a593Smuzhiyun 	xfs_agblock_t		eoag;
852*4882a593Smuzhiyun 	xfs_agino_t		agino;
853*4882a593Smuzhiyun 	xfs_agino_t		first_agino;
854*4882a593Smuzhiyun 	xfs_agino_t		last_agino;
855*4882a593Smuzhiyun 	xfs_agino_t		icount;
856*4882a593Smuzhiyun 	int			i;
857*4882a593Smuzhiyun 	int			level;
858*4882a593Smuzhiyun 	int			error = 0;
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	agno = sc->sa.agno = sc->sm->sm_agno;
861*4882a593Smuzhiyun 	error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
862*4882a593Smuzhiyun 			&sc->sa.agf_bp, &sc->sa.agfl_bp);
863*4882a593Smuzhiyun 	if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
864*4882a593Smuzhiyun 		goto out;
865*4882a593Smuzhiyun 	xchk_buffer_recheck(sc, sc->sa.agi_bp);
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	agi = sc->sa.agi_bp->b_addr;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	/* Check the AG length */
870*4882a593Smuzhiyun 	eoag = be32_to_cpu(agi->agi_length);
871*4882a593Smuzhiyun 	if (eoag != xfs_ag_block_count(mp, agno))
872*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	/* Check btree roots and levels */
875*4882a593Smuzhiyun 	agbno = be32_to_cpu(agi->agi_root);
876*4882a593Smuzhiyun 	if (!xfs_verify_agbno(mp, agno, agbno))
877*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 	level = be32_to_cpu(agi->agi_level);
880*4882a593Smuzhiyun 	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
881*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
884*4882a593Smuzhiyun 		agbno = be32_to_cpu(agi->agi_free_root);
885*4882a593Smuzhiyun 		if (!xfs_verify_agbno(mp, agno, agbno))
886*4882a593Smuzhiyun 			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 		level = be32_to_cpu(agi->agi_free_level);
889*4882a593Smuzhiyun 		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
890*4882a593Smuzhiyun 			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
891*4882a593Smuzhiyun 	}
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun 	/* Check inode counters */
894*4882a593Smuzhiyun 	xfs_agino_range(mp, agno, &first_agino, &last_agino);
895*4882a593Smuzhiyun 	icount = be32_to_cpu(agi->agi_count);
896*4882a593Smuzhiyun 	if (icount > last_agino - first_agino + 1 ||
897*4882a593Smuzhiyun 	    icount < be32_to_cpu(agi->agi_freecount))
898*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun 	/* Check inode pointers */
901*4882a593Smuzhiyun 	agino = be32_to_cpu(agi->agi_newino);
902*4882a593Smuzhiyun 	if (!xfs_verify_agino_or_null(mp, agno, agino))
903*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	agino = be32_to_cpu(agi->agi_dirino);
906*4882a593Smuzhiyun 	if (!xfs_verify_agino_or_null(mp, agno, agino))
907*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
908*4882a593Smuzhiyun 
909*4882a593Smuzhiyun 	/* Check unlinked inode buckets */
910*4882a593Smuzhiyun 	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
911*4882a593Smuzhiyun 		agino = be32_to_cpu(agi->agi_unlinked[i]);
912*4882a593Smuzhiyun 		if (!xfs_verify_agino_or_null(mp, agno, agino))
913*4882a593Smuzhiyun 			xchk_block_set_corrupt(sc, sc->sa.agi_bp);
914*4882a593Smuzhiyun 	}
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	if (agi->agi_pad32 != cpu_to_be32(0))
917*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 	/* Do the incore counters match? */
920*4882a593Smuzhiyun 	pag = xfs_perag_get(mp, agno);
921*4882a593Smuzhiyun 	if (pag->pagi_count != be32_to_cpu(agi->agi_count))
922*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
923*4882a593Smuzhiyun 	if (pag->pagi_freecount != be32_to_cpu(agi->agi_freecount))
924*4882a593Smuzhiyun 		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
925*4882a593Smuzhiyun 	xfs_perag_put(pag);
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun 	xchk_agi_xref(sc);
928*4882a593Smuzhiyun out:
929*4882a593Smuzhiyun 	return error;
930*4882a593Smuzhiyun }
931