// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2019 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_btree.h"
#include "xfs_sb.h"
#include "xfs_health.h"
#include "scrub/scrub.h"
#include "scrub/health.h"

/*
 * Scrub and In-Core Filesystem Health Assessments
 * ===============================================
 *
 * Online scrub and repair have the time and the ability to perform stronger
 * checks than we can do from the metadata verifiers, because they can
 * cross-reference records between data structures.  Therefore, scrub is in a
 * good position to update the online filesystem health assessments to reflect
 * the good/bad state of the data structure.
 *
 * We therefore extend scrub in the following ways to achieve this:
 *
 * 1. Create a "sick_mask" field in the scrub context.  When we're setting up
 *    a scrub call, set this to the default XFS_SICK_* flag(s) for the
 *    selected scrub type (call it A).  Scrub and repair functions can
 *    override the default sick_mask value if they choose.
 *
 * 2. If the scrubber returns a runtime error code, we exit making no changes
 *    to the incore sick state.
 *
 * 3. If the scrubber finds that A is clean, use sick_mask to clear the incore
 *    sick flags before exiting.
 *
 * 4. If the scrubber finds that A is corrupt, use sick_mask to set the incore
 *    sick flags.  If the user didn't want to repair then we exit, leaving the
 *    metadata structure unfixed and the sick flag set.
 *
 * 5. Now we know that A is corrupt and the user wants to repair, so run the
 *    repairer.  If the repairer returns an error code, we exit with that
 *    error code, having made no further changes to the incore sick state.
 *
 * 6. If repair rebuilds A correctly and the subsequent re-scrub of A is
 *    clean, use sick_mask to clear the incore sick flags.  This should have
 *    the effect that A is no longer marked sick.
 *
 * 7. If repair rebuilds A incorrectly, the re-scrub will find it corrupt and
 *    use sick_mask to set the incore sick flags.  This should have no
 *    externally visible effect since we already set them in step (4).
 *
 * There are some complications to this story, however.  For certain types of
 * complementary metadata indices (e.g. inobt/finobt), it is easier to rebuild
 * both structures at the same time.  The following principles apply to this
 * type of repair strategy:
 *
 * 8. Any repair function that rebuilds multiple structures should update
 *    sick_mask_visible to reflect whatever other structures are rebuilt, and
 *    verify that all the rebuilt structures can pass a scrub check.  The
 *    outcomes of 5-7 still apply, but with a sick_mask that covers everything
 *    being rebuilt.
 */

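/*
 * Rough sketch of how the helpers in this file plug into steps 1-7 above.
 * This is illustrative only: the driver code is paraphrased, and
 * run_scrubber(), run_repair_and_rescrub(), and want_repair are hypothetical
 * stand-ins rather than symbols defined anywhere in the kernel.
 *
 *	error = run_scrubber(sc);
 *	if (error)
 *		return error;			// step 2: sick state untouched
 *	xchk_update_health(sc);			// steps 3-4: clear or set flags
 *	if ((sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) && want_repair) {
 *		error = run_repair_and_rescrub(sc);
 *		if (error)
 *			return error;		// step 5: no further changes
 *		xchk_update_health(sc);		// steps 6-7: reflect re-scrub
 *	}
 */
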
/* Map our scrub type to a sick mask and a set of health update functions. */

enum xchk_health_group {
	XHG_FS = 1,
	XHG_RT,
	XHG_AG,
	XHG_INO,
};

struct xchk_health_map {
	enum xchk_health_group	group;
	unsigned int		sick_mask;
};

static const struct xchk_health_map type_to_health_flag[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= { XHG_AG,  XFS_SICK_AG_SB },
	[XFS_SCRUB_TYPE_AGF]		= { XHG_AG,  XFS_SICK_AG_AGF },
	[XFS_SCRUB_TYPE_AGFL]		= { XHG_AG,  XFS_SICK_AG_AGFL },
	[XFS_SCRUB_TYPE_AGI]		= { XHG_AG,  XFS_SICK_AG_AGI },
	[XFS_SCRUB_TYPE_BNOBT]		= { XHG_AG,  XFS_SICK_AG_BNOBT },
	[XFS_SCRUB_TYPE_CNTBT]		= { XHG_AG,  XFS_SICK_AG_CNTBT },
	[XFS_SCRUB_TYPE_INOBT]		= { XHG_AG,  XFS_SICK_AG_INOBT },
	[XFS_SCRUB_TYPE_FINOBT]		= { XHG_AG,  XFS_SICK_AG_FINOBT },
	[XFS_SCRUB_TYPE_RMAPBT]		= { XHG_AG,  XFS_SICK_AG_RMAPBT },
	[XFS_SCRUB_TYPE_REFCNTBT]	= { XHG_AG,  XFS_SICK_AG_REFCNTBT },
	[XFS_SCRUB_TYPE_INODE]		= { XHG_INO, XFS_SICK_INO_CORE },
	[XFS_SCRUB_TYPE_BMBTD]		= { XHG_INO, XFS_SICK_INO_BMBTD },
	[XFS_SCRUB_TYPE_BMBTA]		= { XHG_INO, XFS_SICK_INO_BMBTA },
	[XFS_SCRUB_TYPE_BMBTC]		= { XHG_INO, XFS_SICK_INO_BMBTC },
	[XFS_SCRUB_TYPE_DIR]		= { XHG_INO, XFS_SICK_INO_DIR },
	[XFS_SCRUB_TYPE_XATTR]		= { XHG_INO, XFS_SICK_INO_XATTR },
	[XFS_SCRUB_TYPE_SYMLINK]	= { XHG_INO, XFS_SICK_INO_SYMLINK },
	[XFS_SCRUB_TYPE_PARENT]		= { XHG_INO, XFS_SICK_INO_PARENT },
	[XFS_SCRUB_TYPE_RTBITMAP]	= { XHG_RT,  XFS_SICK_RT_BITMAP },
	[XFS_SCRUB_TYPE_RTSUM]		= { XHG_RT,  XFS_SICK_RT_SUMMARY },
	[XFS_SCRUB_TYPE_UQUOTA]		= { XHG_FS,  XFS_SICK_FS_UQUOTA },
	[XFS_SCRUB_TYPE_GQUOTA]		= { XHG_FS,  XFS_SICK_FS_GQUOTA },
	[XFS_SCRUB_TYPE_PQUOTA]		= { XHG_FS,  XFS_SICK_FS_PQUOTA },
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= { XHG_FS,  XFS_SICK_FS_COUNTERS },
};

/* Return the health status mask for this scrub type. */
unsigned int
xchk_health_mask_for_scrub_type(
	__u32			scrub_type)
{
	return type_to_health_flag[scrub_type].sick_mask;
}

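/*
 * Illustrative use (a sketch, not a quote of the real setup path): scrub
 * setup would seed the default mask from the table above, and an individual
 * scrub or repair function may then override sc->sick_mask before
 * xchk_update_health() runs:
 *
 *	sc->sick_mask = xchk_health_mask_for_scrub_type(sc->sm->sm_type);
 */
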
/*
 * Update filesystem health assessments based on what we found and did.
 *
 * If the scrubber finds errors, we mark sick whatever's mentioned in
 * sick_mask, no matter whether this is a first scan or an evaluation of
 * repair effectiveness.
 *
 * Otherwise, no direct corruption was found, so mark whatever's in
 * sick_mask as healthy.
 */
void
xchk_update_health(
	struct xfs_scrub	*sc)
{
	struct xfs_perag	*pag;
	bool			bad;

	if (!sc->sick_mask)
		return;

	bad = (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT);
	switch (type_to_health_flag[sc->sm->sm_type].group) {
	case XHG_AG:
		pag = xfs_perag_get(sc->mp, sc->sm->sm_agno);
		if (bad)
			xfs_ag_mark_sick(pag, sc->sick_mask);
		else
			xfs_ag_mark_healthy(pag, sc->sick_mask);
		xfs_perag_put(pag);
		break;
	case XHG_INO:
		if (!sc->ip)
			return;
		if (bad)
			xfs_inode_mark_sick(sc->ip, sc->sick_mask);
		else
			xfs_inode_mark_healthy(sc->ip, sc->sick_mask);
		break;
	case XHG_FS:
		if (bad)
			xfs_fs_mark_sick(sc->mp, sc->sick_mask);
		else
			xfs_fs_mark_healthy(sc->mp, sc->sick_mask);
		break;
	case XHG_RT:
		if (bad)
			xfs_rt_mark_sick(sc->mp, sc->sick_mask);
		else
			xfs_rt_mark_healthy(sc->mp, sc->sick_mask);
		break;
	default:
		ASSERT(0);
		break;
	}
}

/* Is the given per-AG btree healthy enough for scanning? */
bool
xchk_ag_btree_healthy_enough(
	struct xfs_scrub	*sc,
	struct xfs_perag	*pag,
	xfs_btnum_t		btnum)
{
	unsigned int		mask = 0;

	/*
	 * We always want the cursor if it's the same type as whatever we're
	 * scrubbing, even if we already know the structure is corrupt.
	 *
	 * Otherwise, we're only interested in the btree for cross-referencing.
	 * If we know the btree is bad then don't bother, just set XFAIL.
	 */
	switch (btnum) {
	case XFS_BTNUM_BNO:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_BNOBT)
			return true;
		mask = XFS_SICK_AG_BNOBT;
		break;
	case XFS_BTNUM_CNT:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_CNTBT)
			return true;
		mask = XFS_SICK_AG_CNTBT;
		break;
	case XFS_BTNUM_INO:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_INOBT)
			return true;
		mask = XFS_SICK_AG_INOBT;
		break;
	case XFS_BTNUM_FINO:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
			return true;
		mask = XFS_SICK_AG_FINOBT;
		break;
	case XFS_BTNUM_RMAP:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_RMAPBT)
			return true;
		mask = XFS_SICK_AG_RMAPBT;
		break;
	case XFS_BTNUM_REFC:
		if (sc->sm->sm_type == XFS_SCRUB_TYPE_REFCNTBT)
			return true;
		mask = XFS_SICK_AG_REFCNTBT;
		break;
	default:
		ASSERT(0);
		return true;
	}

	if (xfs_ag_has_sickness(pag, mask)) {
		sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
		return false;
	}

	return true;
}
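
/*
 * Illustrative caller (a sketch; "sa" and example_bnobt_cursor_init() are
 * placeholder names, not symbols from this file): cross-referencing code
 * should only build a cursor for a secondary btree that is believed to be
 * healthy, and otherwise rely on the XFAIL flag set above:
 *
 *	if (xchk_ag_btree_healthy_enough(sc, sa->pag, XFS_BTNUM_BNO))
 *		sa->bno_cur = example_bnobt_cursor_init(sc, sa);
 */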