// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/*
 * Online Scrub and Repair
 *
 * Traditionally, XFS (the kernel driver) did not know how to check or
 * repair on-disk data structures. That task was left to the xfs_check
 * and xfs_repair tools, both of which require taking the filesystem
 * offline for a thorough but time-consuming examination. Online
 * scrub & repair, on the other hand, enables us to check the metadata
 * for obvious errors while carefully stepping around the filesystem's
 * ongoing operations, locking rules, etc.
 *
 * Given that most XFS metadata consist of records stored in a btree,
 * most of the checking functions iterate the btree blocks themselves
 * looking for irregularities. When a record block is encountered, each
 * record can be checked for obviously bad values. Record values can
 * also be cross-referenced against other btrees to look for potential
 * misunderstandings between pieces of metadata.
 *
 * It is expected that the checkers responsible for per-AG metadata
 * structures will lock the AG headers (AGI, AGF, AGFL), iterate the
 * metadata structure, and perform any relevant cross-referencing before
 * unlocking the AG and returning the results to userspace. These
 * scrubbers must not keep an AG locked for too long to avoid tying up
 * the block and inode allocators.
 *
 * Block maps and b-trees rooted in an inode present a special challenge
 * because they can involve extents from any AG. The general scrubber
 * structure of lock -> check -> xref -> unlock still holds, but AG
 * locking order rules /must/ be obeyed to avoid deadlocks. The
 * ordering rule, of course, is that we must lock in increasing AG
 * order. Helper functions are provided to track which AG headers we've
 * already locked. If we detect an imminent locking order violation, we
 * can signal a potential deadlock, in which case the scrubber can jump
 * out to the top level, lock all the AGs in order, and retry the scrub.
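 * (In the code below this shows up as the -EDEADLOCK return value from a
 * scrubber, which causes xfs_scrub_metadata() to set XCHK_TRY_HARDER and
 * restart the whole operation.)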
 *
 * For file data (directories, extended attributes, symlinks) scrub, we
 * can simply lock the inode and walk the data. For btree data
 * (directories and attributes) we follow the same btree-scrubbing
 * strategy outlined previously to check the records.
 *
 * We use a bit of trickery with transactions to avoid buffer deadlocks
 * if there is a cycle in the metadata. The basic problem is that
 * travelling down a btree involves locking the current buffer at each
 * tree level. If a pointer should somehow point back to a buffer that
 * we've already examined, we will deadlock due to the second buffer
 * locking attempt. Note however that grabbing a buffer in transaction
 * context links the locked buffer to the transaction. If we try to
 * re-grab the buffer in the context of the same transaction, we avoid
 * the second lock attempt and continue. Between the verifier and the
 * scrubber, something will notice that something is amiss and report
 * the corruption. Therefore, each scrubber will allocate an empty
 * transaction, attach buffers to it, and cancel the transaction at the
 * end of the scrub run. Cancelling a non-dirty transaction simply
 * unlocks the buffers.
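 *
 * In rough outline the pattern looks like this -- a sketch only, since the
 * real scrubbers obtain their transactions through their setup functions:
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	(... read and cross-check btree buffers in the context of tp ...)
 *	xfs_trans_cancel(tp);	(clean transaction, so this only unlocks)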
 *
 * There are four pieces of data that scrub can communicate to
 * userspace. The first is the error code (errno), which can be used to
 * communicate operational errors in performing the scrub. There are
 * also three flags that can be set in the scrub context. If the data
 * structure itself is corrupt, the CORRUPT flag will be set. If
 * the metadata is correct but otherwise suboptimal, the PREEN flag
 * will be set.
 *
 * We perform secondary validation of filesystem metadata by
 * cross-referencing every record with all other available metadata.
 * For example, for block mapping extents, we verify that there are no
 * records in the free space and inode btrees corresponding to that
 * space extent and that there is a corresponding entry in the reverse
 * mapping btree. Inconsistent metadata is noted by setting the
 * XCORRUPT flag; btree query function errors are noted by setting the
 * XFAIL flag and deleting the cursor to prevent further attempts to
 * cross-reference with a defective btree.
 *
 * If a piece of metadata proves corrupt or suboptimal, the userspace
 * program can ask the kernel to apply some tender loving care (TLC) to
 * the metadata object by setting the REPAIR flag and re-calling the
 * scrub ioctl. "Corruption" is defined by metadata violating the
 * on-disk specification; operations cannot continue if the violation is
 * left untreated. It is possible for XFS to continue if an object is
 * "suboptimal", however performance may be degraded. Repairs are
 * usually performed by rebuilding the metadata entirely out of
 * redundant metadata. Optimizing, on the other hand, can sometimes be
 * done without rebuilding entire structures.
 *
 * Generally speaking, the repair code has the following code structure:
 * Lock -> scrub -> repair -> commit -> re-lock -> re-scrub -> unlock.
 * The first check helps us figure out if we need to rebuild or simply
 * optimize the structure so that the rebuild knows what to do. The
 * second check evaluates the completeness of the repair; that is what
 * is reported to userspace.
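 * (xfs_scrub_metadata() below drives that loop: xrep_attempt() performs the
 * rebuild and returns -EAGAIN when the object should be scrubbed again.)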
 *
 * A quick note on symbol prefixes:
 * - "xfs_" are general XFS symbols.
 * - "xchk_" are symbols related to metadata checking.
 * - "xrep_" are symbols related to metadata repair.
 * - "xfs_scrub_" are symbols that tie online fsck to the rest of XFS.
 */

/*
 * Scrub probe -- userspace uses this to probe if we're willing to scrub
 * or repair a given mountpoint. This will be used by xfs_scrub to
 * probe the kernel's abilities to scrub (and repair) the metadata. We
 * do this by validating the ioctl inputs from userspace, preparing the
 * filesystem for a scrub (or a repair) operation, and immediately
 * returning to userspace. Userspace can use the returned errno and
 * structure state to decide (in broad terms) if scrub/repair are
 * supported by the running kernel.
 */
static int
xchk_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}
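
/*
 * For example, userspace might probe for scrub support roughly like this
 * (a sketch of the calling convention only, not the canonical xfs_scrub
 * code):
 *
 *	struct xfs_scrub_metadata	sm = {
 *		.sm_type = XFS_SCRUB_TYPE_PROBE,
 *	};
 *
 *	if (ioctl(fd, XFS_IOC_SCRUB_METADATA, &sm) == 0)
 *		(the kernel supports scrub; inspect sm.sm_flags for details)
 */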

/* Scrub setup and teardown */

/* Free all the resources and finish the transactions. */
STATIC int
xchk_teardown(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip_in,
	int			error)
{
	xchk_ag_free(sc, &sc->sa);
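	/*
	 * Commit the transaction if a repair was requested and completed
	 * without error; otherwise cancel it.  Cancelling a clean scrub
	 * transaction just unlocks the buffers attached during the scan.
	 */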
	if (sc->tp) {
		if (error == 0 && (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
			error = xfs_trans_commit(sc->tp);
		else
			xfs_trans_cancel(sc->tp);
		sc->tp = NULL;
	}
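	/*
	 * Unlock the inode and, unless it is the inode the caller passed in
	 * or a filesystem-internal inode, drop the reference that setup took.
	 */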
	if (sc->ip) {
		if (sc->ilock_flags)
			xfs_iunlock(sc->ip, sc->ilock_flags);
		if (sc->ip != ip_in &&
		    !xfs_internal_inum(sc->mp, sc->ip->i_ino))
			xfs_irele(sc->ip);
		sc->ip = NULL;
	}
	sb_end_write(sc->mp->m_super);
	if (sc->flags & XCHK_REAPING_DISABLED)
		xchk_start_reaping(sc);
	if (sc->flags & XCHK_HAS_QUOTAOFFLOCK) {
		mutex_unlock(&sc->mp->m_quotainfo->qi_quotaofflock);
		sc->flags &= ~XCHK_HAS_QUOTAOFFLOCK;
	}
	if (sc->buf) {
		kmem_free(sc->buf);
		sc->buf = NULL;
	}
	return error;
}

/* Scrubbing dispatch. */

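/*
 * Each entry in this table pairs a scrub type with its setup, check, and
 * (where implemented) repair functions.  The optional ->has predicate gates
 * a type on a superblock feature bit, and ->type records whether the
 * scrubber needs no resources, an AG, an inode, or the whole filesystem.
 */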
static const struct xchk_meta_ops meta_scrub_ops[] = {
	[XFS_SCRUB_TYPE_PROBE] = {	/* ioctl presence test */
		.type	= ST_NONE,
		.setup	= xchk_setup_fs,
		.scrub	= xchk_probe,
		.repair	= xrep_probe,
	},
	[XFS_SCRUB_TYPE_SB] = {		/* superblock */
		.type	= ST_PERAG,
		.setup	= xchk_setup_fs,
		.scrub	= xchk_superblock,
		.repair	= xrep_superblock,
	},
	[XFS_SCRUB_TYPE_AGF] = {	/* agf */
		.type	= ST_PERAG,
		.setup	= xchk_setup_fs,
		.scrub	= xchk_agf,
		.repair	= xrep_agf,
	},
	[XFS_SCRUB_TYPE_AGFL] = {	/* agfl */
		.type	= ST_PERAG,
		.setup	= xchk_setup_fs,
		.scrub	= xchk_agfl,
		.repair	= xrep_agfl,
	},
	[XFS_SCRUB_TYPE_AGI] = {	/* agi */
		.type	= ST_PERAG,
		.setup	= xchk_setup_fs,
		.scrub	= xchk_agi,
		.repair	= xrep_agi,
	},
	[XFS_SCRUB_TYPE_BNOBT] = {	/* bnobt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_allocbt,
		.scrub	= xchk_bnobt,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_CNTBT] = {	/* cntbt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_allocbt,
		.scrub	= xchk_cntbt,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_INOBT] = {	/* inobt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_iallocbt,
		.scrub	= xchk_inobt,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_FINOBT] = {	/* finobt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_iallocbt,
		.scrub	= xchk_finobt,
		.has	= xfs_sb_version_hasfinobt,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_RMAPBT] = {	/* rmapbt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_rmapbt,
		.scrub	= xchk_rmapbt,
		.has	= xfs_sb_version_hasrmapbt,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_REFCNTBT] = {	/* refcountbt */
		.type	= ST_PERAG,
		.setup	= xchk_setup_ag_refcountbt,
		.scrub	= xchk_refcountbt,
		.has	= xfs_sb_version_hasreflink,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_INODE] = {	/* inode record */
		.type	= ST_INODE,
		.setup	= xchk_setup_inode,
		.scrub	= xchk_inode,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_BMBTD] = {	/* inode data fork */
		.type	= ST_INODE,
		.setup	= xchk_setup_inode_bmap,
		.scrub	= xchk_bmap_data,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_BMBTA] = {	/* inode attr fork */
		.type	= ST_INODE,
		.setup	= xchk_setup_inode_bmap,
		.scrub	= xchk_bmap_attr,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_BMBTC] = {	/* inode CoW fork */
		.type	= ST_INODE,
		.setup	= xchk_setup_inode_bmap,
		.scrub	= xchk_bmap_cow,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_DIR] = {	/* directory */
		.type	= ST_INODE,
		.setup	= xchk_setup_directory,
		.scrub	= xchk_directory,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_XATTR] = {	/* extended attributes */
		.type	= ST_INODE,
		.setup	= xchk_setup_xattr,
		.scrub	= xchk_xattr,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_SYMLINK] = {	/* symbolic link */
		.type	= ST_INODE,
		.setup	= xchk_setup_symlink,
		.scrub	= xchk_symlink,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_PARENT] = {	/* parent pointers */
		.type	= ST_INODE,
		.setup	= xchk_setup_parent,
		.scrub	= xchk_parent,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_RTBITMAP] = {	/* realtime bitmap */
		.type	= ST_FS,
		.setup	= xchk_setup_rt,
		.scrub	= xchk_rtbitmap,
		.has	= xfs_sb_version_hasrealtime,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_RTSUM] = {	/* realtime summary */
		.type	= ST_FS,
		.setup	= xchk_setup_rt,
		.scrub	= xchk_rtsummary,
		.has	= xfs_sb_version_hasrealtime,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_UQUOTA] = {	/* user quota */
		.type	= ST_FS,
		.setup	= xchk_setup_quota,
		.scrub	= xchk_quota,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_GQUOTA] = {	/* group quota */
		.type	= ST_FS,
		.setup	= xchk_setup_quota,
		.scrub	= xchk_quota,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_PQUOTA] = {	/* project quota */
		.type	= ST_FS,
		.setup	= xchk_setup_quota,
		.scrub	= xchk_quota,
		.repair	= xrep_notsupported,
	},
	[XFS_SCRUB_TYPE_FSCOUNTERS] = {	/* fs summary counters */
		.type	= ST_FS,
		.setup	= xchk_setup_fscounters,
		.scrub	= xchk_fscounters,
		.repair	= xrep_notsupported,
	},
};
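
/*
 * Note that the scrub types themselves come from xfs_fs.h; the BUILD_BUG_ON()
 * in xfs_scrub_metadata() below trips if this table falls out of sync with
 * XFS_SCRUB_TYPE_NR, so a new type needs a matching entry here.
 */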

/* This isn't a stable feature; warn once per day. */
static inline void
xchk_experimental_warning(
	struct xfs_mount	*mp)
{
	static struct ratelimit_state scrub_warning = RATELIMIT_STATE_INIT(
			"xchk_warning", 86400 * HZ, 1);
	ratelimit_set_flags(&scrub_warning, RATELIMIT_MSG_ON_RELEASE);

	if (__ratelimit(&scrub_warning))
		xfs_alert(mp,
"EXPERIMENTAL online scrub feature in use. Use at your own risk!");
}

static int
xchk_validate_inputs(
	struct xfs_mount		*mp,
	struct xfs_scrub_metadata	*sm)
{
	int				error;
	const struct xchk_meta_ops	*ops;

	error = -EINVAL;
	/* Check our inputs. */
	sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
	if (sm->sm_flags & ~XFS_SCRUB_FLAGS_IN)
		goto out;
	/* sm_reserved[] must be zero */
	if (memchr_inv(sm->sm_reserved, 0, sizeof(sm->sm_reserved)))
		goto out;

	error = -ENOENT;
	/* Do we know about this type of metadata? */
	if (sm->sm_type >= XFS_SCRUB_TYPE_NR)
		goto out;
	ops = &meta_scrub_ops[sm->sm_type];
	if (ops->setup == NULL || ops->scrub == NULL)
		goto out;
	/* Does this fs even support this type of metadata? */
	if (ops->has && !ops->has(&mp->m_sb))
		goto out;

	error = -EINVAL;
	/* restricting fields must be appropriate for type */
	switch (ops->type) {
	case ST_NONE:
	case ST_FS:
		if (sm->sm_ino || sm->sm_gen || sm->sm_agno)
			goto out;
		break;
	case ST_PERAG:
		if (sm->sm_ino || sm->sm_gen ||
		    sm->sm_agno >= mp->m_sb.sb_agcount)
			goto out;
		break;
	case ST_INODE:
		if (sm->sm_agno || (sm->sm_gen && !sm->sm_ino))
			goto out;
		break;
	default:
		goto out;
	}

	/*
	 * We only want to repair read-write v5+ filesystems. Defer the check
	 * for ops->repair until after our scrub confirms that we need to
	 * perform repairs so that we avoid failing due to not supporting
	 * repairing an object that doesn't need repairs.
	 */
	if (sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) {
		error = -EOPNOTSUPP;
		if (!xfs_sb_version_hascrc(&mp->m_sb))
			goto out;

		error = -EROFS;
		if (mp->m_flags & XFS_MOUNT_RDONLY)
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef CONFIG_XFS_ONLINE_REPAIR
static inline void xchk_postmortem(struct xfs_scrub *sc)
{
	/*
	 * Userspace asked us to repair something, we repaired it, rescanned
	 * it, and the rescan says it's still broken. Scream about this in
	 * the system logs.
	 */
	if ((sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
	    (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				 XFS_SCRUB_OFLAG_XCORRUPT)))
		xrep_failure(sc->mp);
}
#else
static inline void xchk_postmortem(struct xfs_scrub *sc)
{
	/*
	 * Userspace asked us to scrub something, it's broken, and we have no
	 * way of fixing it. Scream in the logs.
	 */
	if (sc->sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
				XFS_SCRUB_OFLAG_XCORRUPT))
		xfs_alert_ratelimited(sc->mp,
				"Corruption detected during scrub.");
}
#endif /* CONFIG_XFS_ONLINE_REPAIR */

/* Dispatch metadata scrubbing. */
int
xfs_scrub_metadata(
	struct xfs_inode		*ip,
	struct xfs_scrub_metadata	*sm)
{
	struct xfs_scrub		sc = {
		.mp	= ip->i_mount,
		.sm	= sm,
		.sa	= {
			.agno = NULLAGNUMBER,
		},
	};
	struct xfs_mount		*mp = ip->i_mount;
	int				error = 0;

	BUILD_BUG_ON(sizeof(meta_scrub_ops) !=
		(sizeof(struct xchk_meta_ops) * XFS_SCRUB_TYPE_NR));

	trace_xchk_start(ip, sm, error);

	/* Forbidden if we are shut down or mounted norecovery. */
	error = -ESHUTDOWN;
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out;
	error = -ENOTRECOVERABLE;
	if (mp->m_flags & XFS_MOUNT_NORECOVERY)
		goto out;

	error = xchk_validate_inputs(mp, sm);
	if (error)
		goto out;

	xchk_experimental_warning(mp);

	sc.ops = &meta_scrub_ops[sm->sm_type];
	sc.sick_mask = xchk_health_mask_for_scrub_type(sm->sm_type);
retry_op:
	/*
	 * If freeze runs concurrently with a scrub, the freeze can be delayed
	 * indefinitely as we walk the filesystem and iterate over metadata
	 * buffers. Freeze quiesces the log (which waits for the buffer LRU to
	 * be emptied) and that won't happen while checking is running.
	 */
	sb_start_write(mp->m_super);

	/* Set up for the operation. */
	error = sc.ops->setup(&sc, ip);
	if (error)
		goto out_teardown;

	/* Scrub for errors. */
	error = sc.ops->scrub(&sc);
	if (!(sc.flags & XCHK_TRY_HARDER) && error == -EDEADLOCK) {
		/*
		 * Scrubbers return -EDEADLOCK to mean 'try harder'.
		 * Tear down everything we hold, then set up again with
		 * preparation for worst-case scenarios.
		 */
		error = xchk_teardown(&sc, ip, 0);
		if (error)
			goto out;
		sc.flags |= XCHK_TRY_HARDER;
		goto retry_op;
	} else if (error)
		goto out_teardown;

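	/* Record what the scan found in the in-core metadata health status. */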
	xchk_update_health(&sc);

	if ((sc.sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR) &&
	    !(sc.flags & XREP_ALREADY_FIXED)) {
		bool needs_fix;

		/* Let debug users force us into the repair routines. */
		if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_FORCE_SCRUB_REPAIR))
			sc.sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;

		needs_fix = (sc.sm->sm_flags & (XFS_SCRUB_OFLAG_CORRUPT |
						XFS_SCRUB_OFLAG_XCORRUPT |
						XFS_SCRUB_OFLAG_PREEN));
		/*
		 * If userspace asked for a repair but it wasn't necessary,
		 * report that back to userspace.
		 */
		if (!needs_fix) {
			sc.sm->sm_flags |= XFS_SCRUB_OFLAG_NO_REPAIR_NEEDED;
			goto out_nofix;
		}

		/*
		 * If it's broken, userspace wants us to fix it, and we haven't
		 * already tried to fix it, then attempt a repair.
		 */
		error = xrep_attempt(ip, &sc);
		if (error == -EAGAIN) {
			/*
			 * Either the repair function succeeded or it couldn't
			 * get all the resources it needs; either way, we go
			 * back to the beginning and call the scrub function.
			 */
			error = xchk_teardown(&sc, ip, 0);
			if (error) {
				xrep_failure(mp);
				goto out;
			}
			goto retry_op;
		}
	}

out_nofix:
	xchk_postmortem(&sc);
out_teardown:
	error = xchk_teardown(&sc, ip, error);
out:
	trace_xchk_done(ip, sm, error);
	if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
		sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
		error = 0;
	}
	return error;
}