// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_extent_busy.h"
#include "xfs_ag_resv.h"
#include "xfs_quota.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/bitmap.h"

/*
 * Attempt to repair some metadata, if the metadata is corrupt and userspace
 * told us to fix it.  This function returns -EAGAIN to mean "re-run scrub",
 * and sets XREP_ALREADY_FIXED in sc->flags if it thinks it repaired anything.
 */
int
xrep_attempt(
	struct xfs_inode	*ip,
	struct xfs_scrub	*sc)
{
	int			error = 0;

	trace_xrep_attempt(ip, sc->sm, error);

	xchk_ag_btcur_free(&sc->sa);

	/* Repair whatever's broken. */
	ASSERT(sc->ops->repair);
	error = sc->ops->repair(sc);
	trace_xrep_done(ip, sc->sm, error);
	switch (error) {
	case 0:
		/*
		 * Repair succeeded.  Commit the fixes and perform a second
		 * scrub so that we can tell userspace if we fixed the
		 * problem.
		 */
		sc->sm->sm_flags &= ~XFS_SCRUB_FLAGS_OUT;
		sc->flags |= XREP_ALREADY_FIXED;
		return -EAGAIN;
	case -EDEADLOCK:
	case -EAGAIN:
		/* Tell the caller to try again having grabbed all the locks. */
		if (!(sc->flags & XCHK_TRY_HARDER)) {
			sc->flags |= XCHK_TRY_HARDER;
			return -EAGAIN;
		}
		/*
		 * We tried harder but still couldn't grab all the resources
		 * we needed to fix it.  The corruption has not been fixed,
		 * so report back to userspace.
		 */
		return -EFSCORRUPTED;
	default:
		return error;
	}
}
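
/*
 * Illustrative sketch, not part of this file: callers are expected to treat
 * -EAGAIN from xrep_attempt() as "tear down and re-run the scrub pass".  The
 * helper names below are assumptions modeled on the scrub dispatcher in
 * scrub/scrub.c, not verbatim code:
 *
 *	retry_op:
 *		error = sc->ops->scrub(sc);
 *		if (!error && (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) {
 *			error = xrep_attempt(ip, sc);
 *			if (error == -EAGAIN) {
 *				// drop resources, then set up + scrub again
 *				xchk_teardown(sc, ip, 0);  // assumed helper
 *				goto retry_op;
 *			}
 *		}
 */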

/*
 * Complain about unfixable problems in the filesystem.  We don't log
 * corruptions when IFLAG_REPAIR wasn't set on the assumption that the driver
 * program is xfs_scrub, which will call back with IFLAG_REPAIR set if the
 * administrator isn't running xfs_scrub in no-repairs mode.
 *
 * Use this helper function because _ratelimited silently declares a static
 * structure to track rate limiting information.
 */
void
xrep_failure(
	struct xfs_mount	*mp)
{
	xfs_alert_ratelimited(mp,
"Corruption not fixed during online repair. Unmount and run xfs_repair.");
}

/*
 * Repair probe -- userspace uses this to probe if we're willing to repair a
 * given mountpoint.
 */
int
xrep_probe(
	struct xfs_scrub	*sc)
{
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	return 0;
}

/*
 * Roll a transaction, keeping the AG headers locked and reinitializing
 * the btree cursors.
 */
int
xrep_roll_ag_trans(
	struct xfs_scrub	*sc)
{
	int			error;

	/* Keep the AG header buffers locked so we can keep going. */
	if (sc->sa.agi_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bhold(sc->tp, sc->sa.agfl_bp);

	/*
	 * Roll the transaction.  We still own the buffer and the buffer lock
	 * regardless of whether or not the roll succeeds.  If the roll fails,
	 * the buffers will be released during teardown on our way out of the
	 * kernel.  If it succeeds, we join them to the new transaction and
	 * move on.
	 */
	error = xfs_trans_roll(&sc->tp);
	if (error)
		return error;

	/* Join AG headers to the new transaction. */
	if (sc->sa.agi_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agi_bp);
	if (sc->sa.agf_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agf_bp);
	if (sc->sa.agfl_bp)
		xfs_trans_bjoin(sc->tp, sc->sa.agfl_bp);

	return 0;
}
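
/*
 * Illustrative sketch of the intended usage pattern (hypothetical caller):
 * a repair that dirties many blocks can roll the transaction between
 * extents so it never overruns its log reservation, mirroring what
 * xrep_reap_block() below does after each disposal:
 *
 *	// for each block we intend to free...
 *	error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
 *	if (error)
 *		return error;
 *	error = xrep_roll_ag_trans(sc);	// fresh reservation, AG headers held
 *	if (error)
 *		return error;
 */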

/*
 * Does the given AG have enough space to rebuild a btree?  Neither AG
 * reservation can be critical, and we must have enough space (factoring
 * in AG reservations) to construct a whole btree.
 */
bool
xrep_ag_has_space(
	struct xfs_perag	*pag,
	xfs_extlen_t		nr_blocks,
	enum xfs_ag_resv_type	type)
{
	return !xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) &&
		!xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA) &&
		pag->pagf_freeblks > xfs_ag_resv_needed(pag, type) + nr_blocks;
}
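
/*
 * Illustrative use (hypothetical caller): a rebuild that knows it needs
 * nr_blocks new blocks could bail out early if the AG can't absorb them:
 *
 *	if (!xrep_ag_has_space(sc->sa.pag, nr_blocks, XFS_AG_RESV_NONE))
 *		return -ENOSPC;
 */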

/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
	if (!error) {
		struct xfs_agf	*agf = bp->b_addr;

		aglen = be32_to_cpu(agf->agf_length);
		freelen = be32_to_cpu(agf->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}
	xfs_perag_put(pag);

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(mp, sm->sm_agno, icount)) {
		xfs_agino_t	first, last;

		xfs_agino_range(mp, sm->sm_agno, &first, &last);
		icount = last - first + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
	    freelen >= aglen) {
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	}

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		inobt_sz *= 2;
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}

/* Allocate a block in an AG. */
int
xrep_alloc_ag_block(
	struct xfs_scrub		*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t			*fsbno,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_alloc_arg		args = {0};
	xfs_agblock_t			bno;
	int				error;

	switch (resv) {
	case XFS_AG_RESV_AGFL:
	case XFS_AG_RESV_RMAPBT:
		error = xfs_alloc_get_freelist(sc->tp, sc->sa.agf_bp, &bno, 1);
		if (error)
			return error;
		if (bno == NULLAGBLOCK)
			return -ENOSPC;
		xfs_extent_busy_reuse(sc->mp, sc->sa.agno, bno, 1, false);
		*fsbno = XFS_AGB_TO_FSB(sc->mp, sc->sa.agno, bno);
		if (resv == XFS_AG_RESV_RMAPBT)
			xfs_ag_resv_rmapbt_alloc(sc->mp, sc->sa.agno);
		return 0;
	default:
		break;
	}

	args.tp = sc->tp;
	args.mp = sc->mp;
	args.oinfo = *oinfo;
	args.fsbno = XFS_AGB_TO_FSB(args.mp, sc->sa.agno, 0);
	args.minlen = 1;
	args.maxlen = 1;
	args.prod = 1;
	args.type = XFS_ALLOCTYPE_THIS_AG;
	args.resv = resv;

	error = xfs_alloc_vextent(&args);
	if (error)
		return error;
	if (args.fsbno == NULLFSBLOCK)
		return -ENOSPC;
	ASSERT(args.len == 1);
	*fsbno = args.fsbno;

	return 0;
}

/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsb,
	struct xfs_buf			**bpp,
	xfs_btnum_t			btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans		*tp = sc->tp;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	int				error;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
	error = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			XFS_FSB_TO_DADDR(mp, fsb), XFS_FSB_TO_BB(mp, 1), 0,
			&bp);
	if (error)
		return error;
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, BBTOB(bp->b_length) - 1);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
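
/*
 * Illustrative sketch (hypothetical rebuild step): the two helpers above
 * chain naturally when planting a new root block.  The btree type, owner
 * info, and reservation here are placeholders, not a prescribed recipe:
 *
 *	xfs_fsblock_t	fsb;
 *	struct xfs_buf	*root_bp;
 *
 *	error = xrep_alloc_ag_block(sc, &XFS_RMAP_OINFO_AG, &fsb,
 *			XFS_AG_RESV_NONE);
 *	if (error)
 *		return error;
 *	error = xrep_init_btblock(sc, fsb, &root_bp, XFS_BTNUM_BNO,
 *			&xfs_bnobt_buf_ops);
 */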

/*
 * Reconstructing per-AG Btrees
 *
 * When a space btree is corrupt, we don't bother trying to fix it.  Instead,
 * we scan secondary space metadata to derive the records that should be in
 * the damaged btree, initialize a fresh btree root, and insert the records.
 * Note that for rebuilding the rmapbt we scan all the primary data to
 * generate the new records.
 *
 * However, that leaves the matter of removing all the metadata describing the
 * old broken structure.  For primary metadata we use the rmap data to collect
 * every extent with a matching rmap owner (bitmap); we then iterate all other
 * metadata structures with the same rmap owner to collect the extents that
 * cannot be removed (sublist).  We then subtract sublist from bitmap to
 * derive the blocks that were used by the old btree.  These blocks can be
 * reaped.
 *
 * For rmapbt reconstructions we must use different tactics for extent
 * collection.  First we iterate all primary metadata (this excludes the old
 * rmapbt, obviously) to generate new rmap records.  The gaps in the rmap
 * records are collected as bitmap.  The bnobt records are collected as
 * sublist.  As with the other btrees we subtract sublist from bitmap, and the
 * result (since the rmapbt lives in the free space) is the set of blocks from
 * the old rmapbt.
 *
 * Disposal of Blocks from Old per-AG Btrees
 *
 * Now that we've constructed a new btree to replace the damaged one, we want
 * to dispose of the blocks that (we think) the old btree was using.
 * Previously, we used the rmapbt to collect the extents (bitmap) with the
 * rmap owner corresponding to the tree we rebuilt, collected extents for any
 * blocks with the same rmap owner that are owned by another data structure
 * (sublist), and subtracted sublist from bitmap.  In theory the extents
 * remaining in bitmap are the old btree's blocks.
 *
 * Unfortunately, it's possible that the btree was crosslinked with other
 * blocks on disk.  The rmap data can tell us if there are multiple owners, so
 * if the rmapbt says there is an owner of this block other than @oinfo, then
 * the block is crosslinked.  Remove the reverse mapping and continue.
 *
 * If there is one rmap record, we can free the block, which removes the
 * reverse mapping but doesn't add the block to the free space.  Our repair
 * strategy is to hope the other metadata objects crosslinked on this block
 * will be rebuilt (atop different blocks), thereby removing all the cross
 * links.
 *
 * If there are no rmap records at all, we also free the block.  If the btree
 * being rebuilt lives in the free space (bnobt/cntbt/rmapbt) then there isn't
 * supposed to be a rmap record and everything is ok.  For other btrees there
 * had to have been an rmap entry for the block to have ended up on @bitmap,
 * so if it's gone now there's something wrong and the fs will shut down.
 *
 * Note: If there are multiple rmap records with only the same rmap owner as
 * the btree we're trying to rebuild and the block is indeed owned by another
 * data structure with the same rmap owner, then the block will be in sublist
 * and therefore doesn't need disposal.  If there are multiple rmap records
 * with only the same rmap owner but the block is not owned by something with
 * the same rmap owner, the block will be freed.
 *
 * The caller is responsible for locking the AG headers for the entire rebuild
 * operation so that nothing else can sneak in and change the AG state while
 * we're not looking.  We also assume that the caller already invalidated any
 * buffers associated with @bitmap.
 */
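
/*
 * Illustrative sketch of the flow described above (assumed helper names
 * from scrub/bitmap.h; a hypothetical primary-btree repair, not verbatim
 * code from any one repair function):
 *
 *	struct xbitmap	old_blocks;	// extents with matching rmap owner
 *	struct xbitmap	keep_blocks;	// extents still in use elsewhere
 *
 *	xbitmap_init(&old_blocks);
 *	xbitmap_init(&keep_blocks);
 *	// ...fill old_blocks from the rmapbt, and keep_blocks from the
 *	// other structures sharing that rmap owner...
 *	error = xbitmap_disunion(&old_blocks, &keep_blocks);
 *	if (!error)
 *		error = xrep_invalidate_blocks(sc, &old_blocks);
 *	if (!error)
 *		error = xrep_reap_extents(sc, &old_blocks, oinfo, resv);
 *	xbitmap_destroy(&keep_blocks);
 *	xbitmap_destroy(&old_blocks);
 */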

/*
 * Invalidate buffers for per-AG btree blocks we're dumping.  This function
 * is not intended for use with file data repairs; we have bunmapi for that.
 */
int
xrep_invalidate_blocks(
	struct xfs_scrub	*sc,
	struct xbitmap		*bitmap)
{
	struct xbitmap_range	*bmr;
	struct xbitmap_range	*n;
	struct xfs_buf		*bp;
	xfs_fsblock_t		fsbno;

	/*
	 * For each block in each extent, see if there's an incore buffer for
	 * exactly that block; if so, invalidate it.  The buffer cache only
	 * lets us look for one buffer at a time, so we have to look one block
	 * at a time.  Avoid invalidating AG headers and post-EOFS blocks
	 * because we never own those; and if we can't TRYLOCK the buffer we
	 * assume it's owned by someone else.
	 */
	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		/* Skip AG headers and post-EOFS blocks */
		if (!xfs_verify_fsbno(sc->mp, fsbno))
			continue;
		bp = xfs_buf_incore(sc->mp->m_ddev_targp,
				XFS_FSB_TO_DADDR(sc->mp, fsbno),
				XFS_FSB_TO_BB(sc->mp, 1), XBF_TRYLOCK);
		if (bp) {
			xfs_trans_bjoin(sc->tp, bp);
			xfs_trans_binval(sc->tp, bp);
		}
	}

	return 0;
}

/* Ensure the freelist is the correct size. */
int
xrep_fix_freelist(
	struct xfs_scrub	*sc,
	bool			can_shrink)
{
	struct xfs_alloc_arg	args = {0};

	args.mp = sc->mp;
	args.tp = sc->tp;
	args.agno = sc->sa.agno;
	args.alignment = 1;
	args.pag = sc->sa.pag;

	return xfs_alloc_fix_freelist(&args,
			can_shrink ? 0 : XFS_ALLOC_FLAG_NOSHRINK);
}

/*
 * Put a block back on the AGFL.
 */
STATIC int
xrep_put_freelist(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno)
{
	int			error;

	/* Make sure there's space on the freelist. */
	error = xrep_fix_freelist(sc, true);
	if (error)
		return error;

	/*
	 * Since we're "freeing" a lost block onto the AGFL, we have to
	 * create an rmap for the block prior to merging it or else other
	 * parts will break.
	 */
	error = xfs_rmap_alloc(sc->tp, sc->sa.agf_bp, sc->sa.agno, agbno, 1,
			&XFS_RMAP_OINFO_AG);
	if (error)
		return error;

	/* Put the block on the AGFL. */
	error = xfs_alloc_put_freelist(sc->tp, sc->sa.agf_bp, sc->sa.agfl_bp,
			agbno, 0);
	if (error)
		return error;
	xfs_extent_busy_insert(sc->tp, sc->sa.agno, agbno, 1,
			XFS_EXTENT_BUSY_SKIP_DISCARD);

	return 0;
}

/* Dispose of a single block. */
STATIC int
xrep_reap_block(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsbno,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		resv)
{
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agf_bp = NULL;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	bool				has_other_rmap;
	int				error;

	agno = XFS_FSB_TO_AGNO(sc->mp, fsbno);
	agbno = XFS_FSB_TO_AGBNO(sc->mp, fsbno);

	/*
	 * If we are repairing per-inode metadata, we need to read in the AGF
	 * buffer.  Otherwise, we're repairing a per-AG structure, so reuse
	 * the AGF buffer that the setup functions already grabbed.
	 */
	if (sc->ip) {
		error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf_bp);
		if (error)
			return error;
	} else {
		agf_bp = sc->sa.agf_bp;
	}
	cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf_bp, agno);

	/* Can we find any other rmappings? */
	error = xfs_rmap_has_other_keys(cur, agbno, 1, oinfo, &has_other_rmap);
	xfs_btree_del_cursor(cur, error);
	if (error)
		goto out_free;

	/*
	 * If there are other rmappings, this block is cross linked and must
	 * not be freed.  Remove the reverse mapping and move on.  Otherwise,
	 * we were the only owner of the block, so free the extent, which will
	 * also remove the rmap.
	 *
	 * XXX: XFS doesn't support detecting the case where a single block
	 * metadata structure is crosslinked with a multi-block structure
	 * because the buffer cache doesn't detect aliasing problems, so we
	 * can't fix 100% of crosslinking problems (yet).  The verifiers will
	 * blow on writeout, the filesystem will shut down, and the admin gets
	 * to run xfs_repair.
	 */
	if (has_other_rmap)
		error = xfs_rmap_free(sc->tp, agf_bp, agno, agbno, 1, oinfo);
	else if (resv == XFS_AG_RESV_AGFL)
		error = xrep_put_freelist(sc, agbno);
	else
		error = xfs_free_extent(sc->tp, fsbno, 1, oinfo, resv);
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	if (error)
		return error;

	if (sc->ip)
		return xfs_trans_roll_inode(&sc->tp, sc->ip);
	return xrep_roll_ag_trans(sc);

out_free:
	if (agf_bp != sc->sa.agf_bp)
		xfs_trans_brelse(sc->tp, agf_bp);
	return error;
}

/* Dispose of every block of every extent in the bitmap. */
int
xrep_reap_extents(
	struct xfs_scrub		*sc,
	struct xbitmap			*bitmap,
	const struct xfs_owner_info	*oinfo,
	enum xfs_ag_resv_type		type)
{
	struct xbitmap_range		*bmr;
	struct xbitmap_range		*n;
	xfs_fsblock_t			fsbno;
	int				error = 0;

	ASSERT(xfs_sb_version_hasrmapbt(&sc->mp->m_sb));

	for_each_xbitmap_block(fsbno, bmr, n, bitmap) {
		ASSERT(sc->ip != NULL ||
		       XFS_FSB_TO_AGNO(sc->mp, fsbno) == sc->sa.agno);
		trace_xrep_dispose_btree_extent(sc->mp,
				XFS_FSB_TO_AGNO(sc->mp, fsbno),
				XFS_FSB_TO_AGBNO(sc->mp, fsbno), 1);

		error = xrep_reap_block(sc, fsbno, oinfo, type);
		if (error)
			break;
	}

	return error;
}

/*
 * Finding per-AG Btree Roots for AGF/AGI Reconstruction
 *
 * If the AGF or AGI become slightly corrupted, it may be necessary to rebuild
 * the AG headers by using the rmap data to rummage through the AG looking for
 * btree roots.  This is not guaranteed to work if the AG is heavily damaged
 * or the rmap data are corrupt.
 *
 * Callers of xrep_find_ag_btree_roots must lock the AGF and AGFL
 * buffers if the AGF is being rebuilt; or the AGF and AGI buffers if the
 * AGI is being rebuilt.  It must maintain these locks until it's safe for
 * other threads to change the btrees' shapes.  The caller provides
 * information about the btrees to look for by passing in an array of
 * xrep_find_ag_btree with the (rmap owner, buf_ops, magic) fields set.
 * The (root, height) fields will be set on return if anything is found.  The
 * last element of the array should have a NULL buf_ops to mark the end of the
 * array.
 *
 * For every rmapbt record matching any of the rmap owners in btree_info,
 * read each block referenced by the rmap record.  If the block is a btree
 * block from this filesystem matching any of the magic numbers and has a
 * level higher than what we've already seen, remember the block and the
 * height of the tree required to have such a block.  When the call completes,
 * we return the highest block we've found for each btree description; those
 * should be the roots.
 */
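
/*
 * Illustrative sketch (hypothetical AGF rebuild): describe the btrees to
 * hunt for with a NULL-terminated array, then let the walker fill in the
 * (root, height) fields.  Field names are assumed to follow struct
 * xrep_find_ag_btree in scrub/repair.h:
 *
 *	struct xrep_find_ag_btree fab[] = {
 *		{
 *			.rmap_owner	= XFS_RMAP_OWN_AG,
 *			.buf_ops	= &xfs_bnobt_buf_ops,
 *		},
 *		{ .buf_ops = NULL },	// terminator
 *	};
 *
 *	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
 *	// on success, fab[0].root and fab[0].height describe the bnobt root
 */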

struct xrep_findroot {
	struct xfs_scrub		*sc;
	struct xfs_buf			*agfl_bp;
	struct xfs_agf			*agf;
	struct xrep_find_ag_btree	*btree_info;
};

/* See if our block is in the AGFL. */
STATIC int
xrep_findroot_agfl_walk(
	struct xfs_mount	*mp,
	xfs_agblock_t		bno,
	void			*priv)
{
	xfs_agblock_t		*agbno = priv;

	return (*agbno == bno) ? -ECANCELED : 0;
}

/* Does this block match the btree information passed in? */
STATIC int
xrep_findroot_block(
	struct xrep_findroot		*ri,
	struct xrep_find_ag_btree	*fab,
	uint64_t			owner,
	xfs_agblock_t			agbno,
	bool				*done_with_block)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_buf			*bp;
	struct xfs_btree_block		*btblock;
	xfs_daddr_t			daddr;
	int				block_level;
	int				error = 0;

	daddr = XFS_AGB_TO_DADDR(mp, ri->sc->sa.agno, agbno);

	/*
	 * Blocks in the AGFL have stale contents that might just happen to
	 * have a matching magic and uuid.  We don't want to pull these blocks
	 * in as part of a tree root, so we have to filter out the AGFL stuff
	 * here.  If the AGFL looks insane we'll just refuse to repair.
	 */
	if (owner == XFS_RMAP_OWN_AG) {
		error = xfs_agfl_walk(mp, ri->agf, ri->agfl_bp,
				xrep_findroot_agfl_walk, &agbno);
		if (error == -ECANCELED)
			return 0;
		if (error)
			return error;
	}

	/*
	 * Read the buffer into memory so that we can see if it's a match for
	 * our btree type.  We have no clue if it is beforehand, and we want to
	 * avoid xfs_trans_read_buf's behavior of dumping the DONE state (which
	 * will cause needless disk reads in subsequent calls to this function)
	 * and logging metadata verifier failures.
	 *
	 * Therefore, pass in NULL buffer ops.  If the buffer was already in
	 * memory from some other caller it will already have b_ops assigned.
	 * If it was in memory from a previous unsuccessful findroot_block
	 * call, the buffer won't have b_ops but it should be clean and ready
	 * for us to try to verify if the read call succeeds.  The same applies
	 * if the buffer wasn't in memory at all.
	 *
	 * Note: If we never match a btree type with this buffer, it will be
	 * left in memory with NULL b_ops.  This shouldn't be a problem unless
	 * the buffer gets written.
	 */
	error = xfs_trans_read_buf(mp, ri->sc->tp, mp->m_ddev_targp, daddr,
			mp->m_bsize, 0, &bp, NULL);
	if (error)
		return error;

	/* Ensure the block magic matches the btree type we're looking for. */
	btblock = XFS_BUF_TO_BLOCK(bp);
	ASSERT(fab->buf_ops->magic[1] != 0);
	if (btblock->bb_magic != fab->buf_ops->magic[1])
		goto out;

	/*
	 * If the buffer already has ops applied and they're not the ones for
	 * this btree type, we know this block doesn't match the btree and we
	 * can bail out.
	 *
	 * If the buffer ops match ours, someone else has already validated
	 * the block for us, so we can move on to checking if this is a root
	 * block candidate.
	 *
	 * If the buffer does not have ops, nobody has successfully validated
	 * the contents and the buffer cannot be dirty.  If the magic, uuid,
	 * and structure match this btree type then we'll move on to checking
	 * if it's a root block candidate.  If there is no match, bail out.
	 */
	if (bp->b_ops) {
		if (bp->b_ops != fab->buf_ops)
			goto out;
	} else {
		ASSERT(!xfs_trans_buf_is_dirty(bp));
		if (!uuid_equal(&btblock->bb_u.s.bb_uuid,
				&mp->m_sb.sb_meta_uuid))
			goto out;
		/*
		 * Read verifiers can reference b_ops, so we set the pointer
		 * here.  If the verifier fails we'll reset the buffer state
		 * to what it was before we touched the buffer.
		 */
		bp->b_ops = fab->buf_ops;
		fab->buf_ops->verify_read(bp);
		if (bp->b_error) {
			bp->b_ops = NULL;
			bp->b_error = 0;
			goto out;
		}

		/*
		 * Some read verifiers will (re)set b_ops, so we must be
		 * careful not to change b_ops after running the verifier.
		 */
	}

	/*
	 * This block passes the magic/uuid and verifier tests for this btree
	 * type.  We don't need the caller to try the other tree types.
	 */
	*done_with_block = true;

	/*
	 * Compare this btree block's level to the height of the current
	 * candidate root block.
	 *
	 * If the level matches the root we found previously, throw away both
	 * blocks because there can't be two candidate roots.
	 *
	 * If level is lower in the tree than the root we found previously,
	 * ignore this block.
	 */
	block_level = xfs_btree_get_level(btblock);
	if (block_level + 1 == fab->height) {
		fab->root = NULLAGBLOCK;
		goto out;
	} else if (block_level < fab->height) {
		goto out;
	}

	/*
	 * This is the highest block in the tree that we've found so far.
	 * Update the btree height to reflect what we've learned from this
	 * block.
	 */
	fab->height = block_level + 1;

	/*
	 * If this block doesn't have sibling pointers, then it's the new root
	 * block candidate.  Otherwise, the root will be found farther up the
	 * tree.
	 */
	if (btblock->bb_u.s.bb_leftsib == cpu_to_be32(NULLAGBLOCK) &&
	    btblock->bb_u.s.bb_rightsib == cpu_to_be32(NULLAGBLOCK))
		fab->root = agbno;
	else
		fab->root = NULLAGBLOCK;

	trace_xrep_findroot_block(mp, ri->sc->sa.agno, agbno,
			be32_to_cpu(btblock->bb_magic), fab->height - 1);
out:
	xfs_trans_brelse(ri->sc->tp, bp);
	return error;
}

/*
 * Do any of the blocks in this rmap record match one of the btrees we're
 * looking for?
 */
STATIC int
xrep_findroot_rmap(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xrep_findroot		*ri = priv;
	struct xrep_find_ag_btree	*fab;
	xfs_agblock_t			b;
	bool				done;
	int				error = 0;

	/* Ignore anything that isn't AG metadata. */
	if (!XFS_RMAP_NON_INODE_OWNER(rec->rm_owner))
		return 0;

	/* Otherwise scan each block + btree type. */
	for (b = 0; b < rec->rm_blockcount; b++) {
		done = false;
		for (fab = ri->btree_info; fab->buf_ops; fab++) {
			if (rec->rm_owner != fab->rmap_owner)
				continue;
			error = xrep_findroot_block(ri, fab,
					rec->rm_owner, rec->rm_startblock + b,
					&done);
			if (error)
				return error;
			if (done)
				break;
		}
	}

	return 0;
}

/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = agf_bp->b_addr;
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}

/* Force a quotacheck the next time we mount. */
void
xrep_force_quotacheck(
	struct xfs_scrub	*sc,
	xfs_dqtype_t		type)
{
	uint			flag;

	flag = xfs_quota_chkd_flag(type);
	if (!(flag & sc->mp->m_qflags))
		return;

	sc->mp->m_qflags &= ~flag;
	spin_lock(&sc->mp->m_sb_lock);
	sc->mp->m_sb.sb_qflags &= ~flag;
	spin_unlock(&sc->mp->m_sb_lock);
	xfs_log_sb(sc->tp);
}

/*
 * Attach dquots to this inode, or schedule quotacheck to fix them.
 *
 * This function ensures that the appropriate dquots are attached to an inode.
 * We cannot allow the dquot code to allocate an on-disk dquot block here
 * because we're already in transaction context with the inode locked.  The
 * on-disk dquot should already exist anyway.  If the quota code signals
 * corruption or missing quota information, schedule quotacheck, which will
 * repair corruptions in the quota metadata.
 */
int
xrep_ino_dqattach(
	struct xfs_scrub	*sc)
{
	int			error;

	error = xfs_qm_dqattach_locked(sc->ip, false);
	switch (error) {
	case -EFSBADCRC:
	case -EFSCORRUPTED:
	case -ENOENT:
		xfs_err_ratelimited(sc->mp,
"inode %llu repair encountered quota error %d, quotacheck forced.",
				(unsigned long long)sc->ip->i_ino, error);
		if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
		if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
		if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
			xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
		/* fall through */
	case -ESRCH:
		error = 0;
		break;
	default:
		break;
	}

	return error;
}