// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"

/*
 * Set us up to scrub reverse mapping btrees.
 */
int
xchk_setup_ag_rmapbt(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_ag_btree(sc, ip, false);
}

/* Reverse-mapping scrubber. */

/* Cross-reference a rmap against the refcount btree. */
STATIC void
xchk_rmapbt_xref_refc(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	bool			non_inode;
	bool			is_bmbt;
	bool			is_attr;
	bool			is_unwritten;
	int			error;

	if (!sc->sa.refc_cur || xchk_skip_xref(sc->sm))
		return;

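	/*
	 * Only written data fork extents can be shared.  Decode the owner
	 * class and flag bits so that a refcounted extent claiming to be
	 * anything else can be flagged below.
	 */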
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec->rm_owner);
	is_bmbt = irec->rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec->rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec->rm_flags & XFS_RMAP_UNWRITTEN;

	/* If this is shared, must be a data fork extent. */
	error = xfs_refcount_find_shared(sc->sa.refc_cur, irec->rm_startblock,
			irec->rm_blockcount, &fbno, &flen, false);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (flen != 0 && (non_inode || is_attr || is_bmbt || is_unwritten))
		xchk_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}

/* Cross-reference with the other btrees. */
STATIC void
xchk_rmapbt_xref(
	struct xfs_scrub	*sc,
	struct xfs_rmap_irec	*irec)
{
	xfs_agblock_t		agbno = irec->rm_startblock;
	xfs_extlen_t		len = irec->rm_blockcount;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

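	/*
	 * Every reverse mapping must point to allocated space.  Inode chunk
	 * owners must map inode chunks; CoW staging extents must be marked
	 * as such in the refcount btree; everything else is checked against
	 * the refcount btree's sharing rules.
	 */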
	xchk_xref_is_used_space(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_INODES)
		xchk_xref_is_inode_chunk(sc, agbno, len);
	else
		xchk_xref_is_not_inode_chunk(sc, agbno, len);
	if (irec->rm_owner == XFS_RMAP_OWN_COW)
		xchk_xref_is_cow_staging(sc, irec->rm_startblock,
				irec->rm_blockcount);
	else
		xchk_rmapbt_xref_refc(sc, irec);
}

/* Scrub an rmapbt record. */
STATIC int
xchk_rmapbt_rec(
	struct xchk_btree	*bs,
	union xfs_btree_rec	*rec)
{
	struct xfs_mount	*mp = bs->cur->bc_mp;
	struct xfs_rmap_irec	irec;
	xfs_agnumber_t		agno = bs->cur->bc_ag.agno;
	bool			non_inode;
	bool			is_unwritten;
	bool			is_bmbt;
	bool			is_attr;
	int			error;

	error = xfs_rmap_btrec_to_irec(rec, &irec);
	if (!xchk_btree_process_error(bs->sc, bs->cur, 0, &error))
		goto out;

	/* Check extent. */
	if (irec.rm_startblock + irec.rm_blockcount <= irec.rm_startblock)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (irec.rm_owner == XFS_RMAP_OWN_FS) {
		/*
		 * xfs_verify_agbno returns false for static fs metadata.
		 * Since that only exists at the start of the AG, validate
		 * that by hand.
		 */
		if (irec.rm_startblock != 0 ||
		    irec.rm_blockcount != XFS_AGFL_BLOCK(mp) + 1)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/*
		 * Otherwise we must point somewhere past the static metadata
		 * but before the end of the FS.  Run the regular check.
		 */
		if (!xfs_verify_agbno(mp, agno, irec.rm_startblock) ||
		    !xfs_verify_agbno(mp, agno, irec.rm_startblock +
				irec.rm_blockcount - 1))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	/* Check flags. */
	non_inode = XFS_RMAP_NON_INODE_OWNER(irec.rm_owner);
	is_bmbt = irec.rm_flags & XFS_RMAP_BMBT_BLOCK;
	is_attr = irec.rm_flags & XFS_RMAP_ATTR_FORK;
	is_unwritten = irec.rm_flags & XFS_RMAP_UNWRITTEN;

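	/*
	 * bmbt blocks and extents belonging to non-inode owners carry no
	 * file offset; the unwritten flag only applies to data fork extents;
	 * and non-inode owners may not carry the bmbt, attr fork, or
	 * unwritten flags at all.
	 */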
	if (is_bmbt && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && irec.rm_offset != 0)
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (is_unwritten && (is_bmbt || non_inode || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (non_inode && (is_bmbt || is_unwritten || is_attr))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

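	/*
	 * The owner must either be a valid inode number or one of the
	 * special XFS_RMAP_OWN_* values.
	 */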
	if (!non_inode) {
		if (!xfs_verify_ino(mp, irec.rm_owner))
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	} else {
		/* Non-inode owner within the magic values? */
		if (irec.rm_owner <= XFS_RMAP_OWN_MIN ||
		    irec.rm_owner > XFS_RMAP_OWN_FS)
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
	}

	xchk_rmapbt_xref(bs->sc, &irec);
out:
	return error;
}

/* Scrub the rmap btree for some AG. */
int
xchk_rmapbt(
	struct xfs_scrub	*sc)
{
	return xchk_btree(sc, sc->sa.rmap_cur, xchk_rmapbt_rec,
			&XFS_RMAP_OINFO_AG, NULL);
}
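
/*
 * The xref helpers below are called by other scrubbers to cross-reference
 * space ownership against this AG's rmap btree.
 */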

/* xref check that the extent is owned by a given owner */
static inline void
xchk_xref_check_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo,
	bool			should_have_rmap)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_record_exists(sc->sa.rmap_cur, bno, len, oinfo,
			&has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap != should_have_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}

/* xref check that the extent is owned by a given owner */
void
xchk_xref_is_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, true);
}

/* xref check that the extent is not owned by a given owner */
void
xchk_xref_is_not_owned_by(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len,
	const struct xfs_owner_info	*oinfo)
{
	xchk_xref_check_owner(sc, bno, len, oinfo, false);
}
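
/*
 * Illustrative example (not part of this file's logic): a caller such as an
 * AG header scrubber could confirm that the superblock block is owned by the
 * filesystem's static metadata like so:
 *
 *	xchk_xref_is_owned_by(sc, XFS_SB_BLOCK(mp), 1, &XFS_RMAP_OINFO_FS);
 */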

/* xref check that the extent has no reverse mapping at all */
void
xchk_xref_has_no_owner(
	struct xfs_scrub	*sc,
	xfs_agblock_t		bno,
	xfs_extlen_t		len)
{
	bool			has_rmap;
	int			error;

	if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
		return;

	error = xfs_rmap_has_record(sc->sa.rmap_cur, bno, len, &has_rmap);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (has_rmap)
		xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}