// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Set us up to scrub parents. */
int
xchk_setup_parent(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	return xchk_setup_inode_contents(sc, ip, 0);
}

/* Parent pointers */

/* Look for an entry in a parent pointing to this inode. */

struct xchk_parent_ctx {
	struct dir_context	dc;		/* readdir context */
	struct xfs_scrub	*sc;		/* scrub state */
	xfs_ino_t		ino;		/* inode number to look for */
	xfs_nlink_t		nlink;		/* how many entries point at it */
	bool			cancelled;	/* saw a fatal signal? */
};

/* Look for a single entry in a directory pointing to an inode. */
STATIC int
xchk_parent_actor(
	struct dir_context	*dc,
	const char		*name,
	int			namelen,
	loff_t			pos,
	u64			ino,
	unsigned		type)
{
	struct xchk_parent_ctx	*spc;
	int			error = 0;

	spc = container_of(dc, struct xchk_parent_ctx, dc);
	if (spc->ino == ino)
		spc->nlink++;

	/*
	 * If we're facing a fatal signal, bail out. Store the cancellation
	 * status separately because the VFS readdir code squashes error codes
	 * into short directory reads.
	 */
	if (xchk_should_terminate(spc->sc, &error))
		spc->cancelled = true;

	return error;
}

/* Count the number of dentries in the parent dir that point to this inode. */
STATIC int
xchk_parent_count_parent_dentries(
	struct xfs_scrub	*sc,
	struct xfs_inode	*parent,
	xfs_nlink_t		*nlink)
{
	struct xchk_parent_ctx	spc = {
		.dc.actor	= xchk_parent_actor,
		.ino		= sc->ip->i_ino,
		.sc		= sc,
	};
	size_t			bufsize;
	loff_t			oldpos;
	uint			lock_mode;
	int			error = 0;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there. This is
	 * how we guarantee that the parent's extent map has been loaded,
	 * if there is one.
	 */
	lock_mode = xfs_ilock_data_map_shared(parent);
	if (parent->i_df.if_nextents > 0)
		error = xfs_dir3_data_readahead(parent, 0, 0);
	xfs_iunlock(parent, lock_mode);
	if (error)
		return error;

	/*
	 * Iterate the parent dir to confirm that there is
	 * exactly one entry pointing back to the inode being
	 * scanned.
	 */
	bufsize = (size_t)min_t(loff_t, XFS_READDIR_BUFSIZE,
			parent->i_d.di_size);
	oldpos = 0;
	while (true) {
		error = xfs_readdir(sc->tp, parent, &spc.dc, bufsize);
		if (error)
			goto out;
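		/*
		 * The actor saw a fatal signal; hand the cancellation back
		 * to our caller as an error code.
		 */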
		if (spc.cancelled) {
			error = -EAGAIN;
			goto out;
		}
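		/* If the position didn't advance, we've read the whole dir. */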
		if (oldpos == spc.dc.pos)
			break;
		oldpos = spc.dc.pos;
	}
	*nlink = spc.nlink;
out:
	return error;
}

/*
 * Given the inode number of the alleged parent of the inode being
 * scrubbed, try to validate that the parent has exactly one directory
 * entry pointing back to the inode being scrubbed.
 */
STATIC int
xchk_parent_validate(
	struct xfs_scrub	*sc,
	xfs_ino_t		dnum,
	bool			*try_again)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_inode	*dp = NULL;
	xfs_nlink_t		expected_nlink;
	xfs_nlink_t		nlink;
	int			error = 0;

	*try_again = false;

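	/* Nothing to check if corruption has already been flagged. */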
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* '..' must not point to ourselves. */
	if (sc->ip->i_ino == dnum) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out;
	}

	/*
	 * If we're an unlinked directory, the parent /won't/ have a link
	 * to us. Otherwise, it should have one link.
	 */
	expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1;

	/*
	 * Grab this parent inode. We release the inode before we
	 * cancel the scrub transaction. Since we don't know a priori
	 * that releasing the inode won't trigger eofblocks cleanup
	 * (which allocates what would be a nested transaction) if the
	 * parent pointer erroneously points to a file, we can't use
	 * DONTCACHE here because DONTCACHE inodes can trigger immediate
	 * inactive cleanup of the inode.
	 *
	 * If _iget returns -EINVAL then the parent inode number is garbage
	 * and the directory is corrupt. If the _iget returns -EFSCORRUPTED
	 * or -EFSBADCRC then the parent is corrupt which is a cross
	 * referencing error. Any other error is an operational error.
	 */
	error = xfs_iget(mp, sc->tp, dnum, XFS_IGET_UNTRUSTED, 0, &dp);
	if (error == -EINVAL) {
		error = -EFSCORRUPTED;
		xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error);
		goto out;
	}
	if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
		goto out;
	if (dp == sc->ip || !S_ISDIR(VFS_I(dp)->i_mode)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out_rele;
	}

	/*
	 * We prefer to keep the inode locked while we lock and search
	 * its alleged parent for a forward reference. If we can grab
	 * the iolock, validate the pointers and we're done. We must
	 * use nowait here to avoid an ABBA deadlock on the parent and
	 * the child inodes.
	 */
	if (xfs_ilock_nowait(dp, XFS_IOLOCK_SHARED)) {
		error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
		if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			goto out_unlock;
		if (nlink != expected_nlink)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out_unlock;
	}

	/*
	 * The game changes if we get here. We failed to lock the parent,
	 * so we're going to try to verify both pointers while only holding
	 * one lock so as to avoid deadlocking with something that's actually
	 * trying to traverse down the directory tree.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
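	/*
	 * Take the parent's IOLOCK without holding the child's locks; if
	 * the helper can't get it, bail out rather than risk a deadlock.
	 */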
	error = xchk_ilock_inverted(dp, XFS_IOLOCK_SHARED);
	if (error)
		goto out_rele;

	/* Go looking for our dentry. */
	error = xchk_parent_count_parent_dentries(sc, dp, &nlink);
	if (!xchk_fblock_xref_process_error(sc, XFS_DATA_FORK, 0, &error))
		goto out_unlock;

	/* Drop the parent lock, relock this inode. */
	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
	error = xchk_ilock_inverted(sc->ip, XFS_IOLOCK_EXCL);
	if (error)
		goto out_rele;
	sc->ilock_flags = XFS_IOLOCK_EXCL;

	/*
	 * If we're an unlinked directory, the parent /won't/ have a link
	 * to us. Otherwise, it should have one link. We have to re-set
	 * it here because we dropped the lock on sc->ip.
	 */
	expected_nlink = VFS_I(sc->ip)->i_nlink == 0 ? 0 : 1;

	/* Look up '..' to see if the inode changed. */
	error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
		goto out_rele;

	/* Drat, parent changed. Try again! */
	if (dnum != dp->i_ino) {
		xfs_irele(dp);
		*try_again = true;
		return 0;
	}
	xfs_irele(dp);

	/*
	 * '..' didn't change, so check that there was only one entry
	 * for us in the parent.
	 */
	if (nlink != expected_nlink)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
	return error;

out_unlock:
	xfs_iunlock(dp, XFS_IOLOCK_SHARED);
out_rele:
	xfs_irele(dp);
out:
	return error;
}

/* Scrub a parent pointer. */
int
xchk_parent(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_ino_t		dnum;
	bool			try_again;
	int			tries = 0;
	int			error = 0;

	/*
	 * If we're a directory, check that the '..' link points up to
	 * a directory that has one entry pointing to us.
	 */
	if (!S_ISDIR(VFS_I(sc->ip)->i_mode))
		return -ENOENT;

	/* We're not a special inode, are we? */
	if (!xfs_verify_dir_ino(mp, sc->ip->i_ino)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out;
	}

	/*
	 * The VFS grabs a read or write lock via i_rwsem before it reads
	 * or writes to a directory. If we've gotten this far we've
	 * already obtained IOLOCK_EXCL, which (since 4.10) is the same as
	 * getting a write lock on i_rwsem. Therefore, it is safe for us
	 * to drop the ILOCK here in order to do directory lookups.
	 */
	sc->ilock_flags &= ~(XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL);
	xfs_iunlock(sc->ip, XFS_ILOCK_EXCL | XFS_MMAPLOCK_EXCL);

	/* Look up '..' */
	error = xfs_dir_lookup(sc->tp, sc->ip, &xfs_name_dotdot, &dnum, NULL);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
		goto out;
	if (!xfs_verify_dir_ino(mp, dnum)) {
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out;
	}

	/* Is this the root dir? Then '..' must point to itself. */
	if (sc->ip == mp->m_rootip) {
		if (sc->ip->i_ino != mp->m_sb.sb_rootino ||
		    sc->ip->i_ino != dnum)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
		goto out;
	}

	do {
		error = xchk_parent_validate(sc, dnum, &try_again);
		if (error)
			goto out;
	} while (try_again && ++tries < 20);

	/*
	 * We gave it our best shot but failed, so mark this scrub
	 * incomplete. Userspace can decide if it wants to try again.
	 */
	if (try_again && tries == 20)
		xchk_set_incomplete(sc);
out:
	/*
	 * If we failed to lock the parent inode even after a retry, just mark
	 * this scrub incomplete and return.
	 */
	if ((sc->flags & XCHK_TRY_HARDER) && error == -EDEADLOCK) {
		error = 0;
		xchk_set_incomplete(sc);
	}
	return error;
}