xref: /OK3568_Linux_fs/kernel/fs/xfs/scrub/attr.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2017 Oracle.  All Rights Reserved.
4*4882a593Smuzhiyun  * Author: Darrick J. Wong <darrick.wong@oracle.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun #include "xfs.h"
7*4882a593Smuzhiyun #include "xfs_fs.h"
8*4882a593Smuzhiyun #include "xfs_shared.h"
9*4882a593Smuzhiyun #include "xfs_format.h"
10*4882a593Smuzhiyun #include "xfs_trans_resv.h"
11*4882a593Smuzhiyun #include "xfs_mount.h"
12*4882a593Smuzhiyun #include "xfs_log_format.h"
13*4882a593Smuzhiyun #include "xfs_inode.h"
14*4882a593Smuzhiyun #include "xfs_da_format.h"
15*4882a593Smuzhiyun #include "xfs_da_btree.h"
16*4882a593Smuzhiyun #include "xfs_attr.h"
17*4882a593Smuzhiyun #include "xfs_attr_leaf.h"
18*4882a593Smuzhiyun #include "scrub/scrub.h"
19*4882a593Smuzhiyun #include "scrub/common.h"
20*4882a593Smuzhiyun #include "scrub/dabtree.h"
21*4882a593Smuzhiyun #include "scrub/attr.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun  * Allocate enough memory to hold an attr value and attr block bitmaps,
25*4882a593Smuzhiyun  * reallocating the buffer if necessary.  Buffer contents are not preserved
26*4882a593Smuzhiyun  * across a reallocation.
27*4882a593Smuzhiyun  */
28*4882a593Smuzhiyun int
xchk_setup_xattr_buf(struct xfs_scrub * sc,size_t value_size,xfs_km_flags_t flags)29*4882a593Smuzhiyun xchk_setup_xattr_buf(
30*4882a593Smuzhiyun 	struct xfs_scrub	*sc,
31*4882a593Smuzhiyun 	size_t			value_size,
32*4882a593Smuzhiyun 	xfs_km_flags_t		flags)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun 	size_t			sz;
35*4882a593Smuzhiyun 	struct xchk_xattr_buf	*ab = sc->buf;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	/*
38*4882a593Smuzhiyun 	 * We need enough space to read an xattr value from the file or enough
39*4882a593Smuzhiyun 	 * space to hold three copies of the xattr free space bitmap.  We don't
40*4882a593Smuzhiyun 	 * need the buffer space for both purposes at the same time.
41*4882a593Smuzhiyun 	 */
42*4882a593Smuzhiyun 	sz = 3 * sizeof(long) * BITS_TO_LONGS(sc->mp->m_attr_geo->blksize);
43*4882a593Smuzhiyun 	sz = max_t(size_t, sz, value_size);
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	/*
46*4882a593Smuzhiyun 	 * If there's already a buffer, figure out if we need to reallocate it
47*4882a593Smuzhiyun 	 * to accommodate a larger size.
48*4882a593Smuzhiyun 	 */
49*4882a593Smuzhiyun 	if (ab) {
50*4882a593Smuzhiyun 		if (sz <= ab->sz)
51*4882a593Smuzhiyun 			return 0;
52*4882a593Smuzhiyun 		kmem_free(ab);
53*4882a593Smuzhiyun 		sc->buf = NULL;
54*4882a593Smuzhiyun 	}
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	/*
57*4882a593Smuzhiyun 	 * Don't zero the buffer upon allocation to avoid runtime overhead.
58*4882a593Smuzhiyun 	 * All users must be careful never to read uninitialized contents.
59*4882a593Smuzhiyun 	 */
60*4882a593Smuzhiyun 	ab = kmem_alloc_large(sizeof(*ab) + sz, flags);
61*4882a593Smuzhiyun 	if (!ab)
62*4882a593Smuzhiyun 		return -ENOMEM;
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 	ab->sz = sz;
65*4882a593Smuzhiyun 	sc->buf = ab;
66*4882a593Smuzhiyun 	return 0;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun /* Set us up to scrub an inode's extended attributes. */
70*4882a593Smuzhiyun int
xchk_setup_xattr(struct xfs_scrub * sc,struct xfs_inode * ip)71*4882a593Smuzhiyun xchk_setup_xattr(
72*4882a593Smuzhiyun 	struct xfs_scrub	*sc,
73*4882a593Smuzhiyun 	struct xfs_inode	*ip)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	int			error;
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun 	/*
78*4882a593Smuzhiyun 	 * We failed to get memory while checking attrs, so this time try to
79*4882a593Smuzhiyun 	 * get all the memory we're ever going to need.  Allocate the buffer
80*4882a593Smuzhiyun 	 * without the inode lock held, which means we can sleep.
81*4882a593Smuzhiyun 	 */
82*4882a593Smuzhiyun 	if (sc->flags & XCHK_TRY_HARDER) {
83*4882a593Smuzhiyun 		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, 0);
84*4882a593Smuzhiyun 		if (error)
85*4882a593Smuzhiyun 			return error;
86*4882a593Smuzhiyun 	}
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun 	return xchk_setup_inode_contents(sc, ip, 0);
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
/* Extended Attributes */

/*
 * Scrub state carried through an xattr list walk.  The list context must
 * remain embedded (not a pointer): xchk_xattr_listent recovers this struct
 * from the context argument via container_of().
 */
struct xchk_xattr {
	struct xfs_attr_list_context	context;
	struct xfs_scrub		*sc;	/* scrub-wide state */
};

/*
 * Check that an extended attribute key can be looked up by hash.
 *
 * We use the XFS attribute list iterator (i.e. xfs_attr_list_ilocked)
 * to call this function for every attribute key in an inode.  Once
 * we're here, we load the attribute value to see if any errors happen,
 * or if we get more or less data than we expected.
 */
static void
xchk_xattr_listent(
	struct xfs_attr_list_context	*context,
	int				flags,
	unsigned char			*name,
	int				namelen,
	int				valuelen)
{
	struct xchk_xattr		*sx;
	struct xfs_da_args		args = { NULL };
	int				error = 0;

	/* Recover our scrub state from the embedded list context. */
	sx = container_of(context, struct xchk_xattr, context);

	/* Abort the walk if a fatal signal (or similar) is pending. */
	if (xchk_should_terminate(sx->sc, &error)) {
		context->seen_enough = error;
		return;
	}

	if (flags & XFS_ATTR_INCOMPLETE) {
		/* Incomplete attr key, just mark the inode for preening. */
		xchk_ino_set_preen(sx->sc, context->dp->i_ino);
		return;
	}

	/* Does this name make sense? */
	if (!xfs_attr_namecheck(name, namelen)) {
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK, args.blkno);
		return;
	}

	/*
	 * Try to allocate enough memory to extract the attr value.  If that
	 * doesn't work, we overload the seen_enough variable to convey
	 * the error message back to the main scrub function.  -ENOMEM is
	 * converted to -EDEADLOCK, which (per xchk_setup_xattr) causes the
	 * scrub to be retried with XCHK_TRY_HARDER set and the maximum-size
	 * buffer preallocated.
	 */
	error = xchk_setup_xattr_buf(sx->sc, valuelen, KM_MAYFAIL);
	if (error == -ENOMEM)
		error = -EDEADLOCK;
	if (error) {
		context->seen_enough = error;
		return;
	}

	/* Set up a by-name lookup of this key through the hash index. */
	args.op_flags = XFS_DA_OP_NOTIME;
	args.attr_filter = flags & XFS_ATTR_NSP_ONDISK_MASK;
	args.geo = context->dp->i_mount->m_attr_geo;
	args.whichfork = XFS_ATTR_FORK;
	args.dp = context->dp;
	args.name = name;
	args.namelen = namelen;
	args.hashval = xfs_da_hashname(args.name, args.namelen);
	args.trans = context->tp;
	args.value = xchk_xattr_valuebuf(sx->sc);
	args.valuelen = valuelen;

	error = xfs_attr_get_ilocked(&args);
	/* ENODATA means the hash lookup failed and the attr is bad */
	if (error == -ENODATA)
		error = -EFSCORRUPTED;
	if (!xchk_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
			&error))
		goto fail_xref;
	/* The retrieved value must be exactly as long as the key claimed. */
	if (args.valuelen != valuelen)
		xchk_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
					     args.blkno);
fail_xref:
	/* Once corruption is recorded, stop iterating keys. */
	if (sx->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		context->seen_enough = 1;
	return;
}

178*4882a593Smuzhiyun /*
179*4882a593Smuzhiyun  * Mark a range [start, start+len) in this map.  Returns true if the
180*4882a593Smuzhiyun  * region was free, and false if there's a conflict or a problem.
181*4882a593Smuzhiyun  *
182*4882a593Smuzhiyun  * Within a char, the lowest bit of the char represents the byte with
183*4882a593Smuzhiyun  * the smallest address
184*4882a593Smuzhiyun  */
185*4882a593Smuzhiyun STATIC bool
xchk_xattr_set_map(struct xfs_scrub * sc,unsigned long * map,unsigned int start,unsigned int len)186*4882a593Smuzhiyun xchk_xattr_set_map(
187*4882a593Smuzhiyun 	struct xfs_scrub	*sc,
188*4882a593Smuzhiyun 	unsigned long		*map,
189*4882a593Smuzhiyun 	unsigned int		start,
190*4882a593Smuzhiyun 	unsigned int		len)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun 	unsigned int		mapsize = sc->mp->m_attr_geo->blksize;
193*4882a593Smuzhiyun 	bool			ret = true;
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	if (start >= mapsize)
196*4882a593Smuzhiyun 		return false;
197*4882a593Smuzhiyun 	if (start + len > mapsize) {
198*4882a593Smuzhiyun 		len = mapsize - start;
199*4882a593Smuzhiyun 		ret = false;
200*4882a593Smuzhiyun 	}
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	if (find_next_bit(map, mapsize, start) < start + len)
203*4882a593Smuzhiyun 		ret = false;
204*4882a593Smuzhiyun 	bitmap_set(map, start, len);
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	return ret;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun /*
210*4882a593Smuzhiyun  * Check the leaf freemap from the usage bitmap.  Returns false if the
211*4882a593Smuzhiyun  * attr freemap has problems or points to used space.
212*4882a593Smuzhiyun  */
213*4882a593Smuzhiyun STATIC bool
xchk_xattr_check_freemap(struct xfs_scrub * sc,unsigned long * map,struct xfs_attr3_icleaf_hdr * leafhdr)214*4882a593Smuzhiyun xchk_xattr_check_freemap(
215*4882a593Smuzhiyun 	struct xfs_scrub		*sc,
216*4882a593Smuzhiyun 	unsigned long			*map,
217*4882a593Smuzhiyun 	struct xfs_attr3_icleaf_hdr	*leafhdr)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun 	unsigned long			*freemap = xchk_xattr_freemap(sc);
220*4882a593Smuzhiyun 	unsigned long			*dstmap = xchk_xattr_dstmap(sc);
221*4882a593Smuzhiyun 	unsigned int			mapsize = sc->mp->m_attr_geo->blksize;
222*4882a593Smuzhiyun 	int				i;
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	/* Construct bitmap of freemap contents. */
225*4882a593Smuzhiyun 	bitmap_zero(freemap, mapsize);
226*4882a593Smuzhiyun 	for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
227*4882a593Smuzhiyun 		if (!xchk_xattr_set_map(sc, freemap,
228*4882a593Smuzhiyun 				leafhdr->freemap[i].base,
229*4882a593Smuzhiyun 				leafhdr->freemap[i].size))
230*4882a593Smuzhiyun 			return false;
231*4882a593Smuzhiyun 	}
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	/* Look for bits that are set in freemap and are marked in use. */
234*4882a593Smuzhiyun 	return bitmap_and(dstmap, freemap, map, mapsize) == 0;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun 
/*
 * Check this leaf entry's relations to everything else.
 * Adds the number of bytes used for the name/value data to *usedbytes.
 */
STATIC void
xchk_xattr_entry(
	struct xchk_da_btree		*ds,
	int				level,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	unsigned int			*usedbytes,
	__u32				*last_hashval)
{
	struct xfs_mount		*mp = ds->state->mp;
	unsigned long			*usedmap = xchk_xattr_usedmap(ds->sc);
	char				*name_end;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	unsigned int			nameidx;
	unsigned int			namesize;

	/* Entry padding must be zero. */
	if (ent->pad2 != 0)
		xchk_da_set_corrupt(ds, level);

	/* Hash values in order? */
	if (be32_to_cpu(ent->hashval) < *last_hashval)
		xchk_da_set_corrupt(ds, level);
	*last_hashval = be32_to_cpu(ent->hashval);

	/* The name must live in the name/value region, inside the block. */
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused ||
	    nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		return;
	}

	/* Check the name information. */
	if (ent->flags & XFS_ATTR_LOCAL) {
		/* Local attr: name and value both stored in this leaf. */
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		/* Remote attr: only the name is here; value lives elsewhere. */
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0 || rentry->valueblk == 0)
			xchk_da_set_corrupt(ds, level);
	}
	/* The name/value data must not run off the end of the block. */
	if (name_end > buf_end)
		xchk_da_set_corrupt(ds, level);

	/* The name/value region must not overlap anything already marked. */
	if (!xchk_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
		xchk_da_set_corrupt(ds, level);
	/* Only account the bytes if no corruption has been found. */
	if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		*usedbytes += namesize;
}

/* Scrub an attribute leaf. */
STATIC int
xchk_xattr_block(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_buf			*bp = blk->bp;
	xfs_dablk_t			*last_checked = ds->private;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*ent;
	struct xfs_attr_leaf_entry	*entries;
	unsigned long			*usedmap;
	char				*buf_end;
	size_t				off;
	__u32				last_hashval = 0;
	unsigned int			usedbytes = 0;
	unsigned int			hdrsize;
	int				i;
	int				error;

	/* Don't check the same block twice in a row. */
	if (*last_checked == blk->blkno)
		return 0;

	/*
	 * Allocate memory for block usage checking.  An -ENOMEM here is
	 * reported as -EDEADLOCK so the scrub can be retried with the
	 * maximum-size buffer preallocated (see xchk_setup_xattr).
	 */
	error = xchk_setup_xattr_buf(ds->sc, 0, KM_MAYFAIL);
	if (error == -ENOMEM)
		return -EDEADLOCK;
	if (error)
		return error;
	usedmap = xchk_xattr_usedmap(ds->sc);

	*last_checked = blk->blkno;
	bitmap_zero(usedmap, mp->m_attr_geo->blksize);

	/* Check all the padding. */
	if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
		/* v5 (CRC-enabled) leaf header has extra pad fields. */
		struct xfs_attr3_leafblock	*leaf = bp->b_addr;

		if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
		    leaf->hdr.info.hdr.pad != 0)
			xchk_da_set_corrupt(ds, level);
	} else {
		if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
			xchk_da_set_corrupt(ds, level);
	}

	/* Check the leaf header */
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	hdrsize = xfs_attr3_leaf_hdr_size(leaf);

	/* Usage and the start of the name area must fit in the block. */
	if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused > mp->m_attr_geo->blksize)
		xchk_da_set_corrupt(ds, level);
	if (leafhdr.firstused < hdrsize)
		xchk_da_set_corrupt(ds, level);
	/* Reserve the header bytes in the usage map. */
	if (!xchk_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
		xchk_da_set_corrupt(ds, level);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* The entry array must not run into the name/value region. */
	entries = xfs_attr3_leaf_entryp(leaf);
	if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
		xchk_da_set_corrupt(ds, level);

	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
		/* Mark the leaf entry itself. */
		off = (char *)ent - (char *)leaf;
		if (!xchk_xattr_set_map(ds->sc, usedmap, off,
				sizeof(xfs_attr_leaf_entry_t))) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}

		/* Check the entry and nameval. */
		xchk_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
				ent, i, &usedbytes, &last_hashval);

		if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out;
	}

	/* The freemap must not overlap anything that is in use. */
	if (!xchk_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
		xchk_da_set_corrupt(ds, level);

	/* Header byte accounting must match what we counted. */
	if (leafhdr.usedbytes != usedbytes)
		xchk_da_set_corrupt(ds, level);

out:
	return 0;
}

/* Scrub an attribute btree record. */
STATIC int
xchk_xattr_rec(
	struct xchk_da_btree		*ds,
	int				level)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote	*rentry;
	struct xfs_buf			*bp;
	struct xfs_attr_leaf_entry	*ent;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	int				nameidx;
	int				hdrsize;
	unsigned int			badflags;
	int				error;

	ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC);

	ent = xfs_attr3_leaf_entryp(blk->bp->b_addr) + blk->index;

	/* Check the whole block, if necessary. */
	error = xchk_xattr_block(ds, level);
	if (error)
		goto out;
	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check the hash of the entry. */
	error = xchk_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Find the attr entry's location. */
	bp = blk->bp;
	hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
	nameidx = be16_to_cpu(ent->nameidx);
	/* Name offset must land between the header and the block end. */
	if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
		xchk_da_set_corrupt(ds, level);
		goto out;
	}

	/* Retrieve the entry and check it. */
	hash = be32_to_cpu(ent->hashval);
	/* Only these four on-disk flags are ever valid. */
	badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
			XFS_ATTR_INCOMPLETE);
	if ((ent->flags & badflags) != 0)
		xchk_da_set_corrupt(ds, level);
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = (struct xfs_attr_leaf_name_local *)
				(((char *)bp->b_addr) + nameidx);
		if (lentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
	} else {
		rentry = (struct xfs_attr_leaf_name_remote *)
				(((char *)bp->b_addr) + nameidx);
		if (rentry->namelen <= 0) {
			xchk_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
	}
	/* The recorded hash must match the hash of the stored name. */
	if (calc_hash != hash)
		xchk_da_set_corrupt(ds, level);

out:
	return error;
}

/* Scrub the extended attribute metadata. */
int
xchk_xattr(
	struct xfs_scrub		*sc)
{
	struct xchk_xattr		sx;
	xfs_dablk_t			last_checked = -1U;
	int				error = 0;

	/* No attr fork?  Nothing to scrub. */
	if (!xfs_inode_hasattr(sc->ip))
		return -ENOENT;

	memset(&sx, 0, sizeof(sx));
	/* Check attribute tree structure */
	error = xchk_da_btree(sc, XFS_ATTR_FORK, xchk_xattr_rec,
			&last_checked);
	if (error)
		goto out;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check that every attr key can also be looked up by hash. */
	sx.context.dp = sc->ip;
	sx.context.resynch = 1;
	sx.context.put_listent = xchk_xattr_listent;
	sx.context.tp = sc->tp;
	sx.context.allow_incomplete = true;
	sx.sc = sc;

	/*
	 * Look up every xattr in this file by name.
	 *
	 * Use the backend implementation of xfs_attr_list to call
	 * xchk_xattr_listent on every attribute key in this inode.
	 * In other words, we use the same iterator/callback mechanism
	 * that listattr uses to scrub extended attributes, though in our
	 * _listent function, we check the value of the attribute.
	 *
	 * The VFS only locks i_rwsem when modifying attrs, so keep all
	 * three locks held because that's the only way to ensure we're
	 * the only thread poking into the da btree.  We traverse the da
	 * btree while holding a leaf buffer locked for the xattr name
	 * iteration, which doesn't really follow the usual buffer
	 * locking order.
	 */
	error = xfs_attr_list_ilocked(&sx.context);
	if (!xchk_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
		goto out;

	/* Did our listent function try to return any errors? */
	if (sx.context.seen_enough < 0)
		error = sx.context.seen_enough;
out:
	return error;
}
527