// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_error.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_quota.h"

/*
 * This structure is used during recovery to record the buf log items which
 * have been canceled and should not be replayed.
 */
struct xfs_buf_cancel {
	xfs_daddr_t		bc_blkno;
	uint			bc_len;
	int			bc_refcount;
	struct list_head	bc_list;
};

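/*
 * Cancel records are kept in a small hash table hung off the log
 * (l_buf_cancel_table). Both the lookup and the insert below bucket purely
 * on the start block via XLOG_BUF_CANCEL_BUCKET() (a sketch of the expected
 * behaviour, assuming the definition in xfs_log_priv.h is a simple modulo
 * hash of blkno), so records of different lengths at the same start block
 * share a bucket and the search must compare both bc_blkno and bc_len.
 */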
static struct xfs_buf_cancel *
xlog_find_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct list_head	*bucket;
	struct xfs_buf_cancel	*bcp;

	if (!log->l_buf_cancel_table)
		return NULL;

	bucket = XLOG_BUF_CANCEL_BUCKET(log, blkno);
	list_for_each_entry(bcp, bucket, bc_list) {
		if (bcp->bc_blkno == blkno && bcp->bc_len == len)
			return bcp;
	}

	return NULL;
}

static bool
xlog_add_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	/*
	 * If we find an existing cancel record, this indicates that the buffer
	 * was cancelled multiple times.  To ensure that during pass 2 we keep
	 * the record in the table until we reach its last occurrence in the
	 * log, a reference count is kept to tell how many times we expect to
	 * see this record during the second pass.
	 */
	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (bcp) {
		bcp->bc_refcount++;
		return false;
	}

	bcp = kmem_alloc(sizeof(struct xfs_buf_cancel), 0);
	bcp->bc_blkno = blkno;
	bcp->bc_len = len;
	bcp->bc_refcount = 1;
	list_add_tail(&bcp->bc_list, XLOG_BUF_CANCEL_BUCKET(log, blkno));
	return true;
}

/*
 * Check if there is an entry for blkno, len in the buffer cancel record table.
 */
bool
xlog_is_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	return xlog_find_buffer_cancelled(log, blkno, len) != NULL;
}

/*
 * Check if there is an entry for blkno, len in the buffer cancel record table,
 * and decrement the reference count on it if there is one.
 *
 * Remove the cancel record once the refcount hits zero, so that if the same
 * buffer is re-used again after its last cancellation we actually replay the
 * changes made at that point.
 */
static bool
xlog_put_buffer_cancelled(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len)
{
	struct xfs_buf_cancel	*bcp;

	bcp = xlog_find_buffer_cancelled(log, blkno, len);
	if (!bcp) {
		ASSERT(0);
		return false;
	}

	if (--bcp->bc_refcount == 0) {
		list_del(&bcp->bc_list);
		kmem_free(bcp);
	}
	return true;
}

/* log buffer item recovery */

/*
 * Sort buffer items for log recovery.  Most buffer items should end up on the
 * buffer list and are recovered first, with the following exceptions:
 *
 * 1. XFS_BLF_CANCEL buffers must be processed last because some log items
 *    might depend on the incore cancellation record, and replaying a cancelled
 *    buffer item can remove the incore record.
 *
 * 2. XFS_BLF_INODE_BUF buffers are handled after most regular items so that
 *    we replay di_next_unlinked only after flushing the inode 'free' state
 *    to the inode buffer.
 *
 * See xlog_recover_reorder_trans for more details.
 */
STATIC enum xlog_recover_reorder
xlog_recover_buf_reorder(
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	if (buf_f->blf_flags & XFS_BLF_CANCEL)
		return XLOG_REORDER_CANCEL_LIST;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		return XLOG_REORDER_INODE_BUFFER_LIST;
	return XLOG_REORDER_BUFFER_LIST;
}

STATIC void
xlog_recover_buf_ra_pass2(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;

	xlog_buf_readahead(log, buf_f->blf_blkno, buf_f->blf_len, NULL);
}

/*
 * Build up the table of buf cancel records so that we don't replay cancelled
 * data in the second pass.
 */
static int
xlog_recover_buf_commit_pass1(
	struct xlog			*log,
	struct xlog_recover_item	*item)
{
	struct xfs_buf_log_format	*bf = item->ri_buf[0].i_addr;

	if (!xfs_buf_log_check_iovec(&item->ri_buf[0])) {
		xfs_err(log->l_mp, "bad buffer log item size (%d)",
				item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	if (!(bf->blf_flags & XFS_BLF_CANCEL))
		trace_xfs_log_recover_buf_not_cancel(log, bf);
	else if (xlog_add_buffer_cancelled(log, bf->blf_blkno, bf->blf_len))
		trace_xfs_log_recover_buf_cancel_add(log, bf);
	else
		trace_xfs_log_recover_buf_cancel_ref_inc(log, bf);
	return 0;
}

/*
 * Validate that the recovered buffer is of the correct type and attach the
 * appropriate buffer operations to it for writeback. Magic numbers are in a
 * few places:
 *	the first 16 bits of the buffer (inode buffer, dquot buffer),
 *	the first 32 bits of the buffer (most blocks),
 *	inside a struct xfs_da_blkinfo at the start of the buffer.
 */
static void
xlog_recover_validate_buf_type(
	struct xfs_mount		*mp,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	struct xfs_da_blkinfo		*info = bp->b_addr;
	uint32_t			magic32;
	uint16_t			magic16;
	uint16_t			magicda;
	char				*warnmsg = NULL;

	/*
	 * We can only do post recovery validation on items on CRC enabled
	 * filesystems as we need to know when the buffer was written to be
	 * able to determine if we should have replayed the item. If we replay
	 * old metadata over a newer buffer, then it will enter a temporarily
	 * inconsistent state resulting in verification failures. Hence for now
	 * just avoid the verification stage for non-crc filesystems.
	 */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return;

	magic32 = be32_to_cpu(*(__be32 *)bp->b_addr);
	magic16 = be16_to_cpu(*(__be16 *)bp->b_addr);
	magicda = be16_to_cpu(info->magic);
	switch (xfs_blft_from_flags(buf_f)) {
	case XFS_BLFT_BTREE_BUF:
		switch (magic32) {
		case XFS_ABTB_CRC_MAGIC:
		case XFS_ABTB_MAGIC:
			bp->b_ops = &xfs_bnobt_buf_ops;
			break;
		case XFS_ABTC_CRC_MAGIC:
		case XFS_ABTC_MAGIC:
			bp->b_ops = &xfs_cntbt_buf_ops;
			break;
		case XFS_IBT_CRC_MAGIC:
		case XFS_IBT_MAGIC:
			bp->b_ops = &xfs_inobt_buf_ops;
			break;
		case XFS_FIBT_CRC_MAGIC:
		case XFS_FIBT_MAGIC:
			bp->b_ops = &xfs_finobt_buf_ops;
			break;
		case XFS_BMAP_CRC_MAGIC:
		case XFS_BMAP_MAGIC:
			bp->b_ops = &xfs_bmbt_buf_ops;
			break;
		case XFS_RMAP_CRC_MAGIC:
			bp->b_ops = &xfs_rmapbt_buf_ops;
			break;
		case XFS_REFC_CRC_MAGIC:
			bp->b_ops = &xfs_refcountbt_buf_ops;
			break;
		default:
			warnmsg = "Bad btree block magic!";
			break;
		}
		break;
	case XFS_BLFT_AGF_BUF:
		if (magic32 != XFS_AGF_MAGIC) {
			warnmsg = "Bad AGF block magic!";
			break;
		}
		bp->b_ops = &xfs_agf_buf_ops;
		break;
	case XFS_BLFT_AGFL_BUF:
		if (magic32 != XFS_AGFL_MAGIC) {
			warnmsg = "Bad AGFL block magic!";
			break;
		}
		bp->b_ops = &xfs_agfl_buf_ops;
		break;
	case XFS_BLFT_AGI_BUF:
		if (magic32 != XFS_AGI_MAGIC) {
			warnmsg = "Bad AGI block magic!";
			break;
		}
		bp->b_ops = &xfs_agi_buf_ops;
		break;
	case XFS_BLFT_UDQUOT_BUF:
	case XFS_BLFT_PDQUOT_BUF:
	case XFS_BLFT_GDQUOT_BUF:
#ifdef CONFIG_XFS_QUOTA
		if (magic16 != XFS_DQUOT_MAGIC) {
			warnmsg = "Bad DQUOT block magic!";
			break;
		}
		bp->b_ops = &xfs_dquot_buf_ops;
#else
		xfs_alert(mp,
	"Trying to recover dquots without QUOTA support built in!");
		ASSERT(0);
#endif
		break;
	case XFS_BLFT_DINO_BUF:
		if (magic16 != XFS_DINODE_MAGIC) {
			warnmsg = "Bad INODE block magic!";
			break;
		}
		bp->b_ops = &xfs_inode_buf_ops;
		break;
	case XFS_BLFT_SYMLINK_BUF:
		if (magic32 != XFS_SYMLINK_MAGIC) {
			warnmsg = "Bad symlink block magic!";
			break;
		}
		bp->b_ops = &xfs_symlink_buf_ops;
		break;
	case XFS_BLFT_DIR_BLOCK_BUF:
		if (magic32 != XFS_DIR2_BLOCK_MAGIC &&
		    magic32 != XFS_DIR3_BLOCK_MAGIC) {
			warnmsg = "Bad dir block magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_block_buf_ops;
		break;
	case XFS_BLFT_DIR_DATA_BUF:
		if (magic32 != XFS_DIR2_DATA_MAGIC &&
		    magic32 != XFS_DIR3_DATA_MAGIC) {
			warnmsg = "Bad dir data magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_data_buf_ops;
		break;
	case XFS_BLFT_DIR_FREE_BUF:
		if (magic32 != XFS_DIR2_FREE_MAGIC &&
		    magic32 != XFS_DIR3_FREE_MAGIC) {
			warnmsg = "Bad dir3 free magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_free_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAF1_BUF:
		if (magicda != XFS_DIR2_LEAF1_MAGIC &&
		    magicda != XFS_DIR3_LEAF1_MAGIC) {
			warnmsg = "Bad dir leaf1 magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leaf1_buf_ops;
		break;
	case XFS_BLFT_DIR_LEAFN_BUF:
		if (magicda != XFS_DIR2_LEAFN_MAGIC &&
		    magicda != XFS_DIR3_LEAFN_MAGIC) {
			warnmsg = "Bad dir leafn magic!";
			break;
		}
		bp->b_ops = &xfs_dir3_leafn_buf_ops;
		break;
	case XFS_BLFT_DA_NODE_BUF:
		if (magicda != XFS_DA_NODE_MAGIC &&
		    magicda != XFS_DA3_NODE_MAGIC) {
			warnmsg = "Bad da node magic!";
			break;
		}
		bp->b_ops = &xfs_da3_node_buf_ops;
		break;
	case XFS_BLFT_ATTR_LEAF_BUF:
		if (magicda != XFS_ATTR_LEAF_MAGIC &&
		    magicda != XFS_ATTR3_LEAF_MAGIC) {
			warnmsg = "Bad attr leaf magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_leaf_buf_ops;
		break;
	case XFS_BLFT_ATTR_RMT_BUF:
		if (magic32 != XFS_ATTR3_RMT_MAGIC) {
			warnmsg = "Bad attr remote magic!";
			break;
		}
		bp->b_ops = &xfs_attr3_rmt_buf_ops;
		break;
	case XFS_BLFT_SB_BUF:
		if (magic32 != XFS_SB_MAGIC) {
			warnmsg = "Bad SB block magic!";
			break;
		}
		bp->b_ops = &xfs_sb_buf_ops;
		break;
#ifdef CONFIG_XFS_RT
	case XFS_BLFT_RTBITMAP_BUF:
	case XFS_BLFT_RTSUMMARY_BUF:
		/* no magic numbers for verification of RT buffers */
		bp->b_ops = &xfs_rtbuf_ops;
		break;
#endif /* CONFIG_XFS_RT */
	default:
		xfs_warn(mp, "Unknown buffer type %d!",
			 xfs_blft_from_flags(buf_f));
		break;
	}

	/*
	 * Nothing else to do in the case of a NULL current LSN as this means
	 * the buffer is more recent than the change in the log and will be
	 * skipped.
	 */
	if (current_lsn == NULLCOMMITLSN)
		return;

	if (warnmsg) {
		xfs_warn(mp, warnmsg);
		ASSERT(0);
	}

	/*
	 * We must update the metadata LSN of the buffer as it is written out
	 * to ensure that older transactions never replay over this one and
	 * corrupt the buffer. This can occur if log recovery is interrupted
	 * at some point after the current transaction completes, at which
	 * point a subsequent mount starts recovery from the beginning.
	 *
	 * Write verifiers update the metadata LSN from log items attached to
	 * the buffer. Therefore, initialize a bli purely to carry the LSN to
	 * the verifier.
	 */
	if (bp->b_ops) {
		struct xfs_buf_log_item	*bip;

		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_item_init(bp, mp);
		bip = bp->b_log_item;
		bip->bli_item.li_lsn = current_lsn;
	}
}

/*
 * Perform a 'normal' buffer recovery.  Each logged region of the
 * buffer should be copied over the corresponding region in the
 * given buffer.  The bitmap in the buf log format structure indicates
 * where to place the logged data.
 */
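/*
 * Worked example of the bitmap arithmetic below (a sketch, assuming the
 * usual definitions of XFS_BLF_CHUNK as 128 bytes and XFS_BLF_SHIFT as 7):
 * each bit in blf_data_map covers one 128 byte chunk of the buffer, so a
 * dirty run starting at bit 2 with nbits == 3 is copied to buffer bytes
 * (2 << 7) = 256 through (2 << 7) + (3 << 7) - 1 = 639 from the matching
 * log iovec.
 */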
STATIC void
xlog_recover_do_reg_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f,
	xfs_lsn_t			current_lsn)
{
	int			i;
	int			bit;
	int			nbits;
	xfs_failaddr_t		fa;
	const size_t		size_disk_dquot = sizeof(struct xfs_disk_dquot);

	trace_xfs_log_recover_buf_reg_buf(mp->m_log, buf_f);

	bit = 0;
	i = 1;  /* 0 is the buf format structure */
	while (1) {
		bit = xfs_next_bit(buf_f->blf_data_map,
				   buf_f->blf_map_size, bit);
		if (bit == -1)
			break;
		nbits = xfs_contig_bits(buf_f->blf_data_map,
					buf_f->blf_map_size, bit);
		ASSERT(nbits > 0);
		ASSERT(item->ri_buf[i].i_addr != NULL);
		ASSERT(item->ri_buf[i].i_len % XFS_BLF_CHUNK == 0);
		ASSERT(BBTOB(bp->b_length) >=
		       ((uint)bit << XFS_BLF_SHIFT) + (nbits << XFS_BLF_SHIFT));

		/*
		 * The dirty regions logged in the buffer, even though
		 * contiguous, may span multiple chunks. This is because the
		 * dirty region may span a physical page boundary in a buffer
		 * and hence be split into two separate vectors for writing
		 * into the log. Hence we need to trim nbits back to the
		 * length of the current region being copied out of the log.
		 */
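		/*
		 * Example (a sketch): a 3 chunk dirty run split across a page
		 * boundary may arrive as a 2 chunk iovec followed by a 1 chunk
		 * iovec. The bitmap reports nbits == 3, but i_len of the first
		 * iovec trims the copy back to 2 chunks; the trailing chunk is
		 * picked up on the next loop iteration.
		 */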
		if (item->ri_buf[i].i_len < (nbits << XFS_BLF_SHIFT))
			nbits = item->ri_buf[i].i_len >> XFS_BLF_SHIFT;

		/*
		 * Do a sanity check if this is a dquot buffer. Just checking
		 * the first dquot in the buffer should do. XXX This is
		 * probably a good thing to do for other buf types also.
		 */
		fa = NULL;
		if (buf_f->blf_flags &
		   (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
			if (item->ri_buf[i].i_addr == NULL) {
				xfs_alert(mp,
					"XFS: NULL dquot in %s.", __func__);
				goto next;
			}
			if (item->ri_buf[i].i_len < size_disk_dquot) {
				xfs_alert(mp,
					"XFS: dquot too small (%d) in %s.",
					item->ri_buf[i].i_len, __func__);
				goto next;
			}
			fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1);
			if (fa) {
				xfs_alert(mp,
	"dquot corrupt at %pS trying to replay into block 0x%llx",
					fa, bp->b_bn);
				goto next;
			}
		}

		memcpy(xfs_buf_offset(bp,
			(uint)bit << XFS_BLF_SHIFT),	/* dest */
			item->ri_buf[i].i_addr,		/* source */
			nbits << XFS_BLF_SHIFT);	/* length */
 next:
		i++;
		bit += nbits;
	}

	/* Shouldn't be any more regions */
	ASSERT(i == item->ri_total);

	xlog_recover_validate_buf_type(mp, bp, buf_f, current_lsn);
}

/*
 * Perform a dquot buffer recovery.
 * Simple algorithm: if we have found a QUOTAOFF log item of the same type
 * (i.e. USR or GRP), then just toss this buffer away; don't recover it.
 * Else, treat it as a regular buffer and do recovery.
 *
 * Return false if the buffer was tossed and true if we recovered the buffer to
 * indicate to the caller if the buffer needs writing.
 */
STATIC bool
xlog_recover_do_dquot_buffer(
	struct xfs_mount		*mp,
	struct xlog			*log,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	uint			type;

	trace_xfs_log_recover_buf_dquot_buf(log, buf_f);

	/*
	 * Filesystems are required to send in quota flags at mount time.
	 */
	if (!mp->m_qflags)
		return false;

	type = 0;
	if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF)
		type |= XFS_DQTYPE_USER;
	if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF)
		type |= XFS_DQTYPE_PROJ;
	if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF)
		type |= XFS_DQTYPE_GROUP;
	/*
	 * This type of quota was turned off, so ignore this buffer.
	 */
	if (log->l_quotaoffs_flag & type)
		return false;

	xlog_recover_do_reg_buffer(mp, item, bp, buf_f, NULLCOMMITLSN);
	return true;
}

/*
 * Perform recovery for a buffer full of inodes.  In these buffers, the only
 * data which should be recovered is that which corresponds to the
 * di_next_unlinked pointers in the on disk inode structures.  The rest of the
 * data for the inodes is always logged through the inodes themselves rather
 * than the inode buffer and is recovered in xlog_recover_inode_pass2().
 *
 * The only time when buffers full of inodes are fully recovered is when the
 * buffer is full of newly allocated inodes.  In this case the buffer will
 * not be marked as an inode buffer and so will be sent to
 * xlog_recover_do_reg_buffer() below during recovery.
 */
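/*
 * Offset sketch (hypothetical geometry): with 512 byte inodes, the
 * di_next_unlinked field of the i-th inode in the buffer sits at byte
 * (i * 512) + offsetof(xfs_dinode_t, di_next_unlinked). The loop below
 * walks the logged regions until it finds the iovec covering that offset,
 * then copies just that field into the buffer.
 */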
STATIC int
xlog_recover_do_inode_buffer(
	struct xfs_mount		*mp,
	struct xlog_recover_item	*item,
	struct xfs_buf			*bp,
	struct xfs_buf_log_format	*buf_f)
{
	int				i;
	int				item_index = 0;
	int				bit = 0;
	int				nbits = 0;
	int				reg_buf_offset = 0;
	int				reg_buf_bytes = 0;
	int				next_unlinked_offset;
	int				inodes_per_buf;
	xfs_agino_t			*logged_nextp;
	xfs_agino_t			*buffer_nextp;

	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);

	/*
	 * Post recovery validation only works properly on CRC enabled
	 * filesystems.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb))
		bp->b_ops = &xfs_inode_buf_ops;

	inodes_per_buf = BBTOB(bp->b_length) >> mp->m_sb.sb_inodelog;
	for (i = 0; i < inodes_per_buf; i++) {
		next_unlinked_offset = (i * mp->m_sb.sb_inodesize) +
			offsetof(xfs_dinode_t, di_next_unlinked);

		while (next_unlinked_offset >=
		       (reg_buf_offset + reg_buf_bytes)) {
			/*
			 * The next di_next_unlinked field is beyond
			 * the current logged region.  Find the next
			 * logged region that contains or is beyond
			 * the current di_next_unlinked field.
			 */
			bit += nbits;
			bit = xfs_next_bit(buf_f->blf_data_map,
					   buf_f->blf_map_size, bit);

			/*
			 * If there are no more logged regions in the
			 * buffer, then we're done.
			 */
			if (bit == -1)
				return 0;

			nbits = xfs_contig_bits(buf_f->blf_data_map,
						buf_f->blf_map_size, bit);
			ASSERT(nbits > 0);
			reg_buf_offset = bit << XFS_BLF_SHIFT;
			reg_buf_bytes = nbits << XFS_BLF_SHIFT;
			item_index++;
		}

		/*
		 * If the current logged region starts after the current
		 * di_next_unlinked field, then move on to the next
		 * di_next_unlinked field.
		 */
		if (next_unlinked_offset < reg_buf_offset)
			continue;

		ASSERT(item->ri_buf[item_index].i_addr != NULL);
		ASSERT((item->ri_buf[item_index].i_len % XFS_BLF_CHUNK) == 0);
		ASSERT((reg_buf_offset + reg_buf_bytes) <= BBTOB(bp->b_length));

		/*
		 * The current logged region contains a copy of the
		 * current di_next_unlinked field.  Extract its value
		 * and copy it to the buffer copy.
		 */
		logged_nextp = item->ri_buf[item_index].i_addr +
				next_unlinked_offset - reg_buf_offset;
		if (XFS_IS_CORRUPT(mp, *logged_nextp == 0)) {
			xfs_alert(mp,
		"Bad inode buffer log record (ptr = "PTR_FMT", bp = "PTR_FMT"). "
		"Trying to replay bad (0) inode di_next_unlinked field.",
				item, bp);
			return -EFSCORRUPTED;
		}

		buffer_nextp = xfs_buf_offset(bp, next_unlinked_offset);
		*buffer_nextp = *logged_nextp;

		/*
		 * If necessary, recalculate the CRC in the on-disk inode. We
		 * have to leave the inode in a consistent state for whoever
		 * reads it next....
		 */
		xfs_dinode_calc_crc(mp,
				xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize));
	}

	return 0;
}

/*
 * V5 filesystems know the age of the buffer on disk being recovered. We can
 * have newer objects on disk than we are replaying, and so for these cases we
 * don't want to replay the current change as that will make the buffer
 * contents temporarily invalid on disk.
 *
 * The magic number might not match the buffer type we are going to recover
 * (e.g. reallocated blocks), so we ignore the xfs_buf_log_format flags.  Hence
 * we extract the LSN of the existing object in the buffer based on its current
 * magic number.  If we don't recognise the magic number in the buffer, then
 * return an LSN of -1 so that the caller knows it was an unrecognised block
 * and so can recover the buffer.
 *
 * Note: we cannot rely solely on magic number matches to determine that the
 * buffer has a valid LSN - we also need to verify that it belongs to this
 * filesystem, so we need to extract the object's LSN and compare it to that
 * which we read from the superblock. If the UUIDs don't match, then we've got
 * a stale metadata block from an old filesystem instance that we need to
 * recover over the top of.
 */
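/*
 * Caller's view (a sketch): xlog_recover_buf_commit_pass2() below replays
 * the logged change only when the LSN returned here is -1 (unrecognised or
 * foreign block) or strictly older than the LSN of the transaction being
 * replayed, i.e. when XFS_LSN_CMP(lsn, current_lsn) < 0.
 */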
static xfs_lsn_t
xlog_recover_get_buf_lsn(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	uint32_t		magic32;
	uint16_t		magic16;
	uint16_t		magicda;
	void			*blk = bp->b_addr;
	uuid_t			*uuid;
	xfs_lsn_t		lsn = -1;

	/* v4 filesystems always recover immediately */
	if (!xfs_sb_version_hascrc(&mp->m_sb))
		goto recover_immediately;

	magic32 = be32_to_cpu(*(__be32 *)blk);
	switch (magic32) {
	case XFS_ABTB_CRC_MAGIC:
	case XFS_ABTC_CRC_MAGIC:
	case XFS_ABTB_MAGIC:
	case XFS_ABTC_MAGIC:
	case XFS_RMAP_CRC_MAGIC:
	case XFS_REFC_CRC_MAGIC:
	case XFS_FIBT_CRC_MAGIC:
	case XFS_FIBT_MAGIC:
	case XFS_IBT_CRC_MAGIC:
	case XFS_IBT_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.s.bb_lsn);
		uuid = &btb->bb_u.s.bb_uuid;
		break;
	}
	case XFS_BMAP_CRC_MAGIC:
	case XFS_BMAP_MAGIC: {
		struct xfs_btree_block *btb = blk;

		lsn = be64_to_cpu(btb->bb_u.l.bb_lsn);
		uuid = &btb->bb_u.l.bb_uuid;
		break;
	}
	case XFS_AGF_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agf *)blk)->agf_lsn);
		uuid = &((struct xfs_agf *)blk)->agf_uuid;
		break;
	case XFS_AGFL_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agfl *)blk)->agfl_lsn);
		uuid = &((struct xfs_agfl *)blk)->agfl_uuid;
		break;
	case XFS_AGI_MAGIC:
		lsn = be64_to_cpu(((struct xfs_agi *)blk)->agi_lsn);
		uuid = &((struct xfs_agi *)blk)->agi_uuid;
		break;
	case XFS_SYMLINK_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dsymlink_hdr *)blk)->sl_lsn);
		uuid = &((struct xfs_dsymlink_hdr *)blk)->sl_uuid;
		break;
	case XFS_DIR3_BLOCK_MAGIC:
	case XFS_DIR3_DATA_MAGIC:
	case XFS_DIR3_FREE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_dir3_blk_hdr *)blk)->lsn);
		uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
		break;
	case XFS_ATTR3_RMT_MAGIC:
		/*
		 * Remote attr blocks are written synchronously, rather than
		 * being logged. That means they do not contain a valid LSN
		 * (i.e. transactionally ordered) in them, and hence any time we
		 * see a buffer to replay over the top of a remote attribute
		 * block we should simply do so.
		 */
		goto recover_immediately;
	case XFS_SB_MAGIC:
		/*
		 * Superblock uuids are magic. We may or may not have a
		 * sb_meta_uuid on disk, but it will be set in the in-core
		 * superblock. We set the uuid pointer for verification
		 * according to the superblock feature mask to ensure we check
		 * the relevant UUID in the superblock.
		 */
		lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
		if (xfs_sb_version_hasmetauuid(&mp->m_sb))
			uuid = &((struct xfs_dsb *)blk)->sb_meta_uuid;
		else
			uuid = &((struct xfs_dsb *)blk)->sb_uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	magicda = be16_to_cpu(((struct xfs_da_blkinfo *)blk)->magic);
	switch (magicda) {
	case XFS_DIR3_LEAF1_MAGIC:
	case XFS_DIR3_LEAFN_MAGIC:
	case XFS_ATTR3_LEAF_MAGIC:
	case XFS_DA3_NODE_MAGIC:
		lsn = be64_to_cpu(((struct xfs_da3_blkinfo *)blk)->lsn);
		uuid = &((struct xfs_da3_blkinfo *)blk)->uuid;
		break;
	default:
		break;
	}

	if (lsn != (xfs_lsn_t)-1) {
		if (!uuid_equal(&mp->m_sb.sb_meta_uuid, uuid))
			goto recover_immediately;
		return lsn;
	}

	/*
	 * We do individual object checks on dquot and inode buffers as they
	 * have their own individual LSN records. Also, we could have a stale
	 * buffer here, so we have to at least recognise these buffer types.
	 *
	 * A noted complexity here is inode unlinked list processing - it logs
	 * the inode directly in the buffer, but we don't know which inodes
	 * have been modified, and there is no global buffer LSN. Hence we need
	 * to recover all inode buffer types immediately. This problem will be
	 * fixed by logical logging of the unlinked list modifications.
	 */
	magic16 = be16_to_cpu(*(__be16 *)blk);
	switch (magic16) {
	case XFS_DQUOT_MAGIC:
	case XFS_DINODE_MAGIC:
		goto recover_immediately;
	default:
		break;
	}

	/* unknown buffer contents, recover immediately */

recover_immediately:
	return (xfs_lsn_t)-1;
}

/*
 * This routine replays a modification made to a buffer at runtime.
 * There are actually two types of buffer, regular and inode, which
 * are handled differently.  Inode buffers are handled differently
 * in that we only recover a specific set of data from them, namely
 * the inode di_next_unlinked fields.  This is because all other inode
 * data is actually logged via inode records and any data we replay
 * here which overlaps that may be stale.
 *
 * When meta-data buffers are freed at run time we log a buffer item
 * with the XFS_BLF_CANCEL bit set to indicate that previous copies
 * of the buffer in the log should not be replayed at recovery time.
 * This is so that if the blocks covered by the buffer are reused for
 * file data before we crash we don't end up replaying old, freed
 * meta-data into a user's file.
 *
 * To handle the cancellation of buffer log items, we make two passes
 * over the log during recovery.  During the first we build a table of
 * those buffers which have been cancelled, and during the second we
 * only replay those buffers which do not have corresponding cancel
 * records in the table.  See xlog_recover_buf_commit_pass[1,2] above
 * for more details on the implementation of the table of cancel records.
 */
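/*
 * Two-pass flow in miniature (a sketch): given a log containing
 *	buf A (data), buf A (XFS_BLF_CANCEL), buf B (data)
 * pass 1 adds one cancel record for A with bc_refcount = 1 and only traces
 * the data items; in pass 2 the data item for A finds that record and is
 * skipped, the cancel item for A drops the refcount to zero and frees the
 * record, and B, which has no record, is read in and replayed.
 */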
STATIC int
xlog_recover_buf_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			current_lsn)
{
	struct xfs_buf_log_format	*buf_f = item->ri_buf[0].i_addr;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_buf			*bp;
	int				error;
	uint				buf_flags;
	xfs_lsn_t			lsn;

	/*
	 * In this pass we only want to recover all the buffers which have
	 * not been cancelled and are not cancellation buffers themselves.
	 */
	if (buf_f->blf_flags & XFS_BLF_CANCEL) {
		if (xlog_put_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	} else {
		if (xlog_is_buffer_cancelled(log, buf_f->blf_blkno,
				buf_f->blf_len))
			goto cancelled;
	}

	trace_xfs_log_recover_buf_recover(log, buf_f);

	buf_flags = 0;
	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
		buf_flags |= XBF_UNMAPPED;

	error = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
			  buf_flags, &bp, NULL);
	if (error)
		return error;

	/*
	 * Recover the buffer only if we get an LSN from it and it's less than
	 * the lsn of the transaction we are replaying.
	 *
	 * Note that we have to be extremely careful of readahead here.
	 * Readahead does not attach verifiers to the buffers, so if we don't
	 * actually do any replay after readahead because the LSN found in the
	 * buffer is more recent than the current transaction, we need to
	 * attach the verifier directly. Failure to do so can lead to future
	 * recovery actions (e.g. EFI and unlinked list recovery) operating on
	 * the buffers without a verifier attached. This can leave blocks on
	 * disk with the correct content but a stale CRC.
	 *
	 * It is safe to assume these clean buffers are currently up to date.
	 * If the buffer is dirtied by a later transaction being replayed, then
	 * the verifier will be reset to match whatever recover turns that
	 * buffer into.
	 */
	lsn = xlog_recover_get_buf_lsn(mp, bp);
	if (lsn && lsn != -1 && XFS_LSN_CMP(lsn, current_lsn) >= 0) {
		trace_xfs_log_recover_buf_skip(log, buf_f);
		xlog_recover_validate_buf_type(mp, bp, buf_f, NULLCOMMITLSN);
		goto out_release;
	}

	if (buf_f->blf_flags & XFS_BLF_INODE_BUF) {
		error = xlog_recover_do_inode_buffer(mp, item, bp, buf_f);
		if (error)
			goto out_release;
	} else if (buf_f->blf_flags &
		  (XFS_BLF_UDQUOT_BUF|XFS_BLF_PDQUOT_BUF|XFS_BLF_GDQUOT_BUF)) {
		bool	dirty;

		dirty = xlog_recover_do_dquot_buffer(mp, log, item, bp, buf_f);
		if (!dirty)
			goto out_release;
	} else {
		xlog_recover_do_reg_buffer(mp, item, bp, buf_f, current_lsn);
	}

	/*
	 * Perform delayed write on the buffer.  Asynchronous writes will be
	 * slower when taking into account all the buffers to be flushed.
	 *
	 * Also make sure that only inode buffers with good sizes stay in
	 * the buffer cache.  The kernel moves inodes in buffers of 1 block
	 * or inode_cluster_size bytes, whichever is bigger.  The inode
	 * buffers in the log can be a different size if the log was generated
	 * by an older kernel using unclustered inode buffers or a newer kernel
	 * running with a different inode cluster size.  Regardless, if
	 * the inode buffer size isn't max(blocksize, inode_cluster_size)
	 * for *our* value of inode_cluster_size, then we need to keep
	 * the buffer out of the buffer cache so that the buffer won't
	 * overlap with future reads of those inodes.
	 */
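	/*
	 * Example (hypothetical sizes): with 4k filesystem blocks and an 8k
	 * inode_cluster_size, inode buffers are expected to be 8k here. A 4k
	 * inode buffer replayed from a log written by an older kernel is
	 * written out synchronously and marked stale, so it cannot linger in
	 * the cache and overlap a later 8k read of those inodes.
	 */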
	if (XFS_DINODE_MAGIC ==
	    be16_to_cpu(*((__be16 *)xfs_buf_offset(bp, 0))) &&
	    (BBTOB(bp->b_length) != M_IGEO(log->l_mp)->inode_cluster_size)) {
		xfs_buf_stale(bp);
		error = xfs_bwrite(bp);
	} else {
		ASSERT(bp->b_mount == mp);
		bp->b_flags |= _XBF_LOGRECOVERY;
		xfs_buf_delwri_queue(bp, buffer_list);
	}

out_release:
	xfs_buf_relse(bp);
	return error;
cancelled:
	trace_xfs_log_recover_buf_cancel(log, buf_f);
	return 0;
}

const struct xlog_recover_item_ops xlog_buf_item_ops = {
	.item_type		= XFS_LI_BUF,
	.reorder		= xlog_recover_buf_reorder,
	.ra_pass2		= xlog_recover_buf_ra_pass2,
	.commit_pass1		= xlog_recover_buf_commit_pass1,
	.commit_pass2		= xlog_recover_buf_commit_pass2,
};
986