// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_trans_priv.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_error.h"
#include "xfs_buf_item.h"

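/*
 * Integer midpoint of two block numbers, used by the binary search in
 * xlog_find_cycle_start() below; e.g. BLK_AVG(4, 11) evaluates to 7.
 */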
#define BLK_AVG(blk1, blk2)	(((blk1) + (blk2)) >> 1)

STATIC int
xlog_find_zeroed(
	struct xlog	*,
	xfs_daddr_t	*);
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*,
	xfs_lsn_t);
#if defined(DEBUG)
STATIC void
xlog_recover_check_summary(
	struct xlog *);
#else
#define	xlog_recover_check_summary(log)
#endif
STATIC int
xlog_do_recovery_pass(
	struct xlog *, xfs_daddr_t, xfs_daddr_t, int, xfs_daddr_t *);

/*
 * Sector aligned buffer routines for buffer create/read/write/access
 */

/*
 * Verify the log-relative block number and length in basic blocks are valid for
 * an operation involving the given XFS log buffer. Returns true if the fields
 * are valid, false otherwise.
 */
static inline bool
xlog_verify_bno(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		bbcount)
{
	if (blk_no < 0 || blk_no >= log->l_logBBsize)
		return false;
	if (bbcount <= 0 || (blk_no + bbcount) > log->l_logBBsize)
		return false;
	return true;
}

/*
 * Allocate a buffer to hold log data. The buffer needs to be able to map to
 * a range of nbblks basic blocks at any valid offset within the log.
 */
static char *
xlog_alloc_buffer(
	struct xlog	*log,
	int		nbblks)
{
	int		align_mask = xfs_buftarg_dma_alignment(log->l_targ);

	/*
	 * Pass log block 0 since we don't have an addr yet; the buffer will
	 * be verified on read.
	 */
	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, 0, nbblks))) {
		xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
			nbblks);
		return NULL;
	}

	/*
	 * We do log I/O in units of log sectors (a power-of-2 multiple of the
	 * basic block size), so we round up the requested size to accommodate
	 * the basic blocks required for complete log sectors.
	 *
	 * In addition, the buffer may be used for a non-sector-aligned block
	 * offset, in which case an I/O of the requested size could extend
	 * beyond the end of the buffer. If the requested size is only 1 basic
	 * block it will never straddle a sector boundary, so this won't be an
	 * issue. Nor will this be a problem if the log I/O is done in basic
	 * blocks (sector size 1). But otherwise we extend the buffer by one
	 * extra log sector to ensure there's space to accommodate this
	 * possibility.
	 */
	if (nbblks > 1 && log->l_sectBBsize > 1)
		nbblks += log->l_sectBBsize;
	nbblks = round_up(nbblks, log->l_sectBBsize);
	return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
}
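
/*
 * A worked example of the sizing above, assuming a log device with 4k
 * sectors (l_sectBBsize == 8): a request for nbblks == 10 first grows to
 * 18 to cover a possibly misaligned start, then rounds up to 24 basic
 * blocks, i.e. three full log sectors (12288 bytes). A one-block request
 * stays at a single sector.
 */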

/*
 * Return the address of the start of the given block number's data
 * in a log buffer. The buffer covers a log sector-aligned region.
 */
static inline unsigned int
xlog_align(
	struct xlog	*log,
	xfs_daddr_t	blk_no)
{
	return BBTOB(blk_no & ((xfs_daddr_t)log->l_sectBBsize - 1));
}
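
/*
 * Example: with l_sectBBsize == 8, a read of block 21 is rounded down to
 * sector-aligned block 16 by xlog_do_io(), so the data for block 21
 * starts BBTOB(21 & 7) == 2560 bytes into the buffer.
 */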

static int
xlog_do_io(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	unsigned int	nbblks,
	char		*data,
	unsigned int	op)
{
	int		error;

	if (XFS_IS_CORRUPT(log->l_mp, !xlog_verify_bno(log, blk_no, nbblks))) {
		xfs_warn(log->l_mp,
			 "Invalid log block/length (0x%llx, 0x%x) for buffer",
			 blk_no, nbblks);
		return -EFSCORRUPTED;
	}

	blk_no = round_down(blk_no, log->l_sectBBsize);
	nbblks = round_up(nbblks, log->l_sectBBsize);
	ASSERT(nbblks > 0);

	error = xfs_rw_bdev(log->l_targ->bt_bdev, log->l_logBBstart + blk_no,
			BBTOB(nbblks), data, op);
	if (error && !XFS_FORCED_SHUTDOWN(log->l_mp)) {
		xfs_alert(log->l_mp,
			  "log recovery %s I/O error at daddr 0x%llx len %d error %d",
			  op == REQ_OP_WRITE ? "write" : "read",
			  blk_no, nbblks, error);
	}
	return error;
}

STATIC int
xlog_bread_noalign(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
}

STATIC int
xlog_bread(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data,
	char		**offset)
{
	int		error;

	error = xlog_do_io(log, blk_no, nbblks, data, REQ_OP_READ);
	if (!error)
		*offset = data + xlog_align(log, blk_no);
	return error;
}

STATIC int
xlog_bwrite(
	struct xlog	*log,
	xfs_daddr_t	blk_no,
	int		nbblks,
	char		*data)
{
	return xlog_do_io(log, blk_no, nbblks, data, REQ_OP_WRITE);
}

#ifdef DEBUG
/*
 * dump debug superblock and log record information
 */
STATIC void
xlog_header_check_dump(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	xfs_debug(mp, "%s:  SB : uuid = %pU, fmt = %d",
		__func__, &mp->m_sb.sb_uuid, XLOG_FMT);
	xfs_debug(mp, "    log : uuid = %pU, fmt = %d",
		&head->h_fs_uuid, be32_to_cpu(head->h_fmt));
}
#else
#define xlog_header_check_dump(mp, head)
#endif

/*
 * check log record header for recovery
 */
STATIC int
xlog_header_check_recover(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	/*
	 * IRIX doesn't write the h_fmt field and leaves it zeroed
	 * (XLOG_FMT_UNKNOWN). This stops us from trying to recover
	 * a dirty log created in IRIX.
	 */
	if (XFS_IS_CORRUPT(mp, head->h_fmt != cpu_to_be32(XLOG_FMT))) {
		xfs_warn(mp,
	"dirty log written in incompatible format - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
					   &head->h_fs_uuid))) {
		xfs_warn(mp,
	"dirty log entry has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * read the head block of the log and check the header
 */
STATIC int
xlog_header_check_mount(
	xfs_mount_t		*mp,
	xlog_rec_header_t	*head)
{
	ASSERT(head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM));

	if (uuid_is_null(&head->h_fs_uuid)) {
		/*
		 * IRIX doesn't write the h_fs_uuid or h_fmt fields. If
		 * h_fs_uuid is null, we assume this log was last mounted
		 * by IRIX and continue.
		 */
		xfs_warn(mp, "null uuid in log - IRIX style log");
	} else if (XFS_IS_CORRUPT(mp, !uuid_equal(&mp->m_sb.sb_uuid,
						  &head->h_fs_uuid))) {
		xfs_warn(mp, "log has mismatched uuid - can't recover");
		xlog_header_check_dump(mp, head);
		return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * This routine finds (to an approximation) the first block in the physical
 * log which contains the given cycle. It uses a binary search algorithm.
 * Note that the algorithm cannot be perfect because the disk will not
 * necessarily be perfect.
 */
STATIC int
xlog_find_cycle_start(
	struct xlog	*log,
	char		*buffer,
	xfs_daddr_t	first_blk,
	xfs_daddr_t	*last_blk,
	uint		cycle)
{
	char		*offset;
	xfs_daddr_t	mid_blk;
	xfs_daddr_t	end_blk;
	uint		mid_cycle;
	int		error;

	end_blk = *last_blk;
	mid_blk = BLK_AVG(first_blk, end_blk);
	while (mid_blk != first_blk && mid_blk != end_blk) {
		error = xlog_bread(log, mid_blk, 1, buffer, &offset);
		if (error)
			return error;
		mid_cycle = xlog_get_cycle(offset);
		if (mid_cycle == cycle)
			end_blk = mid_blk;	/* last_half_cycle == mid_cycle */
		else
			first_blk = mid_blk;	/* first_half_cycle == mid_cycle */
		mid_blk = BLK_AVG(first_blk, end_blk);
	}
	ASSERT((mid_blk == first_blk && mid_blk+1 == end_blk) ||
	       (mid_blk == end_blk && mid_blk-1 == first_blk));

	*last_blk = end_blk;

	return 0;
}
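
/*
 * A sketch of the invariant the loop above maintains, under its caller's
 * setup in xlog_find_head() (first_blk stamped with a different cycle,
 * *last_blk stamped with the target cycle): each pass halves the bracket
 * while keeping a mismatch on the left and a match on the right, so when
 * the two meet, end_blk is (approximately) the first block with 'cycle'.
 */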

/*
 * Check that a range of blocks does not contain stop_on_cycle_no.
 * Fill in *new_blk with the block offset where such a block is
 * found, or with -1 (an invalid block number) if there is no such
 * block in the range. The scan needs to occur from front to back
 * and the pointer into the region must be updated since a later
 * routine will need to perform another test.
 */
STATIC int
xlog_find_verify_cycle(
	struct xlog	*log,
	xfs_daddr_t	start_blk,
	int		nbblks,
	uint		stop_on_cycle_no,
	xfs_daddr_t	*new_blk)
{
	xfs_daddr_t	i, j;
	uint		cycle;
	char		*buffer;
	xfs_daddr_t	bufblks;
	char		*buf = NULL;
	int		error = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks we'll be examining.  If that fails,
	 * try a smaller size.  We need to be able to read at least
	 * a log sector, or we're out of luck.
	 */
	bufblks = 1 << ffs(nbblks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < log->l_sectBBsize)
			return -ENOMEM;
	}

	for (i = start_blk; i < start_blk + nbblks; i += bufblks) {
		int	bcount;

		bcount = min(bufblks, (start_blk + nbblks - i));

		error = xlog_bread(log, i, bcount, buffer, &buf);
		if (error)
			goto out;

		for (j = 0; j < bcount; j++) {
			cycle = xlog_get_cycle(buf);
			if (cycle == stop_on_cycle_no) {
				*new_blk = i+j;
				goto out;
			}

			buf += BBSIZE;
		}
	}

	*new_blk = -1;

out:
	kmem_free(buffer);
	return error;
}
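
/*
 * Buffer sizing in the routine above, for illustration: with nbblks ==
 * 2048 and a 1024 block log, 1 << ffs(2048) is 4096, which the first
 * loop halves to 1024; if that allocation fails, the second loop retries
 * at 512, 256, ... until it would drop below l_sectBBsize, at which
 * point -ENOMEM is returned.
 */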

static inline int
xlog_logrec_hblks(struct xlog *log, struct xlog_rec_header *rh)
{
	if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
		int	h_size = be32_to_cpu(rh->h_size);

		if ((be32_to_cpu(rh->h_version) & XLOG_VERSION_2) &&
		    h_size > XLOG_HEADER_CYCLE_SIZE)
			return DIV_ROUND_UP(h_size, XLOG_HEADER_CYCLE_SIZE);
	}
	return 1;
}
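
/*
 * Example: a v2 log with a 64k iclog size carries h_size == 65536 in each
 * record header, so DIV_ROUND_UP(65536, 32768) == 2 header blocks per
 * record; v1 logs, and v2 logs with h_size <= XLOG_HEADER_CYCLE_SIZE,
 * use a single header block.
 */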

/*
 * Potentially backup over partial log record write.
 *
 * In the typical case, last_blk is the number of the block directly after
 * a good log record.  Therefore, we subtract one to get the block number
 * of the last block in the given buffer.  extra_bblks contains the number
 * of blocks we would have read on a previous read.  This happens when the
 * last log record is split over the end of the physical log.
 *
 * extra_bblks is the number of blocks potentially verified on a previous
 * call to this routine.
 */
STATIC int
xlog_find_verify_log_record(
	struct xlog		*log,
	xfs_daddr_t		start_blk,
	xfs_daddr_t		*last_blk,
	int			extra_bblks)
{
	xfs_daddr_t		i;
	char			*buffer;
	char			*offset = NULL;
	xlog_rec_header_t	*head = NULL;
	int			error = 0;
	int			smallmem = 0;
	int			num_blks = *last_blk - start_blk;
	int			xhdrs;

	ASSERT(start_blk != 0 || *last_blk != start_blk);

	buffer = xlog_alloc_buffer(log, num_blks);
	if (!buffer) {
		buffer = xlog_alloc_buffer(log, 1);
		if (!buffer)
			return -ENOMEM;
		smallmem = 1;
	} else {
		error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
		if (error)
			goto out;
		offset += ((num_blks - 1) << BBSHIFT);
	}

	for (i = (*last_blk) - 1; i >= 0; i--) {
		if (i < start_blk) {
			/* valid log record not found */
			xfs_warn(log->l_mp,
		"Log inconsistent (didn't find previous header)");
			ASSERT(0);
			error = -EFSCORRUPTED;
			goto out;
		}

		if (smallmem) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out;
		}

		head = (xlog_rec_header_t *)offset;

		if (head->h_magicno == cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
			break;

		if (!smallmem)
			offset -= BBSIZE;
	}

	/*
	 * We hit the beginning of the physical log & still no header.  Return
	 * to caller.  If caller can handle a return of -1, then this routine
	 * will be called again for the end of the physical log.
	 */
	if (i == -1) {
		error = 1;
		goto out;
	}

	/*
	 * We have the final block of the good log (the first block
	 * of the log record _before_ the head), so we check the uuid.
	 */
	if ((error = xlog_header_check_mount(log->l_mp, head)))
		goto out;

	/*
	 * We may have found a log record header before we expected one.
	 * last_blk will be the 1st block # with a given cycle #.  We may end
	 * up reading an entire log record.  In this case, we don't want to
	 * reset last_blk.  Only when last_blk points in the middle of a log
	 * record do we update last_blk.
	 */
	xhdrs = xlog_logrec_hblks(log, head);

	if (*last_blk - i + extra_bblks !=
	    BTOBB(be32_to_cpu(head->h_len)) + xhdrs)
		*last_blk = i;

out:
	kmem_free(buffer);
	return error;
}
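
/*
 * For illustration of the final length check: on a v2 log with 64k
 * iclogs (xhdrs == 2), a record claiming h_len of 16k should span
 * BTOBB(16384) + 2 == 34 basic blocks. If *last_blk - i (plus blocks
 * verified on an earlier call) comes up short, the record tail never
 * made it to disk and *last_blk is pulled back to the record header.
 */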

/*
 * Head is defined to be the point of the log where the next log write
 * could go.  This means that incomplete LR writes at the end are
 * eliminated when calculating the head.  We aren't guaranteed that previous
 * LRs have complete transactions.  We only know that a cycle number of
 * current cycle number -1 won't be present in the log if we start writing
 * from our current block number.
 *
 * last_blk contains the block number of the first block with a given
 * cycle number.
 *
 * Return: zero if normal, non-zero if error.
 */
STATIC int
xlog_find_head(
	struct xlog	*log,
	xfs_daddr_t	*return_head_blk)
{
	char		*buffer;
	char		*offset;
	xfs_daddr_t	new_blk, first_blk, start_blk, last_blk, head_blk;
	int		num_scan_bblks;
	uint		first_half_cycle, last_half_cycle;
	uint		stop_on_cycle;
	int		error, log_bbnum = log->l_logBBsize;

	/* Is the end of the log device zeroed? */
	error = xlog_find_zeroed(log, &first_blk);
	if (error < 0) {
		xfs_warn(log->l_mp, "empty log check failed");
		return error;
	}
	if (error == 1) {
		*return_head_blk = first_blk;

		/* Is the whole lot zeroed? */
		if (!first_blk) {
			/* Linux XFS shouldn't generate totally zeroed logs -
			 * mkfs etc write a dummy unmount record to a fresh
			 * log so we can store the uuid in there
			 */
			xfs_warn(log->l_mp, "totally zeroed log");
		}

		return 0;
	}

	first_blk = 0;			/* get cycle # of 1st block */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_half_cycle = xlog_get_cycle(offset);

	last_blk = head_blk = log_bbnum - 1;	/* get cycle # of last block */
	error = xlog_bread(log, last_blk, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_half_cycle = xlog_get_cycle(offset);
	ASSERT(last_half_cycle != 0);

	/*
	 * If the 1st half cycle number is equal to the last half cycle number,
	 * then the entire log is stamped with the same cycle number.  In this
	 * case, head_blk can't be set to zero (which makes sense).  The below
	 * math doesn't work out properly with head_blk equal to zero.  Instead,
	 * we set it to log_bbnum which is an invalid block number, but this
	 * value makes the math correct.  If head_blk doesn't change through
	 * all the tests below, *head_blk is set to zero at the very end rather
	 * than log_bbnum.  In a sense, log_bbnum and zero are the same block
	 * in a circular file.
	 */
	if (first_half_cycle == last_half_cycle) {
		/*
		 * In this case we believe that the entire log should have
		 * cycle number last_half_cycle.  We need to scan backwards
		 * from the end verifying that there are no holes still
		 * containing last_half_cycle - 1.  If we find such a hole,
		 * then the start of that hole will be the new head.  The
		 * simple case looks like
		 *        x | x ... | x - 1 | x
		 * Another case that fits this picture would be
		 *        x | x + 1 | x ... | x
		 * In this case the head really is somewhere at the end of the
		 * log, as one of the latest writes at the beginning was
		 * incomplete.
		 * One more case is
		 *        x | x + 1 | x ... | x - 1 | x
		 * This is really the combination of the above two cases, and
		 * the head has to end up at the start of the x-1 hole at the
		 * end of the log.
		 *
		 * In the 256k log case, we will read from the beginning to the
		 * end of the log and search for cycle numbers equal to x-1.
		 * We don't worry about the x+1 blocks that we encounter,
		 * because we know that they cannot be the head since the log
		 * started with x.
		 */
		head_blk = log_bbnum;
		stop_on_cycle = last_half_cycle - 1;
	} else {
		/*
		 * In this case we want to find the first block with cycle
		 * number matching last_half_cycle.  We expect the log to be
		 * some variation on
		 *        x + 1 ... | x ... | x
		 * The first block with cycle number x (last_half_cycle) will
		 * be where the new head belongs.  First we do a binary search
		 * for the first occurrence of last_half_cycle.  The binary
		 * search may not be totally accurate, so then we scan back
		 * from there looking for occurrences of last_half_cycle before
		 * us.  If that backwards scan wraps around the beginning of
		 * the log, then we look for occurrences of last_half_cycle - 1
		 * at the end of the log.  The cases we're looking for look
		 * like
		 *                               v binary search stopped here
		 *        x + 1 ... | x | x + 1 | x ... | x
		 *                   ^ but we want to locate this spot
		 * or
		 *        <---------> less than scan distance
		 *        x + 1 ... | x ... | x - 1 | x
		 *                           ^ we want to locate this spot
		 */
		stop_on_cycle = last_half_cycle;
		error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
				last_half_cycle);
		if (error)
			goto out_free_buffer;
	}

	/*
	 * Now validate the answer.  Scan back some number of maximum possible
	 * blocks and make sure each one has the expected cycle number.  The
	 * maximum is determined by the total possible amount of buffering
	 * in the in-core log.  The following number can be made tighter if
	 * we actually look at the block size of the filesystem.
	 */
	num_scan_bblks = min_t(int, log_bbnum, XLOG_TOTAL_REC_SHIFT(log));
	if (head_blk >= num_scan_bblks) {
		/*
		 * We are guaranteed that the entire check can be performed
		 * in one buffer.
		 */
		start_blk = head_blk - num_scan_bblks;
		if ((error = xlog_find_verify_cycle(log,
						start_blk, num_scan_bblks,
						stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	} else {		/* need to read 2 parts of log */
		/*
		 * We are going to scan backwards in the log in two parts.
		 * First we scan the physical end of the log.  In this part
		 * of the log, we are looking for blocks with cycle number
		 * last_half_cycle - 1.
		 * If we find one, then we know that the log starts there, as
		 * we've found a hole that didn't get written in going around
		 * the end of the physical log.  The simple case for this is
		 *        x + 1 ... | x ... | x - 1 | x
		 *        <---------> less than scan distance
		 * If all of the blocks at the end of the log have cycle number
		 * last_half_cycle, then we check the blocks at the start of
		 * the log looking for occurrences of last_half_cycle.  If we
		 * find one, then our current estimate for the location of the
		 * first occurrence of last_half_cycle is wrong and we move
		 * back to the hole we've found.  This case looks like
		 *        x + 1 ... | x | x + 1 | x ...
		 *                       ^ binary search stopped here
		 * Another case we need to handle that only occurs in 256k
		 * logs is
		 *        x + 1 ... | x ... | x+1 | x ...
		 *                   ^ binary search stops here
		 * In a 256k log, the scan at the end of the log will see the
		 * x + 1 blocks.  We need to skip past those since that is
		 * certainly not the head of the log.  By searching for
		 * last_half_cycle-1 we accomplish that.
		 */
		ASSERT(head_blk <= INT_MAX &&
			(xfs_daddr_t) num_scan_bblks >= head_blk);
		start_blk = log_bbnum - (num_scan_bblks - head_blk);
		if ((error = xlog_find_verify_cycle(log, start_blk,
					num_scan_bblks - (int)head_blk,
					(stop_on_cycle - 1), &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1) {
			head_blk = new_blk;
			goto validate_head;
		}

		/*
		 * Scan beginning of log now.  The last part of the physical
		 * log is good.  This scan needs to verify that it doesn't find
		 * the last_half_cycle.
		 */
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		if ((error = xlog_find_verify_cycle(log,
					start_blk, (int)head_blk,
					stop_on_cycle, &new_blk)))
			goto out_free_buffer;
		if (new_blk != -1)
			head_blk = new_blk;
	}

validate_head:
	/*
	 * Now we need to make sure head_blk is not pointing to a block in
	 * the middle of a log record.
	 */
	num_scan_bblks = XLOG_REC_SHIFT(log);
	if (head_blk >= num_scan_bblks) {
		start_blk = head_blk - num_scan_bblks; /* don't read head_blk */

		/* start ptr at last block ptr before head_blk */
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error == 1)
			error = -EIO;
		if (error)
			goto out_free_buffer;
	} else {
		start_blk = 0;
		ASSERT(head_blk <= INT_MAX);
		error = xlog_find_verify_log_record(log, start_blk, &head_blk, 0);
		if (error < 0)
			goto out_free_buffer;
		if (error == 1) {
			/* We hit the beginning of the log during our search */
			start_blk = log_bbnum - (num_scan_bblks - head_blk);
			new_blk = log_bbnum;
			ASSERT(start_blk <= INT_MAX &&
				(xfs_daddr_t) log_bbnum-start_blk >= 0);
			ASSERT(head_blk <= INT_MAX);
			error = xlog_find_verify_log_record(log, start_blk,
							&new_blk, (int)head_blk);
			if (error == 1)
				error = -EIO;
			if (error)
				goto out_free_buffer;
			if (new_blk != log_bbnum)
				head_blk = new_blk;
		} else if (error)
			goto out_free_buffer;
	}

	kmem_free(buffer);
	if (head_blk == log_bbnum)
		*return_head_blk = 0;
	else
		*return_head_blk = head_blk;
	/*
	 * When returning here, we have a good block number.  Bad block
	 * means that during a previous crash, we didn't have a clean break
	 * from cycle number N to cycle number N-1.  In this case, we need
	 * to find the first block with cycle number N-1.
	 */
	return 0;

out_free_buffer:
	kmem_free(buffer);
	if (error)
		xfs_warn(log->l_mp, "failed to find log head");
	return error;
}

/*
 * Seek backwards in the log for log record headers.
 *
 * Given a starting log block, walk backwards until we find the provided number
 * of records or hit the provided tail block. The return value is the number of
 * records encountered or a negative error code. The log block and buffer
 * pointer of the last record seen are returned in rblk and rhead respectively.
 */
STATIC int
xlog_rseek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk backwards from the head block until we hit the tail or the first
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? tail_blk : 0;
	for (i = (int) head_blk - 1; i >= end_blk; i--) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the tail block or the log record header count,
	 * start looking again from the end of the physical log. Note that
	 * callers can pass head == tail if the tail is not yet known.
	 */
	if (tail_blk >= head_blk && found != count) {
		for (i = log->l_logBBsize - 1; i >= (int) tail_blk; i--) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Seek forward in the log for log record headers.
 *
 * Given head and tail blocks, walk forward from the tail block until we find
 * the provided number of records or hit the head block. The return value is the
 * number of records encountered or a negative error code. The log block and
 * buffer pointer of the last record seen are returned in rblk and rhead
 * respectively.
 */
STATIC int
xlog_seek_logrec_hdr(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		tail_blk,
	int			count,
	char			*buffer,
	xfs_daddr_t		*rblk,
	struct xlog_rec_header	**rhead,
	bool			*wrapped)
{
	int			i;
	int			error;
	int			found = 0;
	char			*offset = NULL;
	xfs_daddr_t		end_blk;

	*wrapped = false;

	/*
	 * Walk forward from the tail block until we hit the head or the last
	 * block in the log.
	 */
	end_blk = head_blk > tail_blk ? head_blk : log->l_logBBsize - 1;
	for (i = (int) tail_blk; i <= end_blk; i++) {
		error = xlog_bread(log, i, 1, buffer, &offset);
		if (error)
			goto out_error;

		if (*(__be32 *) offset == cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
			*rblk = i;
			*rhead = (struct xlog_rec_header *) offset;
			if (++found == count)
				break;
		}
	}

	/*
	 * If we haven't hit the head block or the log record header count,
	 * start looking again from the start of the physical log.
	 */
	if (tail_blk > head_blk && found != count) {
		for (i = 0; i < (int) head_blk; i++) {
			error = xlog_bread(log, i, 1, buffer, &offset);
			if (error)
				goto out_error;

			if (*(__be32 *)offset ==
			    cpu_to_be32(XLOG_HEADER_MAGIC_NUM)) {
				*wrapped = true;
				*rblk = i;
				*rhead = (struct xlog_rec_header *) offset;
				if (++found == count)
					break;
			}
		}
	}

	return found;

out_error:
	return error;
}

/*
 * Calculate distance from head to tail (i.e., unused space in the log).
 */
static inline int
xlog_tail_distance(
	struct xlog	*log,
	xfs_daddr_t	head_blk,
	xfs_daddr_t	tail_blk)
{
	if (head_blk < tail_blk)
		return tail_blk - head_blk;

	return tail_blk + (log->l_logBBsize - head_blk);
}
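
/*
 * Example: in a 1000 block log with head_blk == 900 and tail_blk == 100,
 * the head has wrapped past the end, so the unused space is
 * 100 + (1000 - 900) == 200 basic blocks.
 */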

/*
 * Verify the log tail. This is particularly important when torn or incomplete
 * writes have been detected near the front of the log and the head has been
 * walked back accordingly.
 *
 * We also have to handle the case where the tail was pinned and the head
 * blocked behind the tail right before a crash. If the tail had been pushed
 * immediately prior to the crash and the subsequent checkpoint was only
 * partially written, it's possible it overwrote the last referenced tail in the
 * log with garbage. This is not a coherency problem because the tail must have
 * been pushed before it can be overwritten, but appears as log corruption to
 * recovery because we have no way to know the tail was updated if the
 * subsequent checkpoint didn't write successfully.
 *
 * Therefore, CRC check the log from tail to head. If a failure occurs and the
 * offending record is within max iclog bufs from the head, walk the tail
 * forward and retry until a valid tail is found or corruption is detected out
 * of the range of a possible overwrite.
 */
STATIC int
xlog_verify_tail(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	xfs_daddr_t		*tail_blk,
	int			hsize)
{
	struct xlog_rec_header	*thead;
	char			*buffer;
	xfs_daddr_t		first_bad;
	int			error = 0;
	bool			wrapped;
	xfs_daddr_t		tmp_tail;
	xfs_daddr_t		orig_tail = *tail_blk;

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;

	/*
	 * Make sure the tail points to a record (returns positive count on
	 * success).
	 */
	error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
			&tmp_tail, &thead, &wrapped);
	if (error < 0)
		goto out;
	if (*tail_blk != tmp_tail)
		*tail_blk = tmp_tail;

	/*
	 * Run a CRC check from the tail to the head. We can't just check
	 * MAX_ICLOGS records past the tail because the tail may point to stale
	 * blocks cleared during the search for the head/tail. These blocks are
	 * overwritten with zero-length records and thus record count is not a
	 * reliable indicator of the iclog state before a crash.
	 */
	first_bad = 0;
	error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	while ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		int	tail_distance;

		/*
		 * Is corruption within range of the head? If so, retry from
		 * the next record. Otherwise return an error.
		 */
		tail_distance = xlog_tail_distance(log, head_blk, first_bad);
		if (tail_distance > BTOBB(XLOG_MAX_ICLOGS * hsize))
			break;

		/* skip to the next record; returns positive count on success */
		error = xlog_seek_logrec_hdr(log, head_blk, first_bad, 2,
				buffer, &tmp_tail, &thead, &wrapped);
		if (error < 0)
			goto out;

		*tail_blk = tmp_tail;
		first_bad = 0;
		error = xlog_do_recovery_pass(log, head_blk, *tail_blk,
					      XLOG_RECOVER_CRCPASS, &first_bad);
	}

	if (!error && *tail_blk != orig_tail)
		xfs_warn(log->l_mp,
		"Tail block (0x%llx) overwrite detected. Updated to 0x%llx",
			 orig_tail, *tail_blk);
out:
	kmem_free(buffer);
	return error;
}
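
/*
 * Rough bound on the retry window above: with the usual XLOG_MAX_ICLOGS
 * of 8 and hsize == 32768, the tail is only walked forward while the bad
 * record lies within BTOBB(8 * 32768) == 512 basic blocks of the head;
 * corruption deeper in the log is reported rather than skipped.
 */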

/*
 * Detect and trim torn writes from the head of the log.
 *
 * Storage without sector atomicity guarantees can result in torn writes in the
 * log in the event of a crash. Our only means to detect this scenario is via
 * CRC verification. While we can't always be certain that CRC verification
 * failure is due to a torn write vs. an unrelated corruption, we do know that
 * only a certain number (XLOG_MAX_ICLOGS) of log records can be written out at
 * one time. Therefore, CRC verify up to XLOG_MAX_ICLOGS records at the head of
 * the log and treat failures in this range as torn writes as a matter of
 * policy. In the event of CRC failure, the head is walked back to the last good
 * record in the log and the tail is updated from that record and verified.
 */
STATIC int
xlog_verify_head(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,	/* in/out: unverified head */
	xfs_daddr_t		*tail_blk,	/* out: tail block */
	char			*buffer,
	xfs_daddr_t		*rhead_blk,	/* start blk of last record */
	struct xlog_rec_header	**rhead,	/* ptr to last record */
	bool			*wrapped)	/* last rec. wraps phys. log */
{
	struct xlog_rec_header	*tmp_rhead;
	char			*tmp_buffer;
	xfs_daddr_t		first_bad;
	xfs_daddr_t		tmp_rhead_blk;
	int			found;
	int			error;
	bool			tmp_wrapped;

	/*
	 * Check the head of the log for torn writes. Search backwards from the
	 * head until we hit the tail or the maximum number of log record I/Os
	 * that could have been in flight at one time. Use a temporary buffer so
	 * we don't trash the rhead/buffer pointers from the caller.
	 */
	tmp_buffer = xlog_alloc_buffer(log, 1);
	if (!tmp_buffer)
		return -ENOMEM;
	error = xlog_rseek_logrec_hdr(log, *head_blk, *tail_blk,
				      XLOG_MAX_ICLOGS, tmp_buffer,
				      &tmp_rhead_blk, &tmp_rhead, &tmp_wrapped);
	kmem_free(tmp_buffer);
	if (error < 0)
		return error;

	/*
	 * Now run a CRC verification pass over the records starting at the
	 * block found above to the current head. If a CRC failure occurs, the
	 * log block of the first bad record is saved in first_bad.
	 */
	error = xlog_do_recovery_pass(log, *head_blk, tmp_rhead_blk,
				      XLOG_RECOVER_CRCPASS, &first_bad);
	if ((error == -EFSBADCRC || error == -EFSCORRUPTED) && first_bad) {
		/*
		 * We've hit a potential torn write. Reset the error and warn
		 * about it.
		 */
		error = 0;
		xfs_warn(log->l_mp,
"Torn write (CRC failure) detected at log block 0x%llx. Truncating head block from 0x%llx.",
			 first_bad, *head_blk);

		/*
		 * Get the header block and buffer pointer for the last good
		 * record before the bad record.
		 *
		 * Note that xlog_find_tail() clears the blocks at the new head
		 * (i.e., the records with invalid CRC) if the cycle number
		 * matches the current cycle.
		 */
		found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1,
				buffer, rhead_blk, rhead, wrapped);
		if (found < 0)
			return found;
		if (found == 0)		/* XXX: right thing to do here? */
			return -EIO;

		/*
		 * Reset the head block to the starting block of the first bad
		 * log record and set the tail block based on the last good
		 * record.
		 *
		 * Bail out if the updated head/tail match as this indicates
		 * possible corruption outside of the acceptable
		 * (XLOG_MAX_ICLOGS) range. This is a job for xfs_repair...
		 */
		*head_blk = first_bad;
		*tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
		if (*head_blk == *tail_blk) {
			ASSERT(0);
			return 0;
		}
	}
	if (error)
		return error;

	return xlog_verify_tail(log, *head_blk, tail_blk,
				be32_to_cpu((*rhead)->h_size));
}

/*
 * We need to make sure we handle log wrapping properly, so we can't use the
 * calculated logbno directly. Make sure it wraps to the correct bno inside the
 * log.
 *
 * The log is limited to 32 bit sizes, so we use the appropriate modulus
 * operation here and cast it back to a 64 bit daddr on return.
 */
static inline xfs_daddr_t
xlog_wrap_logbno(
	struct xlog		*log,
	xfs_daddr_t		bno)
{
	int			mod;

	div_s64_rem(bno, log->l_logBBsize, &mod);
	return mod;
}
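
/*
 * Worked example with invented numbers: in a log of l_logBBsize == 1000
 * basic blocks, a computed bno of 1023 wraps to block 23 at the start of
 * the physical log, since 1023 % 1000 == 23.
 */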

/*
 * Check whether the head of the log points to an unmount record. In other
 * words, determine whether the log is clean. If so, update the in-core state
 * appropriately.
 */
static int
xlog_check_unmount_rec(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	char			*buffer,
	bool			*clean)
{
	struct xlog_op_header	*op_head;
	xfs_daddr_t		umount_data_blk;
	xfs_daddr_t		after_umount_blk;
	int			hblks;
	int			error;
	char			*offset;

	*clean = false;

	/*
	 * Look for unmount record. If we find it, then we know there was a
	 * clean unmount. Since the unmount record could sit in the last block
	 * of the physical log, we convert to a wrapped log block number
	 * before comparing to head_blk.
	 *
	 * Save the current tail lsn to use to pass to xlog_clear_stale_blocks()
	 * below. We won't want to clear the unmount record if there is one, so
	 * we pass the lsn of the unmount record rather than the block after it.
	 */
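	/*
	 * A minimal sketch of the clean-shutdown layout checked for below,
	 * assuming a single-block record header (hblks == 1):
	 *
	 *	rhead_blk | header | unmount op | <- after_umount_blk == *head_blk
	 *
	 * i.e. the head record carries exactly one log operation and the head
	 * block sits immediately after it.
	 */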
	hblks = xlog_logrec_hblks(log, rhead);
	after_umount_blk = xlog_wrap_logbno(log,
			rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));

	if (*head_blk == after_umount_blk &&
	    be32_to_cpu(rhead->h_num_logops) == 1) {
		umount_data_blk = xlog_wrap_logbno(log, rhead_blk + hblks);
		error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
		if (error)
			return error;

		op_head = (struct xlog_op_header *)offset;
		if (op_head->oh_flags & XLOG_UNMOUNT_TRANS) {
			/*
			 * Set tail and last sync so that newly written log
			 * records will point recovery to after the current
			 * unmount record.
			 */
			xlog_assign_atomic_lsn(&log->l_tail_lsn,
					log->l_curr_cycle, after_umount_blk);
			xlog_assign_atomic_lsn(&log->l_last_sync_lsn,
					log->l_curr_cycle, after_umount_blk);
			*tail_blk = after_umount_blk;

			*clean = true;
		}
	}

	return 0;
}

static void
xlog_set_state(
	struct xlog		*log,
	xfs_daddr_t		head_blk,
	struct xlog_rec_header	*rhead,
	xfs_daddr_t		rhead_blk,
	bool			bump_cycle)
{
	/*
	 * Reset log values according to the state of the log when we
	 * crashed. In the case where head_blk == 0, we bump curr_cycle
	 * by one because the next write starts a new cycle rather than
	 * continuing the cycle of the last good log record. At this
	 * point we have guaranteed that all partial log records have been
	 * accounted for. Therefore, we know that the last good log record
	 * written was complete and ended exactly on the end boundary
	 * of the physical log.
	 */
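	/*
	 * For example, if the last good record ended exactly at the end of
	 * the physical log (head_blk == 0) during cycle 5, the next record
	 * written begins at block 0 of cycle 6, so the caller passes
	 * bump_cycle == true and curr_cycle becomes 6 below.
	 */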
	log->l_prev_block = rhead_blk;
	log->l_curr_block = (int)head_blk;
	log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
	if (bump_cycle)
		log->l_curr_cycle++;
	atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
	atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
	xlog_assign_grant_head(&log->l_reserve_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
	xlog_assign_grant_head(&log->l_write_head.grant, log->l_curr_cycle,
					BBTOB(log->l_curr_block));
}

/*
 * Find the sync block number or the tail of the log.
 *
 * This will be the block number of the last record to have its
 * associated buffers synced to disk. Every log record header has
 * a sync lsn embedded in it. LSNs hold block numbers, so it is easy
 * to get a sync block number. The only concern is to figure out which
 * log record header to believe.
 *
 * The following algorithm uses the log record header with the largest
 * lsn. The entire log record does not need to be valid. We only care
 * that the header is valid.
 *
 * We could speed up search by using current head_blk buffer, but it is not
 * available.
 */
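/*
 * In outline, and assuming no errors along the way, the flow below is:
 *
 *	1. xlog_find_head() locates the head block;
 *	2. xlog_rseek_logrec_hdr() walks backwards from the head to the last
 *	   record header, whose h_tail_lsn yields the tail block;
 *	3. xlog_check_unmount_rec() decides whether the log is clean;
 *	4. if dirty, xlog_verify_head() CRC-checks the head and may trim it;
 *	5. xlog_clear_stale_blocks() stomps any stale blocks beyond the head.
 */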
STATIC int
xlog_find_tail(
	struct xlog		*log,
	xfs_daddr_t		*head_blk,
	xfs_daddr_t		*tail_blk)
{
	xlog_rec_header_t	*rhead;
	char			*offset = NULL;
	char			*buffer;
	int			error;
	xfs_daddr_t		rhead_blk;
	xfs_lsn_t		tail_lsn;
	bool			wrapped = false;
	bool			clean = false;

	/*
	 * Find previous log record
	 */
	if ((error = xlog_find_head(log, head_blk)))
		return error;
	ASSERT(*head_blk < INT_MAX);

	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	if (*head_blk == 0) {			/* special case */
		error = xlog_bread(log, 0, 1, buffer, &offset);
		if (error)
			goto done;

		if (xlog_get_cycle(offset) == 0) {
			*tail_blk = 0;
			/* leave all other log inited values alone */
			goto done;
		}
	}

	/*
	 * Search backwards through the log looking for the log record header
	 * block. This wraps all the way back around to the head so something is
	 * seriously wrong if we can't find it.
	 */
	error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
				      &rhead_blk, &rhead, &wrapped);
	if (error < 0)
		goto done;
	if (!error) {
		xfs_warn(log->l_mp, "%s: couldn't find sync record", __func__);
		error = -EFSCORRUPTED;
		goto done;
	}
	*tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));

	/*
	 * Set the log state based on the current head record.
	 */
	xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
	tail_lsn = atomic64_read(&log->l_tail_lsn);

	/*
	 * Look for an unmount record at the head of the log. This sets the log
	 * state to determine whether recovery is necessary.
	 */
	error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
				       rhead_blk, buffer, &clean);
	if (error)
		goto done;

	/*
	 * Verify the log head if the log is not clean (e.g., we have anything
	 * but an unmount record at the head). This uses CRC verification to
	 * detect and trim torn writes. If discovered, CRC failures are
	 * considered torn writes and the log head is trimmed accordingly.
	 *
	 * Note that we can only run CRC verification when the log is dirty
	 * because there's no guarantee that the log data behind an unmount
	 * record is compatible with the current architecture.
	 */
	if (!clean) {
		xfs_daddr_t	orig_head = *head_blk;

		error = xlog_verify_head(log, head_blk, tail_blk, buffer,
					 &rhead_blk, &rhead, &wrapped);
		if (error)
			goto done;

		/* update in-core state again if the head changed */
		if (*head_blk != orig_head) {
			xlog_set_state(log, *head_blk, rhead, rhead_blk,
				       wrapped);
			tail_lsn = atomic64_read(&log->l_tail_lsn);
			error = xlog_check_unmount_rec(log, head_blk, tail_blk,
						       rhead, rhead_blk, buffer,
						       &clean);
			if (error)
				goto done;
		}
	}

	/*
	 * Record whether the unmount was clean. If it was not, we need to
	 * know so we can rebuild the superblock counters from the perag
	 * headers on filesystems using non-persistent counters.
	 */
	if (clean)
		log->l_mp->m_flags |= XFS_MOUNT_WAS_CLEAN;

	/*
	 * Make sure that there are no blocks in front of the head
	 * with the same cycle number as the head. This can happen
	 * because we allow multiple outstanding log writes concurrently,
	 * and the later writes might make it out before earlier ones.
	 *
	 * We use the lsn from before modifying it so that we'll never
	 * overwrite the unmount record after a clean unmount.
	 *
	 * Do this only if we are going to recover the filesystem.
	 *
	 * NOTE: This used to say "if (!readonly)"
	 * However on Linux, we can & do recover a read-only filesystem.
	 * We only skip recovery if NORECOVERY is specified on mount,
	 * in which case we would not be here.
	 *
	 * But... if the -device- itself is readonly, just skip this.
	 * We can't recover this device anyway, so it won't matter.
	 */
	if (!xfs_readonly_buftarg(log->l_targ))
		error = xlog_clear_stale_blocks(log, tail_lsn);

done:
	kmem_free(buffer);

	if (error)
		xfs_warn(log->l_mp, "failed to locate log tail");
	return error;
}

/*
 * Is the log zeroed at all?
 *
 * The last binary search should be changed to perform an X block read
 * once X becomes small enough. You can then search linearly through
 * the X blocks. This will cut down on the number of reads we need to do.
 *
 * If the log is partially zeroed, this routine will pass back the blkno
 * of the first block with cycle number 0. It won't have a complete LR
 * preceding it.
 *
 * Return:
 *	0  => the log is completely written to
 *	1  => use *blk_no as the first block of the log
 *	<0 => error has occurred
 */
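/*
 * An illustrative example (block counts invented for the sketch): a log of
 * 8 blocks whose per-block cycle numbers read
 *
 *	1 1 1 1 0 0 0 0
 *
 * is partially zeroed; the cycle-start search plus the verification passes
 * below land *blk_no on block 4, the first block of cycle 0, and we return 1.
 */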
STATIC int
xlog_find_zeroed(
	struct xlog	*log,
	xfs_daddr_t	*blk_no)
{
	char		*buffer;
	char		*offset;
	uint		first_cycle, last_cycle;
	xfs_daddr_t	new_blk, last_blk, start_blk;
	xfs_daddr_t	num_scan_bblks;
	int		error, log_bbnum = log->l_logBBsize;

	*blk_no = 0;

	/* check totally zeroed log */
	buffer = xlog_alloc_buffer(log, 1);
	if (!buffer)
		return -ENOMEM;
	error = xlog_bread(log, 0, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	first_cycle = xlog_get_cycle(offset);
	if (first_cycle == 0) {		/* completely zeroed log */
		*blk_no = 0;
		kmem_free(buffer);
		return 1;
	}

	/* check partially zeroed log */
	error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
	if (error)
		goto out_free_buffer;

	last_cycle = xlog_get_cycle(offset);
	if (last_cycle != 0) {		/* log completely written to */
		kmem_free(buffer);
		return 0;
	}

	/* we have a partially zeroed log */
	last_blk = log_bbnum-1;
	error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
	if (error)
		goto out_free_buffer;

	/*
	 * Validate the answer. Because there is no way to guarantee that
	 * the entire log is made up of log records which are the same size,
	 * we scan over the defined maximum blocks. At this point, the maximum
	 * is not chosen to mean anything special. XXXmiken
	 */
	num_scan_bblks = XLOG_TOTAL_REC_SHIFT(log);
	ASSERT(num_scan_bblks <= INT_MAX);

	if (last_blk < num_scan_bblks)
		num_scan_bblks = last_blk;
	start_blk = last_blk - num_scan_bblks;

	/*
	 * We search for any instances of cycle number 0 that occur before
	 * our current estimate of the head. What we're trying to detect is
	 *	1 ... | 0 | 1 | 0...
	 *		       ^ binary search ends here
	 */
	if ((error = xlog_find_verify_cycle(log, start_blk,
					 (int)num_scan_bblks, 0, &new_blk)))
		goto out_free_buffer;
	if (new_blk != -1)
		last_blk = new_blk;

	/*
	 * Potentially backup over partial log record write. We don't need
	 * to search the end of the log because we know it is zero.
	 */
	error = xlog_find_verify_log_record(log, start_blk, &last_blk, 0);
	if (error == 1)
		error = -EIO;
	if (error)
		goto out_free_buffer;

	*blk_no = last_blk;
out_free_buffer:
	kmem_free(buffer);
	if (error)
		return error;
	return 1;
}

/*
 * These are simple subroutines used by xlog_clear_stale_blocks() below
 * to initialize a buffer full of empty log record headers and write
 * them into the log.
 */
STATIC void
xlog_add_record(
	struct xlog		*log,
	char			*buf,
	int			cycle,
	int			block,
	int			tail_cycle,
	int			tail_block)
{
	xlog_rec_header_t	*recp = (xlog_rec_header_t *)buf;

	memset(buf, 0, BBSIZE);
	recp->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	recp->h_cycle = cpu_to_be32(cycle);
	recp->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
	recp->h_lsn = cpu_to_be64(xlog_assign_lsn(cycle, block));
	recp->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(tail_cycle, tail_block));
	recp->h_fmt = cpu_to_be32(XLOG_FMT);
	memcpy(&recp->h_fs_uuid, &log->l_mp->m_sb.sb_uuid, sizeof(uuid_t));
}

STATIC int
xlog_write_log_records(
	struct xlog	*log,
	int		cycle,
	int		start_block,
	int		blocks,
	int		tail_cycle,
	int		tail_block)
{
	char		*offset;
	char		*buffer;
	int		balign, ealign;
	int		sectbb = log->l_sectBBsize;
	int		end_block = start_block + blocks;
	int		bufblks;
	int		error = 0;
	int		i, j = 0;

	/*
	 * Greedily allocate a buffer big enough to handle the full
	 * range of basic blocks to be written. If that fails, try
	 * a smaller size. We need to be able to write at least a
	 * log sector, or we're out of luck.
	 */
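	/*
	 * Note that ffs() yields the 1-based index of the lowest set bit, so
	 * e.g. blocks == 24 (0b11000) gives an initial bufblks of
	 * 1 << 4 == 16; any shortfall is covered by the loop below, which
	 * writes at most bufblks blocks per iteration.
	 */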
	bufblks = 1 << ffs(blocks);
	while (bufblks > log->l_logBBsize)
		bufblks >>= 1;
	while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
		bufblks >>= 1;
		if (bufblks < sectbb)
			return -ENOMEM;
	}

	/* We may need to do a read at the start to fill in part of
	 * the buffer in the starting sector not covered by the first
	 * write below.
	 */
	balign = round_down(start_block, sectbb);
	if (balign != start_block) {
		error = xlog_bread_noalign(log, start_block, 1, buffer);
		if (error)
			goto out_free_buffer;

		j = start_block - balign;
	}
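
	/*
	 * For instance, with sectbb == 8 and start_block == 21, balign is
	 * round_down(21, 8) == 16 and j == 5, so blocks 16-20 of the first
	 * sector keep the contents read back above and record stamping
	 * starts at block 21.
	 */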

	for (i = start_block; i < end_block; i += bufblks) {
		int		bcount, endcount;

		bcount = min(bufblks, end_block - start_block);
		endcount = bcount - j;

		/* We may need to do a read at the end to fill in part of
		 * the buffer in the final sector not covered by the write.
		 * If this is the same sector as the above read, skip it.
		 */
		ealign = round_down(end_block, sectbb);
		if (j == 0 && (start_block + endcount > ealign)) {
			error = xlog_bread_noalign(log, ealign, sectbb,
					buffer + BBTOB(ealign - start_block));
			if (error)
				break;
		}

		offset = buffer + xlog_align(log, start_block);
		for (; j < endcount; j++) {
			xlog_add_record(log, offset, cycle, i+j,
					tail_cycle, tail_block);
			offset += BBSIZE;
		}
		error = xlog_bwrite(log, start_block, endcount, buffer);
		if (error)
			break;
		start_block += endcount;
		j = 0;
	}

out_free_buffer:
	kmem_free(buffer);
	return error;
}

/*
 * This routine is called to blow away any incomplete log writes out
 * in front of the log head. We do this so that we won't become confused
 * if we come up, write only a little bit more, and then crash again.
 * If we leave the partial log records out there, this situation could
 * cause us to think those partial writes are valid blocks since they
 * have the current cycle number. We get rid of them by overwriting them
 * with empty log records with the old cycle number rather than the
 * current one.
 *
 * The tail lsn is passed in rather than taken from
 * the log so that we will not write over the unmount record after a
 * clean unmount in a 512 block log. Doing so would leave the log without
 * any valid log records in it until a new one was written. If we crashed
 * during that time we would not be able to recover.
 */
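/*
 * A worked example with invented numbers: in a 1000 block log with
 * head_cycle == tail_cycle, head_block == 900 and tail_block == 100, the
 * head-to-tail distance computed below is 100 + (1000 - 900) == 200 blocks,
 * wrapping around the end of the physical log.
 */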
STATIC int
xlog_clear_stale_blocks(
	struct xlog	*log,
	xfs_lsn_t	tail_lsn)
{
	int		tail_cycle, head_cycle;
	int		tail_block, head_block;
	int		tail_distance, max_distance;
	int		distance;
	int		error;

	tail_cycle = CYCLE_LSN(tail_lsn);
	tail_block = BLOCK_LSN(tail_lsn);
	head_cycle = log->l_curr_cycle;
	head_block = log->l_curr_block;

	/*
	 * Figure out the distance between the new head of the log
	 * and the tail. We want to write over any blocks beyond the
	 * head that we may have written just before the crash, but
	 * we don't want to overwrite the tail of the log.
	 */
	if (head_cycle == tail_cycle) {
		/*
		 * The tail is behind the head in the physical log,
		 * so the distance from the head to the tail is the
		 * distance from the head to the end of the log plus
		 * the distance from the beginning of the log to the
		 * tail.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block < tail_block ||
				   head_block >= log->l_logBBsize))
			return -EFSCORRUPTED;
		tail_distance = tail_block + (log->l_logBBsize - head_block);
	} else {
		/*
		 * The head is behind the tail in the physical log,
		 * so the distance from the head to the tail is just
		 * the tail block minus the head block.
		 */
		if (XFS_IS_CORRUPT(log->l_mp,
				   head_block >= tail_block ||
				   head_cycle != tail_cycle + 1))
			return -EFSCORRUPTED;
		tail_distance = tail_block - head_block;
	}

	/*
	 * If the head is right up against the tail, we can't clear
	 * anything.
	 */
	if (tail_distance <= 0) {
		ASSERT(tail_distance == 0);
		return 0;
	}

	max_distance = XLOG_TOTAL_REC_SHIFT(log);
	/*
	 * Take the smaller of the maximum amount of outstanding I/O
	 * we could have and the distance to the tail to clear out.
	 * We take the smaller so that we don't overwrite the tail and
	 * we don't waste all day writing from the head to the tail
	 * for no reason.
	 */
	max_distance = min(max_distance, tail_distance);

	if ((head_block + max_distance) <= log->l_logBBsize) {
		/*
		 * We can stomp all the blocks we need to without
		 * wrapping around the end of the log. Just do it
		 * in a single write. Use the cycle number of the
		 * current cycle minus one so that the log will look like:
		 *	n ... | n - 1 ...
		 */
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, max_distance, tail_cycle,
				tail_block);
		if (error)
			return error;
	} else {
		/*
		 * We need to wrap around the end of the physical log in
		 * order to clear all the blocks. Do it in two separate
		 * I/Os. The first write should be from the head to the
		 * end of the physical log, and it should use the current
		 * cycle number minus one just like above.
		 */
		distance = log->l_logBBsize - head_block;
		error = xlog_write_log_records(log, (head_cycle - 1),
				head_block, distance, tail_cycle,
				tail_block);

		if (error)
			return error;

		/*
		 * Now write the blocks at the start of the physical log.
		 * This writes the remainder of the blocks we want to clear.
		 * It uses the current cycle number since we're now on the
		 * same cycle as the head so that we get:
		 *	n ... n ... | n - 1 ...
		 *	^^^^^ blocks we're writing
		 */
		distance = max_distance - (log->l_logBBsize - head_block);
		error = xlog_write_log_records(log, head_cycle, 0, distance,
				tail_cycle, tail_block);
		if (error)
			return error;
	}

	return 0;
}

/*
 * Release the recovered intent item in the AIL that matches the given intent
 * type and intent id.
 */
void
xlog_recover_release_intent(
	struct xlog		*log,
	unsigned short		intent_type,
	uint64_t		intent_id)
{
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	struct xfs_ail		*ailp = log->l_ailp;

	spin_lock(&ailp->ail_lock);
	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0); lip != NULL;
	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
		if (lip->li_type != intent_type)
			continue;
		if (!lip->li_ops->iop_match(lip, intent_id))
			continue;

		spin_unlock(&ailp->ail_lock);
		lip->li_ops->iop_release(lip);
		spin_lock(&ailp->ail_lock);
		break;
	}

	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);
}

/******************************************************************************
 *
 *		Log recover routines
 *
 ******************************************************************************
 */
static const struct xlog_recover_item_ops *xlog_recover_item_ops[] = {
	&xlog_buf_item_ops,
	&xlog_inode_item_ops,
	&xlog_dquot_item_ops,
	&xlog_quotaoff_item_ops,
	&xlog_icreate_item_ops,
	&xlog_efi_item_ops,
	&xlog_efd_item_ops,
	&xlog_rui_item_ops,
	&xlog_rud_item_ops,
	&xlog_cui_item_ops,
	&xlog_cud_item_ops,
	&xlog_bui_item_ops,
	&xlog_bud_item_ops,
};

static const struct xlog_recover_item_ops *
xlog_find_item_ops(
	struct xlog_recover_item	*item)
{
	unsigned int			i;

	for (i = 0; i < ARRAY_SIZE(xlog_recover_item_ops); i++)
		if (ITEM_TYPE(item) == xlog_recover_item_ops[i]->item_type)
			return xlog_recover_item_ops[i];

	return NULL;
}

/*
 * Sort the log items in the transaction.
 *
 * The ordering constraints are defined by the inode allocation and unlink
 * behaviour. The rules are:
 *
 *	1. Every item is only logged once in a given transaction. Hence it
 *	   represents the last logged state of the item. Hence ordering is
 *	   dependent on the order in which operations need to be performed so
 *	   required initial conditions are always met.
 *
 *	2. Cancelled buffers are recorded in pass 1 in a separate table and
 *	   there's nothing to replay from them so we can simply cull them
 *	   from the transaction. However, we can't do that until after we've
 *	   replayed all the other items because they may be dependent on the
 *	   cancelled buffer and replaying the cancelled buffer can remove it
 *	   from the cancelled buffer table. Hence they have to be done last.
 *
 *	3. Inode allocation buffers must be replayed before inode items that
 *	   read the buffer and replay changes into it. For filesystems using the
 *	   ICREATE transactions, this means XFS_LI_ICREATE objects need to get
 *	   treated the same as inode allocation buffers as they create and
 *	   initialise the buffers directly.
 *
 *	4. Inode unlink buffers must be replayed after inode items are replayed.
 *	   This ensures that inodes are completely flushed to the inode buffer
 *	   in a "free" state before we remove the unlinked inode list pointer.
 *
 * Hence the ordering needs to be inode allocation buffers first, inode items
 * second, inode unlink buffers third and cancelled buffers last.
 *
 * But there's a problem with that - we can't tell an inode allocation buffer
 * apart from a regular buffer, so we can't separate them. We can, however,
 * tell an inode unlink buffer from the others, and so we can separate them out
 * from all the other buffers and move them to last.
 *
 * Hence, 4 lists, in order from head to tail:
 *	- buffer_list for all buffers except cancelled/inode unlink buffers
 *	- item_list for all non-buffer items
 *	- inode_buffer_list for inode unlink buffers
 *	- cancel_list for the cancelled buffers
 *
 * Note that we add objects to the tail of the lists so that first-to-last
 * ordering is preserved within the lists. Adding objects to the head of the
 * list means when we traverse from the head we walk them in last-to-first
 * order. For cancelled buffers and inode unlink buffers this doesn't matter,
 * but for all other items there may be specific ordering that we need to
 * preserve.
 */
STATIC int
xlog_recover_reorder_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass)
{
	struct xlog_recover_item *item, *n;
	int			error = 0;
	LIST_HEAD(sort_list);
	LIST_HEAD(cancel_list);
	LIST_HEAD(buffer_list);
	LIST_HEAD(inode_buffer_list);
	LIST_HEAD(item_list);

	list_splice_init(&trans->r_itemq, &sort_list);
	list_for_each_entry_safe(item, n, &sort_list, ri_list) {
		enum xlog_recover_reorder	fate = XLOG_REORDER_ITEM_LIST;

		item->ri_ops = xlog_find_item_ops(item);
		if (!item->ri_ops) {
			xfs_warn(log->l_mp,
				"%s: unrecognized type of log operation (%d)",
				__func__, ITEM_TYPE(item));
			ASSERT(0);
			/*
			 * return the remaining items back to the transaction
			 * item list so they can be freed in caller.
			 */
			if (!list_empty(&sort_list))
				list_splice_init(&sort_list, &trans->r_itemq);
			error = -EFSCORRUPTED;
			break;
		}

		if (item->ri_ops->reorder)
			fate = item->ri_ops->reorder(item);

		switch (fate) {
		case XLOG_REORDER_BUFFER_LIST:
			list_move_tail(&item->ri_list, &buffer_list);
			break;
		case XLOG_REORDER_CANCEL_LIST:
			trace_xfs_log_recover_item_reorder_head(log,
					trans, item, pass);
			list_move(&item->ri_list, &cancel_list);
			break;
		case XLOG_REORDER_INODE_BUFFER_LIST:
			list_move(&item->ri_list, &inode_buffer_list);
			break;
		case XLOG_REORDER_ITEM_LIST:
			trace_xfs_log_recover_item_reorder_tail(log,
							trans, item, pass);
			list_move_tail(&item->ri_list, &item_list);
			break;
		}
	}

	ASSERT(list_empty(&sort_list));
	if (!list_empty(&buffer_list))
		list_splice(&buffer_list, &trans->r_itemq);
	if (!list_empty(&item_list))
		list_splice_tail(&item_list, &trans->r_itemq);
	if (!list_empty(&inode_buffer_list))
		list_splice_tail(&inode_buffer_list, &trans->r_itemq);
	if (!list_empty(&cancel_list))
		list_splice_tail(&cancel_list, &trans->r_itemq);
	return error;
}

void
xlog_buf_readahead(
	struct xlog		*log,
	xfs_daddr_t		blkno,
	uint			len,
	const struct xfs_buf_ops *ops)
{
	if (!xlog_is_buffer_cancelled(log, blkno, len))
		xfs_buf_readahead(log->l_mp->m_ddev_targp, blkno, len, ops);
}

STATIC int
xlog_recover_items_pass2(
	struct xlog		*log,
	struct xlog_recover	*trans,
	struct list_head	*buffer_list,
	struct list_head	*item_list)
{
	struct xlog_recover_item *item;
	int			error = 0;

	list_for_each_entry(item, item_list, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item,
				XLOG_RECOVER_PASS2);

		if (item->ri_ops->commit_pass2)
			error = item->ri_ops->commit_pass2(log, buffer_list,
					item, trans->r_lsn);
		if (error)
			return error;
	}

	return error;
}

/*
 * Perform the transaction.
 *
 * If the transaction modifies a buffer or inode, do it now. Otherwise,
 * EFIs and EFDs get queued up by adding entries into the AIL for them.
 */
STATIC int
xlog_recover_commit_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	int			items_queued = 0;
	struct xlog_recover_item *item;
	struct xlog_recover_item *next;
	LIST_HEAD		(ra_list);
	LIST_HEAD		(done_list);

	#define XLOG_RECOVER_COMMIT_QUEUE_MAX 100

	hlist_del_init(&trans->r_list);

	error = xlog_recover_reorder_trans(log, trans, pass);
	if (error)
		return error;

	list_for_each_entry_safe(item, next, &trans->r_itemq, ri_list) {
		trace_xfs_log_recover_item_recover(log, trans, item, pass);

		switch (pass) {
		case XLOG_RECOVER_PASS1:
			if (item->ri_ops->commit_pass1)
				error = item->ri_ops->commit_pass1(log, item);
			break;
		case XLOG_RECOVER_PASS2:
			if (item->ri_ops->ra_pass2)
				item->ri_ops->ra_pass2(log, item);
			list_move_tail(&item->ri_list, &ra_list);
			items_queued++;
			if (items_queued >= XLOG_RECOVER_COMMIT_QUEUE_MAX) {
				error = xlog_recover_items_pass2(log, trans,
						buffer_list, &ra_list);
				list_splice_tail_init(&ra_list, &done_list);
				items_queued = 0;
			}

			break;
		default:
			ASSERT(0);
		}

		if (error)
			goto out;
	}

out:
	if (!list_empty(&ra_list)) {
		if (!error)
			error = xlog_recover_items_pass2(log, trans,
					buffer_list, &ra_list);
		list_splice_tail_init(&ra_list, &done_list);
	}

	if (!list_empty(&done_list))
		list_splice_init(&done_list, &trans->r_itemq);

	return error;
}

STATIC void
xlog_recover_add_item(
	struct list_head	*head)
{
	struct xlog_recover_item *item;

	item = kmem_zalloc(sizeof(struct xlog_recover_item), 0);
	INIT_LIST_HEAD(&item->ri_list);
	list_add_tail(&item->ri_list, head);
}

STATIC int
xlog_recover_add_to_cont_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xlog_recover_item *item;
	char			*ptr, *old_ptr;
	int			old_len;

	/*
	 * If the transaction is empty, the header was split across this and the
	 * previous record. Copy the rest of the header.
	 */
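	/*
	 * For example, if the header is H bytes and the previous record only
	 * carried H - len of them, the copy below lands the remaining len
	 * bytes at offset H - len inside r_theader, completing it.
	 */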
	if (list_empty(&trans->r_itemq)) {
		ASSERT(len <= sizeof(struct xfs_trans_header));
		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			return -EFSCORRUPTED;
		}

		xlog_recover_add_item(&trans->r_itemq);
		ptr = (char *)&trans->r_theader +
				sizeof(struct xfs_trans_header) - len;
		memcpy(ptr, dp, len);
		return 0;
	}

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);

	old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
	old_len = item->ri_buf[item->ri_cnt-1].i_len;

	ptr = kvrealloc(old_ptr, old_len, len + old_len, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	memcpy(&ptr[old_len], dp, len);
	item->ri_buf[item->ri_cnt-1].i_len += len;
	item->ri_buf[item->ri_cnt-1].i_addr = ptr;
	trace_xfs_log_recover_item_add_cont(log, trans, item, 0);
	return 0;
}

/*
 * The next region to add is the start of a new region. It could be
 * a whole region or it could be the first part of a new region. Because
 * of this, the assumption here is that the type and size fields of all
 * format structures fit into the first 32 bits of the structure.
 *
 * This works because all regions must be 32 bit aligned. Therefore, we
 * either have both fields or we have neither field. In the case we have
 * neither field, the data part of the region is zero length. We only have
 * a log_op_header and can throw away the header since a new one will appear
 * later. If we have at least 4 bytes, then we can determine how many regions
 * will appear in the current log item.
 */
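/*
 * Concretely, the code below reads the first region of a new item as a
 * struct xfs_inode_log_format purely to reach those leading fields: any
 * item's format structure would do, and ilf_size tells us how many regions
 * (ri_total) the item will eventually carry.
 */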
STATIC int
xlog_recover_add_to_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	int			len)
{
	struct xfs_inode_log_format *in_f;	/* any will do */
	struct xlog_recover_item *item;
	char			*ptr;

	if (!len)
		return 0;
	if (list_empty(&trans->r_itemq)) {
		/* we need to catch log corruptions here */
		if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
			xfs_warn(log->l_mp, "%s: bad header magic number",
				__func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		if (len > sizeof(struct xfs_trans_header)) {
			xfs_warn(log->l_mp, "%s: bad header length", __func__);
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		/*
		 * The transaction header can be arbitrarily split across op
		 * records. If we don't have the whole thing here, copy what we
		 * do have and handle the rest in the next record.
		 */
		if (len == sizeof(struct xfs_trans_header))
			xlog_recover_add_item(&trans->r_itemq);
		memcpy(&trans->r_theader, dp, len);
		return 0;
	}

	ptr = kmem_alloc(len, 0);
	memcpy(ptr, dp, len);
	in_f = (struct xfs_inode_log_format *)ptr;

	/* take the tail entry */
	item = list_entry(trans->r_itemq.prev, struct xlog_recover_item,
			  ri_list);
	if (item->ri_total != 0 &&
	    item->ri_total == item->ri_cnt) {
		/* tail item is in use, get a new one */
		xlog_recover_add_item(&trans->r_itemq);
		item = list_entry(trans->r_itemq.prev,
					struct xlog_recover_item, ri_list);
	}

	if (item->ri_total == 0) {		/* first region to be added */
		if (in_f->ilf_size == 0 ||
		    in_f->ilf_size > XLOG_MAX_REGIONS_IN_ITEM) {
			xfs_warn(log->l_mp,
		"bad number of regions (%d) in inode log format",
				  in_f->ilf_size);
			ASSERT(0);
			kmem_free(ptr);
			return -EFSCORRUPTED;
		}

		item->ri_total = in_f->ilf_size;
		item->ri_buf =
			kmem_zalloc(item->ri_total * sizeof(xfs_log_iovec_t),
				    0);
	}

	if (item->ri_total <= item->ri_cnt) {
		xfs_warn(log->l_mp,
			"log item region count (%d) overflowed size (%d)",
			item->ri_cnt, item->ri_total);
		ASSERT(0);
		kmem_free(ptr);
		return -EFSCORRUPTED;
	}

	/* Description region is ri_buf[0] */
	item->ri_buf[item->ri_cnt].i_addr = ptr;
	item->ri_buf[item->ri_cnt].i_len  = len;
	item->ri_cnt++;
	trace_xfs_log_recover_item_add(log, trans, item, 0);
	return 0;
}

/*
 * Free up any resources allocated by the transaction
 *
 * Remember that EFIs, EFDs, and IUNLINKs are handled later.
 */
STATIC void
xlog_recover_free_trans(
	struct xlog_recover	*trans)
{
	struct xlog_recover_item *item, *n;
	int			i;

	hlist_del_init(&trans->r_list);

	list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
		/* Free the regions in the item. */
		list_del(&item->ri_list);
		for (i = 0; i < item->ri_cnt; i++)
			kmem_free(item->ri_buf[i].i_addr);
		/* Free the item itself */
		kmem_free(item->ri_buf);
		kmem_free(item);
	}
	/* Free the transaction recover structure */
	kmem_free(trans);
}

/*
 * On error or completion, trans is freed.
 */
STATIC int
xlog_recovery_process_trans(
	struct xlog		*log,
	struct xlog_recover	*trans,
	char			*dp,
	unsigned int		len,
	unsigned int		flags,
	int			pass,
	struct list_head	*buffer_list)
{
	int			error = 0;
	bool			freeit = false;

	/* mask off ophdr transaction container flags */
	flags &= ~XLOG_END_TRANS;
	if (flags & XLOG_WAS_CONT_TRANS)
		flags &= ~XLOG_CONTINUE_TRANS;

	/*
	 * Callees must not free the trans structure. We'll decide if we need to
	 * free it or not based on the operation being done and its result.
	 */
	switch (flags) {
	/* expected flag values */
	case 0:
	case XLOG_CONTINUE_TRANS:
		error = xlog_recover_add_to_trans(log, trans, dp, len);
		break;
	case XLOG_WAS_CONT_TRANS:
		error = xlog_recover_add_to_cont_trans(log, trans, dp, len);
		break;
	case XLOG_COMMIT_TRANS:
		error = xlog_recover_commit_trans(log, trans, pass,
						  buffer_list);
		/* success or fail, we are now done with this transaction. */
		freeit = true;
		break;

	/* unexpected flag values */
	case XLOG_UNMOUNT_TRANS:
		/* just skip trans */
		xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
		freeit = true;
		break;
	case XLOG_START_TRANS:
	default:
		xfs_warn(log->l_mp, "%s: bad flag 0x%x", __func__, flags);
		ASSERT(0);
		error = -EFSCORRUPTED;
		break;
	}
	if (error || freeit)
		xlog_recover_free_trans(trans);
	return error;
}
2260*4882a593Smuzhiyun
2261*4882a593Smuzhiyun /*
2262*4882a593Smuzhiyun * Lookup the transaction recovery structure associated with the ID in the
2263*4882a593Smuzhiyun * current ophdr. If the transaction doesn't exist and the start flag is set in
2264*4882a593Smuzhiyun * the ophdr, then allocate a new transaction for future ID matches to find.
2265*4882a593Smuzhiyun * Either way, return what we found during the lookup - an existing transaction
2266*4882a593Smuzhiyun * or nothing.
2267*4882a593Smuzhiyun */
2268*4882a593Smuzhiyun STATIC struct xlog_recover *
2269*4882a593Smuzhiyun xlog_recover_ophdr_to_trans(
2270*4882a593Smuzhiyun struct hlist_head rhash[],
2271*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2272*4882a593Smuzhiyun struct xlog_op_header *ohead)
2273*4882a593Smuzhiyun {
2274*4882a593Smuzhiyun struct xlog_recover *trans;
2275*4882a593Smuzhiyun xlog_tid_t tid;
2276*4882a593Smuzhiyun struct hlist_head *rhp;
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun tid = be32_to_cpu(ohead->oh_tid);
2279*4882a593Smuzhiyun rhp = &rhash[XLOG_RHASH(tid)];
2280*4882a593Smuzhiyun hlist_for_each_entry(trans, rhp, r_list) {
2281*4882a593Smuzhiyun if (trans->r_log_tid == tid)
2282*4882a593Smuzhiyun return trans;
2283*4882a593Smuzhiyun }
2284*4882a593Smuzhiyun
2285*4882a593Smuzhiyun /*
2286*4882a593Smuzhiyun * skip over non-start transaction headers - we could be
2287*4882a593Smuzhiyun * processing slack space before the next transaction starts
2288*4882a593Smuzhiyun */
2289*4882a593Smuzhiyun if (!(ohead->oh_flags & XLOG_START_TRANS))
2290*4882a593Smuzhiyun return NULL;
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun ASSERT(be32_to_cpu(ohead->oh_len) == 0);
2293*4882a593Smuzhiyun
2294*4882a593Smuzhiyun /*
2295*4882a593Smuzhiyun * This is a new transaction so allocate a new recovery container to
2296*4882a593Smuzhiyun * hold the recovery ops that will follow.
2297*4882a593Smuzhiyun */
2298*4882a593Smuzhiyun trans = kmem_zalloc(sizeof(struct xlog_recover), 0);
2299*4882a593Smuzhiyun trans->r_log_tid = tid;
2300*4882a593Smuzhiyun trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2301*4882a593Smuzhiyun INIT_LIST_HEAD(&trans->r_itemq);
2302*4882a593Smuzhiyun INIT_HLIST_NODE(&trans->r_list);
2303*4882a593Smuzhiyun hlist_add_head(&trans->r_list, rhp);
2304*4882a593Smuzhiyun
2305*4882a593Smuzhiyun /*
2306*4882a593Smuzhiyun * Nothing more to do for this ophdr. Items to be added to this new
2307*4882a593Smuzhiyun * transaction will be in subsequent ophdr containers.
2308*4882a593Smuzhiyun */
2309*4882a593Smuzhiyun return NULL;
2310*4882a593Smuzhiyun }
2311*4882a593Smuzhiyun
2312*4882a593Smuzhiyun STATIC int
2313*4882a593Smuzhiyun xlog_recover_process_ophdr(
2314*4882a593Smuzhiyun struct xlog *log,
2315*4882a593Smuzhiyun struct hlist_head rhash[],
2316*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2317*4882a593Smuzhiyun struct xlog_op_header *ohead,
2318*4882a593Smuzhiyun char *dp,
2319*4882a593Smuzhiyun char *end,
2320*4882a593Smuzhiyun int pass,
2321*4882a593Smuzhiyun struct list_head *buffer_list)
2322*4882a593Smuzhiyun {
2323*4882a593Smuzhiyun struct xlog_recover *trans;
2324*4882a593Smuzhiyun unsigned int len;
2325*4882a593Smuzhiyun int error;
2326*4882a593Smuzhiyun
2327*4882a593Smuzhiyun /* Do we understand who wrote this op? */
2328*4882a593Smuzhiyun if (ohead->oh_clientid != XFS_TRANSACTION &&
2329*4882a593Smuzhiyun ohead->oh_clientid != XFS_LOG) {
2330*4882a593Smuzhiyun xfs_warn(log->l_mp, "%s: bad clientid 0x%x",
2331*4882a593Smuzhiyun __func__, ohead->oh_clientid);
2332*4882a593Smuzhiyun ASSERT(0);
2333*4882a593Smuzhiyun return -EFSCORRUPTED;
2334*4882a593Smuzhiyun }
2335*4882a593Smuzhiyun
2336*4882a593Smuzhiyun /*
2337*4882a593Smuzhiyun * Check the ophdr contains all the data it is supposed to contain.
2338*4882a593Smuzhiyun */
2339*4882a593Smuzhiyun len = be32_to_cpu(ohead->oh_len);
2340*4882a593Smuzhiyun if (dp + len > end) {
2341*4882a593Smuzhiyun xfs_warn(log->l_mp, "%s: bad length 0x%x", __func__, len);
2342*4882a593Smuzhiyun WARN_ON(1);
2343*4882a593Smuzhiyun return -EFSCORRUPTED;
2344*4882a593Smuzhiyun }
2345*4882a593Smuzhiyun
2346*4882a593Smuzhiyun trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2347*4882a593Smuzhiyun if (!trans) {
2348*4882a593Smuzhiyun /* nothing to do, so skip over this ophdr */
2349*4882a593Smuzhiyun return 0;
2350*4882a593Smuzhiyun }
2351*4882a593Smuzhiyun
2352*4882a593Smuzhiyun /*
2353*4882a593Smuzhiyun * The recovered buffer queue is drained only once we know that all
2354*4882a593Smuzhiyun * recovery items for the current LSN have been processed. This is
2355*4882a593Smuzhiyun * required because:
2356*4882a593Smuzhiyun *
2357*4882a593Smuzhiyun * - Buffer write submission updates the metadata LSN of the buffer.
2358*4882a593Smuzhiyun * - Log recovery skips items with a metadata LSN >= the current LSN of
2359*4882a593Smuzhiyun * the recovery item.
2360*4882a593Smuzhiyun * - Separate recovery items against the same metadata buffer can share
2361*4882a593Smuzhiyun * a current LSN. I.e., consider that the LSN of a recovery item is
2362*4882a593Smuzhiyun * defined as the starting LSN of the first record in which its
2363*4882a593Smuzhiyun * transaction appears, that a record can hold multiple transactions,
2364*4882a593Smuzhiyun * and/or that a transaction can span multiple records.
2365*4882a593Smuzhiyun *
2366*4882a593Smuzhiyun * In other words, we are allowed to submit a buffer from log recovery
2367*4882a593Smuzhiyun * once per current LSN. Otherwise, we may incorrectly skip recovery
2368*4882a593Smuzhiyun * items and cause corruption.
2369*4882a593Smuzhiyun *
2370*4882a593Smuzhiyun * We don't know up front whether buffers are updated multiple times per
2371*4882a593Smuzhiyun * LSN. Therefore, track the current LSN of each commit log record as it
2372*4882a593Smuzhiyun * is processed and drain the queue when it changes. Use commit records
2373*4882a593Smuzhiyun * because they are ordered correctly by the logging code.
2374*4882a593Smuzhiyun */
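/*
 * Concretely: if transactions A and B both start in the same record they
 * share the same r_lsn, so a buffer modified by both must not be submitted
 * until B's commit record has been processed. Draining only when the
 * commit record's LSN changes preserves that ordering.
 */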
2375*4882a593Smuzhiyun if (log->l_recovery_lsn != trans->r_lsn &&
2376*4882a593Smuzhiyun ohead->oh_flags & XLOG_COMMIT_TRANS) {
2377*4882a593Smuzhiyun error = xfs_buf_delwri_submit(buffer_list);
2378*4882a593Smuzhiyun if (error)
2379*4882a593Smuzhiyun return error;
2380*4882a593Smuzhiyun log->l_recovery_lsn = trans->r_lsn;
2381*4882a593Smuzhiyun }
2382*4882a593Smuzhiyun
2383*4882a593Smuzhiyun return xlog_recovery_process_trans(log, trans, dp, len,
2384*4882a593Smuzhiyun ohead->oh_flags, pass, buffer_list);
2385*4882a593Smuzhiyun }
2386*4882a593Smuzhiyun
2387*4882a593Smuzhiyun /*
2388*4882a593Smuzhiyun * There are two valid states of the r_state field. 0 indicates that the
2389*4882a593Smuzhiyun * transaction structure is in a normal state. We have either seen the
2390*4882a593Smuzhiyun * start of the transaction or the last operation we added was not a partial
2391*4882a593Smuzhiyun * operation. If the last operation we added to the transaction was a
2392*4882a593Smuzhiyun * partial operation, we need to mark r_state with XLOG_WAS_CONT_TRANS.
2393*4882a593Smuzhiyun *
2394*4882a593Smuzhiyun * NOTE: skip LRs with 0 data length.
2395*4882a593Smuzhiyun */
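/*
 * A single transaction's op stream within the log thus typically looks
 * like:
 *
 *	START | op | op | ... | COMMIT
 *
 * where an op whose payload was split across records is flagged with
 * XLOG_CONTINUE_TRANS in the first record and XLOG_WAS_CONT_TRANS in the
 * next.
 */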
2396*4882a593Smuzhiyun STATIC int
2397*4882a593Smuzhiyun xlog_recover_process_data(
2398*4882a593Smuzhiyun struct xlog *log,
2399*4882a593Smuzhiyun struct hlist_head rhash[],
2400*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2401*4882a593Smuzhiyun char *dp,
2402*4882a593Smuzhiyun int pass,
2403*4882a593Smuzhiyun struct list_head *buffer_list)
2404*4882a593Smuzhiyun {
2405*4882a593Smuzhiyun struct xlog_op_header *ohead;
2406*4882a593Smuzhiyun char *end;
2407*4882a593Smuzhiyun int num_logops;
2408*4882a593Smuzhiyun int error;
2409*4882a593Smuzhiyun
2410*4882a593Smuzhiyun end = dp + be32_to_cpu(rhead->h_len);
2411*4882a593Smuzhiyun num_logops = be32_to_cpu(rhead->h_num_logops);
2412*4882a593Smuzhiyun
2413*4882a593Smuzhiyun /* check the log format matches our own - else we can't recover */
2414*4882a593Smuzhiyun if (xlog_header_check_recover(log->l_mp, rhead))
2415*4882a593Smuzhiyun return -EIO;
2416*4882a593Smuzhiyun
2417*4882a593Smuzhiyun trace_xfs_log_recover_record(log, rhead, pass);
2418*4882a593Smuzhiyun while ((dp < end) && num_logops) {
2419*4882a593Smuzhiyun
2420*4882a593Smuzhiyun ohead = (struct xlog_op_header *)dp;
2421*4882a593Smuzhiyun dp += sizeof(*ohead);
2422*4882a593Smuzhiyun ASSERT(dp <= end);
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun /* errors will abort recovery */
2425*4882a593Smuzhiyun error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
2426*4882a593Smuzhiyun dp, end, pass, buffer_list);
2427*4882a593Smuzhiyun if (error)
2428*4882a593Smuzhiyun return error;
2429*4882a593Smuzhiyun
2430*4882a593Smuzhiyun dp += be32_to_cpu(ohead->oh_len);
2431*4882a593Smuzhiyun num_logops--;
2432*4882a593Smuzhiyun }
2433*4882a593Smuzhiyun return 0;
2434*4882a593Smuzhiyun }
2435*4882a593Smuzhiyun
2436*4882a593Smuzhiyun /* Take all the collected deferred ops and finish them in order. */
2437*4882a593Smuzhiyun static int
2438*4882a593Smuzhiyun xlog_finish_defer_ops(
2439*4882a593Smuzhiyun struct xfs_mount *mp,
2440*4882a593Smuzhiyun struct list_head *capture_list)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun struct xfs_defer_capture *dfc, *next;
2443*4882a593Smuzhiyun struct xfs_trans *tp;
2444*4882a593Smuzhiyun struct xfs_inode *ip;
2445*4882a593Smuzhiyun int error = 0;
2446*4882a593Smuzhiyun
2447*4882a593Smuzhiyun list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2448*4882a593Smuzhiyun struct xfs_trans_res resv;
2449*4882a593Smuzhiyun
2450*4882a593Smuzhiyun /*
2451*4882a593Smuzhiyun * Create a new transaction reservation from the captured
2452*4882a593Smuzhiyun * information. Set logcount to 1 to force the new transaction
2453*4882a593Smuzhiyun * to regrant every roll so that we can make forward progress
2454*4882a593Smuzhiyun * in recovery no matter how full the log might be.
2455*4882a593Smuzhiyun */
2456*4882a593Smuzhiyun resv.tr_logres = dfc->dfc_logres;
2457*4882a593Smuzhiyun resv.tr_logcount = 1;
2458*4882a593Smuzhiyun resv.tr_logflags = XFS_TRANS_PERM_LOG_RES;
2459*4882a593Smuzhiyun
2460*4882a593Smuzhiyun error = xfs_trans_alloc(mp, &resv, dfc->dfc_blkres,
2461*4882a593Smuzhiyun dfc->dfc_rtxres, XFS_TRANS_RESERVE, &tp);
2462*4882a593Smuzhiyun if (error) {
2463*4882a593Smuzhiyun xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
2464*4882a593Smuzhiyun return error;
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun /*
2468*4882a593Smuzhiyun * Transfer to this new transaction all the dfops we captured
2469*4882a593Smuzhiyun * from recovering a single intent item.
2470*4882a593Smuzhiyun */
2471*4882a593Smuzhiyun list_del_init(&dfc->dfc_list);
2472*4882a593Smuzhiyun xfs_defer_ops_continue(dfc, tp, &ip);
2473*4882a593Smuzhiyun
2474*4882a593Smuzhiyun error = xfs_trans_commit(tp);
2475*4882a593Smuzhiyun if (ip) {
2476*4882a593Smuzhiyun xfs_iunlock(ip, XFS_ILOCK_EXCL);
2477*4882a593Smuzhiyun xfs_irele(ip);
2478*4882a593Smuzhiyun }
2479*4882a593Smuzhiyun if (error)
2480*4882a593Smuzhiyun return error;
2481*4882a593Smuzhiyun }
2482*4882a593Smuzhiyun
2483*4882a593Smuzhiyun ASSERT(list_empty(capture_list));
2484*4882a593Smuzhiyun return 0;
2485*4882a593Smuzhiyun }
2486*4882a593Smuzhiyun
2487*4882a593Smuzhiyun /* Release all the captured defer ops and capture structures in this list. */
2488*4882a593Smuzhiyun static void
2489*4882a593Smuzhiyun xlog_abort_defer_ops(
2490*4882a593Smuzhiyun struct xfs_mount *mp,
2491*4882a593Smuzhiyun struct list_head *capture_list)
2492*4882a593Smuzhiyun {
2493*4882a593Smuzhiyun struct xfs_defer_capture *dfc;
2494*4882a593Smuzhiyun struct xfs_defer_capture *next;
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun list_for_each_entry_safe(dfc, next, capture_list, dfc_list) {
2497*4882a593Smuzhiyun list_del_init(&dfc->dfc_list);
2498*4882a593Smuzhiyun xfs_defer_ops_release(mp, dfc);
2499*4882a593Smuzhiyun }
2500*4882a593Smuzhiyun }
2501*4882a593Smuzhiyun /*
2502*4882a593Smuzhiyun * When this is called, all of the log intent items which did not have
2503*4882a593Smuzhiyun * corresponding log done items should be in the AIL. What we do now
2504*4882a593Smuzhiyun * is update the data structures associated with each one.
2505*4882a593Smuzhiyun *
2506*4882a593Smuzhiyun * Since we process the log intent items in normal transactions, they
2507*4882a593Smuzhiyun * will be removed at some point after the commit. This prevents us
2508*4882a593Smuzhiyun * from just walking down the list processing each one. We'll use a
2509*4882a593Smuzhiyun * flag in the intent item to skip those that we've already processed
2510*4882a593Smuzhiyun * and use the AIL iteration mechanism's generation count to try to
2511*4882a593Smuzhiyun * speed this up at least a bit.
2512*4882a593Smuzhiyun *
2513*4882a593Smuzhiyun * When we start, we know that the intents are the only things in the
2514*4882a593Smuzhiyun * AIL. As we process them, however, other items are added to the
2515*4882a593Smuzhiyun * AIL.
2516*4882a593Smuzhiyun */
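/*
 * The intents handled here are the usual intent/done pairs - e.g. EFI/EFD
 * for extent frees, and the rmap, refcount and bmap equivalents (RUI/RUD,
 * CUI/CUD, BUI/BUD). Each item's ->iop_recover callback replays the
 * remaining work for one intent and may capture further deferred ops.
 */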
2517*4882a593Smuzhiyun STATIC int
2518*4882a593Smuzhiyun xlog_recover_process_intents(
2519*4882a593Smuzhiyun struct xlog *log)
2520*4882a593Smuzhiyun {
2521*4882a593Smuzhiyun LIST_HEAD(capture_list);
2522*4882a593Smuzhiyun struct xfs_ail_cursor cur;
2523*4882a593Smuzhiyun struct xfs_log_item *lip;
2524*4882a593Smuzhiyun struct xfs_ail *ailp;
2525*4882a593Smuzhiyun int error = 0;
2526*4882a593Smuzhiyun #if defined(DEBUG) || defined(XFS_WARN)
2527*4882a593Smuzhiyun xfs_lsn_t last_lsn;
2528*4882a593Smuzhiyun #endif
2529*4882a593Smuzhiyun
2530*4882a593Smuzhiyun ailp = log->l_ailp;
2531*4882a593Smuzhiyun spin_lock(&ailp->ail_lock);
2532*4882a593Smuzhiyun #if defined(DEBUG) || defined(XFS_WARN)
2533*4882a593Smuzhiyun last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
2534*4882a593Smuzhiyun #endif
2535*4882a593Smuzhiyun for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2536*4882a593Smuzhiyun lip != NULL;
2537*4882a593Smuzhiyun lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
2538*4882a593Smuzhiyun /*
2539*4882a593Smuzhiyun * We're done when we see something other than an intent.
2540*4882a593Smuzhiyun * There should be no intents left in the AIL now.
2541*4882a593Smuzhiyun */
2542*4882a593Smuzhiyun if (!xlog_item_is_intent(lip)) {
2543*4882a593Smuzhiyun #ifdef DEBUG
2544*4882a593Smuzhiyun for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2545*4882a593Smuzhiyun ASSERT(!xlog_item_is_intent(lip));
2546*4882a593Smuzhiyun #endif
2547*4882a593Smuzhiyun break;
2548*4882a593Smuzhiyun }
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun /*
2551*4882a593Smuzhiyun * We should never see a redo item with an LSN higher than
2552*4882a593Smuzhiyun * the last transaction we found in the log at the start
2553*4882a593Smuzhiyun * of recovery.
2554*4882a593Smuzhiyun */
2555*4882a593Smuzhiyun ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
2556*4882a593Smuzhiyun
2557*4882a593Smuzhiyun /*
2558*4882a593Smuzhiyun * NOTE: If your intent processing routine can create more
2559*4882a593Smuzhiyun * deferred ops, you /must/ attach them to the capture list in
2560*4882a593Smuzhiyun * the recover routine or else those subsequent intents will be
2561*4882a593Smuzhiyun * replayed in the wrong order!
2562*4882a593Smuzhiyun */
2563*4882a593Smuzhiyun spin_unlock(&ailp->ail_lock);
2564*4882a593Smuzhiyun error = lip->li_ops->iop_recover(lip, &capture_list);
2565*4882a593Smuzhiyun spin_lock(&ailp->ail_lock);
2566*4882a593Smuzhiyun if (error)
2567*4882a593Smuzhiyun break;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun
2570*4882a593Smuzhiyun xfs_trans_ail_cursor_done(&cur);
2571*4882a593Smuzhiyun spin_unlock(&ailp->ail_lock);
2572*4882a593Smuzhiyun if (error)
2573*4882a593Smuzhiyun goto err;
2574*4882a593Smuzhiyun
2575*4882a593Smuzhiyun error = xlog_finish_defer_ops(log->l_mp, &capture_list);
2576*4882a593Smuzhiyun if (error)
2577*4882a593Smuzhiyun goto err;
2578*4882a593Smuzhiyun
2579*4882a593Smuzhiyun return 0;
2580*4882a593Smuzhiyun err:
2581*4882a593Smuzhiyun xlog_abort_defer_ops(log->l_mp, &capture_list);
2582*4882a593Smuzhiyun return error;
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun
2585*4882a593Smuzhiyun /*
2586*4882a593Smuzhiyun * A cancel occurs when the mount has failed and we're bailing out.
2587*4882a593Smuzhiyun * Release all pending log intent items so they don't pin the AIL.
2588*4882a593Smuzhiyun */
2589*4882a593Smuzhiyun STATIC void
2590*4882a593Smuzhiyun xlog_recover_cancel_intents(
2591*4882a593Smuzhiyun struct xlog *log)
2592*4882a593Smuzhiyun {
2593*4882a593Smuzhiyun struct xfs_log_item *lip;
2594*4882a593Smuzhiyun struct xfs_ail_cursor cur;
2595*4882a593Smuzhiyun struct xfs_ail *ailp;
2596*4882a593Smuzhiyun
2597*4882a593Smuzhiyun ailp = log->l_ailp;
2598*4882a593Smuzhiyun spin_lock(&ailp->ail_lock);
2599*4882a593Smuzhiyun lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
2600*4882a593Smuzhiyun while (lip != NULL) {
2601*4882a593Smuzhiyun /*
2602*4882a593Smuzhiyun * We're done when we see something other than an intent.
2603*4882a593Smuzhiyun * There should be no intents left in the AIL now.
2604*4882a593Smuzhiyun */
2605*4882a593Smuzhiyun if (!xlog_item_is_intent(lip)) {
2606*4882a593Smuzhiyun #ifdef DEBUG
2607*4882a593Smuzhiyun for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
2608*4882a593Smuzhiyun ASSERT(!xlog_item_is_intent(lip));
2609*4882a593Smuzhiyun #endif
2610*4882a593Smuzhiyun break;
2611*4882a593Smuzhiyun }
2612*4882a593Smuzhiyun
2613*4882a593Smuzhiyun spin_unlock(&ailp->ail_lock);
2614*4882a593Smuzhiyun lip->li_ops->iop_release(lip);
2615*4882a593Smuzhiyun spin_lock(&ailp->ail_lock);
2616*4882a593Smuzhiyun lip = xfs_trans_ail_cursor_next(ailp, &cur);
2617*4882a593Smuzhiyun }
2618*4882a593Smuzhiyun
2619*4882a593Smuzhiyun xfs_trans_ail_cursor_done(&cur);
2620*4882a593Smuzhiyun spin_unlock(&ailp->ail_lock);
2621*4882a593Smuzhiyun }
2622*4882a593Smuzhiyun
2623*4882a593Smuzhiyun /*
2624*4882a593Smuzhiyun * This routine performs a transaction to null out a bad inode pointer
2625*4882a593Smuzhiyun * in an agi unlinked inode hash bucket.
2626*4882a593Smuzhiyun */
2627*4882a593Smuzhiyun STATIC void
2628*4882a593Smuzhiyun xlog_recover_clear_agi_bucket(
2629*4882a593Smuzhiyun xfs_mount_t *mp,
2630*4882a593Smuzhiyun xfs_agnumber_t agno,
2631*4882a593Smuzhiyun int bucket)
2632*4882a593Smuzhiyun {
2633*4882a593Smuzhiyun xfs_trans_t *tp;
2634*4882a593Smuzhiyun xfs_agi_t *agi;
2635*4882a593Smuzhiyun xfs_buf_t *agibp;
2636*4882a593Smuzhiyun int offset;
2637*4882a593Smuzhiyun int error;
2638*4882a593Smuzhiyun
2639*4882a593Smuzhiyun error = xfs_trans_alloc(mp, &M_RES(mp)->tr_clearagi, 0, 0, 0, &tp);
2640*4882a593Smuzhiyun if (error)
2641*4882a593Smuzhiyun goto out_error;
2642*4882a593Smuzhiyun
2643*4882a593Smuzhiyun error = xfs_read_agi(mp, tp, agno, &agibp);
2644*4882a593Smuzhiyun if (error)
2645*4882a593Smuzhiyun goto out_abort;
2646*4882a593Smuzhiyun
2647*4882a593Smuzhiyun agi = agibp->b_addr;
2648*4882a593Smuzhiyun agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO);
2649*4882a593Smuzhiyun offset = offsetof(xfs_agi_t, agi_unlinked) +
2650*4882a593Smuzhiyun (sizeof(xfs_agino_t) * bucket);
2651*4882a593Smuzhiyun xfs_trans_log_buf(tp, agibp, offset,
2652*4882a593Smuzhiyun (offset + sizeof(xfs_agino_t) - 1));
2653*4882a593Smuzhiyun
2654*4882a593Smuzhiyun error = xfs_trans_commit(tp);
2655*4882a593Smuzhiyun if (error)
2656*4882a593Smuzhiyun goto out_error;
2657*4882a593Smuzhiyun return;
2658*4882a593Smuzhiyun
2659*4882a593Smuzhiyun out_abort:
2660*4882a593Smuzhiyun xfs_trans_cancel(tp);
2661*4882a593Smuzhiyun out_error:
2662*4882a593Smuzhiyun xfs_warn(mp, "%s: failed to clear agi %d. Continuing.", __func__, agno);
2663*4882a593Smuzhiyun return;
2664*4882a593Smuzhiyun }
2665*4882a593Smuzhiyun
2666*4882a593Smuzhiyun STATIC xfs_agino_t
2667*4882a593Smuzhiyun xlog_recover_process_one_iunlink(
2668*4882a593Smuzhiyun struct xfs_mount *mp,
2669*4882a593Smuzhiyun xfs_agnumber_t agno,
2670*4882a593Smuzhiyun xfs_agino_t agino,
2671*4882a593Smuzhiyun int bucket)
2672*4882a593Smuzhiyun {
2673*4882a593Smuzhiyun struct xfs_buf *ibp;
2674*4882a593Smuzhiyun struct xfs_dinode *dip;
2675*4882a593Smuzhiyun struct xfs_inode *ip;
2676*4882a593Smuzhiyun xfs_ino_t ino;
2677*4882a593Smuzhiyun int error;
2678*4882a593Smuzhiyun
2679*4882a593Smuzhiyun ino = XFS_AGINO_TO_INO(mp, agno, agino);
2680*4882a593Smuzhiyun error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
2681*4882a593Smuzhiyun if (error)
2682*4882a593Smuzhiyun goto fail;
2683*4882a593Smuzhiyun
2684*4882a593Smuzhiyun /*
2685*4882a593Smuzhiyun * Get the on disk inode to find the next inode in the bucket.
2686*4882a593Smuzhiyun */
2687*4882a593Smuzhiyun error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &ibp, 0);
2688*4882a593Smuzhiyun if (error)
2689*4882a593Smuzhiyun goto fail_iput;
2690*4882a593Smuzhiyun
2691*4882a593Smuzhiyun xfs_iflags_clear(ip, XFS_IRECOVERY);
2692*4882a593Smuzhiyun ASSERT(VFS_I(ip)->i_nlink == 0);
2693*4882a593Smuzhiyun ASSERT(VFS_I(ip)->i_mode != 0);
2694*4882a593Smuzhiyun
2695*4882a593Smuzhiyun /* setup for the next pass */
2696*4882a593Smuzhiyun agino = be32_to_cpu(dip->di_next_unlinked);
2697*4882a593Smuzhiyun xfs_buf_relse(ibp);
2698*4882a593Smuzhiyun
2699*4882a593Smuzhiyun /*
2700*4882a593Smuzhiyun * Prevent any DMAPI event from being sent when the reference on
2701*4882a593Smuzhiyun * the inode is dropped.
2702*4882a593Smuzhiyun */
2703*4882a593Smuzhiyun ip->i_d.di_dmevmask = 0;
2704*4882a593Smuzhiyun
2705*4882a593Smuzhiyun xfs_irele(ip);
2706*4882a593Smuzhiyun return agino;
2707*4882a593Smuzhiyun
2708*4882a593Smuzhiyun fail_iput:
2709*4882a593Smuzhiyun xfs_irele(ip);
2710*4882a593Smuzhiyun fail:
2711*4882a593Smuzhiyun /*
2712*4882a593Smuzhiyun * We can't read in the inode this bucket points to, or this inode
2713*4882a593Smuzhiyun * is messed up. Just ditch this bucket of inodes. We will lose
2714*4882a593Smuzhiyun * some inodes and space, but at least we won't hang.
2715*4882a593Smuzhiyun *
2716*4882a593Smuzhiyun * Call xlog_recover_clear_agi_bucket() to perform a transaction to
2717*4882a593Smuzhiyun * clear the inode pointer in the bucket.
2718*4882a593Smuzhiyun */
2719*4882a593Smuzhiyun xlog_recover_clear_agi_bucket(mp, agno, bucket);
2720*4882a593Smuzhiyun return NULLAGINO;
2721*4882a593Smuzhiyun }
2722*4882a593Smuzhiyun
2723*4882a593Smuzhiyun /*
2724*4882a593Smuzhiyun * Recover AGI unlinked lists
2725*4882a593Smuzhiyun *
2726*4882a593Smuzhiyun * This is called during recovery to process any inodes which we unlinked but
2727*4882a593Smuzhiyun * not freed when the system crashed. These inodes will be on the lists in the
2728*4882a593Smuzhiyun * AGI blocks. What we do here is scan all the AGIs and fully truncate and free
2729*4882a593Smuzhiyun * any inodes found on the lists. Each inode is removed from the lists when it
2730*4882a593Smuzhiyun * has been fully truncated and is freed. The freeing of the inode and its
2731*4882a593Smuzhiyun * removal from the list must be atomic.
2732*4882a593Smuzhiyun *
2733*4882a593Smuzhiyun * If everything we touch in the agi processing loop is already in memory, this
2734*4882a593Smuzhiyun * loop can hold the CPU for a long time. It runs without lock contention,
2735*4882a593Smuzhiyun * memory allocation contention, the need to wait for I/O, etc., and so will
2736*4882a593Smuzhiyun * run until we either run out of inodes to process, run low on memory or run
2737*4882a593Smuzhiyun * out of log space.
2738*4882a593Smuzhiyun *
2739*4882a593Smuzhiyun * This behaviour is bad for latency on single CPU and non-preemptible kernels,
2740*4882a593Smuzhiyun * and can prevent other filesystem work (such as CIL pushes) from running. This
2741*4882a593Smuzhiyun * can lead to deadlocks if the recovery process runs out of log reservation
2742*4882a593Smuzhiyun * space. Hence we need to yield the CPU when there is other kernel work
2743*4882a593Smuzhiyun * scheduled on this CPU to ensure other scheduled work can run without undue
2744*4882a593Smuzhiyun * latency.
2745*4882a593Smuzhiyun */
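/*
 * In outline: for each AG we read the AGI, then walk each of its
 * XFS_AGI_UNLINKED_BUCKETS hash buckets, following the on-disk
 * di_next_unlinked chain from the bucket head until we reach NULLAGINO.
 */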
2746*4882a593Smuzhiyun STATIC void
2747*4882a593Smuzhiyun xlog_recover_process_iunlinks(
2748*4882a593Smuzhiyun struct xlog *log)
2749*4882a593Smuzhiyun {
2750*4882a593Smuzhiyun xfs_mount_t *mp;
2751*4882a593Smuzhiyun xfs_agnumber_t agno;
2752*4882a593Smuzhiyun xfs_agi_t *agi;
2753*4882a593Smuzhiyun xfs_buf_t *agibp;
2754*4882a593Smuzhiyun xfs_agino_t agino;
2755*4882a593Smuzhiyun int bucket;
2756*4882a593Smuzhiyun int error;
2757*4882a593Smuzhiyun
2758*4882a593Smuzhiyun mp = log->l_mp;
2759*4882a593Smuzhiyun
2760*4882a593Smuzhiyun for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
2761*4882a593Smuzhiyun /*
2762*4882a593Smuzhiyun * Find the agi for this ag.
2763*4882a593Smuzhiyun */
2764*4882a593Smuzhiyun error = xfs_read_agi(mp, NULL, agno, &agibp);
2765*4882a593Smuzhiyun if (error) {
2766*4882a593Smuzhiyun /*
2767*4882a593Smuzhiyun * AGI is b0rked. Don't process it.
2768*4882a593Smuzhiyun *
2769*4882a593Smuzhiyun * We should probably mark the filesystem as corrupt
2770*4882a593Smuzhiyun * after we've recovered all the AGs we can...
2771*4882a593Smuzhiyun */
2772*4882a593Smuzhiyun continue;
2773*4882a593Smuzhiyun }
2774*4882a593Smuzhiyun /*
2775*4882a593Smuzhiyun * Unlock the buffer so that it can be acquired in the normal
2776*4882a593Smuzhiyun * course of the transaction to truncate and free each inode.
2777*4882a593Smuzhiyun * Because we are not racing with anyone else here for the AGI
2778*4882a593Smuzhiyun * buffer, we don't even need to hold it locked to read the
2779*4882a593Smuzhiyun * initial unlinked bucket entries out of the buffer. We keep
2780*4882a593Smuzhiyun * buffer reference though, so that it stays pinned in memory
2781*4882a593Smuzhiyun * while we need the buffer.
2782*4882a593Smuzhiyun */
2783*4882a593Smuzhiyun agi = agibp->b_addr;
2784*4882a593Smuzhiyun xfs_buf_unlock(agibp);
2785*4882a593Smuzhiyun
2786*4882a593Smuzhiyun for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) {
2787*4882a593Smuzhiyun agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2788*4882a593Smuzhiyun while (agino != NULLAGINO) {
2789*4882a593Smuzhiyun agino = xlog_recover_process_one_iunlink(mp,
2790*4882a593Smuzhiyun agno, agino, bucket);
2791*4882a593Smuzhiyun cond_resched();
2792*4882a593Smuzhiyun }
2793*4882a593Smuzhiyun }
2794*4882a593Smuzhiyun xfs_buf_rele(agibp);
2795*4882a593Smuzhiyun }
2796*4882a593Smuzhiyun }
2797*4882a593Smuzhiyun
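/*
 * Undo the transformation applied by xlog_pack_data() at write time: the
 * first __be32 of each basic block in the record payload was overwritten
 * with the cycle number, with the original words saved in h_cycle_data
 * and, for v2 logs with records larger than XLOG_HEADER_CYCLE_SIZE, in the
 * extended headers. Put the saved words back.
 */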
2798*4882a593Smuzhiyun STATIC void
2799*4882a593Smuzhiyun xlog_unpack_data(
2800*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2801*4882a593Smuzhiyun char *dp,
2802*4882a593Smuzhiyun struct xlog *log)
2803*4882a593Smuzhiyun {
2804*4882a593Smuzhiyun int i, j, k;
2805*4882a593Smuzhiyun
2806*4882a593Smuzhiyun for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2807*4882a593Smuzhiyun i < (XLOG_HEADER_CYCLE_SIZE / BBSIZE); i++) {
2808*4882a593Smuzhiyun *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2809*4882a593Smuzhiyun dp += BBSIZE;
2810*4882a593Smuzhiyun }
2811*4882a593Smuzhiyun
2812*4882a593Smuzhiyun if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2813*4882a593Smuzhiyun xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2814*4882a593Smuzhiyun for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
2815*4882a593Smuzhiyun j = i / (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2816*4882a593Smuzhiyun k = i % (XLOG_HEADER_CYCLE_SIZE / BBSIZE);
2817*4882a593Smuzhiyun *(__be32 *)dp = xhdr[j].hic_xheader.xh_cycle_data[k];
2818*4882a593Smuzhiyun dp += BBSIZE;
2819*4882a593Smuzhiyun }
2820*4882a593Smuzhiyun }
2821*4882a593Smuzhiyun }
2822*4882a593Smuzhiyun
2823*4882a593Smuzhiyun /*
2824*4882a593Smuzhiyun * CRC check, unpack and process a log record.
2825*4882a593Smuzhiyun */
2826*4882a593Smuzhiyun STATIC int
2827*4882a593Smuzhiyun xlog_recover_process(
2828*4882a593Smuzhiyun struct xlog *log,
2829*4882a593Smuzhiyun struct hlist_head rhash[],
2830*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2831*4882a593Smuzhiyun char *dp,
2832*4882a593Smuzhiyun int pass,
2833*4882a593Smuzhiyun struct list_head *buffer_list)
2834*4882a593Smuzhiyun {
2835*4882a593Smuzhiyun __le32 old_crc = rhead->h_crc;
2836*4882a593Smuzhiyun __le32 crc;
2837*4882a593Smuzhiyun
2838*4882a593Smuzhiyun crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2839*4882a593Smuzhiyun
2840*4882a593Smuzhiyun /*
2841*4882a593Smuzhiyun * Nothing else to do if this is a CRC verification pass. Just return
2842*4882a593Smuzhiyun * if this is a record with a non-zero crc. Unfortunately, mkfs always
2843*4882a593Smuzhiyun * sets old_crc to 0 so we must consider this valid even on v5 supers.
2844*4882a593Smuzhiyun * Otherwise, return EFSBADCRC on failure so the callers up the stack
2845*4882a593Smuzhiyun * know precisely what failed.
2846*4882a593Smuzhiyun */
2847*4882a593Smuzhiyun if (pass == XLOG_RECOVER_CRCPASS) {
2848*4882a593Smuzhiyun if (old_crc && crc != old_crc)
2849*4882a593Smuzhiyun return -EFSBADCRC;
2850*4882a593Smuzhiyun return 0;
2851*4882a593Smuzhiyun }
2852*4882a593Smuzhiyun
2853*4882a593Smuzhiyun /*
2854*4882a593Smuzhiyun * We're in the normal recovery path. Issue a warning if and only if the
2855*4882a593Smuzhiyun * CRC in the header is non-zero. This is an advisory warning and the
2856*4882a593Smuzhiyun * zero CRC check prevents warnings from being emitted when upgrading
2857*4882a593Smuzhiyun * the kernel from one that does not add CRCs by default.
2858*4882a593Smuzhiyun */
2859*4882a593Smuzhiyun if (crc != old_crc) {
2860*4882a593Smuzhiyun if (old_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2861*4882a593Smuzhiyun xfs_alert(log->l_mp,
2862*4882a593Smuzhiyun "log record CRC mismatch: found 0x%x, expected 0x%x.",
2863*4882a593Smuzhiyun le32_to_cpu(old_crc),
2864*4882a593Smuzhiyun le32_to_cpu(crc));
2865*4882a593Smuzhiyun xfs_hex_dump(dp, 32);
2866*4882a593Smuzhiyun }
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun /*
2869*4882a593Smuzhiyun * If the filesystem is CRC enabled, this mismatch becomes a
2870*4882a593Smuzhiyun * fatal log corruption failure.
2871*4882a593Smuzhiyun */
2872*4882a593Smuzhiyun if (xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
2873*4882a593Smuzhiyun XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
2874*4882a593Smuzhiyun return -EFSCORRUPTED;
2875*4882a593Smuzhiyun }
2876*4882a593Smuzhiyun }
2877*4882a593Smuzhiyun
2878*4882a593Smuzhiyun xlog_unpack_data(rhead, dp, log);
2879*4882a593Smuzhiyun
2880*4882a593Smuzhiyun return xlog_recover_process_data(log, rhash, rhead, dp, pass,
2881*4882a593Smuzhiyun buffer_list);
2882*4882a593Smuzhiyun }
2883*4882a593Smuzhiyun
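/*
 * Sanity check a record header before trusting its contents: the magic
 * number, a known log version, an h_len that fits the buffer we read into,
 * and a block number that lies within the log.
 */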
2884*4882a593Smuzhiyun STATIC int
2885*4882a593Smuzhiyun xlog_valid_rec_header(
2886*4882a593Smuzhiyun struct xlog *log,
2887*4882a593Smuzhiyun struct xlog_rec_header *rhead,
2888*4882a593Smuzhiyun xfs_daddr_t blkno,
2889*4882a593Smuzhiyun int bufsize)
2890*4882a593Smuzhiyun {
2891*4882a593Smuzhiyun int hlen;
2892*4882a593Smuzhiyun
2893*4882a593Smuzhiyun if (XFS_IS_CORRUPT(log->l_mp,
2894*4882a593Smuzhiyun rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2895*4882a593Smuzhiyun return -EFSCORRUPTED;
2896*4882a593Smuzhiyun if (XFS_IS_CORRUPT(log->l_mp,
2897*4882a593Smuzhiyun (!rhead->h_version ||
2898*4882a593Smuzhiyun (be32_to_cpu(rhead->h_version) &
2899*4882a593Smuzhiyun (~XLOG_VERSION_OKBITS))))) {
2900*4882a593Smuzhiyun xfs_warn(log->l_mp, "%s: unrecognised log version (%d).",
2901*4882a593Smuzhiyun __func__, be32_to_cpu(rhead->h_version));
2902*4882a593Smuzhiyun return -EFSCORRUPTED;
2903*4882a593Smuzhiyun }
2904*4882a593Smuzhiyun
2905*4882a593Smuzhiyun /*
2906*4882a593Smuzhiyun * LR body must have data (or it wouldn't have been written)
2907*4882a593Smuzhiyun * and h_len must not be greater than LR buffer size.
2908*4882a593Smuzhiyun */
2909*4882a593Smuzhiyun hlen = be32_to_cpu(rhead->h_len);
2910*4882a593Smuzhiyun if (XFS_IS_CORRUPT(log->l_mp, hlen <= 0 || hlen > bufsize))
2911*4882a593Smuzhiyun return -EFSCORRUPTED;
2912*4882a593Smuzhiyun
2913*4882a593Smuzhiyun if (XFS_IS_CORRUPT(log->l_mp,
2914*4882a593Smuzhiyun blkno > log->l_logBBsize || blkno > INT_MAX))
2915*4882a593Smuzhiyun return -EFSCORRUPTED;
2916*4882a593Smuzhiyun return 0;
2917*4882a593Smuzhiyun }
2918*4882a593Smuzhiyun
2919*4882a593Smuzhiyun /*
2920*4882a593Smuzhiyun * Read the log from tail to head and process the log records found.
2921*4882a593Smuzhiyun * Handle the two cases where the tail and head are in the same cycle
2922*4882a593Smuzhiyun * and where the active portion of the log wraps around the end of
2923*4882a593Smuzhiyun * the physical log separately. The pass parameter is passed through
2924*4882a593Smuzhiyun * to the routines called to process the data and is not looked at
2925*4882a593Smuzhiyun * here.
2926*4882a593Smuzhiyun */
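/*
 * Layout of the two cases: if tail_blk < head_blk the active range is
 * contiguous and one sequential scan from tail to head suffices. If
 * tail_blk > head_blk the log has wrapped, so we first scan from the tail
 * to the physical end of the log, stitching together any record that
 * straddles it, and then continue from block zero up to the head.
 */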
2927*4882a593Smuzhiyun STATIC int
2928*4882a593Smuzhiyun xlog_do_recovery_pass(
2929*4882a593Smuzhiyun struct xlog *log,
2930*4882a593Smuzhiyun xfs_daddr_t head_blk,
2931*4882a593Smuzhiyun xfs_daddr_t tail_blk,
2932*4882a593Smuzhiyun int pass,
2933*4882a593Smuzhiyun xfs_daddr_t *first_bad) /* out: first bad log rec */
2934*4882a593Smuzhiyun {
2935*4882a593Smuzhiyun xlog_rec_header_t *rhead;
2936*4882a593Smuzhiyun xfs_daddr_t blk_no, rblk_no;
2937*4882a593Smuzhiyun xfs_daddr_t rhead_blk;
2938*4882a593Smuzhiyun char *offset;
2939*4882a593Smuzhiyun char *hbp, *dbp;
2940*4882a593Smuzhiyun int error = 0, h_size, h_len;
2941*4882a593Smuzhiyun int error2 = 0;
2942*4882a593Smuzhiyun int bblks, split_bblks;
2943*4882a593Smuzhiyun int hblks, split_hblks, wrapped_hblks;
2944*4882a593Smuzhiyun int i;
2945*4882a593Smuzhiyun struct hlist_head rhash[XLOG_RHASH_SIZE];
2946*4882a593Smuzhiyun LIST_HEAD(buffer_list);
2947*4882a593Smuzhiyun
2948*4882a593Smuzhiyun ASSERT(head_blk != tail_blk);
2949*4882a593Smuzhiyun blk_no = rhead_blk = tail_blk;
2950*4882a593Smuzhiyun
2951*4882a593Smuzhiyun for (i = 0; i < XLOG_RHASH_SIZE; i++)
2952*4882a593Smuzhiyun INIT_HLIST_HEAD(&rhash[i]);
2953*4882a593Smuzhiyun
2954*4882a593Smuzhiyun /*
2955*4882a593Smuzhiyun * Read the header of the tail block and get the iclog buffer size from
2956*4882a593Smuzhiyun * h_size. Use this to tell how many sectors make up the log header.
2957*4882a593Smuzhiyun */
2958*4882a593Smuzhiyun if (xfs_sb_version_haslogv2(&log->l_mp->m_sb)) {
2959*4882a593Smuzhiyun /*
2960*4882a593Smuzhiyun * When using variable length iclogs, read first sector of
2961*4882a593Smuzhiyun * iclog header and extract the header size from it. Get a
2962*4882a593Smuzhiyun * new hbp that is the correct size.
2963*4882a593Smuzhiyun */
2964*4882a593Smuzhiyun hbp = xlog_alloc_buffer(log, 1);
2965*4882a593Smuzhiyun if (!hbp)
2966*4882a593Smuzhiyun return -ENOMEM;
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun error = xlog_bread(log, tail_blk, 1, hbp, &offset);
2969*4882a593Smuzhiyun if (error)
2970*4882a593Smuzhiyun goto bread_err1;
2971*4882a593Smuzhiyun
2972*4882a593Smuzhiyun rhead = (xlog_rec_header_t *)offset;
2973*4882a593Smuzhiyun
2974*4882a593Smuzhiyun /*
2975*4882a593Smuzhiyun * xfsprogs has a bug where record length is based on lsunit but
2976*4882a593Smuzhiyun * h_size (iclog size) is hardcoded to 32k. Now that we
2977*4882a593Smuzhiyun * unconditionally CRC verify the unmount record, this means the
2978*4882a593Smuzhiyun * log buffer can be too small for the record and cause an
2979*4882a593Smuzhiyun * overrun.
2980*4882a593Smuzhiyun *
2981*4882a593Smuzhiyun * Detect this condition here. Use lsunit for the buffer size as
2982*4882a593Smuzhiyun * long as this looks like the mkfs case. Otherwise, return an
2983*4882a593Smuzhiyun * error to avoid a buffer overrun.
2984*4882a593Smuzhiyun */
2985*4882a593Smuzhiyun h_size = be32_to_cpu(rhead->h_size);
2986*4882a593Smuzhiyun h_len = be32_to_cpu(rhead->h_len);
2987*4882a593Smuzhiyun if (h_len > h_size && h_len <= log->l_mp->m_logbsize &&
2988*4882a593Smuzhiyun rhead->h_num_logops == cpu_to_be32(1)) {
2989*4882a593Smuzhiyun xfs_warn(log->l_mp,
2990*4882a593Smuzhiyun "invalid iclog size (%d bytes), using lsunit (%d bytes)",
2991*4882a593Smuzhiyun h_size, log->l_mp->m_logbsize);
2992*4882a593Smuzhiyun h_size = log->l_mp->m_logbsize;
2993*4882a593Smuzhiyun }
2994*4882a593Smuzhiyun
2995*4882a593Smuzhiyun error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
2996*4882a593Smuzhiyun if (error)
2997*4882a593Smuzhiyun goto bread_err1;
2998*4882a593Smuzhiyun
2999*4882a593Smuzhiyun hblks = xlog_logrec_hblks(log, rhead);
3000*4882a593Smuzhiyun if (hblks != 1) {
3001*4882a593Smuzhiyun kmem_free(hbp);
3002*4882a593Smuzhiyun hbp = xlog_alloc_buffer(log, hblks);
3003*4882a593Smuzhiyun }
3004*4882a593Smuzhiyun } else {
3005*4882a593Smuzhiyun ASSERT(log->l_sectBBsize == 1);
3006*4882a593Smuzhiyun hblks = 1;
3007*4882a593Smuzhiyun hbp = xlog_alloc_buffer(log, 1);
3008*4882a593Smuzhiyun h_size = XLOG_BIG_RECORD_BSIZE;
3009*4882a593Smuzhiyun }
3010*4882a593Smuzhiyun
3011*4882a593Smuzhiyun if (!hbp)
3012*4882a593Smuzhiyun return -ENOMEM;
3013*4882a593Smuzhiyun dbp = xlog_alloc_buffer(log, BTOBB(h_size));
3014*4882a593Smuzhiyun if (!dbp) {
3015*4882a593Smuzhiyun kmem_free(hbp);
3016*4882a593Smuzhiyun return -ENOMEM;
3017*4882a593Smuzhiyun }
3018*4882a593Smuzhiyun
3019*4882a593Smuzhiyun memset(rhash, 0, sizeof(rhash));
3020*4882a593Smuzhiyun if (tail_blk > head_blk) {
3021*4882a593Smuzhiyun /*
3022*4882a593Smuzhiyun * Perform recovery around the end of the physical log.
3023*4882a593Smuzhiyun * When the head is not on the same cycle number as the tail,
3024*4882a593Smuzhiyun * we can't do a sequential recovery.
3025*4882a593Smuzhiyun */
3026*4882a593Smuzhiyun while (blk_no < log->l_logBBsize) {
3027*4882a593Smuzhiyun /*
3028*4882a593Smuzhiyun * Check for header wrapping around physical end-of-log
3029*4882a593Smuzhiyun */
3030*4882a593Smuzhiyun offset = hbp;
3031*4882a593Smuzhiyun split_hblks = 0;
3032*4882a593Smuzhiyun wrapped_hblks = 0;
3033*4882a593Smuzhiyun if (blk_no + hblks <= log->l_logBBsize) {
3034*4882a593Smuzhiyun /* Read header in one read */
3035*4882a593Smuzhiyun error = xlog_bread(log, blk_no, hblks, hbp,
3036*4882a593Smuzhiyun &offset);
3037*4882a593Smuzhiyun if (error)
3038*4882a593Smuzhiyun goto bread_err2;
3039*4882a593Smuzhiyun } else {
3040*4882a593Smuzhiyun /* This LR is split across physical log end */
3041*4882a593Smuzhiyun if (blk_no != log->l_logBBsize) {
3042*4882a593Smuzhiyun /* some data before physical log end */
3043*4882a593Smuzhiyun ASSERT(blk_no <= INT_MAX);
3044*4882a593Smuzhiyun split_hblks = log->l_logBBsize - (int)blk_no;
3045*4882a593Smuzhiyun ASSERT(split_hblks > 0);
3046*4882a593Smuzhiyun error = xlog_bread(log, blk_no,
3047*4882a593Smuzhiyun split_hblks, hbp,
3048*4882a593Smuzhiyun &offset);
3049*4882a593Smuzhiyun if (error)
3050*4882a593Smuzhiyun goto bread_err2;
3051*4882a593Smuzhiyun }
3052*4882a593Smuzhiyun
3053*4882a593Smuzhiyun /*
3054*4882a593Smuzhiyun * Note: this black magic still works with
3055*4882a593Smuzhiyun * large sector sizes (non-512) only because:
3056*4882a593Smuzhiyun * - we increased the buffer size originally
3057*4882a593Smuzhiyun * by 1 sector giving us enough extra space
3058*4882a593Smuzhiyun * for the second read;
3059*4882a593Smuzhiyun * - the log start is guaranteed to be sector
3060*4882a593Smuzhiyun * aligned;
3061*4882a593Smuzhiyun * - we read the log end (LR header start)
3062*4882a593Smuzhiyun * _first_, then the log start (LR header end)
3063*4882a593Smuzhiyun * - order is important.
3064*4882a593Smuzhiyun */
3065*4882a593Smuzhiyun wrapped_hblks = hblks - split_hblks;
3066*4882a593Smuzhiyun error = xlog_bread_noalign(log, 0,
3067*4882a593Smuzhiyun wrapped_hblks,
3068*4882a593Smuzhiyun offset + BBTOB(split_hblks));
3069*4882a593Smuzhiyun if (error)
3070*4882a593Smuzhiyun goto bread_err2;
3071*4882a593Smuzhiyun }
3072*4882a593Smuzhiyun rhead = (xlog_rec_header_t *)offset;
3073*4882a593Smuzhiyun error = xlog_valid_rec_header(log, rhead,
3074*4882a593Smuzhiyun split_hblks ? blk_no : 0, h_size);
3075*4882a593Smuzhiyun if (error)
3076*4882a593Smuzhiyun goto bread_err2;
3077*4882a593Smuzhiyun
3078*4882a593Smuzhiyun bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3079*4882a593Smuzhiyun blk_no += hblks;
3080*4882a593Smuzhiyun
3081*4882a593Smuzhiyun /*
3082*4882a593Smuzhiyun * Read the log record data in multiple reads if it
3083*4882a593Smuzhiyun * wraps around the end of the log. Note that if the
3084*4882a593Smuzhiyun * header already wrapped, blk_no could point past the
3085*4882a593Smuzhiyun * end of the log. The record data is contiguous in
3086*4882a593Smuzhiyun * that case.
3087*4882a593Smuzhiyun */
3088*4882a593Smuzhiyun if (blk_no + bblks <= log->l_logBBsize ||
3089*4882a593Smuzhiyun blk_no >= log->l_logBBsize) {
3090*4882a593Smuzhiyun rblk_no = xlog_wrap_logbno(log, blk_no);
3091*4882a593Smuzhiyun error = xlog_bread(log, rblk_no, bblks, dbp,
3092*4882a593Smuzhiyun &offset);
3093*4882a593Smuzhiyun if (error)
3094*4882a593Smuzhiyun goto bread_err2;
3095*4882a593Smuzhiyun } else {
3096*4882a593Smuzhiyun /* This log record is split across the
3097*4882a593Smuzhiyun * physical end of log */
3098*4882a593Smuzhiyun offset = dbp;
3099*4882a593Smuzhiyun split_bblks = 0;
3100*4882a593Smuzhiyun if (blk_no != log->l_logBBsize) {
3101*4882a593Smuzhiyun /* some data is before the physical
3102*4882a593Smuzhiyun * end of log */
3103*4882a593Smuzhiyun ASSERT(!wrapped_hblks);
3104*4882a593Smuzhiyun ASSERT(blk_no <= INT_MAX);
3105*4882a593Smuzhiyun split_bblks =
3106*4882a593Smuzhiyun log->l_logBBsize - (int)blk_no;
3107*4882a593Smuzhiyun ASSERT(split_bblks > 0);
3108*4882a593Smuzhiyun error = xlog_bread(log, blk_no,
3109*4882a593Smuzhiyun split_bblks, dbp,
3110*4882a593Smuzhiyun &offset);
3111*4882a593Smuzhiyun if (error)
3112*4882a593Smuzhiyun goto bread_err2;
3113*4882a593Smuzhiyun }
3114*4882a593Smuzhiyun
3115*4882a593Smuzhiyun /*
3116*4882a593Smuzhiyun * Note: this black magic still works with
3117*4882a593Smuzhiyun * large sector sizes (non-512) only because:
3118*4882a593Smuzhiyun * - we increased the buffer size originally
3119*4882a593Smuzhiyun * by 1 sector giving us enough extra space
3120*4882a593Smuzhiyun * for the second read;
3121*4882a593Smuzhiyun * - the log start is guaranteed to be sector
3122*4882a593Smuzhiyun * aligned;
3123*4882a593Smuzhiyun * - we read the log end (LR header start)
3124*4882a593Smuzhiyun * _first_, then the log start (LR header end)
3125*4882a593Smuzhiyun * - order is important.
3126*4882a593Smuzhiyun */
3127*4882a593Smuzhiyun error = xlog_bread_noalign(log, 0,
3128*4882a593Smuzhiyun bblks - split_bblks,
3129*4882a593Smuzhiyun offset + BBTOB(split_bblks));
3130*4882a593Smuzhiyun if (error)
3131*4882a593Smuzhiyun goto bread_err2;
3132*4882a593Smuzhiyun }
3133*4882a593Smuzhiyun
3134*4882a593Smuzhiyun error = xlog_recover_process(log, rhash, rhead, offset,
3135*4882a593Smuzhiyun pass, &buffer_list);
3136*4882a593Smuzhiyun if (error)
3137*4882a593Smuzhiyun goto bread_err2;
3138*4882a593Smuzhiyun
3139*4882a593Smuzhiyun blk_no += bblks;
3140*4882a593Smuzhiyun rhead_blk = blk_no;
3141*4882a593Smuzhiyun }
3142*4882a593Smuzhiyun
3143*4882a593Smuzhiyun ASSERT(blk_no >= log->l_logBBsize);
3144*4882a593Smuzhiyun blk_no -= log->l_logBBsize;
3145*4882a593Smuzhiyun rhead_blk = blk_no;
3146*4882a593Smuzhiyun }
3147*4882a593Smuzhiyun
3148*4882a593Smuzhiyun /* read first part of physical log */
3149*4882a593Smuzhiyun while (blk_no < head_blk) {
3150*4882a593Smuzhiyun error = xlog_bread(log, blk_no, hblks, hbp, &offset);
3151*4882a593Smuzhiyun if (error)
3152*4882a593Smuzhiyun goto bread_err2;
3153*4882a593Smuzhiyun
3154*4882a593Smuzhiyun rhead = (xlog_rec_header_t *)offset;
3155*4882a593Smuzhiyun error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3156*4882a593Smuzhiyun if (error)
3157*4882a593Smuzhiyun goto bread_err2;
3158*4882a593Smuzhiyun
3159*4882a593Smuzhiyun /* blocks in data section */
3160*4882a593Smuzhiyun bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3161*4882a593Smuzhiyun error = xlog_bread(log, blk_no + hblks, bblks, dbp,
3162*4882a593Smuzhiyun &offset);
3163*4882a593Smuzhiyun if (error)
3164*4882a593Smuzhiyun goto bread_err2;
3165*4882a593Smuzhiyun
3166*4882a593Smuzhiyun error = xlog_recover_process(log, rhash, rhead, offset, pass,
3167*4882a593Smuzhiyun &buffer_list);
3168*4882a593Smuzhiyun if (error)
3169*4882a593Smuzhiyun goto bread_err2;
3170*4882a593Smuzhiyun
3171*4882a593Smuzhiyun blk_no += bblks + hblks;
3172*4882a593Smuzhiyun rhead_blk = blk_no;
3173*4882a593Smuzhiyun }
3174*4882a593Smuzhiyun
3175*4882a593Smuzhiyun bread_err2:
3176*4882a593Smuzhiyun kmem_free(dbp);
3177*4882a593Smuzhiyun bread_err1:
3178*4882a593Smuzhiyun kmem_free(hbp);
3179*4882a593Smuzhiyun
3180*4882a593Smuzhiyun /*
3181*4882a593Smuzhiyun * Submit buffers that have been added from the last record processed,
3182*4882a593Smuzhiyun * regardless of error status.
3183*4882a593Smuzhiyun */
3184*4882a593Smuzhiyun if (!list_empty(&buffer_list))
3185*4882a593Smuzhiyun error2 = xfs_buf_delwri_submit(&buffer_list);
3186*4882a593Smuzhiyun
3187*4882a593Smuzhiyun if (error && first_bad)
3188*4882a593Smuzhiyun *first_bad = rhead_blk;
3189*4882a593Smuzhiyun
3190*4882a593Smuzhiyun /*
3191*4882a593Smuzhiyun * Transactions are freed at commit time but transactions without commit
3192*4882a593Smuzhiyun * records on disk are never committed. Free any that may be left in the
3193*4882a593Smuzhiyun * hash table.
3194*4882a593Smuzhiyun */
3195*4882a593Smuzhiyun for (i = 0; i < XLOG_RHASH_SIZE; i++) {
3196*4882a593Smuzhiyun struct hlist_node *tmp;
3197*4882a593Smuzhiyun struct xlog_recover *trans;
3198*4882a593Smuzhiyun
3199*4882a593Smuzhiyun hlist_for_each_entry_safe(trans, tmp, &rhash[i], r_list)
3200*4882a593Smuzhiyun xlog_recover_free_trans(trans);
3201*4882a593Smuzhiyun }
3202*4882a593Smuzhiyun
3203*4882a593Smuzhiyun return error ? error : error2;
3204*4882a593Smuzhiyun }
3205*4882a593Smuzhiyun
3206*4882a593Smuzhiyun /*
3207*4882a593Smuzhiyun * Do the recovery of the log. We actually do this in two phases.
3208*4882a593Smuzhiyun * The two passes are necessary in order to implement the function
3209*4882a593Smuzhiyun * of cancelling a record written into the log. The first pass
3210*4882a593Smuzhiyun * determines those things which have been cancelled, and the
3211*4882a593Smuzhiyun * second pass replays log items normally except for those which
3212*4882a593Smuzhiyun * have been cancelled. The handling of the replay and cancellations
3213*4882a593Smuzhiyun * takes place in the log item type specific routines.
3214*4882a593Smuzhiyun *
3215*4882a593Smuzhiyun * The table of items which have cancel records in the log is allocated
3216*4882a593Smuzhiyun * and freed at this level, since only here do we know when all of
3217*4882a593Smuzhiyun * the log recovery has been completed.
3218*4882a593Smuzhiyun */
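/*
 * For example, a buffer that was logged and subsequently cancelled (say,
 * freed and reallocated for a different use) must not be replayed. Pass 1
 * notes the cancellation in l_buf_cancel_table so that pass 2 can skip the
 * stale buffer image.
 */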
3219*4882a593Smuzhiyun STATIC int
3220*4882a593Smuzhiyun xlog_do_log_recovery(
3221*4882a593Smuzhiyun struct xlog *log,
3222*4882a593Smuzhiyun xfs_daddr_t head_blk,
3223*4882a593Smuzhiyun xfs_daddr_t tail_blk)
3224*4882a593Smuzhiyun {
3225*4882a593Smuzhiyun int error, i;
3226*4882a593Smuzhiyun
3227*4882a593Smuzhiyun ASSERT(head_blk != tail_blk);
3228*4882a593Smuzhiyun
3229*4882a593Smuzhiyun /*
3230*4882a593Smuzhiyun * First do a pass to find all of the cancelled buf log items.
3231*4882a593Smuzhiyun * Store them in the buf_cancel_table for use in the second pass.
3232*4882a593Smuzhiyun */
3233*4882a593Smuzhiyun log->l_buf_cancel_table = kmem_zalloc(XLOG_BC_TABLE_SIZE *
3234*4882a593Smuzhiyun sizeof(struct list_head),
3235*4882a593Smuzhiyun 0);
3236*4882a593Smuzhiyun for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3237*4882a593Smuzhiyun INIT_LIST_HEAD(&log->l_buf_cancel_table[i]);
3238*4882a593Smuzhiyun
3239*4882a593Smuzhiyun error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3240*4882a593Smuzhiyun XLOG_RECOVER_PASS1, NULL);
3241*4882a593Smuzhiyun if (error != 0) {
3242*4882a593Smuzhiyun kmem_free(log->l_buf_cancel_table);
3243*4882a593Smuzhiyun log->l_buf_cancel_table = NULL;
3244*4882a593Smuzhiyun return error;
3245*4882a593Smuzhiyun }
3246*4882a593Smuzhiyun /*
3247*4882a593Smuzhiyun * Then do a second pass to actually recover the items in the log.
3248*4882a593Smuzhiyun * When it is complete free the table of buf cancel items.
3249*4882a593Smuzhiyun */
3250*4882a593Smuzhiyun error = xlog_do_recovery_pass(log, head_blk, tail_blk,
3251*4882a593Smuzhiyun XLOG_RECOVER_PASS2, NULL);
3252*4882a593Smuzhiyun #ifdef DEBUG
3253*4882a593Smuzhiyun if (!error) {
3254*4882a593Smuzhiyun int i;
3255*4882a593Smuzhiyun
3256*4882a593Smuzhiyun for (i = 0; i < XLOG_BC_TABLE_SIZE; i++)
3257*4882a593Smuzhiyun ASSERT(list_empty(&log->l_buf_cancel_table[i]));
3258*4882a593Smuzhiyun }
3259*4882a593Smuzhiyun #endif /* DEBUG */
3260*4882a593Smuzhiyun
3261*4882a593Smuzhiyun kmem_free(log->l_buf_cancel_table);
3262*4882a593Smuzhiyun log->l_buf_cancel_table = NULL;
3263*4882a593Smuzhiyun
3264*4882a593Smuzhiyun return error;
3265*4882a593Smuzhiyun }
3266*4882a593Smuzhiyun
3267*4882a593Smuzhiyun /*
3268*4882a593Smuzhiyun * Do the actual recovery
3269*4882a593Smuzhiyun */
3270*4882a593Smuzhiyun STATIC int
3271*4882a593Smuzhiyun xlog_do_recover(
3272*4882a593Smuzhiyun struct xlog *log,
3273*4882a593Smuzhiyun xfs_daddr_t head_blk,
3274*4882a593Smuzhiyun xfs_daddr_t tail_blk)
3275*4882a593Smuzhiyun {
3276*4882a593Smuzhiyun struct xfs_mount *mp = log->l_mp;
3277*4882a593Smuzhiyun struct xfs_buf *bp = mp->m_sb_bp;
3278*4882a593Smuzhiyun struct xfs_sb *sbp = &mp->m_sb;
3279*4882a593Smuzhiyun int error;
3280*4882a593Smuzhiyun
3281*4882a593Smuzhiyun trace_xfs_log_recover(log, head_blk, tail_blk);
3282*4882a593Smuzhiyun
3283*4882a593Smuzhiyun /*
3284*4882a593Smuzhiyun * First replay the images in the log.
3285*4882a593Smuzhiyun */
3286*4882a593Smuzhiyun error = xlog_do_log_recovery(log, head_blk, tail_blk);
3287*4882a593Smuzhiyun if (error)
3288*4882a593Smuzhiyun return error;
3289*4882a593Smuzhiyun
3290*4882a593Smuzhiyun /*
3291*4882a593Smuzhiyun * If IO errors happened during recovery, bail out.
3292*4882a593Smuzhiyun */
3293*4882a593Smuzhiyun if (XFS_FORCED_SHUTDOWN(mp))
3294*4882a593Smuzhiyun return -EIO;
3295*4882a593Smuzhiyun
3296*4882a593Smuzhiyun /*
3297*4882a593Smuzhiyun * We now update the tail_lsn since much of the recovery has completed
3298*4882a593Smuzhiyun * and there may be space available to use. If there were no extent frees
3299*4882a593Smuzhiyun * or iunlinks, we can free up the entire log and set the tail_lsn to
3300*4882a593Smuzhiyun * be the last_sync_lsn. This was set in xlog_find_tail to be the
3301*4882a593Smuzhiyun * lsn of the last known good LR on disk. If there are extent frees
3302*4882a593Smuzhiyun * or iunlinks they will have some entries in the AIL; so we look at
3303*4882a593Smuzhiyun * the AIL to determine how to set the tail_lsn.
3304*4882a593Smuzhiyun */
3305*4882a593Smuzhiyun xlog_assign_tail_lsn(mp);
3306*4882a593Smuzhiyun
3307*4882a593Smuzhiyun /*
3308*4882a593Smuzhiyun * Now that we've finished replaying all buffer and inode updates,
3309*4882a593Smuzhiyun * re-read the superblock and reverify it.
3310*4882a593Smuzhiyun */
3311*4882a593Smuzhiyun xfs_buf_lock(bp);
3312*4882a593Smuzhiyun xfs_buf_hold(bp);
3313*4882a593Smuzhiyun error = _xfs_buf_read(bp, XBF_READ);
3314*4882a593Smuzhiyun if (error) {
3315*4882a593Smuzhiyun if (!XFS_FORCED_SHUTDOWN(mp)) {
3316*4882a593Smuzhiyun xfs_buf_ioerror_alert(bp, __this_address);
3317*4882a593Smuzhiyun ASSERT(0);
3318*4882a593Smuzhiyun }
3319*4882a593Smuzhiyun xfs_buf_relse(bp);
3320*4882a593Smuzhiyun return error;
3321*4882a593Smuzhiyun }
3322*4882a593Smuzhiyun
3323*4882a593Smuzhiyun /* Convert superblock from on-disk format */
3324*4882a593Smuzhiyun xfs_sb_from_disk(sbp, bp->b_addr);
3325*4882a593Smuzhiyun xfs_buf_relse(bp);
3326*4882a593Smuzhiyun
3327*4882a593Smuzhiyun /* re-initialise in-core superblock and geometry structures */
3328*4882a593Smuzhiyun xfs_reinit_percpu_counters(mp);
3329*4882a593Smuzhiyun error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
3330*4882a593Smuzhiyun if (error) {
3331*4882a593Smuzhiyun xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
3332*4882a593Smuzhiyun return error;
3333*4882a593Smuzhiyun }
3334*4882a593Smuzhiyun mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
3335*4882a593Smuzhiyun
3336*4882a593Smuzhiyun xlog_recover_check_summary(log);
3337*4882a593Smuzhiyun
3338*4882a593Smuzhiyun /* Normal transactions can now occur */
3339*4882a593Smuzhiyun log->l_flags &= ~XLOG_ACTIVE_RECOVERY;
3340*4882a593Smuzhiyun return 0;
3341*4882a593Smuzhiyun }
3342*4882a593Smuzhiyun
3343*4882a593Smuzhiyun /*
3344*4882a593Smuzhiyun * Perform recovery and re-initialize some log variables in xlog_find_tail.
3345*4882a593Smuzhiyun *
3346*4882a593Smuzhiyun * Return error or zero.
3347*4882a593Smuzhiyun */
3348*4882a593Smuzhiyun int
3349*4882a593Smuzhiyun xlog_recover(
3350*4882a593Smuzhiyun struct xlog *log)
3351*4882a593Smuzhiyun {
3352*4882a593Smuzhiyun xfs_daddr_t head_blk, tail_blk;
3353*4882a593Smuzhiyun int error;
3354*4882a593Smuzhiyun
3355*4882a593Smuzhiyun /* find the tail of the log */
3356*4882a593Smuzhiyun error = xlog_find_tail(log, &head_blk, &tail_blk);
3357*4882a593Smuzhiyun if (error)
3358*4882a593Smuzhiyun return error;
3359*4882a593Smuzhiyun
3360*4882a593Smuzhiyun /*
3361*4882a593Smuzhiyun * The superblock was read before the log was available and thus the LSN
3362*4882a593Smuzhiyun * could not be verified. Check the superblock LSN against the current
3363*4882a593Smuzhiyun * LSN now that it's known.
3364*4882a593Smuzhiyun */
3365*4882a593Smuzhiyun if (xfs_sb_version_hascrc(&log->l_mp->m_sb) &&
3366*4882a593Smuzhiyun !xfs_log_check_lsn(log->l_mp, log->l_mp->m_sb.sb_lsn))
3367*4882a593Smuzhiyun return -EINVAL;
3368*4882a593Smuzhiyun
3369*4882a593Smuzhiyun if (tail_blk != head_blk) {
3370*4882a593Smuzhiyun /* There used to be a comment here:
3371*4882a593Smuzhiyun *
3372*4882a593Smuzhiyun * disallow recovery on read-only mounts. note -- mount
3373*4882a593Smuzhiyun * checks for ENOSPC and turns it into an intelligent
3374*4882a593Smuzhiyun * error message.
3375*4882a593Smuzhiyun * ...but this is no longer true. Now, unless you specify
3376*4882a593Smuzhiyun * NORECOVERY (in which case this function would never be
3377*4882a593Smuzhiyun * called), we just go ahead and recover. We do this all
3378*4882a593Smuzhiyun * under the vfs layer, so we can get away with it unless
3379*4882a593Smuzhiyun * the device itself is read-only, in which case we fail.
3380*4882a593Smuzhiyun */
3381*4882a593Smuzhiyun if ((error = xfs_dev_is_read_only(log->l_mp, "recovery"))) {
3382*4882a593Smuzhiyun return error;
3383*4882a593Smuzhiyun }
3384*4882a593Smuzhiyun
		/*
		 * Version 5 superblock log feature mask validation. We know
		 * the log is dirty so check if there are any unknown log
		 * features in what we need to recover. If there are unknown
		 * features (e.g. unsupported transactions), then simply
		 * reject the attempt at recovery before touching anything.
		 */
		if (XFS_SB_VERSION_NUM(&log->l_mp->m_sb) == XFS_SB_VERSION_5 &&
		    xfs_sb_has_incompat_log_feature(&log->l_mp->m_sb,
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN)) {
			xfs_warn(log->l_mp,
"Superblock has unknown incompatible log features (0x%x) enabled.",
				(log->l_mp->m_sb.sb_features_log_incompat &
					XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN));
			xfs_warn(log->l_mp,
"The log can not be fully and/or safely recovered by this kernel.");
			xfs_warn(log->l_mp,
"Please recover the log on a kernel that supports the unknown features.");
			return -EINVAL;
		}
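
		/*
		 * A rough sketch of what the check above expands to (the
		 * authoritative definitions live in xfs_format.h):
		 *
		 *	XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN is the complement of
		 *	XFS_SB_FEAT_INCOMPAT_LOG_ALL, i.e. every log incompat
		 *	bit this kernel does not recognise, and
		 *
		 *	xfs_sb_has_incompat_log_feature(sbp, mask) reduces to
		 *	(sbp->sb_features_log_incompat & mask) != 0
		 *
		 * so recovery is refused if any log incompat bit outside the
		 * supported set is found on disk.
		 */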

		/*
		 * Delay log recovery if the debug hook is set. This is debug
		 * instrumentation to coordinate simulation of I/O failures
		 * with log recovery.
		 */
		if (xfs_globals.log_recovery_delay) {
			xfs_notice(log->l_mp,
				"Delaying log recovery for %d seconds.",
				xfs_globals.log_recovery_delay);
			msleep(xfs_globals.log_recovery_delay * 1000);
		}
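
		/*
		 * For example, on a kernel built with CONFIG_XFS_DEBUG
		 * (which exposes the xfs_globals knobs under sysfs), a test
		 * harness can arm the delay from userspace before mounting:
		 *
		 *	# echo 30 > /sys/fs/xfs/debug/log_recovery_delay
		 *	# mount /dev/sdb1 /mnt
		 *
		 * giving it a 30 second window in which to inject I/O
		 * errors while recovery runs.
		 */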

		xfs_notice(log->l_mp, "Starting recovery (logdev: %s)",
			   log->l_mp->m_logname ? log->l_mp->m_logname
						: "internal");

		error = xlog_do_recover(log, head_blk, tail_blk);
		/*
		 * XLOG_RECOVERY_NEEDED is set even if recovery failed, as
		 * xlog_recover_cancel() keys off it to cancel any intents
		 * that may have been queued before the failure.
		 */
		log->l_flags |= XLOG_RECOVERY_NEEDED;
	}
	return error;
}

/*
 * In the first part of recovery we replay inodes and buffers and build up
 * the list of extent free items which need to be processed. Here we process
 * the extent free items and clean up the on-disk unlinked inode lists. This
 * is separated from the first part of recovery so that the root and
 * real-time bitmap inodes can be read in from disk in between the two
 * stages. This is necessary so that we can free space in the real-time
 * portion of the filesystem.
 */
int
xlog_recover_finish(
	struct xlog	*log)
{
	/*
	 * Now we're ready to do the transactions needed for the rest of
	 * recovery. Start with completing all the extent free intent
	 * records and then process the unlinked inode lists. At this point,
	 * we essentially run in normal mode except that we're still
	 * performing recovery actions rather than accepting new requests.
	 */
	if (log->l_flags & XLOG_RECOVERY_NEEDED) {
		int	error;

		error = xlog_recover_process_intents(log);
		if (error) {
			/*
			 * Cancel all the unprocessed intent items now so that
			 * we don't leave them pinned in the AIL. This can
			 * cause the AIL to livelock on the pinned item if
			 * anyone tries to push the AIL (inode reclaim does
			 * this) before we get around to
			 * xfs_log_mount_cancel.
			 */
			xlog_recover_cancel_intents(log);
			xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
			xfs_alert(log->l_mp, "Failed to recover intents");
			return error;
		}

		/*
		 * Sync the log to get all the intents out of the AIL. This
		 * isn't absolutely necessary, but it helps in case the
		 * unlink transactions would have problems pushing the
		 * intents out of the way.
		 */
		xfs_log_force(log->l_mp, XFS_LOG_SYNC);

		xlog_recover_process_iunlinks(log);

		xlog_recover_check_summary(log);

		xfs_notice(log->l_mp, "Ending recovery (logdev: %s)",
			   log->l_mp->m_logname ? log->l_mp->m_logname
						: "internal");
		log->l_flags &= ~XLOG_RECOVERY_NEEDED;
	} else {
		xfs_info(log->l_mp, "Ending clean mount");
	}
	return 0;
}

void
xlog_recover_cancel(
	struct xlog	*log)
{
	if (log->l_flags & XLOG_RECOVERY_NEEDED)
		xlog_recover_cancel_intents(log);
}
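
/*
 * A simplified sketch of how these entry points are driven from the mount
 * path (the real callers live in xfs_log.c):
 *
 *	xfs_log_mount()
 *		-> xlog_recover()		(replay buffers and inodes)
 *	xfs_log_mount_finish()
 *		-> xlog_recover_finish()	(process intents and iunlinks)
 *	xfs_log_mount_cancel()
 *		-> xlog_recover_cancel()	(tear down on a failed mount)
 */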

#if defined(DEBUG)
/*
 * Read all of the agf and agi counters and accumulate the per-AG totals
 * that should be consistent with the superblock counters.
 */
STATIC void
xlog_recover_check_summary(
	struct xlog	*log)
{
	struct xfs_mount	*mp;
	struct xfs_buf		*agfbp;
	struct xfs_buf		*agibp;
	xfs_agnumber_t		agno;
	uint64_t		freeblks;
	uint64_t		itotal;
	uint64_t		ifree;
	int			error;

	mp = log->l_mp;

	freeblks = 0LL;
	itotal = 0LL;
	ifree = 0LL;
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		error = xfs_read_agf(mp, NULL, agno, 0, &agfbp);
		if (error) {
			xfs_alert(mp, "%s agf read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agf	*agfp = agfbp->b_addr;

			freeblks += be32_to_cpu(agfp->agf_freeblks) +
				    be32_to_cpu(agfp->agf_flcount);
			xfs_buf_relse(agfbp);
		}

		error = xfs_read_agi(mp, NULL, agno, &agibp);
		if (error) {
			xfs_alert(mp, "%s agi read failed agno %d error %d",
						__func__, agno, error);
		} else {
			struct xfs_agi	*agi = agibp->b_addr;

			itotal += be32_to_cpu(agi->agi_count);
			ifree += be32_to_cpu(agi->agi_freecount);
			xfs_buf_relse(agibp);
		}
	}
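
	/*
	 * A minimal sketch of the consistency check these totals would
	 * support (assuming the in-core superblock counters are up to date
	 * at this point; the code above only accumulates the sums):
	 *
	 *	ASSERT(freeblks == mp->m_sb.sb_fdblocks);
	 *	ASSERT(itotal == mp->m_sb.sb_icount);
	 *	ASSERT(ifree == mp->m_sb.sb_ifree);
	 */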
}
#endif /* DEBUG */