// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;
static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

/* Is this log iovec plausibly large enough to contain the buffer log format? */
bool
xfs_buf_log_check_iovec(
	struct xfs_log_iovec		*iovec)
{
	struct xfs_buf_log_format	*blfp = iovec->i_addr;
	char				*bmp_end;
	char				*item_end;

	if (offsetof(struct xfs_buf_log_format, blf_data_map) > iovec->i_len)
		return false;

	item_end = (char *)iovec->i_addr + iovec->i_len;
	bmp_end = (char *)&blfp->blf_data_map[blfp->blf_map_size];
	return bmp_end <= item_end;
}
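
/*
 * Illustrative note (the exact byte counts depend on the on-disk structure
 * layout): blf_data_map starts several words into struct xfs_buf_log_format,
 * so the offsetof() check above rejects iovecs too short to hold even the
 * fixed header, and the bmp_end comparison rejects a blf_map_size that
 * claims more bitmap words than the iovec actually carries.
 */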

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
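
/*
 * Worked example (illustrative): a segment whose bitmap uses two words logs
 * offsetof(struct xfs_buf_log_format, blf_data_map) plus
 * 2 * sizeof(blfp->blf_data_map[0]) bytes, i.e. the logged format size
 * grows with the used portion of the bitmap, not with the full
 * XFS_BLF_DATAMAP_SIZE capacity of the in-memory structure.
 */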

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item segment.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
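
/*
 * Worked example (illustrative, assuming XFS_BLF_CHUNK == 128): with dirty
 * bits {0, 1, 2, 5, 6} in one segment's bitmap, the walk above counts the
 * format structure plus two contiguous dirty regions (bits 0-2 and 5-6),
 * i.e. *nvecs grows by 3, and *nbytes grows by the format size plus
 * 5 * 128 bytes of chunk data.
 */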

/*
 * Return the number of log iovecs and space needed to log the given buf log
 * item.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures. If the item has previously been logged and has dirty
 * regions, we do not relog them in stale buffers. This has the effect of
 * reducing the size of the relogged item by the amount of dirty data tracked
 * by the log item. This can result in the committing transaction reducing the
 * amount of space being consumed by the CIL.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log is the buf log
		 * format structure with the cancel flag in it as we are never
		 * going to replay the changes tracked in the log item.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it. It is not being
		 * included in the transaction commit, so no vectors are used at
		 * all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
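
/*
 * Note (illustrative): even when next_bit immediately follows last_bit in
 * the bitmap, the backing memory can be discontiguous - e.g. two chunks
 * that land in different pages of an unmapped buffer - so the straddle
 * check above compares the actual addresses returned by xfs_buf_offset()
 * rather than the bit numbers alone.
 */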

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf		*bp = bip->bli_buf;
	uint			base_size;
	int			first_bit;
	int			last_bit;
	int			next_bit;
	uint			nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}
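
/*
 * Worked example (illustrative): for the same dirty bits {0, 1, 2, 5, 6}
 * used in the sizing example above, this emits one XLOG_REG_TYPE_BFORMAT
 * iovec followed by two XLOG_REG_TYPE_BCHUNK iovecs - one copying chunks
 * 0-2 and one copying chunks 5-6 - leaving blf_size == 3, matching the
 * vector count computed by xfs_buf_item_size_segment().
 */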

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
	ASSERT(!(bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_has_v3inode(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += BBTOB(bp->b_maps[i].bm_len);
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log item which
 * was previously pinned with a call to xfs_buf_item_pin().
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t		*bp = bip->bli_buf;
	int			stale = bip->bli_flags & XFS_BLI_STALE;
	int			freed;

	ASSERT(bp->b_log_item == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	/*
	 * Drop the bli ref associated with the pin and grab the hold required
	 * for the I/O simulation failure in the abort case. We have to do this
	 * before the pin count drops because the AIL doesn't acquire a bli
	 * reference. Therefore if the refcount drops to zero, the bli could
	 * still be AIL resident and the buffer submitted for I/O (and freed on
	 * completion) at any point before we return. This can be removed once
	 * the AIL properly holds a reference on the bli.
	 */
	freed = atomic_dec_and_test(&bip->bli_refcount);
	if (freed && !stale && remove)
		xfs_buf_hold(bp);
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	/* nothing to do but drop the pin count if the bli is active */
	if (!freed)
		return;

	if (stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		ASSERT(list_empty(&lip->li_trans));
		ASSERT(!bp->b_transp);

		trace_xfs_buf_item_unpin_stale(bip);

		/*
		 * If we get called here because of an IO error, we may or may
		 * not have the item on the AIL. xfs_trans_ail_delete() will
		 * take care of that situation. xfs_trans_ail_delete() drops
		 * the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_item_done(bp);
			xfs_buf_inode_iodone(bp);
			ASSERT(list_empty(&bp->b_li_list));
		} else {
			xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_log_item == NULL);
		}
		xfs_buf_relse(bp);
	} else if (remove) {
		/*
		 * The buffer must be locked and held by the caller to simulate
		 * an async I/O failure. We acquired the hold for this case
		 * before the buffer was unpinned.
		 */
		xfs_buf_lock(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioend_fail(bp);
	}
}

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if (bp->b_flags & XBF_WRITE_FAIL) {
		xfs_buf_alert_ratelimited(bp, "XFS: Failing async write",
	    "Failing async write on buffer block 0x%llx. Retrying async write.",
					  (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Drop the buffer log item refcount and take appropriate action. This helper
 * determines whether the bli must be freed or not, since a decrement to zero
 * does not necessarily mean the bli is unused.
 *
 * Return true if the bli is freed, false otherwise.
 */
bool
xfs_buf_item_put(
	struct xfs_buf_log_item	*bip)
{
	struct xfs_log_item	*lip = &bip->bli_item;
	bool			aborted;
	bool			dirty;

	/* drop the bli ref and return if it wasn't the last one */
	if (!atomic_dec_and_test(&bip->bli_refcount))
		return false;

	/*
	 * We dropped the last ref and must free the item if clean or aborted.
	 * If the bli is dirty and non-aborted, the buffer was clean in the
	 * transaction but still awaiting writeback from previous changes. In
	 * that case, the bli is freed on buffer writeback completion.
	 */
	aborted = test_bit(XFS_LI_ABORTED, &lip->li_flags) ||
		  XFS_FORCED_SHUTDOWN(lip->li_mountp);
	dirty = bip->bli_flags & XFS_BLI_DIRTY;
	if (dirty && !aborted)
		return false;

	/*
	 * The bli is aborted or clean. An aborted item may be in the AIL
	 * regardless of dirty state.  For example, consider an aborted
	 * transaction that invalidated a dirty bli and cleared the dirty
	 * state.
	 */
	if (aborted)
		xfs_trans_ail_delete(lip, 0);
	xfs_buf_item_relse(bip->bli_buf);
	return true;
}

/*
 * Release the buffer associated with the buf log item. If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count. It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			released;
	bool			hold = bip->bli_flags & XFS_BLI_HOLD;
	bool			stale = bip->bli_flags & XFS_BLI_STALE;
#if defined(DEBUG) || defined(XFS_WARN)
	bool			ordered = bip->bli_flags & XFS_BLI_ORDERED;
	bool			dirty = bip->bli_flags & XFS_BLI_DIRTY;
	bool			aborted = test_bit(XFS_LI_ABORTED,
						   &lip->li_flags);
#endif

	trace_xfs_buf_item_release(bip);

	/*
	 * The bli dirty state should match whether the blf has logged segments
	 * except for ordered buffers, where only the bli should be dirty.
	 */
	ASSERT((!ordered && dirty == xfs_buf_item_dirty_format(bip)) ||
	       (ordered && dirty && !xfs_buf_item_dirty_format(bip)));
	ASSERT(!stale || (bip->__bli_format.blf_flags & XFS_BLF_CANCEL));

	/*
	 * Clear the buffer's association with this transaction and
	 * per-transaction state from the bli, which has been copied above.
	 */
	bp->b_transp = NULL;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * Unref the item and unlock the buffer unless held or stale. Stale
	 * buffers remain locked until final unpin unless the bli is freed by
	 * the unref call. The latter implies shutdown because buffer
	 * invalidation dirties the bli and transaction.
	 */
	released = xfs_buf_item_put(bip);
	if (hold || (stale && !released))
		return;
	ASSERT(!stale || aborted);
	xfs_buf_relse(bp);
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_csn_t		seq)
{
	return xfs_buf_item_release(lip);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_release	= xfs_buf_item_release,
	.iop_committing	= xfs_buf_item_committing,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
};

STATIC void
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				       0);
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_log_item field to point to the new
 * buf log item.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;
	int			chunks;
	int			map_size;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer. If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_mount == mp);
	if (bip) {
		ASSERT(bip->bli_item.li_type == XFS_LI_BUF);
		ASSERT(!bp->b_transp);
		ASSERT(bip->bli_buf == bp);
		return 0;
	}

	bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	xfs_buf_item_get_format(bip, bp->b_map_count);

	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		if (map_size > XFS_BLF_DATAMAP_SIZE) {
			kmem_cache_free(xfs_buf_item_zone, bip);
			xfs_err(mp,
	"buffer item dirty bitmap (%u uints) too small to reflect %u bytes!",
					map_size,
					BBTOB(bp->b_maps[i].bm_len));
			return -EFSCORRUPTED;
		}

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	bp->b_log_item = bip;
	xfs_buf_hold(bp);
	return 0;
}
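
/*
 * Worked example (illustrative, assuming XFS_BLF_CHUNK == 128 and
 * NBWORD == 32): a single-segment 4096-byte buffer has
 * chunks = DIV_ROUND_UP(4096, 128) = 32 and
 * map_size = DIV_ROUND_UP(32, 32) = 1, i.e. one bitmap word tracks the
 * whole buffer at 128-byte granularity.
 */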

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint			first_bit;
	uint			last_bit;
	uint			bits_to_set;
	uint			bits_set;
	uint			word_num;
	uint			*wordp;
	uint			bit;
	uint			end_bit;
	uint			mask;

	ASSERT(first < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);
	ASSERT(last < XFS_BLF_DATAMAP_SIZE * XFS_BLF_CHUNK * NBWORD);

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = min(bit + bits_to_set, (uint)NBWORD);
		mask = ((1U << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp = 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1U << end_bit) - 1;
		*wordp |= mask;
	}
}
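
/*
 * Worked example (illustrative, assuming XFS_BLF_SHIFT == 7 and
 * NBWORD == 32): logging bytes 100 through 300 gives first_bit = 0 and
 * last_bit = 2, so bits_to_set = 3. The range starts at bit 0 of word 0,
 * the whole-word loop is skipped (3 < 32), and the final partial-word
 * step ORs in mask (1U << 3) - 1 == 0x7, dirtying chunks 0-2.
 */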

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	struct xfs_buf_log_item	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len) - 1;

		/* skip to the map that includes the first byte to log */
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}

		/*
		 * Trim the range to this segment and mark it in the bitmap.
		 * Note that we must convert buffer offsets to segment relative
		 * offsets (e.g., the first byte of each segment is byte 0 of
		 * that segment).
		 */
		if (first < start)
			first = start;
		if (end > last)
			end = last;
		xfs_buf_item_log_segment(first - start, end - start,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += BBTOB(bp->b_maps[i].bm_len);
	}
}


/*
 * Return true if the buffer has any ranges logged/dirtied by a transaction,
 * false otherwise.
 */
bool
xfs_buf_item_dirty_format(
	struct xfs_buf_log_item	*bip)
{
	int			i;

	for (i = 0; i < bip->bli_format_count; i++) {
		if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				      bip->bli_formats[i].blf_map_size))
			return true;
	}

	return false;
}

STATIC void
xfs_buf_item_free(
	struct xfs_buf_log_item	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_free(bip->bli_item.li_lv_shadow);
	kmem_cache_free(xfs_buf_item_zone, bip);
}

/*
 * xfs_buf_item_relse() is called when the buf log item is no longer needed.
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	struct xfs_buf_log_item	*bip = bp->b_log_item;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags));

	bp->b_log_item = NULL;
	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}

void
xfs_buf_item_done(
	struct xfs_buf	*bp)
{
	/*
	 * If we are forcibly shutting down, this may well be off the AIL
	 * already. That's because we simulate the log-committed callbacks to
	 * unpin these buffers. Or we may never have put this item on the AIL
	 * because the transaction was aborted forcibly.
	 * xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, the AIL is useless if we're forcing a shutdown.
	 *
	 * Note that log recovery writes might have buffer items that are not
	 * on the AIL even when the file system is not shut down.
	 */
	xfs_trans_ail_delete(&bp->b_log_item->bli_item,
			     (bp->b_flags & _XBF_LOGRECOVERY) ? 0 :
			     SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_relse(bp);
}