// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2010 Red Hat, Inc. All Rights Reserved.
 */

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_extent_busy.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"
#include "xfs_trace.h"

struct workqueue_struct *xfs_discard_wq;

/*
 * Allocate a new ticket. Failing to get a new ticket makes it really hard to
 * recover, so we don't allow failure here. Also, we allocate in a context that
 * we don't want to be issuing transactions from, so we need to tell the
 * allocation code this as well.
 *
 * We don't reserve any space for the ticket - we are going to steal whatever
 * space we require from transactions as they commit. To ensure we reserve all
 * the space required, we need to set the current reservation of the ticket to
 * zero so that we know to steal the initial transaction overhead from the
 * first transaction commit.
 */
static struct xlog_ticket *
xlog_cil_ticket_alloc(
        struct xlog             *log)
{
        struct xlog_ticket      *tic;

        tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0);

        /*
         * set the current reservation to zero so we know to steal the basic
         * transaction overhead reservation from the first transaction commit.
         */
        tic->t_curr_res = 0;
        return tic;
}

/*
 * After the first stage of log recovery is done, we know where the head and
 * tail of the log are. We need this log initialisation done before we can
 * initialise the first CIL checkpoint context.
 *
 * Here we allocate a log ticket to track space usage during a CIL push. This
 * ticket is passed to xlog_write() directly so that we don't slowly leak log
 * space by failing to account for space used by log headers and additional
 * region headers for split regions.
 */
void
xlog_cil_init_post_recovery(
        struct xlog             *log)
{
        log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
        log->l_cilp->xc_ctx->sequence = 1;
}

static inline int
xlog_cil_iovec_space(
        uint                    niovecs)
{
        return round_up((sizeof(struct xfs_log_vec) +
                                niovecs * sizeof(struct xfs_log_iovec)),
                        sizeof(uint64_t));
}
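
/*
 * Worked example, purely to show the arithmetic (not in the original
 * source, and the actual struct sizes depend on your tree): if the log
 * vector header plus the iovec array for a given niovecs came to 92
 * bytes, round_up(92, sizeof(uint64_t)) yields 96, so the data region
 * that follows the iovec array always starts 64-bit aligned.
 */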

/*
 * Allocate or pin log vector buffers for CIL insertion.
 *
 * The CIL currently uses disposable buffers for copying a snapshot of the
 * modified items into the log during a push. The biggest problem with this is
 * the requirement to allocate the disposable buffer during the commit if:
 *	a) it does not exist; or
 *	b) it is too small
 *
 * If we do this allocation within xlog_cil_insert_format_items(), it is done
 * under the xc_ctx_lock, which means that a CIL push cannot occur during
 * the memory allocation. This means that we have a potential deadlock situation
 * under low memory conditions when we have lots of dirty metadata pinned in
 * the CIL and we need a CIL commit to occur to free memory.
 *
 * To avoid this, we need to move the memory allocation outside the
 * xc_ctx_lock, but because the log vector buffers are disposable, that opens
 * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
 * vector buffers between the check and the formatting of the item into the
 * log vector buffer within the xc_ctx_lock.
 *
 * Because the log vector buffer needs to be unchanged during the CIL push
 * process, we cannot share the buffer between the transaction commit (which
 * modifies the buffer) and the CIL push context that is writing the changes
 * into the log. This means skipping preallocation of buffer space is
 * unreliable, but we most definitely do not want to be allocating and freeing
 * buffers unnecessarily during commits when overwrites can be done safely.
 *
 * The simplest solution to this problem is to allocate a shadow buffer when a
 * log item is committed for the second time, and then to only use this buffer
 * if necessary. The buffer can remain attached to the log item until such time
 * as it is needed, and this is the buffer that is reallocated to match the
 * size of the incoming modification. Then during the formatting of the item we
 * can swap the active buffer with the new one if we can't reuse the existing
 * buffer. We don't free the old buffer as it may be reused on the next
 * modification if its size is right, otherwise we'll free and reallocate it at
 * that point.
 *
 * This function builds a vector for the changes in each log item in the
 * transaction. It then works out the length of the buffer needed for each log
 * item, allocates them and attaches the vector to the log item in preparation
 * for the formatting step which occurs under the xc_ctx_lock.
 *
 * While this means the memory footprint goes up, it avoids the repeated
 * alloc/free pattern that repeated modifications of an item would otherwise
 * cause, and hence minimises the CPU overhead of such behaviour.
 */
static void
xlog_cil_alloc_shadow_bufs(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_log_item     *lip;

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                int     niovecs = 0;
                int     nbytes = 0;
                int     buf_size;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /* get number of vecs and size of data to be stored */
                lip->li_ops->iop_size(lip, &niovecs, &nbytes);

                /*
                 * Ordered items need to be tracked but we do not wish to write
                 * them. We need a logvec to track the object, but we do not
                 * need an iovec or buffer to be allocated for copying data.
                 */
                if (niovecs == XFS_LOG_VEC_ORDERED) {
                        ordered = true;
                        niovecs = 0;
                        nbytes = 0;
                }

                /*
                 * We 64-bit align the length of each iovec so that the start
                 * of the next one is naturally aligned. We'll need to
                 * account for that slack space here. Then round nbytes up
                 * to 64-bit alignment so that the initial buffer alignment is
                 * easy to calculate and verify.
                 */
                nbytes += niovecs * sizeof(uint64_t);
                nbytes = round_up(nbytes, sizeof(uint64_t));
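                /*
                 * Illustrative example (not in the original source): with
                 * niovecs = 2 and nbytes = 50, we add 2 * 8 = 16 bytes of
                 * worst-case inter-iovec padding, giving 66, and then round
                 * up to 72 so the buffer length itself stays 64-bit aligned.
                 */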

                /*
                 * The data buffer needs to start 64-bit aligned, so round up
                 * that space to ensure we can align it appropriately and not
                 * overrun the buffer.
                 */
                buf_size = nbytes + xlog_cil_iovec_space(niovecs);

                /*
                 * if we have no shadow buffer, or it is too small, we need to
                 * reallocate it.
                 */
                if (!lip->li_lv_shadow ||
                    buf_size > lip->li_lv_shadow->lv_size) {

                        /*
                         * We free and allocate here as a realloc would copy
                         * unnecessary data. We don't use kmem_zalloc() for the
                         * same reason - we don't need to zero the data area in
                         * the buffer, only the log vector header and the iovec
                         * storage.
                         */
                        kmem_free(lip->li_lv_shadow);

                        lv = kmem_alloc_large(buf_size, KM_NOFS);
                        memset(lv, 0, xlog_cil_iovec_space(niovecs));

                        lv->lv_item = lip;
                        lv->lv_size = buf_size;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
                        lip->li_lv_shadow = lv;
                } else {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv_shadow;
                        if (ordered)
                                lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
                        else
                                lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_next = NULL;
                }

                /* Ensure the lv is set up according to ->iop_size */
                lv->lv_niovecs = niovecs;

                /* The allocated data region lies beyond the iovec region */
                lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
        }
}
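
/*
 * Illustrative memory layout of a shadow buffer as allocated above (added
 * for clarity; not in the original source). A single allocation holds the
 * log vector header, the iovec array and the 64-bit aligned data region:
 *
 *	+--------------------+-----------------------+--------------------+
 *	| struct xfs_log_vec | niovecs iovec structs | formatted data ... |
 *	+--------------------+-----------------------+--------------------+
 *	^lv                  ^lv->lv_iovecp          ^lv->lv_buf
 *
 * lv_buf sits xlog_cil_iovec_space(niovecs) bytes past the start of the
 * allocation, which is why that helper rounds up to 64-bit alignment.
 */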

/*
 * Prepare the log item for insertion into the CIL. Calculate the difference in
 * log space and vectors it will consume, and if it is a new item pin it as
 * well.
 */
STATIC void
xfs_cil_prepare_item(
        struct xlog             *log,
        struct xfs_log_vec      *lv,
        struct xfs_log_vec      *old_lv,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        /* Account for the new LV being passed in */
        if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
                *diff_len += lv->lv_bytes;
                *diff_iovecs += lv->lv_niovecs;
        }

        /*
         * If there is no old LV, this is the first time we've seen the item in
         * this CIL context and so we need to pin it. If we are replacing the
         * old_lv, then remove the space it accounts for and make it the shadow
         * buffer for later freeing. In both cases we are now switching to the
         * shadow buffer, so update the pointer to it appropriately.
         */
        if (!old_lv) {
                if (lv->lv_item->li_ops->iop_pin)
                        lv->lv_item->li_ops->iop_pin(lv->lv_item);
                lv->lv_item->li_lv_shadow = NULL;
        } else if (old_lv != lv) {
                ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);

                *diff_len -= old_lv->lv_bytes;
                *diff_iovecs -= old_lv->lv_niovecs;
                lv->lv_item->li_lv_shadow = old_lv;
        }

        /* attach new log vector to log item */
        lv->lv_item->li_lv = lv;

        /*
         * If this is the first time the item is being committed to the
         * CIL, store the sequence number on the log item so we can
         * tell in future commits whether this is the first checkpoint
         * the item is being committed into.
         */
        if (!lv->lv_item->li_seq)
                lv->lv_item->li_seq = log->l_cilp->xc_ctx->sequence;
}
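
/*
 * Worked accounting example (illustrative; not in the original source): an
 * item first committed with lv_bytes = 128 and lv_niovecs = 2 contributes
 * +128/+2 to the diffs and gets pinned. If it is relogged into the same
 * checkpoint with a new lv of 160 bytes and 3 iovecs, the new lv adds
 * +160/+3 and the old lv's 128/2 is then subtracted, so the CIL accounting
 * grows by the net +32 bytes and +1 iovec rather than being double-counted.
 */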

/*
 * Format log items into flat buffers.
 *
 * For delayed logging, we need to hold a formatted buffer containing all the
 * changes on the log item. This enables us to relog the item in memory and
 * write it out asynchronously without needing to relock the object that was
 * modified at the time it gets written into the iclog.
 *
 * This function takes the prepared log vectors attached to each log item, and
 * formats the changes into the log vector buffer. The buffer it uses is
 * dependent on the current state of the vector in the CIL - the shadow lv is
 * guaranteed to be large enough for the current modification, but we will only
 * use that if we can't reuse the existing lv. If we can't reuse the existing
 * lv, then simply swap it out for the shadow lv. We don't free it - that is
 * done lazily either by the next modification or the freeing of the log item.
 *
 * We don't set up region headers during this process; we simply copy the
 * regions into the flat buffer. We can do this because we still have to do a
 * formatting step to write the regions into the iclog buffer. Writing the
 * ophdrs during the iclog write means that we can support splitting large
 * regions across iclog boundaries without needing a change in the format of
 * the item/region encapsulation.
 *
 * Hence what we need to do now is rewrite the vector array to point to the
 * copied region inside the buffer we just allocated. This allows us to format
 * the regions into the iclog as though they are being formatted directly out
 * of the objects themselves.
 */
static void
xlog_cil_insert_format_items(
        struct xlog             *log,
        struct xfs_trans        *tp,
        int                     *diff_len,
        int                     *diff_iovecs)
{
        struct xfs_log_item     *lip;

        /* Bail out if we didn't find a log item. */
        if (list_empty(&tp->t_items)) {
                ASSERT(0);
                return;
        }

        list_for_each_entry(lip, &tp->t_items, li_trans) {
                struct xfs_log_vec *lv;
                struct xfs_log_vec *old_lv = NULL;
                struct xfs_log_vec *shadow;
                bool    ordered = false;

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * The formatting size information is already attached to
                 * the shadow lv on the log item.
                 */
                shadow = lip->li_lv_shadow;
                if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
                        ordered = true;

                /* Skip items that do not have any vectors for writing */
                if (!shadow->lv_niovecs && !ordered)
                        continue;

                /* compare to existing item size */
                old_lv = lip->li_lv;
                if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
                        /* same or smaller, optimise common overwrite case */
                        lv = lip->li_lv;
                        lv->lv_next = NULL;

                        if (ordered)
                                goto insert;

                        /*
                         * set the item up as though it is a new insertion so
                         * that the space reservation accounting is correct.
                         */
                        *diff_iovecs -= lv->lv_niovecs;
                        *diff_len -= lv->lv_bytes;

                        /* Ensure the lv is set up according to ->iop_size */
                        lv->lv_niovecs = shadow->lv_niovecs;

                        /* reset the lv buffer information for new formatting */
                        lv->lv_buf_len = 0;
                        lv->lv_bytes = 0;
                        lv->lv_buf = (char *)lv +
                                        xlog_cil_iovec_space(lv->lv_niovecs);
                } else {
                        /* switch to shadow buffer! */
                        lv = shadow;
                        lv->lv_item = lip;
                        if (ordered) {
                                /* track as an ordered logvec */
                                ASSERT(lip->li_lv == NULL);
                                goto insert;
                        }
                }

                ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
                lip->li_ops->iop_format(lip, lv);
insert:
                xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
        }
}

/*
 * Insert the log items into the CIL and calculate the difference in space
 * consumed by the item. Add the space to the checkpoint ticket and calculate
 * if the change requires additional log metadata. If it does, take that space
 * as well. Remove the amount of space we added to the checkpoint ticket from
 * the current transaction ticket so that the accounting works out correctly.
 */
static void
xlog_cil_insert_items(
        struct xlog             *log,
        struct xfs_trans        *tp)
{
        struct xfs_cil          *cil = log->l_cilp;
        struct xfs_cil_ctx      *ctx = cil->xc_ctx;
        struct xfs_log_item     *lip;
        int                     len = 0;
        int                     diff_iovecs = 0;
        int                     iclog_space;
        int                     iovhdr_res = 0, split_res = 0, ctx_res = 0;

        ASSERT(tp);

        /*
         * We can do this safely because the context can't checkpoint until we
         * are done so it doesn't matter exactly how we update the CIL.
         */
        xlog_cil_insert_format_items(log, tp, &len, &diff_iovecs);

        spin_lock(&cil->xc_cil_lock);

        /* account for space used by new iovec headers */
        iovhdr_res = diff_iovecs * sizeof(xlog_op_header_t);
        len += iovhdr_res;
        ctx->nvecs += diff_iovecs;

        /* attach the transaction to the CIL if it has any busy extents */
        if (!list_empty(&tp->t_busy))
                list_splice_init(&tp->t_busy, &ctx->busy_extents);

        /*
         * Now transfer enough transaction reservation to the context ticket
         * for the checkpoint. The context ticket is special - the unit
         * reservation has to grow as well as the current reservation as we
         * steal from tickets so we can correctly determine the space used
         * during the transaction commit.
         */
        if (ctx->ticket->t_curr_res == 0) {
                ctx_res = ctx->ticket->t_unit_res;
                ctx->ticket->t_curr_res = ctx_res;
                tp->t_ticket->t_curr_res -= ctx_res;
        }

        /* do we need space for more log record headers? */
        iclog_space = log->l_iclog_size - log->l_iclog_hsize;
        if (len > 0 && (ctx->space_used / iclog_space !=
                                (ctx->space_used + len) / iclog_space)) {
                split_res = (len + iclog_space - 1) / iclog_space;
                /* need to take into account split region headers, too */
                split_res *= log->l_iclog_hsize + sizeof(struct xlog_op_header);
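                /*
                 * Worked example (illustrative; not in the original source):
                 * with a 32KB iclog and a 512 byte header, iclog_space is
                 * 32256 bytes. If this commit adds len = 40000 bytes, it can
                 * cross at most (40000 + 32255) / 32256 = 2 iclog boundaries,
                 * so we steal space for two extra record headers plus two
                 * split-region ophdrs from the transaction ticket.
                 */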
                ctx->ticket->t_unit_res += split_res;
                ctx->ticket->t_curr_res += split_res;
                tp->t_ticket->t_curr_res -= split_res;
                ASSERT(tp->t_ticket->t_curr_res >= len);
        }
        tp->t_ticket->t_curr_res -= len;
        ctx->space_used += len;

        /*
         * If we've overrun the reservation, dump the tx details before we move
         * the log items. Shutdown is imminent...
         */
        if (WARN_ON(tp->t_ticket->t_curr_res < 0)) {
                xfs_warn(log->l_mp, "Transaction log reservation overrun:");
                xfs_warn(log->l_mp,
                         "  log items: %d bytes (iov hdrs: %d bytes)",
                         len, iovhdr_res);
                xfs_warn(log->l_mp, "  split region headers: %d bytes",
                         split_res);
                xfs_warn(log->l_mp, "  ctx ticket: %d bytes", ctx_res);
                xlog_print_trans(tp);
        }

        /*
         * Now (re-)position everything modified at the tail of the CIL.
         * We do this here so we only need to take the CIL lock once during
         * the transaction commit.
         */
        list_for_each_entry(lip, &tp->t_items, li_trans) {

                /* Skip items which aren't dirty in this transaction. */
                if (!test_bit(XFS_LI_DIRTY, &lip->li_flags))
                        continue;

                /*
                 * Only move the item if it isn't already at the tail. This is
                 * to prevent a transient list_empty() state when reinserting
                 * an item that is already the only item in the CIL.
                 */
                if (!list_is_last(&lip->li_cil, &cil->xc_cil))
                        list_move_tail(&lip->li_cil, &cil->xc_cil);
        }

        spin_unlock(&cil->xc_cil_lock);

        if (tp->t_ticket->t_curr_res < 0)
                xfs_force_shutdown(log->l_mp, SHUTDOWN_LOG_IO_ERROR);
}

static void
xlog_cil_free_logvec(
        struct xfs_log_vec      *log_vector)
{
        struct xfs_log_vec      *lv;

        for (lv = log_vector; lv; ) {
                struct xfs_log_vec *next = lv->lv_next;
                kmem_free(lv);
                lv = next;
        }
}

static void
xlog_discard_endio_work(
        struct work_struct      *work)
{
        struct xfs_cil_ctx      *ctx =
                container_of(work, struct xfs_cil_ctx, discard_endio_work);
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;

        xfs_extent_busy_clear(mp, &ctx->busy_extents, false);
        kmem_free(ctx);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock. Note that we need an unbounded workqueue, otherwise we might
 * get the execution delayed up to 30 seconds for weird reasons.
 */
static void
xlog_discard_endio(
        struct bio              *bio)
{
        struct xfs_cil_ctx      *ctx = bio->bi_private;

        INIT_WORK(&ctx->discard_endio_work, xlog_discard_endio_work);
        queue_work(xfs_discard_wq, &ctx->discard_endio_work);
        bio_put(bio);
}

static void
xlog_discard_busy_extents(
        struct xfs_mount        *mp,
        struct xfs_cil_ctx      *ctx)
{
        struct list_head        *list = &ctx->busy_extents;
        struct xfs_extent_busy  *busyp;
        struct bio              *bio = NULL;
        struct blk_plug         plug;
        int                     error = 0;

        ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);

        blk_start_plug(&plug);
        list_for_each_entry(busyp, list, list) {
                trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
                                         busyp->length);

                error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
                                XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
                                XFS_FSB_TO_BB(mp, busyp->length),
                                GFP_NOFS, 0, &bio);
                if (error && error != -EOPNOTSUPP) {
                        xfs_info(mp,
                                 "discard failed for extent [0x%llx,%u], error %d",
                                 (unsigned long long)busyp->bno,
                                 busyp->length,
                                 error);
                        break;
                }
        }

        if (bio) {
                bio->bi_private = ctx;
                bio->bi_end_io = xlog_discard_endio;
                submit_bio(bio);
        } else {
                xlog_discard_endio_work(&ctx->discard_endio_work);
        }
        blk_finish_plug(&plug);
}

/*
 * Mark all items committed and clear busy extents. We free the log vector
 * chains in a separate pass so that we unpin the log items as quickly as
 * possible.
 */
static void
xlog_cil_committed(
        struct xfs_cil_ctx      *ctx)
{
        struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
        bool                    abort = XLOG_FORCED_SHUTDOWN(ctx->cil->xc_log);

        /*
         * If the I/O failed, we're aborting the commit and already shutdown.
         * Wake any commit waiters before aborting the log items so we don't
         * block async log pushers on callbacks. Async log pushers explicitly do
         * not wait on log force completion because they may be holding locks
         * required to unpin items.
         */
        if (abort) {
                spin_lock(&ctx->cil->xc_push_lock);
                wake_up_all(&ctx->cil->xc_commit_wait);
                spin_unlock(&ctx->cil->xc_push_lock);
        }

        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);

        xfs_extent_busy_sort(&ctx->busy_extents);
        xfs_extent_busy_clear(mp, &ctx->busy_extents,
                              (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);

        spin_lock(&ctx->cil->xc_push_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_push_lock);

        xlog_cil_free_logvec(ctx->lv_chain);

        if (!list_empty(&ctx->busy_extents))
                xlog_discard_busy_extents(mp, ctx);
        else
                kmem_free(ctx);
}

void
xlog_cil_process_committed(
        struct list_head        *list)
{
        struct xfs_cil_ctx      *ctx;

        while ((ctx = list_first_entry_or_null(list,
                        struct xfs_cil_ctx, iclog_entry))) {
                list_del(&ctx->iclog_entry);
                xlog_cil_committed(ctx);
        }
}

/*
 * Push the Committed Item List to the log.
 *
 * If the current sequence is the same as xc_push_seq we need to do a flush. If
 * xc_push_seq is less than the current sequence, then it has already been
 * flushed and we don't need to do anything - the caller will wait for it to
 * complete if necessary.
 *
 * xc_push_seq is checked unlocked against the sequence number for a match.
 * Hence we can allow log forces to run racily and not issue pushes for the
 * same sequence twice. If we get a race between multiple pushes for the same
 * sequence they will block on the first one and then abort, hence avoiding
 * needless pushes.
 */
static void
xlog_cil_push_work(
        struct work_struct      *work)
{
        struct xfs_cil          *cil =
                container_of(work, struct xfs_cil, xc_push_work);
        struct xlog             *log = cil->xc_log;
        struct xfs_log_vec      *lv;
        struct xfs_cil_ctx      *ctx;
        struct xfs_cil_ctx      *new_ctx;
        struct xlog_in_core     *commit_iclog;
        struct xlog_ticket      *tic;
        int                     num_iovecs;
        int                     error = 0;
        struct xfs_trans_header thdr;
        struct xfs_log_iovec    lhdr;
        struct xfs_log_vec      lvhdr = { NULL };
        xfs_lsn_t               commit_lsn;
        xfs_lsn_t               push_seq;

        new_ctx = kmem_zalloc(sizeof(*new_ctx), KM_NOFS);
        new_ctx->ticket = xlog_cil_ticket_alloc(log);

        down_write(&cil->xc_ctx_lock);
        ctx = cil->xc_ctx;

        spin_lock(&cil->xc_push_lock);
        push_seq = cil->xc_push_seq;
        ASSERT(push_seq <= ctx->sequence);

        /*
         * As we are about to switch to a new, empty CIL context, we no longer
         * need to throttle tasks on CIL space overruns. Wake any waiters that
         * the hard push throttle may have caught so they can start committing
         * to the new context. The ctx->xc_push_lock provides the serialisation
         * necessary for safely using the lockless waitqueue_active() check in
         * this context.
         */
        if (waitqueue_active(&cil->xc_push_wait))
                wake_up_all(&cil->xc_push_wait);

        /*
         * Check if we've anything to push. If there is nothing, then we don't
         * move on to a new sequence number and so we have to be able to push
         * this sequence again later.
         */
        if (list_empty(&cil->xc_cil)) {
                cil->xc_push_seq = 0;
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /* check for a previously pushed sequence */
        if (push_seq < cil->xc_ctx->sequence) {
                spin_unlock(&cil->xc_push_lock);
                goto out_skip;
        }

        /*
         * We are now going to push this context, so add it to the committing
         * list before we do anything else. This ensures that anyone waiting on
         * this push can easily detect the difference between a "push in
         * progress" and "CIL is empty, nothing to do".
         *
         * IOWs, a wait loop can now check for:
         *	the current sequence not being found on the committing list;
         *	an empty CIL; and
         *	an unchanged sequence number
         * to detect a push that had nothing to do and therefore does not need
         * waiting on. If the CIL is not empty, we get put on the committing
         * list before emptying the CIL and bumping the sequence number. Hence
         * an empty CIL and an unchanged sequence number means we jumped out
         * above after doing nothing.
         *
         * Hence the waiter will either find the commit sequence on the
         * committing list or the sequence number will be unchanged and the CIL
         * still dirty. In that latter case, the push has not yet started, and
         * so the waiter will have to continue trying to check the CIL
         * committing list until it is found. In extreme cases of delay, the
         * sequence may fully commit between the attempts the waiter makes to
         * wait on the commit sequence.
         */
        list_add(&ctx->committing, &cil->xc_committing);
        spin_unlock(&cil->xc_push_lock);

        /*
         * pull all the log vectors off the items in the CIL, and
         * remove the items from the CIL. We don't need the CIL lock
         * here because it's only needed on the transaction commit
         * side which is currently locked out by the flush lock.
         */
        lv = NULL;
        num_iovecs = 0;
        while (!list_empty(&cil->xc_cil)) {
                struct xfs_log_item     *item;

                item = list_first_entry(&cil->xc_cil,
                                        struct xfs_log_item, li_cil);
                list_del_init(&item->li_cil);
                if (!ctx->lv_chain)
                        ctx->lv_chain = item->li_lv;
                else
                        lv->lv_next = item->li_lv;
                lv = item->li_lv;
                item->li_lv = NULL;
                num_iovecs += lv->lv_niovecs;
        }

        /*
         * initialise the new context and attach it to the CIL. Then attach
         * the current context to the CIL committing list so it can be found
         * during log forces to extract the commit lsn of the sequence that
         * needs to be forced.
         */
        INIT_LIST_HEAD(&new_ctx->committing);
        INIT_LIST_HEAD(&new_ctx->busy_extents);
        new_ctx->sequence = ctx->sequence + 1;
        new_ctx->cil = cil;
        cil->xc_ctx = new_ctx;

        /*
         * The switch is now done, so we can drop the context lock and move out
         * of a shared context. We can't just go straight to the commit record,
         * though - we need to synchronise with previous and future commits so
         * that the commit records are correctly ordered in the log to ensure
         * that we process items during log IO completion in the correct order.
         *
         * For example, if we get an EFI in one checkpoint and the EFD in the
         * next (e.g. due to log forces), we do not want the checkpoint with
         * the EFD to be committed before the checkpoint with the EFI. Hence
         * we must strictly order the commit records of the checkpoints so
         * that: a) the checkpoint callbacks are attached to the iclogs in the
         * correct order; and b) the checkpoints are replayed in correct order
         * in log recovery.
         *
         * Hence we need to add this context to the committing context list so
         * that higher sequences will wait for us to write out a commit record
         * before they do.
         *
         * xfs_log_force_seq requires us to mirror the new sequence into the cil
         * structure atomically with the addition of this sequence to the
         * committing list. This also ensures that we can do unlocked checks
         * against the current sequence in log forces without risking
         * dereferencing a freed context pointer.
         */
        spin_lock(&cil->xc_push_lock);
        cil->xc_current_sequence = new_ctx->sequence;
        spin_unlock(&cil->xc_push_lock);
        up_write(&cil->xc_ctx_lock);

        /*
         * Build a checkpoint transaction header and write it to the log to
         * begin the transaction. We need to account for the space used by the
         * transaction header here as it is not accounted for in xlog_write().
         *
         * The LSN we need to pass to the log items on transaction commit is
         * the LSN reported by the first log vector write. If we use the commit
         * record lsn then we can move the tail beyond the grant write head.
         */
        tic = ctx->ticket;
        thdr.th_magic = XFS_TRANS_HEADER_MAGIC;
        thdr.th_type = XFS_TRANS_CHECKPOINT;
        thdr.th_tid = tic->t_tid;
        thdr.th_num_items = num_iovecs;
        lhdr.i_addr = &thdr;
        lhdr.i_len = sizeof(xfs_trans_header_t);
        lhdr.i_type = XLOG_REG_TYPE_TRANSHDR;
        tic->t_curr_res -= lhdr.i_len + sizeof(xlog_op_header_t);

        lvhdr.lv_niovecs = 1;
        lvhdr.lv_iovecp = &lhdr;
        lvhdr.lv_next = ctx->lv_chain;

        error = xlog_write(log, &lvhdr, tic, &ctx->start_lsn, NULL, 0, true);
        if (error)
                goto out_abort_free_ticket;

        /*
         * now that we've written the checkpoint into the log, strictly
         * order the commit records so replay will get them in the right order.
         */
restart:
        spin_lock(&cil->xc_push_lock);
        list_for_each_entry(new_ctx, &cil->xc_committing, committing) {
                /*
                 * Avoid getting stuck in this loop because we were woken by the
                 * shutdown, but then went back to sleep once already in the
                 * shutdown state.
                 */
                if (XLOG_FORCED_SHUTDOWN(log)) {
                        spin_unlock(&cil->xc_push_lock);
                        goto out_abort_free_ticket;
                }

                /*
                 * Higher sequences will wait for this one so skip them.
                 * Don't wait for our own sequence, either.
                 */
                if (new_ctx->sequence >= ctx->sequence)
                        continue;
                if (!new_ctx->commit_lsn) {
                        /*
                         * It is still being pushed! Wait for the push to
                         * complete, then start again from the beginning.
                         */
                        xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
                        goto restart;
                }
        }
        spin_unlock(&cil->xc_push_lock);

        error = xlog_commit_record(log, tic, &commit_iclog, &commit_lsn);
        if (error)
                goto out_abort_free_ticket;

        xfs_log_ticket_ungrant(log, tic);

        spin_lock(&commit_iclog->ic_callback_lock);
        if (commit_iclog->ic_state == XLOG_STATE_IOERROR) {
                spin_unlock(&commit_iclog->ic_callback_lock);
                goto out_abort;
        }
        ASSERT_ALWAYS(commit_iclog->ic_state == XLOG_STATE_ACTIVE ||
                      commit_iclog->ic_state == XLOG_STATE_WANT_SYNC);
        list_add_tail(&ctx->iclog_entry, &commit_iclog->ic_callbacks);
        spin_unlock(&commit_iclog->ic_callback_lock);

        /*
         * now the checkpoint commit is complete and we've attached the
         * callbacks to the iclog we can assign the commit LSN to the context
         * and wake up anyone who is waiting for the commit to complete.
         */
        spin_lock(&cil->xc_push_lock);
        ctx->commit_lsn = commit_lsn;
        wake_up_all(&cil->xc_commit_wait);
        spin_unlock(&cil->xc_push_lock);

        /* release the hounds! */
        xfs_log_release_iclog(commit_iclog);
        return;

out_skip:
        up_write(&cil->xc_ctx_lock);
        xfs_log_ticket_put(new_ctx->ticket);
        kmem_free(new_ctx);
        return;

out_abort_free_ticket:
        xfs_log_ticket_ungrant(log, tic);
out_abort:
        ASSERT(XLOG_FORCED_SHUTDOWN(log));
        xlog_cil_committed(ctx);
}

/*
 * We need to push CIL every so often so we don't cache more than we can fit in
 * the log. The limit really is that a checkpoint can't be more than half the
 * log (the current checkpoint is not allowed to overwrite the previous
 * checkpoint), but commit latency and memory usage limit this to a smaller
 * size.
 */
static void
xlog_cil_push_background(
        struct xlog     *log) __releases(cil->xc_ctx_lock)
{
        struct xfs_cil  *cil = log->l_cilp;

        /*
         * The cil won't be empty because we are called while holding the
         * context lock so whatever we added to the CIL will still be there.
         */
        ASSERT(!list_empty(&cil->xc_cil));

        /*
         * Don't do a background push if we haven't used up all the
         * space available yet.
         */
        if (cil->xc_ctx->space_used < XLOG_CIL_SPACE_LIMIT(log)) {
                up_read(&cil->xc_ctx_lock);
                return;
        }

        spin_lock(&cil->xc_push_lock);
        if (cil->xc_push_seq < cil->xc_current_sequence) {
                cil->xc_push_seq = cil->xc_current_sequence;
                queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        }

        /*
         * Drop the context lock now, we can't hold that if we need to sleep
         * because we are over the blocking threshold. The push_lock is still
         * held, so blocking threshold sleep/wakeup is still correctly
         * serialised here.
         */
        up_read(&cil->xc_ctx_lock);

        /*
         * If we are well over the space limit, throttle the work that is being
         * done until the push work on this context has begun. Enforce the hard
         * throttle on all transaction commits once it has been activated, even
         * if the committing transactions have resulted in the space usage
         * dipping back down under the hard limit.
         *
         * The ctx->xc_push_lock provides the serialisation necessary for safely
         * using the lockless waitqueue_active() check in this context.
         */
        if (cil->xc_ctx->space_used >= XLOG_CIL_BLOCKING_SPACE_LIMIT(log) ||
            waitqueue_active(&cil->xc_push_wait)) {
                trace_xfs_log_cil_wait(log, cil->xc_ctx->ticket);
                ASSERT(cil->xc_ctx->space_used < log->l_logsize);
                xlog_wait(&cil->xc_push_wait, &cil->xc_push_lock);
                return;
        }

        spin_unlock(&cil->xc_push_lock);
}
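
/*
 * Illustrative note (added for clarity; the exact thresholds are
 * assumptions - see XLOG_CIL_SPACE_LIMIT and XLOG_CIL_BLOCKING_SPACE_LIMIT
 * in xfs_log_priv.h for the real definitions): if the background limit were
 * 1/8th of the log size, then on a 64MB log a checkpoint push would be
 * queued once the context has used about 8MB, and committers would only
 * start sleeping in xlog_cil_push_background() once usage crosses the much
 * larger blocking limit.
 */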

/*
 * xlog_cil_push_now() is used to trigger an immediate CIL push to the sequence
 * number that is passed. When it returns, the work will be queued for
 * @push_seq, but it won't be completed. The caller is expected to do any
 * waiting for push_seq to complete if it is required.
 */
static void
xlog_cil_push_now(
        struct xlog     *log,
        xfs_lsn_t       push_seq)
{
        struct xfs_cil  *cil = log->l_cilp;

        if (!cil)
                return;

        ASSERT(push_seq && push_seq <= cil->xc_current_sequence);

        /* start on any pending background push to minimise wait time on it */
        flush_work(&cil->xc_push_work);

        /*
         * If the CIL is empty or we've already pushed the sequence then
         * there's no work we need to do.
         */
        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil) || push_seq <= cil->xc_push_seq) {
                spin_unlock(&cil->xc_push_lock);
                return;
        }

        cil->xc_push_seq = push_seq;
        queue_work(log->l_mp->m_cil_workqueue, &cil->xc_push_work);
        spin_unlock(&cil->xc_push_lock);
}

bool
xlog_cil_empty(
        struct xlog     *log)
{
        struct xfs_cil  *cil = log->l_cilp;
        bool            empty = false;

        spin_lock(&cil->xc_push_lock);
        if (list_empty(&cil->xc_cil))
                empty = true;
        spin_unlock(&cil->xc_push_lock);
        return empty;
}
1008*4882a593Smuzhiyun
1009*4882a593Smuzhiyun /*
1010*4882a593Smuzhiyun * Commit a transaction with the given vector to the Committed Item List.
1011*4882a593Smuzhiyun *
1012*4882a593Smuzhiyun * To do this, we need to format the item, pin it in memory if required and
1013*4882a593Smuzhiyun * account for the space used by the transaction. Once we have done that we
1014*4882a593Smuzhiyun * need to release the unused reservation for the transaction, attach the
1015*4882a593Smuzhiyun * transaction to the checkpoint context so we carry the busy extents through
1016*4882a593Smuzhiyun * to checkpoint completion, and then unlock all the items in the transaction.
1017*4882a593Smuzhiyun *
1018*4882a593Smuzhiyun * Called with the context lock already held in read mode to lock out
1019*4882a593Smuzhiyun * background commit, returns without it held once background commits are
1020*4882a593Smuzhiyun * allowed again.
1021*4882a593Smuzhiyun */
1022*4882a593Smuzhiyun void
xlog_cil_commit(struct xlog * log,struct xfs_trans * tp,xfs_csn_t * commit_seq,bool regrant)1023*4882a593Smuzhiyun xlog_cil_commit(
1024*4882a593Smuzhiyun struct xlog *log,
1025*4882a593Smuzhiyun struct xfs_trans *tp,
1026*4882a593Smuzhiyun xfs_csn_t *commit_seq,
1027*4882a593Smuzhiyun bool regrant)
1028*4882a593Smuzhiyun {
1029*4882a593Smuzhiyun struct xfs_cil *cil = log->l_cilp;
1030*4882a593Smuzhiyun struct xfs_log_item *lip, *next;
1031*4882a593Smuzhiyun
1032*4882a593Smuzhiyun /*
1033*4882a593Smuzhiyun * Do all necessary memory allocation before we lock the CIL.
1034*4882a593Smuzhiyun * This ensures the allocation does not deadlock with a CIL
1035*4882a593Smuzhiyun * push in memory reclaim (e.g. from kswapd).
1036*4882a593Smuzhiyun */
1037*4882a593Smuzhiyun xlog_cil_alloc_shadow_bufs(log, tp);
1038*4882a593Smuzhiyun
1039*4882a593Smuzhiyun /* lock out background commit */
1040*4882a593Smuzhiyun down_read(&cil->xc_ctx_lock);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun xlog_cil_insert_items(log, tp);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun if (regrant && !XLOG_FORCED_SHUTDOWN(log))
1045*4882a593Smuzhiyun xfs_log_ticket_regrant(log, tp->t_ticket);
1046*4882a593Smuzhiyun else
1047*4882a593Smuzhiyun xfs_log_ticket_ungrant(log, tp->t_ticket);
1048*4882a593Smuzhiyun tp->t_ticket = NULL;
	xfs_trans_unreserve_and_mod_sb(tp);

	/*
	 * Once all the items of the transaction have been copied to the CIL,
	 * the items can be unlocked and possibly freed.
	 *
	 * This needs to be done before we drop the CIL context lock because we
	 * have to update state in the log items and unlock them before they go
	 * to disk. If we don't, then the CIL checkpoint can race with us and
	 * we can run checkpoint completion before we've updated and unlocked
	 * the log items. This affects (at least) processing of stale buffers,
	 * inodes and EFIs.
	 */
	trace_xfs_trans_commit_items(tp, _RET_IP_);
	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
		xfs_trans_del_item(lip);
		if (lip->li_ops->iop_committing)
			lip->li_ops->iop_committing(lip, cil->xc_ctx->sequence);
	}
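	/*
	 * Report the checkpoint sequence this transaction was committed to so
	 * that synchronous callers can force the log to that specific
	 * checkpoint.
	 */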
	if (commit_seq)
		*commit_seq = cil->xc_ctx->sequence;

	/* xlog_cil_push_background() releases cil->xc_ctx_lock */
	xlog_cil_push_background(log);
}

/*
 * Conditionally push the CIL based on the sequence passed in.
 *
 * We only need to push if we haven't already pushed the sequence number
 * given. Hence the only time we will trigger a push here is if the push
 * sequence is the same as the current context.
 *
 * We return the current commit lsn to allow the callers to determine if an
 * iclog flush is necessary following this call.
 */
xfs_lsn_t
xlog_cil_force_seq(
	struct xlog		*log,
	xfs_csn_t		sequence)
{
	struct xfs_cil		*cil = log->l_cilp;
	struct xfs_cil_ctx	*ctx;
	xfs_lsn_t		commit_lsn = NULLCOMMITLSN;

	ASSERT(sequence <= cil->xc_current_sequence);

	/*
	 * Check to see if we need to force out the current context.
	 * xlog_cil_push() handles racing pushes for the same sequence,
	 * so no need to deal with it here.
	 */
restart:
	xlog_cil_push_now(log, sequence);

	/*
	 * See if we can find a previous sequence still committing.
	 * We need to wait for all previous sequence commits to complete
	 * before allowing the force of push_seq to go ahead. Hence block
	 * on commits for those as well.
	 */
	spin_lock(&cil->xc_push_lock);
	list_for_each_entry(ctx, &cil->xc_committing, committing) {
		/*
		 * Avoid getting stuck in this loop because we were woken by the
		 * shutdown, but then went back to sleep once already in the
		 * shutdown state.
		 */
		if (XLOG_FORCED_SHUTDOWN(log))
			goto out_shutdown;
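		/* Sequences newer than the one being forced need no waiting. */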
		if (ctx->sequence > sequence)
			continue;
		if (!ctx->commit_lsn) {
			/*
			 * It is still being pushed! Wait for the push to
			 * complete, then start again from the beginning.
			 */
			xlog_wait(&cil->xc_commit_wait, &cil->xc_push_lock);
			goto restart;
		}
		if (ctx->sequence != sequence)
			continue;
		/* found it! */
		commit_lsn = ctx->commit_lsn;
	}

	/*
	 * The call to xlog_cil_push_now() executes the push in the background.
	 * Hence by the time we get here our sequence may not have been pushed
	 * yet. This is true if the current sequence still matches the push
	 * sequence after the above wait loop and the CIL still contains dirty
	 * objects. This is guaranteed by the push code first adding the
	 * context to the committing list before emptying the CIL.
	 *
	 * Hence if we don't find the context in the committing list and the
	 * current sequence number is unchanged then the CIL contents are
	 * significant. If the CIL is empty, it means there was nothing to push
	 * and that means there is nothing to wait for. If the CIL is not empty,
	 * it means we haven't yet started the push, because if it had started
	 * we would have found the context on the committing list.
	 */
	if (sequence == cil->xc_current_sequence &&
	    !list_empty(&cil->xc_cil)) {
		spin_unlock(&cil->xc_push_lock);
		goto restart;
	}

	spin_unlock(&cil->xc_push_lock);
	return commit_lsn;

	/*
	 * We detected a shutdown in progress. We need to trigger the log force
	 * to pass through its iclog state machine error handling, even though
	 * we are already in a shutdown state. Hence we can't return
	 * NULLCOMMITLSN here as that has special meaning to log forces (i.e.
	 * LSN is already stable), so we return a zero LSN instead.
	 */
out_shutdown:
	spin_unlock(&cil->xc_push_lock);
	return 0;
}

/*
 * Check if the current log item was first committed in this sequence.
 * We can't rely on just the log item being in the CIL, we have to check
 * the recorded commit sequence number.
 *
 * Note: for this to be used in a non-racy manner, it has to be called with
 * CIL flushing locked out. As a result, it should only be used during the
 * transaction commit process when deciding what to format into the item.
 */
bool
xfs_log_item_in_current_chkpt(
	struct xfs_log_item	*lip)
{
	struct xfs_cil		*cil = lip->li_mountp->m_log->l_cilp;

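	/* An item that is not on the CIL cannot be in any checkpoint. */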
	if (list_empty(&lip->li_cil))
		return false;

	/*
	 * li_seq is written on the first commit of a log item to record the
	 * first checkpoint it is written to. Hence if it is different to the
	 * current sequence, we're in a new checkpoint.
	 */
	return lip->li_seq == READ_ONCE(cil->xc_current_sequence);
}

/*
 * Perform initial CIL structure initialisation.
 */
int
xlog_cil_init(
	struct xlog		*log)
{
	struct xfs_cil		*cil;
	struct xfs_cil_ctx	*ctx;

	cil = kmem_zalloc(sizeof(*cil), KM_MAYFAIL);
	if (!cil)
		return -ENOMEM;

	ctx = kmem_zalloc(sizeof(*ctx), KM_MAYFAIL);
	if (!ctx) {
		kmem_free(cil);
		return -ENOMEM;
	}

	INIT_WORK(&cil->xc_push_work, xlog_cil_push_work);
	INIT_LIST_HEAD(&cil->xc_cil);
	INIT_LIST_HEAD(&cil->xc_committing);
	spin_lock_init(&cil->xc_cil_lock);
	spin_lock_init(&cil->xc_push_lock);
	init_waitqueue_head(&cil->xc_push_wait);
	init_rwsem(&cil->xc_ctx_lock);
	init_waitqueue_head(&cil->xc_commit_wait);

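	/*
	 * Set up the first checkpoint context. Sequence numbers start at 1,
	 * matching xlog_cil_init_post_recovery().
	 */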
	INIT_LIST_HEAD(&ctx->committing);
	INIT_LIST_HEAD(&ctx->busy_extents);
	ctx->sequence = 1;
	ctx->cil = cil;
	cil->xc_ctx = ctx;
	cil->xc_current_sequence = ctx->sequence;

	cil->xc_log = log;
	log->l_cilp = cil;
	return 0;
}

void
xlog_cil_destroy(
	struct xlog		*log)
{
	if (log->l_cilp->xc_ctx) {
		if (log->l_cilp->xc_ctx->ticket)
			xfs_log_ticket_put(log->l_cilp->xc_ctx->ticket);
		kmem_free(log->l_cilp->xc_ctx);
	}

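	/* All committed items must have been removed from the CIL by now. */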
	ASSERT(list_empty(&log->l_cilp->xc_cil));
	kmem_free(log->l_cilp);
}