xref: /OK3568_Linux_fs/kernel/fs/xfs/xfs_refcount_item.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t	*xfs_cui_zone;
kmem_zone_t	*xfs_cud_zone;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_zone, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence we use a
 * reference count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (atomic_dec_and_test(&cuip->cui_refcount)) {
		xfs_trans_ail_delete(&cuip->cui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_cui_item_free(cuip);
	}
}
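
/*
 * A rough sketch of the CUI reference lifecycle on the normal (i.e.
 * non-recovery) path. The exact call sites live in the deferred-ops
 * machinery, so this sequence is illustrative rather than literal:
 *
 *	cuip = xfs_cui_init(mp, count);      - refcount starts at 2
 *	(CUI commits; xfs_cui_item_unpin)    - drops the log's reference
 *	cudp = xfs_trans_get_cud(tp, cuip);  - the CUD takes over the other
 *	(CUD commits; xfs_cud_item_release)  - drops the last reference
 *	                                       and frees the CUI
 */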

STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * By the time this is called, the CUI has either been committed or aborted.
 * If the transaction was cancelled, a CUD isn't going to be constructed and
 * thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a cui item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_zone,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}
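
/*
 * Note on the two allocation paths above: the cui zone object is sized
 * for at most XFS_CUI_MAX_FAST_EXTENTS embedded extents (the zone setup
 * lives outside this file, in xfs_super.c), so anything larger falls
 * back to a plain heap allocation sized by xfs_cui_log_item_sizeof().
 * xfs_cui_item_free() applies the same size test when deciding between
 * kmem_free() and kmem_cache_free().
 */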

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 * Unlike the cui item, the cud has no extent slots to fill.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is committed when its transaction commits, or aborted if the
 * transaction is cancelled. If the transaction is cancelled, drop our
 * reference to the CUI and free the CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_cache_free(xfs_cud_zone, cudp);
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
};

static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails, in order to support the CUI/CUD
 * lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, type, startblock,
			blockcount, new_fsb, new_len, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	struct list_head		*a,
	struct list_head		*b)
{
	struct xfs_mount		*mp = priv;
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);
	return  XFS_FSB_TO_AGNO(mp, ra->ri_startblock) -
		XFS_FSB_TO_AGNO(mp, rb->ri_startblock);
}
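
/*
 * Sorting the intents by AG number (above) keeps refcount btree locking
 * in ascending AG order when one transaction carries updates for several
 * AGs, matching the usual XFS convention for avoiding ABBA deadlocks
 * between concurrent transactions.
 */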

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*refc,
	enum xfs_refcount_intent_type	type)
{
	refc->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		refc->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*refc)
{
	uint				next_extent;
	struct xfs_phys_extent		*ext;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	ext = &cuip->cui_format.cui_extents[next_extent];
	ext->pe_startblock = refc->ri_startblock;
	ext->pe_len = refc->ri_blockcount;
	xfs_trans_set_refcount_flags(ext, refc->ri_type);
}

static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*refc;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(refc, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, refc);
	return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*refc;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_aglen;
	int				error;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done),
			refc->ri_type, refc->ri_startblock, refc->ri_blockcount,
			&new_fsb, &new_aglen, state);

	/* Did we run out of reservation?  Requeue what we didn't finish. */
	if (!error && new_aglen > 0) {
		ASSERT(refc->ri_type == XFS_REFCOUNT_INCREASE ||
		       refc->ri_type == XFS_REFCOUNT_DECREASE);
		refc->ri_startblock = new_fsb;
		refc->ri_blockcount = new_aglen;
		return -EAGAIN;
	}
	kmem_free(refc);
	return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*refc;

	refc = container_of(item, struct xfs_refcount_intent, ri_list);
	kmem_free(refc);
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};
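
/*
 * A sketch of how a deferred refcount update moves through the hooks
 * above, assuming the generic defer machinery drives them in the usual
 * order:
 *
 *	->create_intent		log a CUI covering the queued updates
 *	->create_done		attach a CUD to the finishing transaction
 *	->finish_item		apply one update to the refcountbt; a
 *				partial completion requeues via -EAGAIN
 *	->abort_intent		drop the CUI reference if we have to bail
 *	->cancel_item		free an update that will never be finished
 */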

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_phys_extent		*refc;
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			new_fsb;
	xfs_extlen_t			new_len;
	unsigned int			refc_type;
	bool				op_ok;
	bool				requeue_only = false;
	enum xfs_refcount_intent_type	type;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				   XFS_FSB_TO_DADDR(mp, refc->pe_startblock));
		switch (refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    refc->pe_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    refc->pe_len >= mp->m_sb.sb_agblocks ||
		    (refc->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS))
			return -EFSCORRUPTED;
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed-in defer_ops to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		refc = &cuip->cui_format.cui_extents[i];
		refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			type = refc_type;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		if (requeue_only) {
			new_fsb = refc->pe_startblock;
			new_len = refc->pe_len;
		} else {
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
				type, refc->pe_startblock, refc->pe_len,
				&new_fsb, &new_len, &rcur);
		}
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (new_len > 0) {
			irec.br_startblock = new_fsb;
			irec.br_blockcount = new_len;
			switch (type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}
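
/*
 * Note on requeue_only above: once a single update comes back
 * unfinished, every remaining extent in the CUI is re-deferred rather
 * than finished in this transaction, since the reservation has already
 * been partially consumed; re-deferring also keeps the recovered
 * updates in their original order across the continuation transactions.
 */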

STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*extp;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	extp = CUI_ITEM(intent)->cui_format.cui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, extp, count * sizeof(*extp));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}
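
/*
 * Relogging, sketched: the old CUI is retired with a CUD in the same
 * transaction that logs a fresh CUI carrying identical extents, so the
 * pending work moves to the head of the log and stops pinning the tail.
 */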

static const struct xfs_item_ops xfs_cui_item_ops = {
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};

/*
 * Copy a CUI format buffer from the given buf into the destination
 * CUI format structure.  The CUI/CUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_cui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_cui_log_format	*dst_cui_fmt)
{
	struct xfs_cui_log_format	*src_cui_fmt;
	uint				len;

	src_cui_fmt = buf->i_addr;
	len = xfs_cui_log_format_sizeof(src_cui_fmt->cui_nextents);

	if (buf->i_len == len) {
		memcpy(dst_cui_fmt, src_cui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;

	cui_formatp = item->ri_buf[0].i_addr;

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	error = xfs_cui_copy_format(&item->ri_buf[0], &cuip->cui_format);
	if (error) {
		xfs_cui_item_free(cuip);
		return error;
	}
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the
 * remaining CUI reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};