// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_rmap_item.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"

kmem_zone_t	*xfs_rui_zone;
kmem_zone_t	*xfs_rud_zone;

static const struct xfs_item_ops xfs_rui_item_ops;

static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rui_log_item, rui_item);
}

STATIC void
xfs_rui_item_free(
	struct xfs_rui_log_item	*ruip)
{
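	/*
	 * Items with more than XFS_RUI_MAX_FAST_EXTENTS extents were
	 * allocated from the heap in xfs_rui_init(), so free them the same
	 * way; everything else came from the zone.
	 */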
	if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
		kmem_free(ruip);
	else
		kmem_cache_free(xfs_rui_zone, ruip);
}

/*
 * Freeing the RUI requires that we remove it from the AIL if it has already
 * been placed there. However, the RUI may not yet have been placed in the AIL
 * when called by xfs_rui_release() from RUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the RUI.
 */
STATIC void
xfs_rui_release(
	struct xfs_rui_log_item	*ruip)
{
	ASSERT(atomic_read(&ruip->rui_refcount) > 0);
	if (atomic_dec_and_test(&ruip->rui_refcount)) {
		xfs_trans_ail_delete(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_rui_item_free(ruip);
	}
}

STATIC void
xfs_rui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rui log item. We use only 1 iovec, and we point that
 * at the rui_log_format structure embedded in the rui item.
 * It is at this point that we assert that all of the extent
 * slots in the rui item have been filled.
 */
STATIC void
xfs_rui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&ruip->rui_next_extent) ==
			ruip->rui_format.rui_nextents);

	ruip->rui_format.rui_type = XFS_LI_RUI;
	ruip->rui_format.rui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
			xfs_rui_log_format_sizeof(ruip->rui_format.rui_nextents));
}

/*
 * The unpin operation is the last place an RUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the RUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the RUI to either
 * construct and commit the RUD or drop the RUD's reference in the event of
 * error. Simply drop the log's RUI reference now that the log is done with it.
 */
STATIC void
xfs_rui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_rui_log_item	*ruip = RUI_ITEM(lip);

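	/*
	 * The remove flag is not needed here; dropping the reference via
	 * xfs_rui_release() deletes the item from the AIL when the last
	 * reference goes away.
	 */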
	xfs_rui_release(ruip);
}

/*
 * The RUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an RUD isn't going to be
 * constructed and thus we free the RUI here directly.
 */
STATIC void
xfs_rui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_rui_release(RUI_ITEM(lip));
}

/*
 * Allocate and initialize an rui item with the given number of extents.
 */
STATIC struct xfs_rui_log_item *
xfs_rui_init(
	struct xfs_mount	*mp,
	uint			nextents)
{
	struct xfs_rui_log_item	*ruip;

	ASSERT(nextents > 0);
	if (nextents > XFS_RUI_MAX_FAST_EXTENTS)
		ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0);
	else
		ruip = kmem_cache_zalloc(xfs_rui_zone,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
	ruip->rui_format.rui_nextents = nextents;
	ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
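	/*
	 * The RUI starts with two references: one that is dropped when the
	 * item is unpinned after the commit, and one that is handed to the
	 * RUD (or dropped by xfs_rui_item_release() if the transaction is
	 * cancelled first).
	 */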
	atomic_set(&ruip->rui_next_extent, 0);
	atomic_set(&ruip->rui_refcount, 2);

	return ruip;
}

/*
 * Copy an RUI format buffer from the given buf, and into the destination
 * RUI format structure. The RUI/RUD items were designed not to need any
 * special alignment handling.
 */
STATIC int
xfs_rui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_rui_log_format	*dst_rui_fmt)
{
	struct xfs_rui_log_format	*src_rui_fmt;
	uint				len;

	src_rui_fmt = buf->i_addr;
	len = xfs_rui_log_format_sizeof(src_rui_fmt->rui_nextents);

	if (buf->i_len != len) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
		return -EFSCORRUPTED;
	}

	memcpy(dst_rui_fmt, src_rui_fmt, len);
	return 0;
}

static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_rud_log_item, rud_item);
}

STATIC void
xfs_rud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_rud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given rud log item. We use only 1 iovec, and we point that
 * at the rud_log_format structure embedded in the rud item.
 * Unlike the RUI, the RUD carries no extent array, so there
 * are no extent slots to check here.
 */
STATIC void
xfs_rud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	rudp->rud_format.rud_type = XFS_LI_RUD;
	rudp->rud_format.rud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
			sizeof(struct xfs_rud_log_format));
}

/*
 * The RUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the RUI and free the
 * RUD.
 */
STATIC void
xfs_rud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_rud_log_item	*rudp = RUD_ITEM(lip);

	xfs_rui_release(rudp->rud_ruip);
	kmem_cache_free(xfs_rud_zone, rudp);
}

static const struct xfs_item_ops xfs_rud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_rud_item_size,
	.iop_format	= xfs_rud_item_format,
	.iop_release	= xfs_rud_item_release,
};

static struct xfs_rud_log_item *
xfs_trans_get_rud(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip)
{
	struct xfs_rud_log_item	*rudp;

	rudp = kmem_cache_zalloc(xfs_rud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD,
			  &xfs_rud_item_ops);
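	/*
	 * The RUD takes over the second reference that xfs_rui_init() placed
	 * on the RUI; xfs_rud_item_release() drops it when the RUD goes away.
	 */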
	rudp->rud_ruip = ruip;
	rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;

	xfs_trans_add_item(tp, &rudp->rud_item);
	return rudp;
}

/* Set the map extent flags for this reverse mapping. */
static void
xfs_trans_set_rmap_flags(
	struct xfs_map_extent		*rmap,
	enum xfs_rmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	rmap->me_flags = 0;
	if (state == XFS_EXT_UNWRITTEN)
		rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
	switch (type) {
	case XFS_RMAP_MAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
		break;
	case XFS_RMAP_MAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_MAP_SHARED;
		break;
	case XFS_RMAP_UNMAP:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
		break;
	case XFS_RMAP_UNMAP_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP_SHARED;
		break;
	case XFS_RMAP_CONVERT:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
		break;
	case XFS_RMAP_CONVERT_SHARED:
		rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT_SHARED;
		break;
	case XFS_RMAP_ALLOC:
		rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
		break;
	case XFS_RMAP_FREE:
		rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
		break;
	default:
		ASSERT(0);
	}
}

/*
 * Finish an rmap update and log it to the RUD. Note that the transaction is
 * marked dirty regardless of whether the rmap update succeeds or fails to
 * support the RUI/RUD lifecycle rules.
 */
static int
xfs_trans_log_finish_rmap_update(
	struct xfs_trans		*tp,
	struct xfs_rud_log_item		*rudp,
	enum xfs_rmap_intent_type	type,
	uint64_t			owner,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			blockcount,
	xfs_exntst_t			state,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
			startblock, blockcount, state, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the RUI and frees the RUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	return error;
}

/* Sort rmap intents by AG. */
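/*
 * Processing the updates in AG order keeps the AGF buffers locked in
 * ascending order, which avoids deadlocks when a single transaction
 * updates mappings in more than one AG.
 */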
static int
xfs_rmap_update_diff_items(
	void			*priv,
	struct list_head	*a,
	struct list_head	*b)
{
	struct xfs_mount	*mp = priv;
	struct xfs_rmap_intent	*ra;
	struct xfs_rmap_intent	*rb;

	ra = container_of(a, struct xfs_rmap_intent, ri_list);
	rb = container_of(b, struct xfs_rmap_intent, ri_list);
	return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
	       XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
}

/* Log rmap updates in the intent item. */
STATIC void
xfs_rmap_update_log_item(
	struct xfs_trans	*tp,
	struct xfs_rui_log_item	*ruip,
	struct xfs_rmap_intent	*rmap)
{
	uint			next_extent;
	struct xfs_map_extent	*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
	ASSERT(next_extent < ruip->rui_format.rui_nextents);
	map = &ruip->rui_format.rui_extents[next_extent];
	map->me_owner = rmap->ri_owner;
	map->me_startblock = rmap->ri_bmap.br_startblock;
	map->me_startoff = rmap->ri_bmap.br_startoff;
	map->me_len = rmap->ri_bmap.br_blockcount;
	xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
			rmap->ri_bmap.br_state);
}

static struct xfs_log_item *
xfs_rmap_update_create_intent(
	struct xfs_trans	*tp,
	struct list_head	*items,
	unsigned int		count,
	bool			sort)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_rui_log_item	*ruip = xfs_rui_init(mp, count);
	struct xfs_rmap_intent	*rmap;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &ruip->rui_item);
	if (sort)
		list_sort(mp, items, xfs_rmap_update_diff_items);
	list_for_each_entry(rmap, items, ri_list)
		xfs_rmap_update_log_item(tp, ruip, rmap);
	return &ruip->rui_item;
}

/* Get an RUD so we can process all the deferred rmap updates. */
static struct xfs_log_item *
xfs_rmap_update_create_done(
	struct xfs_trans	*tp,
	struct xfs_log_item	*intent,
	unsigned int		count)
{
	return &xfs_trans_get_rud(tp, RUI_ITEM(intent))->rud_item;
}

/* Process a deferred rmap update. */
STATIC int
xfs_rmap_update_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_rmap_intent	*rmap;
	int			error;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	error = xfs_trans_log_finish_rmap_update(tp, RUD_ITEM(done),
			rmap->ri_type, rmap->ri_owner, rmap->ri_whichfork,
			rmap->ri_bmap.br_startoff, rmap->ri_bmap.br_startblock,
			rmap->ri_bmap.br_blockcount, rmap->ri_bmap.br_state,
			state);
	kmem_free(rmap);
	return error;
}

/* Abort all pending RUIs. */
STATIC void
xfs_rmap_update_abort_intent(
	struct xfs_log_item	*intent)
{
	xfs_rui_release(RUI_ITEM(intent));
}

/* Cancel a deferred rmap update. */
STATIC void
xfs_rmap_update_cancel_item(
	struct list_head	*item)
{
	struct xfs_rmap_intent	*rmap;

	rmap = container_of(item, struct xfs_rmap_intent, ri_list);
	kmem_free(rmap);
}

const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
	.max_items	= XFS_RUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rmap_update_create_intent,
	.abort_intent	= xfs_rmap_update_abort_intent,
	.create_done	= xfs_rmap_update_create_done,
	.finish_item	= xfs_rmap_update_finish_item,
	.finish_cleanup = xfs_rmap_finish_one_cleanup,
	.cancel_item	= xfs_rmap_update_cancel_item,
};

/*
 * Process an rmap update intent item that was recovered from the log.
 * We need to update the rmapbt.
 */
STATIC int
xfs_rui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_rui_log_item		*ruip = RUI_ITEM(lip);
	struct xfs_map_extent		*rmap;
	struct xfs_rud_log_item		*rudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	xfs_fsblock_t			startblock_fsb;
	enum xfs_rmap_intent_type	type;
	xfs_exntst_t			state;
	bool				op_ok;
	int				i;
	int				whichfork;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * RUI. If any are bad, then assume that all are bad and
	 * just toss the RUI.
	 */
	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		startblock_fsb = XFS_BB_TO_FSB(mp,
				XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
		case XFS_RMAP_EXTENT_MAP_SHARED:
		case XFS_RMAP_EXTENT_UNMAP:
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
		case XFS_RMAP_EXTENT_CONVERT:
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
		case XFS_RMAP_EXTENT_ALLOC:
		case XFS_RMAP_EXTENT_FREE:
			op_ok = true;
			break;
		default:
			op_ok = false;
			break;
		}
		if (!op_ok || startblock_fsb == 0 ||
		    rmap->me_len == 0 ||
		    startblock_fsb >= mp->m_sb.sb_dblocks ||
		    rmap->me_len >= mp->m_sb.sb_agblocks ||
		    (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS))
			return -EFSCORRUPTED;
	}

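	/*
	 * Replay the updates in a new transaction, borrowing the itruncate
	 * reservation as one that is large enough for the rmapbt changes
	 * below.
	 */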
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_rmap_maxlevels, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;
	rudp = xfs_trans_get_rud(tp, ruip);

	for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
		rmap = &ruip->rui_format.rui_extents[i];
		state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
				XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
		whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
				XFS_ATTR_FORK : XFS_DATA_FORK;
		switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
		case XFS_RMAP_EXTENT_MAP:
			type = XFS_RMAP_MAP;
			break;
		case XFS_RMAP_EXTENT_MAP_SHARED:
			type = XFS_RMAP_MAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_UNMAP:
			type = XFS_RMAP_UNMAP;
			break;
		case XFS_RMAP_EXTENT_UNMAP_SHARED:
			type = XFS_RMAP_UNMAP_SHARED;
			break;
		case XFS_RMAP_EXTENT_CONVERT:
			type = XFS_RMAP_CONVERT;
			break;
		case XFS_RMAP_EXTENT_CONVERT_SHARED:
			type = XFS_RMAP_CONVERT_SHARED;
			break;
		case XFS_RMAP_EXTENT_ALLOC:
			type = XFS_RMAP_ALLOC;
			break;
		case XFS_RMAP_EXTENT_FREE:
			type = XFS_RMAP_FREE;
			break;
		default:
			XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
			error = -EFSCORRUPTED;
			goto abort_error;
		}
		error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
				rmap->me_owner, whichfork,
				rmap->me_startoff, rmap->me_startblock,
				rmap->me_len, state, &rcur);
		if (error)
			goto abort_error;
	}

	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, NULL, capture_list);

abort_error:
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

STATIC bool
xfs_rui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return RUI_ITEM(lip)->rui_format.rui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_rui_item_relog(
	struct xfs_log_item	*intent,
	struct xfs_trans	*tp)
{
	struct xfs_rud_log_item	*rudp;
	struct xfs_rui_log_item	*ruip;
	struct xfs_map_extent	*extp;
	unsigned int		count;

	count = RUI_ITEM(intent)->rui_format.rui_nextents;
	extp = RUI_ITEM(intent)->rui_format.rui_extents;

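	/*
	 * Log a RUD for the old RUI and a new RUI carrying the same extents;
	 * committing both in this transaction re-expresses the intent at the
	 * head of the log so the tail can move past the original RUI.
	 */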
	tp->t_flags |= XFS_TRANS_DIRTY;
	rudp = xfs_trans_get_rud(tp, RUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &rudp->rud_item.li_flags);

	ruip = xfs_rui_init(tp->t_mountp, count);
	memcpy(ruip->rui_format.rui_extents, extp, count * sizeof(*extp));
	atomic_set(&ruip->rui_next_extent, count);
	xfs_trans_add_item(tp, &ruip->rui_item);
	set_bit(XFS_LI_DIRTY, &ruip->rui_item.li_flags);
	return &ruip->rui_item;
}

static const struct xfs_item_ops xfs_rui_item_ops = {
	.iop_size	= xfs_rui_item_size,
	.iop_format	= xfs_rui_item_format,
	.iop_unpin	= xfs_rui_item_unpin,
	.iop_release	= xfs_rui_item_release,
	.iop_recover	= xfs_rui_item_recover,
	.iop_match	= xfs_rui_item_match,
	.iop_relog	= xfs_rui_item_relog,
};

/*
 * This routine is called to create an in-core extent rmap update
 * item from the rui format structure which was logged on disk.
 * It allocates an in-core rui, copies the extents from the format
 * structure into it, and adds the rui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_rui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_rui_log_item		*ruip;
	struct xfs_rui_log_format	*rui_formatp;

	rui_formatp = item->ri_buf[0].i_addr;

	ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
	error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
	if (error) {
		xfs_rui_item_free(ruip);
		return error;
	}
	atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &ruip->rui_item, lsn);
	xfs_rui_release(ruip);
	return 0;
}

const struct xlog_recover_item_ops xlog_rui_item_ops = {
	.item_type		= XFS_LI_RUI,
	.commit_pass2		= xlog_recover_rui_commit_pass2,
};

/*
 * This routine is called when an RUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding RUI if it
 * was still in the log. To do this it searches the AIL for the RUI with an id
 * equal to that in the RUD format structure. If we find it we drop the RUD
 * reference, which removes the RUI from the AIL and frees it.
 */
STATIC int
xlog_recover_rud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_rud_log_format	*rud_formatp;

	rud_formatp = item->ri_buf[0].i_addr;
	ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));

	xlog_recover_release_intent(log, XFS_LI_RUI, rud_formatp->rud_rui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_rud_item_ops = {
	.item_type		= XFS_LI_RUD,
	.commit_pass2		= xlog_recover_rud_commit_pass2,
};