1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2016 Oracle. All Rights Reserved.
4*4882a593Smuzhiyun * Author: Darrick J. Wong <darrick.wong@oracle.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun #include "xfs.h"
7*4882a593Smuzhiyun #include "xfs_fs.h"
8*4882a593Smuzhiyun #include "xfs_format.h"
9*4882a593Smuzhiyun #include "xfs_log_format.h"
10*4882a593Smuzhiyun #include "xfs_trans_resv.h"
11*4882a593Smuzhiyun #include "xfs_bit.h"
12*4882a593Smuzhiyun #include "xfs_shared.h"
13*4882a593Smuzhiyun #include "xfs_mount.h"
14*4882a593Smuzhiyun #include "xfs_defer.h"
15*4882a593Smuzhiyun #include "xfs_inode.h"
16*4882a593Smuzhiyun #include "xfs_trans.h"
17*4882a593Smuzhiyun #include "xfs_trans_priv.h"
18*4882a593Smuzhiyun #include "xfs_bmap_item.h"
19*4882a593Smuzhiyun #include "xfs_log.h"
20*4882a593Smuzhiyun #include "xfs_bmap.h"
21*4882a593Smuzhiyun #include "xfs_icache.h"
22*4882a593Smuzhiyun #include "xfs_bmap_btree.h"
23*4882a593Smuzhiyun #include "xfs_trans_space.h"
24*4882a593Smuzhiyun #include "xfs_error.h"
25*4882a593Smuzhiyun #include "xfs_log_priv.h"
26*4882a593Smuzhiyun #include "xfs_log_recover.h"
27*4882a593Smuzhiyun #include "xfs_quota.h"
28*4882a593Smuzhiyun
/* Slab caches for the BUI (intent) and BUD (done) log items. */
kmem_zone_t	*xfs_bui_zone;
kmem_zone_t	*xfs_bud_zone;

/* Forward declaration; the ops table is defined near the bottom of this file. */
static const struct xfs_item_ops xfs_bui_item_ops;
33*4882a593Smuzhiyun
/* Convert a generic log item back to its containing BUI log item. */
static inline struct xfs_bui_log_item *BUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bui_log_item, bui_item);
}
38*4882a593Smuzhiyun
/* Free the memory backing a BUI log item. */
STATIC void
xfs_bui_item_free(
	struct xfs_bui_log_item	*buip)
{
	kmem_cache_free(xfs_bui_zone, buip);
}
45*4882a593Smuzhiyun
/*
 * Freeing the BUI requires that we remove it from the AIL if it has already
 * been placed there. However, the BUI may not yet have been placed in the AIL
 * when called by xfs_bui_release() from BUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the BUI.
 */
STATIC void
xfs_bui_release(
	struct xfs_bui_log_item	*buip)
{
	ASSERT(atomic_read(&buip->bui_refcount) > 0);
	if (atomic_dec_and_test(&buip->bui_refcount)) {
		/* AIL removal is a no-op if the item was never inserted. */
		xfs_trans_ail_delete(&buip->bui_item, SHUTDOWN_LOG_IO_ERROR);
		xfs_bui_item_free(buip);
	}
}
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun
/*
 * Report the number of log iovecs and the log space needed to log the given
 * BUI item: one iovec sized to hold the format structure together with its
 * trailing array of mapping extents.
 */
STATIC void
xfs_bui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents);
}
76*4882a593Smuzhiyun
/*
 * This is called to fill in the vector of log iovecs for the
 * given bui log item. We use only 1 iovec, and we point that
 * at the bui_log_format structure embedded in the bui item.
 * It is at this point that we assert that all of the extent
 * slots in the bui item have been filled.
 */
STATIC void
xfs_bui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	/* Every reserved extent slot must have been logged before format. */
	ASSERT(atomic_read(&buip->bui_next_extent) ==
			buip->bui_format.bui_nextents);

	buip->bui_format.bui_type = XFS_LI_BUI;
	buip->bui_format.bui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUI_FORMAT, &buip->bui_format,
			xfs_bui_log_format_sizeof(buip->bui_format.bui_nextents));
}
101*4882a593Smuzhiyun
/*
 * The unpin operation is the last place an BUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the BUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the BUI to either construct
 * and commit the BUD or drop the BUD's reference in the event of error. Simply
 * drop the log's BUI reference now that the log is done with it.
 */
STATIC void
xfs_bui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_bui_log_item	*buip = BUI_ITEM(lip);

	xfs_bui_release(buip);
}
119*4882a593Smuzhiyun
/*
 * The BUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, an BUD isn't going to be
 * constructed and thus we free the BUI here directly.
 */
STATIC void
xfs_bui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_bui_release(BUI_ITEM(lip));
}
131*4882a593Smuzhiyun
/*
 * Allocate and initialize a BUI item.  The extent count is fixed at
 * XFS_BUI_MAX_FAST_EXTENTS; the format id is the item's own address so that
 * the matching BUD can find it during recovery.
 */
STATIC struct xfs_bui_log_item *
xfs_bui_init(
	struct xfs_mount		*mp)

{
	struct xfs_bui_log_item		*buip;

	buip = kmem_cache_zalloc(xfs_bui_zone, GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops);
	buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS;
	buip->bui_format.bui_id = (uintptr_t)(void *)buip;
	atomic_set(&buip->bui_next_extent, 0);
	/* Two references: one for the log (dropped at unpin), one for the BUD. */
	atomic_set(&buip->bui_refcount, 2);

	return buip;
}
152*4882a593Smuzhiyun
/* Convert a generic log item back to its containing BUD log item. */
static inline struct xfs_bud_log_item *BUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_bud_log_item, bud_item);
}
157*4882a593Smuzhiyun
/*
 * Report the number of log iovecs and the log space needed to log the given
 * BUD item: one iovec holding just the fixed-size done-format structure.
 */
STATIC void
xfs_bud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_bud_log_format);
}
167*4882a593Smuzhiyun
/*
 * This is called to fill in the vector of log iovecs for the
 * given bud log item. We use only 1 iovec, and we point that
 * at the bud_log_format structure embedded in the bud item.
 * The BUD carries no extent array of its own; it only references the
 * BUI it completes via bud_bui_id.
 */
STATIC void
xfs_bud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	budp->bud_format.bud_type = XFS_LI_BUD;
	budp->bud_format.bud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_BUD_FORMAT, &budp->bud_format,
			sizeof(struct xfs_bud_log_format));
}
189*4882a593Smuzhiyun
/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	xfs_bui_release(budp->bud_buip);
	kmem_cache_free(xfs_bud_zone, budp);
}
204*4882a593Smuzhiyun
/* Log item operations for the BUD (bmap-update done) item. */
static const struct xfs_item_ops xfs_bud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED,
	.iop_size	= xfs_bud_item_size,
	.iop_format	= xfs_bud_item_format,
	.iop_release	= xfs_bud_item_release,
};
211*4882a593Smuzhiyun
/*
 * Allocate a BUD that will complete the given BUI, link the two together,
 * and attach the BUD to the transaction.
 */
static struct xfs_bud_log_item *
xfs_trans_get_bud(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_bud_log_item		*budp;

	budp = kmem_cache_zalloc(xfs_bud_zone, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD,
			  &xfs_bud_item_ops);
	budp->bud_buip = buip;
	budp->bud_format.bud_bui_id = buip->bui_format.bui_id;

	xfs_trans_add_item(tp, &budp->bud_item);
	return budp;
}
228*4882a593Smuzhiyun
/*
 * Finish an bmap update and log it to the BUD. Note that the
 * transaction is marked dirty regardless of whether the bmap update
 * succeeds or fails to support the BUI/BUD lifecycle rules.
 *
 * On return *blockcount holds the number of blocks NOT yet processed;
 * a nonzero remainder means the caller must requeue the work.
 */
static int
xfs_trans_log_finish_bmap_update(
	struct xfs_trans		*tp,
	struct xfs_bud_log_item		*budp,
	enum xfs_bmap_intent_type	type,
	struct xfs_inode		*ip,
	int				whichfork,
	xfs_fileoff_t			startoff,
	xfs_fsblock_t			startblock,
	xfs_filblks_t			*blockcount,
	xfs_exntst_t			state)
{
	int				error;

	error = xfs_bmap_finish_one(tp, ip, type, whichfork, startoff,
			startblock, blockcount, state);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the BUI and frees the BUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);

	return error;
}
263*4882a593Smuzhiyun
264*4882a593Smuzhiyun /* Sort bmap intents by inode. */
265*4882a593Smuzhiyun static int
xfs_bmap_update_diff_items(void * priv,struct list_head * a,struct list_head * b)266*4882a593Smuzhiyun xfs_bmap_update_diff_items(
267*4882a593Smuzhiyun void *priv,
268*4882a593Smuzhiyun struct list_head *a,
269*4882a593Smuzhiyun struct list_head *b)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun struct xfs_bmap_intent *ba;
272*4882a593Smuzhiyun struct xfs_bmap_intent *bb;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun ba = container_of(a, struct xfs_bmap_intent, bi_list);
275*4882a593Smuzhiyun bb = container_of(b, struct xfs_bmap_intent, bi_list);
276*4882a593Smuzhiyun return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun
/*
 * Set the map extent flags for this mapping: the operation type in the low
 * bits, plus unwritten-extent and attr-fork modifier flags.
 */
static void
xfs_trans_set_bmap_flags(
	struct xfs_map_extent		*bmap,
	enum xfs_bmap_intent_type	type,
	int				whichfork,
	xfs_exntst_t			state)
{
	bmap->me_flags = 0;
	switch (type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		bmap->me_flags = type;
		break;
	default:
		ASSERT(0);
	}
	if (state == XFS_EXT_UNWRITTEN)
		bmap->me_flags |= XFS_BMAP_EXTENT_UNWRITTEN;
	if (whichfork == XFS_ATTR_FORK)
		bmap->me_flags |= XFS_BMAP_EXTENT_ATTR_FORK;
}
301*4882a593Smuzhiyun
/* Log bmap updates in the intent item. */
STATIC void
xfs_bmap_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip,
	struct xfs_bmap_intent		*bmap)
{
	uint				next_extent;
	struct xfs_map_extent		*map;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&buip->bui_next_extent) - 1;
	ASSERT(next_extent < buip->bui_format.bui_nextents);
	map = &buip->bui_format.bui_extents[next_extent];
	map->me_owner = bmap->bi_owner->i_ino;
	map->me_startblock = bmap->bi_bmap.br_startblock;
	map->me_startoff = bmap->bi_bmap.br_startoff;
	map->me_len = bmap->bi_bmap.br_blockcount;
	xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork,
			bmap->bi_bmap.br_state);
}
330*4882a593Smuzhiyun
/*
 * Create a BUI intent item for the deferred bmap work on @items, optionally
 * sorting the work by inode first, and log every mapping into it.
 */
static struct xfs_log_item *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_bui_log_item		*buip = xfs_bui_init(mp);
	struct xfs_bmap_intent		*bmap;

	/* The BUI format holds exactly XFS_BUI_MAX_FAST_EXTENTS mappings. */
	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);

	xfs_trans_add_item(tp, &buip->bui_item);
	if (sort)
		list_sort(mp, items, xfs_bmap_update_diff_items);
	list_for_each_entry(bmap, items, bi_list)
		xfs_bmap_update_log_item(tp, buip, bmap);
	return &buip->bui_item;
}
351*4882a593Smuzhiyun
/* Get an BUD so we can process all the deferred bmap updates. */
static struct xfs_log_item *
xfs_bmap_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_bud(tp, BUI_ITEM(intent))->bud_item;
}
361*4882a593Smuzhiyun
/* Process a deferred bmap update. */
STATIC int
xfs_bmap_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_bmap_intent		*bmap;
	xfs_filblks_t			count;
	int				error;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	count = bmap->bi_bmap.br_blockcount;
	error = xfs_trans_log_finish_bmap_update(tp, BUD_ITEM(done),
			bmap->bi_type,
			bmap->bi_owner, bmap->bi_whichfork,
			bmap->bi_bmap.br_startoff,
			bmap->bi_bmap.br_startblock,
			&count,
			bmap->bi_bmap.br_state);
	if (!error && count > 0) {
		/*
		 * Partial progress: only unmaps can be split up; stash the
		 * remainder and ask the defer machinery to requeue us.
		 */
		ASSERT(bmap->bi_type == XFS_BMAP_UNMAP);
		bmap->bi_bmap.br_blockcount = count;
		return -EAGAIN;
	}
	kmem_free(bmap);
	return error;
}
391*4882a593Smuzhiyun
/* Abort all pending BUIs. */
STATIC void
xfs_bmap_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_bui_release(BUI_ITEM(intent));
}
399*4882a593Smuzhiyun
/* Cancel a deferred bmap update. */
STATIC void
xfs_bmap_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_bmap_intent		*bmap;

	bmap = container_of(item, struct xfs_bmap_intent, bi_list);
	kmem_free(bmap);
}
410*4882a593Smuzhiyun
/* Deferred-operation type glue for bmap updates. */
const struct xfs_defer_op_type xfs_bmap_update_defer_type = {
	.max_items	= XFS_BUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_bmap_update_create_intent,
	.abort_intent	= xfs_bmap_update_abort_intent,
	.create_done	= xfs_bmap_update_create_done,
	.finish_item	= xfs_bmap_update_finish_item,
	.cancel_item	= xfs_bmap_update_cancel_item,
};
419*4882a593Smuzhiyun
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
STATIC int
xfs_bui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_bmbt_irec		irec;
	struct xfs_bui_log_item		*buip = BUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_mount		*mp = lip->li_mountp;
	struct xfs_map_extent		*bmap;
	struct xfs_bud_log_item		*budp;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	xfs_exntst_t			state;
	unsigned int			bui_type;
	int				whichfork;
	int				error = 0;

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS)
		return -EFSCORRUPTED;

	/*
	 * First check the validity of the extent described by the
	 * BUI. If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		break;
	default:
		return -EFSCORRUPTED;
	}
	/* Reject zero/out-of-range block addresses, lengths, and flags. */
	if (startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS))
		return -EFSCORRUPTED;

	/* Grab the inode. */
	error = xfs_iget(mp, NULL, bmap->me_owner, 0, 0, &ip);
	if (error)
		return error;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto err_rele;

	/* Unlinked inode being replayed: don't let inactivation run early. */
	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Allocate transaction and do the work. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		goto err_rele;

	budp = xfs_trans_get_bud(tp, buip);
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, bui_type, ip,
			whichfork, bmap->me_startoff, bmap->me_startblock,
			&count, state);
	if (error)
		goto err_cancel;

	if (count > 0) {
		/* Requeue the unfinished remainder as a fresh unmap intent. */
		ASSERT(bui_type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		xfs_bmap_unmap_extent(tp, ip, &irec);
	}

	/*
	 * Commit transaction, which frees the transaction and saves the inode
	 * for later replay activities.
	 */
	error = xfs_defer_ops_capture_and_commit(tp, ip, capture_list);
	if (error)
		goto err_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
	return 0;

err_cancel:
	xfs_trans_cancel(tp);
err_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
err_rele:
	xfs_irele(ip);
	return error;
}
536*4882a593Smuzhiyun
/* Does this BUI's format id match the intent id we're looking for? */
STATIC bool
xfs_bui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return BUI_ITEM(lip)->bui_format.bui_id == intent_id;
}
544*4882a593Smuzhiyun
/*
 * Relog an intent item to push the log tail forward: log a BUD to retire the
 * old intent, then log a fresh BUI carrying the same extents in the same
 * transaction.
 */
static struct xfs_log_item *
xfs_bui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_bud_log_item		*budp;
	struct xfs_bui_log_item		*buip;
	struct xfs_map_extent		*extp;
	unsigned int			count;

	count = BUI_ITEM(intent)->bui_format.bui_nextents;
	extp = BUI_ITEM(intent)->bui_format.bui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	budp = xfs_trans_get_bud(tp, BUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &budp->bud_item.li_flags);

	buip = xfs_bui_init(tp->t_mountp);
	memcpy(buip->bui_format.bui_extents, extp, count * sizeof(*extp));
	atomic_set(&buip->bui_next_extent, count);
	xfs_trans_add_item(tp, &buip->bui_item);
	set_bit(XFS_LI_DIRTY, &buip->bui_item.li_flags);
	return &buip->bui_item;
}
570*4882a593Smuzhiyun
/* Log item operations for the BUI (bmap-update intent) item. */
static const struct xfs_item_ops xfs_bui_item_ops = {
	.iop_size	= xfs_bui_item_size,
	.iop_format	= xfs_bui_item_format,
	.iop_unpin	= xfs_bui_item_unpin,
	.iop_release	= xfs_bui_item_release,
	.iop_recover	= xfs_bui_item_recover,
	.iop_match	= xfs_bui_item_match,
	.iop_relog	= xfs_bui_item_relog,
};
580*4882a593Smuzhiyun
/*
 * Copy an BUI format buffer from the given buf, and into the destination
 * BUI format structure. The BUI/BUD items were designed not to need any
 * special alignment handling.
 */
static int
xfs_bui_copy_format(
	struct xfs_log_iovec		*buf,
	struct xfs_bui_log_format	*dst_bui_fmt)
{
	struct xfs_bui_log_format	*src_bui_fmt;
	uint				len;

	src_bui_fmt = buf->i_addr;
	len = xfs_bui_log_format_sizeof(src_bui_fmt->bui_nextents);

	/* The on-disk length must match exactly what the extent count implies. */
	if (buf->i_len == len) {
		memcpy(dst_bui_fmt, src_bui_fmt, len);
		return 0;
	}
	XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, NULL);
	return -EFSCORRUPTED;
}
604*4882a593Smuzhiyun
/*
 * This routine is called to create an in-core extent bmap update
 * item from the bui format structure which was logged on disk.
 * It allocates an in-core bui, copies the extents from the format
 * structure into it, and adds the bui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_bui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	int				error;
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_bui_log_item		*buip;
	struct xfs_bui_log_format	*bui_formatp;

	bui_formatp = item->ri_buf[0].i_addr;

	/* BUIs always carry exactly XFS_BUI_MAX_FAST_EXTENTS mappings. */
	if (bui_formatp->bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}
	buip = xfs_bui_init(mp);
	error = xfs_bui_copy_format(&item->ri_buf[0], &buip->bui_format);
	if (error) {
		xfs_bui_item_free(buip);
		return error;
	}
	atomic_set(&buip->bui_next_extent, bui_formatp->bui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &buip->bui_item, lsn);
	xfs_bui_release(buip);
	return 0;
}
645*4882a593Smuzhiyun
/* Log recovery dispatch for BUI items. */
const struct xlog_recover_item_ops xlog_bui_item_ops = {
	.item_type		= XFS_LI_BUI,
	.commit_pass2		= xlog_recover_bui_commit_pass2,
};
650*4882a593Smuzhiyun
/*
 * This routine is called when an BUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding BUI if it
 * was still in the log. To do this it searches the AIL for the BUI with an id
 * equal to that in the BUD format structure. If we find it we drop the BUD
 * reference, which removes the BUI from the AIL and frees it.
 */
STATIC int
xlog_recover_bud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_bud_log_format	*bud_formatp;

	bud_formatp = item->ri_buf[0].i_addr;
	/* The BUD format is fixed-size; any other length is corruption. */
	if (item->ri_buf[0].i_len != sizeof(struct xfs_bud_log_format)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, log->l_mp);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_BUI, bud_formatp->bud_bui_id);
	return 0;
}
676*4882a593Smuzhiyun
/* Log recovery dispatch for BUD items. */
const struct xlog_recover_item_ops xlog_bud_item_ops = {
	.item_type		= XFS_LI_BUD,
	.commit_pass2		= xlog_recover_bud_commit_pass2,
};
681