// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

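/**
 * maybe_release_space - resync a resource group's clone bitmap
 * @bd: The buffer data of a resource group bitmap block
 *
 * Called when a pinned rgrp bitmap buffer is written to the log: if a clone
 * bitmap exists, optionally issue discard requests (when the "discard" mount
 * option is set), copy the buffer's bitmap contents back into the clone,
 * clear the GBF_FULL hint and reset rd_free_clone and rd_extfail_pt from
 * rd_free.
 */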
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

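/**
 * gfs2_log_incr_head - advance the log flush head by one block
 * @sdp: The superblock
 *
 * Wraps around to the start of the journal when the end is reached. The
 * BUG_ON catches the flush head running into the log tail.
 */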
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

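/**
 * gfs2_log_bmap - map a journal logical block to its device block number
 * @jd: The journal descriptor
 * @lblock: The logical block number within the journal
 *
 * Walks the journal's extent list (built by gfs2_map_journal_extents()) and
 * returns the corresponding device block, or -1 if @lblock falls outside
 * every extent.
 */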
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers, unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		if (!cmpxchg(&sdp->sd_log_error, 0, (int)bio->bi_status))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       bio->bi_status, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @page: The page to look in
 *
 * Returns: true if found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (*done == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page twice: first when we did a find_or_create_page()
 * to obtain the page to add it to the bio, and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

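/**
 * gfs2_chain_bio - chain a new bio onto a full one and submit the full one
 * @prev: The full bio
 * @nr_iovecs: The number of bio_vecs for the new bio
 *
 * The new bio starts at the sector where @prev ends and inherits its block
 * device, operation flags and write hint. bio_chain() makes @prev the
 * parent of the new bio, so @prev's completion is not reported until the
 * new bio has also completed; @prev is then submitted.
 */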
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache when done
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number (i.e. the log head).
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out; /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

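/**
 * gfs2_get_log_desc - allocate and initialise a log descriptor block
 * @sdp: The superblock
 * @ld_type: The descriptor type (GFS2_LOG_DESC_*)
 * @ld_length: The on-disk ld_length field (log blocks in this section)
 * @ld_data1: The on-disk ld_data1 field (e.g. the number of entries)
 *
 * The page comes from the gfs2_page_pool mempool; ownership passes to
 * gfs2_log_write_page() when the descriptor is written out.
 */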
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

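/**
 * gfs2_check_magic - flag data buffers that begin with the GFS2 magic number
 * @bh: The journaled data buffer to check
 *
 * A data block starting with GFS2_MAGIC could be mistaken for metadata when
 * the journal is scanned, so such buffers are marked "escaped";
 * gfs2_before_commit() writes them with the first word zeroed and records
 * the escape flag in the log descriptor so that replay can restore it.
 */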
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

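/*
 * list_sort() comparison callback: order gfs2_bufdata entries by the block
 * number of their buffer heads so that log descriptors and buffers are
 * written out in ascending block order.
 */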
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

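/**
 * gfs2_before_commit - write pinned buffers and their descriptors to the log
 * @sdp: The superblock
 * @limit: Maximum number of blocks described by one log descriptor
 * @total: Total number of buffers on @blist to write
 * @blist: The list of gfs2_bufdata to write (tr_buf or tr_databuf)
 * @is_databuf: True for journaled data buffers, false for metadata
 *
 * The list is sorted by block number and written in chunks of at most
 * @limit buffers: first a descriptor page listing the in-place block
 * numbers (plus an escape flag per block for data buffers), then the
 * buffer contents themselves. Escaped data buffers are copied into a
 * mempool page with their first word zeroed before being written.
 */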
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

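/**
 * revoke_lo_before_commit - write the pending revokes to the log
 * @sdp: The superblock
 * @tr: The current transaction (unused here)
 *
 * Writes a GFS2_LOG_DESC_REVOKE descriptor followed by the revoked block
 * numbers queued on sd_log_revokes, packing as many 64-bit entries per
 * block as will fit and starting each continuation block with a
 * GFS2_METATYPE_LB header.
 */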
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

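/**
 * revoke_lo_after_commit - release the revoke entries once they are on disk
 * @sdp: The superblock
 * @tr: The transaction that was just committed (unused here)
 *
 * Empties sd_log_revokes, calling gfs2_glock_remove_revoke() for each
 * entry's glock and returning the gfs2_bufdata to its slab cache.
 */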
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

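/**
 * databuf_lo_scan_elements - replay journaled data blocks during recovery
 * @jd: The journal descriptor
 * @start: The journal block of the log descriptor
 * @ld: The log descriptor
 * @ptr: The (block number, escape flag) pairs following the descriptor
 * @pass: The recovery pass number
 *
 * Like buf_lo_scan_elements(), but each descriptor entry carries an escape
 * flag; if it is set, the first word of the block is restored to GFS2_MAGIC
 * before the block is marked dirty at its in-place location.
 */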
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

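/*
 * The per-type log operation tables. gfs2_log_ops is the NULL-terminated
 * array that the lops_*() helpers (see lops.h) iterate over: journaled
 * data first, then metadata buffers, then revokes.
 */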
static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};