// SPDX-License-Identifier: GPL-2.0+
/*
 * segbuf.c - NILFS segment buffer
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/crc32.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include "page.h"
#include "segbuf.h"


struct nilfs_write_info {
	struct the_nilfs	*nilfs;
	struct bio		*bio;
	int			start, end; /* The region to be submitted */
	int			rest_blocks;
	int			max_pages;
	int			nr_vecs;
	sector_t		blocknr;
};

static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs);
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);

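/**
 * nilfs_segbuf_new - allocate a new segment buffer
 * @sb: super block instance
 *
 * Allocates a segment buffer from the nilfs_segbuf_cachep slab cache and
 * initializes its buffer lists, BIO completion, and error counters.
 *
 * Return Value: On success, pointer to the segment buffer is returned.
 * On error, NULL is returned.
 */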
struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
{
	struct nilfs_segment_buffer *segbuf;

	segbuf = kmem_cache_alloc(nilfs_segbuf_cachep, GFP_NOFS);
	if (unlikely(!segbuf))
		return NULL;

	segbuf->sb_super = sb;
	INIT_LIST_HEAD(&segbuf->sb_list);
	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;

	init_completion(&segbuf->sb_bio_event);
	atomic_set(&segbuf->sb_err, 0);
	segbuf->sb_nbio = 0;

	return segbuf;
}

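/**
 * nilfs_segbuf_free - release a segment buffer
 * @segbuf: segment buffer to be returned to the slab cache
 */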
void nilfs_segbuf_free(struct nilfs_segment_buffer *segbuf)
{
	kmem_cache_free(nilfs_segbuf_cachep, segbuf);
}

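/**
 * nilfs_segbuf_map - map a segment buffer onto a physical segment
 * @segbuf: segment buffer
 * @segnum: number of the full segment to be used
 * @offset: block offset of the log within the full segment
 * @nilfs: nilfs object
 *
 * Looks up the block range of full segment @segnum, places the start of the
 * partial segment @offset blocks into it, and records how many blocks remain
 * up to the end of the segment.
 */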
void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
		      unsigned long offset, struct the_nilfs *nilfs)
{
	segbuf->sb_segnum = segnum;
	nilfs_get_segment_range(nilfs, segnum, &segbuf->sb_fseg_start,
				&segbuf->sb_fseg_end);

	segbuf->sb_pseg_start = segbuf->sb_fseg_start + offset;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

/**
 * nilfs_segbuf_map_cont - map a new log behind a given log
 * @segbuf: new segment buffer
 * @prev: segment buffer containing a log to be continued
 */
void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
			   struct nilfs_segment_buffer *prev)
{
	segbuf->sb_segnum = prev->sb_segnum;
	segbuf->sb_fseg_start = prev->sb_fseg_start;
	segbuf->sb_fseg_end = prev->sb_fseg_end;
	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
	segbuf->sb_rest_blocks =
		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
}

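/**
 * nilfs_segbuf_set_next_segnum - set the successor segment of a log
 * @segbuf: segment buffer
 * @nextnum: segment number of the successor
 * @nilfs: nilfs object
 *
 * Stores @nextnum and the start block number of that segment so that they
 * can be written out in the segment summary.
 */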
void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
				  __u64 nextnum, struct the_nilfs *nilfs)
{
	segbuf->sb_nextnum = nextnum;
	segbuf->sb_sum.next = nilfs_get_segment_start_blocknr(nilfs, nextnum);
}

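/**
 * nilfs_segbuf_extend_segsum - add a block to the segment summary area
 * @segbuf: segment buffer
 *
 * Gets the buffer head for the next summary block of the partial segment and
 * appends it to the list of segment summary buffers.
 *
 * Return Value: On success, 0 is returned. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */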
int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nsumblk);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_segsum_buffer(segbuf, bh);
	return 0;
}

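/**
 * nilfs_segbuf_extend_payload - add a block to the payload area
 * @segbuf: segment buffer
 * @bhp: place to store the new buffer head
 *
 * Gets the buffer head for the next payload block of the partial segment,
 * appends it to the list of payload buffers, and returns it through @bhp.
 *
 * Return Value: On success, 0 is returned. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */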
int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
				struct buffer_head **bhp)
{
	struct buffer_head *bh;

	bh = sb_getblk(segbuf->sb_super,
		       segbuf->sb_pseg_start + segbuf->sb_sum.nblocks);
	if (unlikely(!bh))
		return -ENOMEM;

	nilfs_segbuf_add_payload_buffer(segbuf, bh);
	*bhp = bh;
	return 0;
}

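/**
 * nilfs_segbuf_reset - initialize a segment buffer for a new log
 * @segbuf: segment buffer
 * @flags: flags of the new log
 * @ctime: creation time of the new log
 * @cno: checkpoint number of the new log
 *
 * Clears the block counters, allocates the first segment summary block, and
 * fills in the in-core summary information with @flags, @ctime, and @cno.
 *
 * Return Value: On success, 0 is returned. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 */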
int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
		       time64_t ctime, __u64 cno)
{
	int err;

	segbuf->sb_sum.nblocks = segbuf->sb_sum.nsumblk = 0;
	err = nilfs_segbuf_extend_segsum(segbuf);
	if (unlikely(err))
		return err;

	segbuf->sb_sum.flags = flags;
	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
	segbuf->sb_sum.ctime = ctime;
	segbuf->sb_sum.cno = cno;
	return 0;
}

/*
 * Setup segment summary
 */
void nilfs_segbuf_fill_in_segsum(struct nilfs_segment_buffer *segbuf)
{
	struct nilfs_segment_summary *raw_sum;
	struct buffer_head *bh_sum;

	bh_sum = list_entry(segbuf->sb_segsum_buffers.next,
			    struct buffer_head, b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh_sum->b_data;

	raw_sum->ss_magic    = cpu_to_le32(NILFS_SEGSUM_MAGIC);
	raw_sum->ss_bytes    = cpu_to_le16(sizeof(*raw_sum));
	raw_sum->ss_flags    = cpu_to_le16(segbuf->sb_sum.flags);
	raw_sum->ss_seq      = cpu_to_le64(segbuf->sb_sum.seg_seq);
	raw_sum->ss_create   = cpu_to_le64(segbuf->sb_sum.ctime);
	raw_sum->ss_next     = cpu_to_le64(segbuf->sb_sum.next);
	raw_sum->ss_nblocks  = cpu_to_le32(segbuf->sb_sum.nblocks);
	raw_sum->ss_nfinfo   = cpu_to_le32(segbuf->sb_sum.nfinfo);
	raw_sum->ss_sumbytes = cpu_to_le32(segbuf->sb_sum.sumbytes);
	raw_sum->ss_pad      = 0;
	raw_sum->ss_cno      = cpu_to_le64(segbuf->sb_sum.cno);
}

/*
 * CRC calculation routines
 */
static void
nilfs_segbuf_fill_in_segsum_crc(struct nilfs_segment_buffer *segbuf, u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	unsigned long size, bytes = segbuf->sb_sum.sumbytes;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);

	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	size = min_t(unsigned long, bytes, bh->b_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum +
		       sizeof(raw_sum->ss_datasum) + sizeof(raw_sum->ss_sumsum),
		       size - (sizeof(raw_sum->ss_datasum) +
			       sizeof(raw_sum->ss_sumsum)));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		bytes -= size;
		size = min_t(unsigned long, bytes, bh->b_size);
		crc = crc32_le(crc, bh->b_data, size);
	}
	raw_sum->ss_sumsum = cpu_to_le32(crc);
}

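/*
 * Compute the data checksum of a log.  The checksum covers everything in the
 * log except for the ss_datasum field itself: the rest of the first summary
 * block, any additional summary blocks, and all payload blocks.
 */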
static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
					  u32 seed)
{
	struct buffer_head *bh;
	struct nilfs_segment_summary *raw_sum;
	void *kaddr;
	u32 crc;

	bh = list_entry(segbuf->sb_segsum_buffers.next, struct buffer_head,
			b_assoc_buffers);
	raw_sum = (struct nilfs_segment_summary *)bh->b_data;
	crc = crc32_le(seed,
		       (unsigned char *)raw_sum + sizeof(raw_sum->ss_datasum),
		       bh->b_size - sizeof(raw_sum->ss_datasum));

	list_for_each_entry_continue(bh, &segbuf->sb_segsum_buffers,
				     b_assoc_buffers) {
		crc = crc32_le(crc, bh->b_data, bh->b_size);
	}
	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		kaddr = kmap_atomic(bh->b_page);
		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
		kunmap_atomic(kaddr);
	}
	raw_sum->ss_datasum = cpu_to_le32(crc);
}

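/*
 * Compute the checksum of the super root block, covering the super root
 * structure except for its sr_sum field.  The checksummed length is derived
 * from the on-disk inode size via NILFS_SR_BYTES().
 */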
static void
nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf,
				    u32 seed)
{
	struct nilfs_super_root *raw_sr;
	struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info;
	unsigned int srsize;
	u32 crc;

	raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data;
	srsize = NILFS_SR_BYTES(nilfs->ns_inode_size);
	crc = crc32_le(seed,
		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
		       srsize - sizeof(raw_sr->sr_sum));
	raw_sr->sr_sum = cpu_to_le32(crc);
}

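/*
 * Detach every buffer head on @list and drop the reference that was taken
 * when it was added to the segment buffer.
 */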
static void nilfs_release_buffers(struct list_head *list)
{
	struct buffer_head *bh, *n;

	list_for_each_entry_safe(bh, n, list, b_assoc_buffers) {
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh);
	}
}

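/*
 * Release the summary and payload buffers attached to a segment buffer and
 * forget its super root block.
 */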
static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
{
	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
	nilfs_release_buffers(&segbuf->sb_payload_buffers);
	segbuf->sb_super_root = NULL;
}

/*
 * Iterators for segment buffers
 */
void nilfs_clear_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list)
		nilfs_segbuf_clear(segbuf);
}

void nilfs_truncate_logs(struct list_head *logs,
			 struct nilfs_segment_buffer *last)
{
	struct nilfs_segment_buffer *n, *segbuf;

	segbuf = list_prepare_entry(last, logs, sb_list);
	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
		list_del_init(&segbuf->sb_list);
		nilfs_segbuf_clear(segbuf);
		nilfs_segbuf_free(segbuf);
	}
}

int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
{
	struct nilfs_segment_buffer *segbuf;
	int ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		ret = nilfs_segbuf_write(segbuf, nilfs);
		if (ret)
			break;
	}
	return ret;
}

int nilfs_wait_on_logs(struct list_head *logs)
{
	struct nilfs_segment_buffer *segbuf;
	int err, ret = 0;

	list_for_each_entry(segbuf, logs, sb_list) {
		err = nilfs_segbuf_wait(segbuf);
		if (err && !ret)
			ret = err;
	}
	return ret;
}

/**
 * nilfs_add_checksums_on_logs - add checksums on the logs
 * @logs: list of segment buffers storing target logs
 * @seed: checksum seed value
 */
void nilfs_add_checksums_on_logs(struct list_head *logs, u32 seed)
{
	struct nilfs_segment_buffer *segbuf;

	list_for_each_entry(segbuf, logs, sb_list) {
		if (segbuf->sb_super_root)
			nilfs_segbuf_fill_in_super_root_crc(segbuf, seed);
		nilfs_segbuf_fill_in_segsum_crc(segbuf, seed);
		nilfs_segbuf_fill_in_data_crc(segbuf, seed);
	}
}

/*
 * BIO operations
 */
static void nilfs_end_bio_write(struct bio *bio)
{
	struct nilfs_segment_buffer *segbuf = bio->bi_private;

	if (bio->bi_status)
		atomic_inc(&segbuf->sb_err);

	bio_put(bio);
	complete(&segbuf->sb_bio_event);
}

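/*
 * Submit the bio accumulated in @wi and advance the write region.  If the
 * backing device is write-congested and an earlier bio of this log is still
 * in flight, wait for one completion first and fail with -EIO if a previous
 * write has already reported an error.
 */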
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
				   struct nilfs_write_info *wi, int mode,
				   int mode_flags)
{
	struct bio *bio = wi->bio;
	int err;

	if (segbuf->sb_nbio > 0 &&
	    bdi_write_congested(segbuf->sb_super->s_bdi)) {
		wait_for_completion(&segbuf->sb_bio_event);
		segbuf->sb_nbio--;
		if (unlikely(atomic_read(&segbuf->sb_err))) {
			bio_put(bio);
			err = -EIO;
			goto failed;
		}
	}

	bio->bi_end_io = nilfs_end_bio_write;
	bio->bi_private = segbuf;
	bio_set_op_attrs(bio, mode, mode_flags);
	submit_bio(bio);
	segbuf->sb_nbio++;

	wi->bio = NULL;
	wi->rest_blocks -= wi->end - wi->start;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end;
	return 0;

 failed:
	wi->bio = NULL;
	return err;
}

/**
 * nilfs_alloc_seg_bio - allocate a new bio for writing log
 * @nilfs: nilfs object
 * @start: start block number of the bio
 * @nr_vecs: request size of page vector.
 *
 * Return Value: On success, pointer to the struct bio is returned.
 * On error, NULL is returned.
 */
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
				       int nr_vecs)
{
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, nr_vecs);
	if (bio == NULL) {
		while (!bio && (nr_vecs >>= 1))
			bio = bio_alloc(GFP_NOIO, nr_vecs);
	}
	if (likely(bio)) {
		bio_set_dev(bio, nilfs->ns_bdev);
		bio->bi_iter.bi_sector =
			start << (nilfs->ns_blocksize_bits - 9);
	}
	return bio;
}

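/*
 * Initialize the write context for a log: no bio allocated yet, all blocks
 * of the log still to be written, and the block cursor set to the start of
 * the partial segment.
 */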
static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
				       struct nilfs_write_info *wi)
{
	wi->bio = NULL;
	wi->rest_blocks = segbuf->sb_sum.nblocks;
	wi->max_pages = BIO_MAX_PAGES;
	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
	wi->start = wi->end = 0;
	wi->blocknr = segbuf->sb_pseg_start;
}

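/*
 * Add the page of buffer head @bh to the current bio, allocating a new bio
 * on demand.  When the bio cannot take the whole buffer, the filled bio is
 * submitted and @bh is retried against a freshly allocated bio.
 */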
static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
				  struct nilfs_write_info *wi,
				  struct buffer_head *bh, int mode)
{
	int len, err;

	BUG_ON(wi->nr_vecs <= 0);
 repeat:
	if (!wi->bio) {
		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
					      wi->nr_vecs);
		if (unlikely(!wi->bio))
			return -ENOMEM;
	}

	len = bio_add_page(wi->bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (len == bh->b_size) {
		wi->end++;
		return 0;
	}
	/* bio is FULL */
	err = nilfs_segbuf_submit_bio(segbuf, wi, mode, 0);
	/* never submit current bh */
	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_segbuf_write - submit write requests of a log
 * @segbuf: buffer storing a log to be written
 * @nilfs: nilfs object
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
			      struct the_nilfs *nilfs)
{
	struct nilfs_write_info wi;
	struct buffer_head *bh;
	int res = 0;

	wi.nilfs = nilfs;
	nilfs_segbuf_prepare_write(segbuf, &wi);

	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
		if (unlikely(res))
			goto failed_bio;
	}

	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
		if (unlikely(res))
			goto failed_bio;
	}

	if (wi.bio) {
		/*
		 * Last BIO is always sent through the following
		 * submission.
		 */
		res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE,
					      REQ_SYNC);
	}

 failed_bio:
	return res;
}

/**
 * nilfs_segbuf_wait - wait for completion of requested BIOs
 * @segbuf: segment buffer
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error
 */
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
{
	int err = 0;

	if (!segbuf->sb_nbio)
		return 0;

	do {
		wait_for_completion(&segbuf->sb_bio_event);
	} while (--segbuf->sb_nbio > 0);

	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
		nilfs_err(segbuf->sb_super,
			  "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
			  (unsigned long long)segbuf->sb_pseg_start,
			  segbuf->sb_sum.nblocks,
			  (unsigned long long)segbuf->sb_segnum);
		err = -EIO;
	}
	return err;
}