// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;
static struct kmem_cache *io_end_vec_cachep;

int __init ext4_init_pageio(void)
{
        io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
        if (io_end_cachep == NULL)
                return -ENOMEM;

        io_end_vec_cachep = KMEM_CACHE(ext4_io_end_vec, 0);
        if (io_end_vec_cachep == NULL) {
                kmem_cache_destroy(io_end_cachep);
                return -ENOMEM;
        }
        return 0;
}

void ext4_exit_pageio(void)
{
        kmem_cache_destroy(io_end_cachep);
        kmem_cache_destroy(io_end_vec_cachep);
}

struct ext4_io_end_vec *ext4_alloc_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec;

        io_end_vec = kmem_cache_zalloc(io_end_vec_cachep, GFP_NOFS);
        if (!io_end_vec)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&io_end_vec->list);
        list_add_tail(&io_end_vec->list, &io_end->list_vec);
        return io_end_vec;
}
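
/*
 * Sketch of a typical caller (modeled on the writeback path in
 * fs/ext4/inode.c; the surrounding variables are illustrative, not quoted):
 *
 *	io_end_vec = ext4_alloc_io_end_vec(io_end);
 *	if (IS_ERR(io_end_vec))
 *		return PTR_ERR(io_end_vec);
 *	io_end_vec->offset = (loff_t)map->m_lblk << inode->i_blkbits;
 *
 * ext4_last_io_end_vec() below retrieves the most recently added entry so
 * its size can be grown as more blocks join the same unwritten range.
 */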

static void ext4_free_io_end_vec(ext4_io_end_t *io_end)
{
        struct ext4_io_end_vec *io_end_vec, *tmp;

        if (list_empty(&io_end->list_vec))
                return;
        list_for_each_entry_safe(io_end_vec, tmp, &io_end->list_vec, list) {
                list_del(&io_end_vec->list);
                kmem_cache_free(io_end_vec_cachep, io_end_vec);
        }
}

struct ext4_io_end_vec *ext4_last_io_end_vec(ext4_io_end_t *io_end)
{
        BUG_ON(list_empty(&io_end->list_vec));
        return list_last_entry(&io_end->list_vec, struct ext4_io_end_vec, list);
}

/*
 * Print a buffer I/O error message compatible with the one in
 * fs/buffer.c. This provides compatibility with dmesg scrapers that
 * look for a specific buffer I/O error message. We really need a
 * unified error reporting structure to userspace a la Digital Unix's
 * uerf system, but it's probably not going to happen in my lifetime,
 * due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
        printk_ratelimited(KERN_ERR "Buffer I/O error on device %pg, logical block %llu\n",
                           bh->b_bdev,
                           (unsigned long long)bh->b_blocknr);
}

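/*
 * Finish page writeback for every page segment attached to @bio: clear
 * the async_write flag on each buffer the bio covered and, once no buffer
 * on a page remains under IO, free any fscrypt bounce page and end the
 * page's writeback. On error, the page and its buffers are marked so the
 * failure is reported back through the mapping.
 */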
static void ext4_finish_bio(struct bio *bio)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                struct page *page = bvec->bv_page;
                struct page *bounce_page = NULL;
                struct buffer_head *bh, *head;
                unsigned bio_start = bvec->bv_offset;
                unsigned bio_end = bio_start + bvec->bv_len;
                unsigned under_io = 0;
                unsigned long flags;

                if (!page)
                        continue;

                if (fscrypt_is_bounce_page(page)) {
                        bounce_page = page;
                        page = fscrypt_pagecache_page(bounce_page);
                }

                if (bio->bi_status) {
                        SetPageError(page);
                        mapping_set_error(page->mapping, -EIO);
                }
                bh = head = page_buffers(page);
                /*
                 * We check all buffers in the page under b_uptodate_lock
                 * to avoid races with other end io clearing async_write flags
                 */
                spin_lock_irqsave(&head->b_uptodate_lock, flags);
                do {
                        if (bh_offset(bh) < bio_start ||
                            bh_offset(bh) + bh->b_size > bio_end) {
                                if (buffer_async_write(bh))
                                        under_io++;
                                continue;
                        }
                        clear_buffer_async_write(bh);
                        if (bio->bi_status) {
                                set_buffer_write_io_error(bh);
                                buffer_io_error(bh);
                        }
                } while ((bh = bh->b_this_page) != head);
                spin_unlock_irqrestore(&head->b_uptodate_lock, flags);
                if (!under_io) {
                        fscrypt_free_bounce_page(bounce_page);
                        end_page_writeback(page);
                }
        }
}

static void ext4_release_io_end(ext4_io_end_t *io_end)
{
        struct bio *bio, *next_bio;

        BUG_ON(!list_empty(&io_end->list));
        BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
        WARN_ON(io_end->handle);

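        /*
         * ext4_end_bio() chains deferred bios onto io_end->bio through
         * bi_private; finish and release each of them here.
         */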
        for (bio = io_end->bio; bio; bio = next_bio) {
                next_bio = bio->bi_private;
                ext4_finish_bio(bio);
                bio_put(bio);
        }
        ext4_free_io_end_vec(io_end);
        kmem_cache_free(io_end_cachep, io_end);
}

/*
 * Check a range of space and convert unwritten extents to written. Note that
 * we are protected from truncate touching the same part of the extent tree
 * by the fact that truncate code waits for all DIO to finish (thus exclusion
 * from direct IO is achieved) and also waits for PageWriteback bits. Thus we
 * cannot get to ext4_ext_truncate() before all IOs overlapping that range are
 * completed (happens from ext4_free_ioend()).
 */
static int ext4_end_io_end(ext4_io_end_t *io_end)
{
        struct inode *inode = io_end->inode;
        handle_t *handle = io_end->handle;
        int ret = 0;

        ext4_debug("ext4_end_io_nolock: io_end 0x%p from inode %lu,list->next 0x%p,"
                   "list->prev 0x%p\n",
                   io_end, inode->i_ino, io_end->list.next, io_end->list.prev);

        io_end->handle = NULL;  /* Following call will use up the handle */
        ret = ext4_convert_unwritten_io_end_vec(handle, io_end);
        if (ret < 0 && !ext4_forced_shutdown(EXT4_SB(inode->i_sb))) {
                ext4_msg(inode->i_sb, KERN_EMERG,
                         "failed to convert unwritten extents to written "
                         "extents -- potential data loss! "
                         "(inode %lu, error %d)", inode->i_ino, ret);
        }
        ext4_clear_io_unwritten_flag(io_end);
        ext4_release_io_end(io_end);
        return ret;
}

static void dump_completed_IO(struct inode *inode, struct list_head *head)
{
#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io_end, *io_end0, *io_end1;

        if (list_empty(head))
                return;

        ext4_debug("Dump inode %lu completed io list\n", inode->i_ino);
        list_for_each_entry(io_end, head, list) {
                cur = &io_end->list;
                before = cur->prev;
                io_end0 = container_of(before, ext4_io_end_t, list);
                after = cur->next;
                io_end1 = container_of(after, ext4_io_end_t, list);

                ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
                           io_end, inode->i_ino, io_end0, io_end1);
        }
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
        struct ext4_inode_info *ei = EXT4_I(io_end->inode);
        struct ext4_sb_info *sbi = EXT4_SB(io_end->inode->i_sb);
        struct workqueue_struct *wq;
        unsigned long flags;

        /* Only reserved conversions from writeback should enter here */
        WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
        WARN_ON(!io_end->handle && sbi->s_journal);
        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        wq = sbi->rsv_conversion_wq;
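        /*
         * Queue the conversion work only when the list goes from empty to
         * non-empty: a single queued work item drains the whole list.
         */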
        if (list_empty(&ei->i_rsv_conversion_list))
                queue_work(wq, &ei->i_rsv_conversion_work);
        list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

static int ext4_do_flush_completed_IO(struct inode *inode,
                                      struct list_head *head)
{
        ext4_io_end_t *io_end;
        struct list_head unwritten;
        unsigned long flags;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int err, ret = 0;

        spin_lock_irqsave(&ei->i_completed_io_lock, flags);
        dump_completed_IO(inode, head);
        list_replace_init(head, &unwritten);
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

        while (!list_empty(&unwritten)) {
                io_end = list_entry(unwritten.next, ext4_io_end_t, list);
                BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
                list_del_init(&io_end->list);

                err = ext4_end_io_end(io_end);
                if (unlikely(!ret && err))
                        ret = err;
        }
        return ret;
}

/*
 * work on completed IO, to convert unwritten extents to written extents
 */
void ext4_end_io_rsv_work(struct work_struct *work)
{
        struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
                                                  i_rsv_conversion_work);
        ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}

ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
        ext4_io_end_t *io_end = kmem_cache_zalloc(io_end_cachep, flags);

        if (io_end) {
                io_end->inode = inode;
                INIT_LIST_HEAD(&io_end->list);
                INIT_LIST_HEAD(&io_end->list_vec);
                atomic_set(&io_end->count, 1);
        }
        return io_end;
}
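
/*
 * Reference-count lifecycle of an io_end (a sketch; the real call sites
 * live in the writeback path, e.g. fs/ext4/inode.c):
 *
 *	io_end = ext4_init_io_end(inode, GFP_NOFS);	count = 1
 *	bio->bi_private = ext4_get_io_end(io_end);	count = 2
 *	...bio is submitted...
 *	ext4_put_io_end(io_end);			submitter drops its ref
 *	ext4_end_bio() -> ext4_put_io_end_defer()	completion drops its ref
 *
 * Whichever put drops the count to zero either releases the io_end right
 * away or, if unwritten extents still need conversion, defers it to the
 * conversion workqueue via ext4_add_complete_io().
 */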

void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
        if (atomic_dec_and_test(&io_end->count)) {
                if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) ||
                    list_empty(&io_end->list_vec)) {
                        ext4_release_io_end(io_end);
                        return;
                }
                ext4_add_complete_io(io_end);
        }
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
        int err = 0;

        if (atomic_dec_and_test(&io_end->count)) {
                if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                        err = ext4_convert_unwritten_io_end_vec(io_end->handle,
                                                                io_end);
                        io_end->handle = NULL;
                        ext4_clear_io_unwritten_flag(io_end);
                }
                ext4_release_io_end(io_end);
        }
        return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
        atomic_inc(&io_end->count);
        return io_end;
}

/* BIO completion function for page writeback */
static void ext4_end_bio(struct bio *bio)
{
        ext4_io_end_t *io_end = bio->bi_private;
        sector_t bi_sector = bio->bi_iter.bi_sector;
        char b[BDEVNAME_SIZE];

        if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
                      bio_devname(bio, b),
                      (long long) bio->bi_iter.bi_sector,
                      (unsigned) bio_sectors(bio),
                      bio->bi_status)) {
                ext4_finish_bio(bio);
                bio_put(bio);
                return;
        }
        bio->bi_end_io = NULL;

        if (bio->bi_status) {
                struct inode *inode = io_end->inode;

                ext4_warning(inode->i_sb, "I/O error %d writing to inode %lu "
                             "starting block %llu",
                             bio->bi_status, inode->i_ino,
                             (unsigned long long)
                             bi_sector >> (inode->i_blkbits - 9));
                mapping_set_error(inode->i_mapping,
                                  blk_status_to_errno(bio->bi_status));
        }

        if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
                /*
                 * Link bio into list hanging from io_end. We have to do it
                 * atomically as bio completions can be racing against each
                 * other.
                 */
                bio->bi_private = xchg(&io_end->bio, bio);
                ext4_put_io_end_defer(io_end);
        } else {
                /*
                 * Drop io_end reference early. Inode can get freed once
                 * we finish the bio.
                 */
                ext4_put_io_end_defer(io_end);
                ext4_finish_bio(bio);
                bio_put(bio);
        }
}

void ext4_io_submit(struct ext4_io_submit *io)
{
        struct bio *bio = io->io_bio;

        if (bio) {
                int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
                                  REQ_SYNC : 0;
                io->io_bio->bi_write_hint = io->io_end->inode->i_write_hint;
                bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
                submit_bio(io->io_bio);
        }
        io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
                         struct writeback_control *wbc)
{
        io->io_wbc = wbc;
        io->io_bio = NULL;
        io->io_end = NULL;
}
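
/*
 * Typical submission flow (a sketch: the names and call order below are
 * modeled on the writeback path in fs/ext4/inode.c, not quoted from it):
 *
 *	struct ext4_io_submit io;
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	ret = ext4_bio_write_page(&io, page, len, wbc, false);
 *	ext4_io_submit(&io);		flushes any partially built bio
 *	ext4_put_io_end(io.io_end);	drops the submitter's reference
 */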

static void io_submit_init_bio(struct ext4_io_submit *io,
                               struct buffer_head *bh)
{
        struct bio *bio;

        /*
         * bio_alloc will _always_ be able to allocate a bio if
         * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset().
         */
        bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
        fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO);
        bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
        bio_set_dev(bio, bh->b_bdev);
        bio->bi_end_io = ext4_end_bio;
        bio->bi_private = ext4_get_io_end(io->io_end);
        io->io_bio = bio;
        io->io_next_block = bh->b_blocknr;
        wbc_init_bio(io->io_wbc, bio);
}

static void io_submit_add_bh(struct ext4_io_submit *io,
                             struct inode *inode,
                             struct page *page,
                             struct buffer_head *bh)
{
        int ret;

        if (io->io_bio && (bh->b_blocknr != io->io_next_block ||
                           !fscrypt_mergeable_bio_bh(io->io_bio, bh))) {
submit_and_retry:
                ext4_io_submit(io);
        }
        if (io->io_bio == NULL) {
                io_submit_init_bio(io, bh);
                io->io_bio->bi_write_hint = inode->i_write_hint;
        }
        ret = bio_add_page(io->io_bio, page, bh->b_size, bh_offset(bh));
        if (ret != bh->b_size)
                goto submit_and_retry;
        wbc_account_cgroup_owner(io->io_wbc, page, bh->b_size);
        io->io_next_block++;
}

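/*
 * Write out the dirty buffers of @page (or of the fscrypt bounce page that
 * stands in for it). The page must be locked and not yet under writeback;
 * both are asserted below. When @keep_towrite is set, writeback is started
 * with the keepwrite variant so the PAGECACHE_TAG_TOWRITE tag is preserved
 * for data integrity sync.
 */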
int ext4_bio_write_page(struct ext4_io_submit *io,
                        struct page *page,
                        int len,
                        struct writeback_control *wbc,
                        bool keep_towrite)
{
        struct page *bounce_page = NULL;
        struct inode *inode = page->mapping->host;
        unsigned block_start;
        struct buffer_head *bh, *head;
        int ret = 0;
        int nr_submitted = 0;
        int nr_to_submit = 0;

        BUG_ON(!PageLocked(page));
        BUG_ON(PageWriteback(page));

        if (keep_towrite)
                set_page_writeback_keepwrite(page);
        else
                set_page_writeback(page);
        ClearPageError(page);

        /*
         * Comments copied from block_write_full_page:
         *
         * The page straddles i_size. It must be zeroed out on each and every
         * writepage invocation because it may be mmapped. "A file is mapped
         * in multiples of the page size. For a file that is not a multiple of
         * the page size, the remaining memory is zeroed when mapped, and
         * writes to that region are not written out to the file."
         */
        if (len < PAGE_SIZE)
                zero_user_segment(page, len, PAGE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
         * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
         * on the first buffer finishes and we are still working on submitting
         * the second buffer.
         */
        bh = head = page_buffers(page);
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
                }
                if (!buffer_dirty(bh) || buffer_delay(bh) ||
                    !buffer_mapped(bh) || buffer_unwritten(bh)) {
                        /* A hole? We can safely clear the dirty bit */
                        if (!buffer_mapped(bh))
                                clear_buffer_dirty(bh);
                        if (io->io_bio)
                                ext4_io_submit(io);
                        continue;
                }
                if (buffer_new(bh))
                        clear_buffer_new(bh);
                set_buffer_async_write(bh);
                nr_to_submit++;
        } while ((bh = bh->b_this_page) != head);

        bh = head = page_buffers(page);

        /*
         * If any blocks are being written to an encrypted file, encrypt them
         * into a bounce page. For simplicity, just encrypt until the last
         * block which might be needed. This may cause some unneeded blocks
         * (e.g. holes) to be unnecessarily encrypted, but this is rare and
         * can't happen in the common case of blocksize == PAGE_SIZE.
         */
        if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) {
                gfp_t gfp_flags = GFP_NOFS;
                unsigned int enc_bytes = round_up(len, i_blocksize(inode));

                /*
                 * Since bounce page allocation uses a mempool, we can only use
                 * a waiting mask (i.e. request guaranteed allocation) on the
                 * first page of the bio. Otherwise it can deadlock.
                 */
                if (io->io_bio)
                        gfp_flags = GFP_NOWAIT | __GFP_NOWARN;
        retry_encrypt:
                bounce_page = fscrypt_encrypt_pagecache_blocks(page, enc_bytes,
                                                               0, gfp_flags);
                if (IS_ERR(bounce_page)) {
                        ret = PTR_ERR(bounce_page);
                        if (ret == -ENOMEM &&
                            (io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
                                gfp_flags = GFP_NOFS;
                                if (io->io_bio)
                                        ext4_io_submit(io);
                                else
                                        gfp_flags |= __GFP_NOFAIL;
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto retry_encrypt;
                        }

                        printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
                        redirty_page_for_writepage(wbc, page);
                        do {
                                clear_buffer_async_write(bh);
                                bh = bh->b_this_page;
                        } while (bh != head);
                        goto unlock;
                }
        }

        /* Now submit buffers to write */
        do {
                if (!buffer_async_write(bh))
                        continue;
                io_submit_add_bh(io, inode,
                                 bounce_page ? bounce_page : page, bh);
                nr_submitted++;
                clear_buffer_dirty(bh);
        } while ((bh = bh->b_this_page) != head);

unlock:
        unlock_page(page);
        /* Nothing submitted - we have to end page writeback */
        if (!nr_submitted)
                end_page_writeback(page);
        return ret;
}