xref: /OK3568_Linux_fs/kernel/fs/iomap/direct-io.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Red Hat, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 */
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <linux/task_io_accounting_ops.h>
#include "trace.h"

#include "../internal.h"

/*
 * Private flags for iomap_dio, must not overlap with the public ones in
 * iomap.h:
 */
#define IOMAP_DIO_WRITE_FUA	(1 << 28)
#define IOMAP_DIO_NEED_SYNC	(1 << 29)
#define IOMAP_DIO_WRITE		(1 << 30)
#define IOMAP_DIO_DIRTY		(1 << 31)

struct iomap_dio {
	struct kiocb		*iocb;
	const struct iomap_dio_ops *dops;
	loff_t			i_size;
	loff_t			size;
	atomic_t		ref;
	unsigned		flags;
	int			error;
	bool			wait_for_completion;

	union {
		/* used during submission and for synchronous completion: */
		struct {
			struct iov_iter		*iter;
			struct task_struct	*waiter;
			struct request_queue	*last_queue;
			blk_qc_t		cookie;
		} submit;

		/* used for aio completion: */
		struct {
			struct work_struct	work;
		} aio;
	};
};

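/*
 * Poll for completion of a polled (IOCB_HIPRI) direct I/O.  The submission
 * path stashes the last request queue in iocb->private and the blk-mq
 * cookie in iocb->ki_cookie; both are simply handed back to blk_poll().
 */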
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
{
	struct request_queue *q = READ_ONCE(kiocb->private);

	if (!q)
		return 0;
	return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
}
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);

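/*
 * Submit one bio of a direct I/O request.  Each submitted bio takes a
 * reference on the iomap_dio; the matching drop happens in
 * iomap_dio_bio_end_io(), so the dio cannot be completed or freed while
 * any of its bios are still in flight.
 */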
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
		struct bio *bio, loff_t pos)
{
	atomic_inc(&dio->ref);

	if (dio->iocb->ki_flags & IOCB_HIPRI)
		bio_set_polled(bio, dio->iocb);

	dio->submit.last_queue = bdev_get_queue(iomap->bdev);
	if (dio->dops && dio->dops->submit_io)
		dio->submit.cookie = dio->dops->submit_io(
				file_inode(dio->iocb->ki_filp),
				iomap, bio, pos);
	else
		dio->submit.cookie = submit_bio(bio);
}

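/*
 * Finish a direct I/O request: run the optional ->end_io callback, trim
 * short reads that cross i_size, advance the iocb position, invalidate
 * page cache pages covering a write, and push O_(D)SYNC writes that could
 * not use FUA throughout to stable storage via generic_write_sync().
 */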
ssize_t iomap_dio_complete(struct iomap_dio *dio)
{
	const struct iomap_dio_ops *dops = dio->dops;
	struct kiocb *iocb = dio->iocb;
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;
	ssize_t ret = dio->error;

	if (dops && dops->end_io)
		ret = dops->end_io(iocb, dio->size, ret, dio->flags);

	if (likely(!ret)) {
		ret = dio->size;
		/* check for short read */
		if (offset + ret > dio->i_size &&
		    !(dio->flags & IOMAP_DIO_WRITE))
			ret = dio->i_size - offset;
		iocb->ki_pos += ret;
	}

	/*
	 * Try again to invalidate clean pages which might have been cached by
	 * non-direct readahead, or faulted in by get_user_pages() if the source
	 * of the write was an mmap'ed region of the file we're writing.  Either
	 * one is a pretty crazy thing to do, so we don't support it 100%.  If
	 * this invalidation fails, tough, the write still worked...
	 *
	 * And this page cache invalidation has to be after ->end_io(), as some
	 * filesystems convert unwritten extents to real allocations in
	 * ->end_io() when necessary, otherwise a racing buffer read would cache
	 * zeros from unwritten extents.
	 */
	if (!dio->error && dio->size &&
	    (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {
		int err;
		err = invalidate_inode_pages2_range(inode->i_mapping,
				offset >> PAGE_SHIFT,
				(offset + dio->size - 1) >> PAGE_SHIFT);
		if (err)
			dio_warn_stale_pagecache(iocb->ki_filp);
	}

	inode_dio_end(file_inode(iocb->ki_filp));
	/*
	 * If this is a DSYNC write, make sure we push it to stable storage now
	 * that we've written data.
	 */
	if (ret > 0 && (dio->flags & IOMAP_DIO_NEED_SYNC))
		ret = generic_write_sync(iocb, ret);

	kfree(dio);

	return ret;
}
EXPORT_SYMBOL_GPL(iomap_dio_complete);

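/*
 * Deferred completion for asynchronous writes, run from the superblock's
 * s_dio_done_wq workqueue so that completion work that may block (such as
 * generic_write_sync() or unwritten extent conversion in ->end_io()) does
 * not run from bio completion context.
 */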
static void iomap_dio_complete_work(struct work_struct *work)
{
	struct iomap_dio *dio = container_of(work, struct iomap_dio, aio.work);
	struct kiocb *iocb = dio->iocb;

	iocb->ki_complete(iocb, iomap_dio_complete(dio), 0);
}

/*
 * Set an error in the dio if none is set yet.  We have to use cmpxchg
 * as the submission context and the completion context(s) can race to
 * update the error.
 */
static inline void iomap_dio_set_error(struct iomap_dio *dio, int ret)
{
	cmpxchg(&dio->error, 0, ret);
}

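/*
 * Per-bio completion handler.  On the drop of the last dio reference this
 * either wakes the synchronous waiter, punts an asynchronous write to the
 * completion workqueue, or completes an asynchronous read inline, as reads
 * need no blocking post-processing.
 */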
static void iomap_dio_bio_end_io(struct bio *bio)
{
	struct iomap_dio *dio = bio->bi_private;
	bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);

	if (bio->bi_status)
		iomap_dio_set_error(dio, blk_status_to_errno(bio->bi_status));

	if (atomic_dec_and_test(&dio->ref)) {
		if (dio->wait_for_completion) {
			struct task_struct *waiter = dio->submit.waiter;
			WRITE_ONCE(dio->submit.waiter, NULL);
			blk_wake_io_task(waiter);
		} else if (dio->flags & IOMAP_DIO_WRITE) {
			struct inode *inode = file_inode(dio->iocb->ki_filp);

			INIT_WORK(&dio->aio.work, iomap_dio_complete_work);
			queue_work(inode->i_sb->s_dio_done_wq, &dio->aio.work);
		} else {
			iomap_dio_complete_work(&dio->aio.work);
		}
	}

	if (should_dirty) {
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

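/*
 * Write "len" bytes of zeros at "pos" using a single-page bio backed by
 * the shared zero page.  Used to zero the unaligned head and tail of a
 * sub-block write to unwritten or newly allocated extents.
 */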
static void
iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
		unsigned len)
{
	struct inode *inode = file_inode(dio->iocb->ki_filp);
	struct page *page = ZERO_PAGE(0);
	int flags = REQ_SYNC | REQ_IDLE;
	struct bio *bio;

	bio = bio_alloc(GFP_KERNEL, 1);
	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
				  GFP_KERNEL);
	bio_set_dev(bio, iomap->bdev);
	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
	bio->bi_private = dio;
	bio->bi_end_io = iomap_dio_bio_end_io;

	get_page(page);
	__bio_add_page(bio, page, len, 0);
	bio_set_op_attrs(bio, REQ_OP_WRITE, flags);
	iomap_dio_submit_bio(dio, iomap, bio, pos);
}

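/*
 * Issue the I/O for one mapped or unwritten extent: check alignment
 * against the device logical block size, zero any sub-block head, then
 * loop building and submitting bios from the user iov_iter until the
 * extent is consumed, and finally zero any sub-block tail.
 */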
static loff_t
iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	unsigned int blkbits = blksize_bits(bdev_logical_block_size(iomap->bdev));
	unsigned int fs_block_size = i_blocksize(inode), pad;
	unsigned int align = iov_iter_alignment(dio->submit.iter);
	struct bio *bio;
	bool need_zeroout = false;
	bool use_fua = false;
	int nr_pages, ret = 0;
	size_t copied = 0;
	size_t orig_count;

	if ((pos | length | align) & ((1 << blkbits) - 1))
		return -EINVAL;

	if (iomap->type == IOMAP_UNWRITTEN) {
		dio->flags |= IOMAP_DIO_UNWRITTEN;
		need_zeroout = true;
	}

	if (iomap->flags & IOMAP_F_SHARED)
		dio->flags |= IOMAP_DIO_COW;

	if (iomap->flags & IOMAP_F_NEW) {
		need_zeroout = true;
	} else if (iomap->type == IOMAP_MAPPED) {
		/*
		 * Use a FUA write if we need datasync semantics, this is a
		 * pure data IO that doesn't require any metadata updates
		 * (including after IO completion such as unwritten extent
		 * conversion), and the underlying device supports FUA. This
		 * allows us to avoid cache flushes on IO completion.
		 */
		if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) &&
		    (dio->flags & IOMAP_DIO_WRITE_FUA) &&
		    blk_queue_fua(bdev_get_queue(iomap->bdev)))
			use_fua = true;
	}

	/*
	 * Save the original count and trim the iter to just the extent we
	 * are operating on right now.  The iter will be re-expanded once
	 * we are done.
	 */
	orig_count = iov_iter_count(dio->submit.iter);
	iov_iter_truncate(dio->submit.iter, length);

	nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
	if (nr_pages <= 0) {
		ret = nr_pages;
		goto out;
	}

	if (need_zeroout) {
		/* zero out from the start of the block to the write offset */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos - pad, pad);
	}

	do {
		size_t n;
		if (dio->error) {
			iov_iter_revert(dio->submit.iter, copied);
			copied = ret = 0;
			goto out;
		}

		bio = bio_alloc(GFP_KERNEL, nr_pages);
		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
					  GFP_KERNEL);
		bio_set_dev(bio, iomap->bdev);
		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
		bio->bi_write_hint = dio->iocb->ki_hint;
		bio->bi_ioprio = dio->iocb->ki_ioprio;
		bio->bi_private = dio;
		bio->bi_end_io = iomap_dio_bio_end_io;

		ret = bio_iov_iter_get_pages(bio, dio->submit.iter);
		if (unlikely(ret)) {
			/*
			 * We have to stop part way through an IO. We must fall
			 * through to the sub-block tail zeroing here, otherwise
			 * this short IO may expose stale data in the tail of
			 * the block we haven't written data to.
			 */
			bio_put(bio);
			goto zero_tail;
		}

		n = bio->bi_iter.bi_size;
		if (dio->flags & IOMAP_DIO_WRITE) {
			bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
			if (use_fua)
				bio->bi_opf |= REQ_FUA;
			else
				dio->flags &= ~IOMAP_DIO_WRITE_FUA;
			task_io_account_write(n);
		} else {
			bio->bi_opf = REQ_OP_READ;
			if (dio->flags & IOMAP_DIO_DIRTY)
				bio_set_pages_dirty(bio);
		}

		dio->size += n;
		copied += n;

		nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
		iomap_dio_submit_bio(dio, iomap, bio, pos);
		pos += n;
	} while (nr_pages);

	/*
	 * We need to zero out the tail of a sub-block write if the extent type
	 * requires zeroing or the write extends beyond EOF. If we don't zero
	 * the block tail in the latter case, we can expose stale data via mmap
	 * reads of the EOF block.
	 */
zero_tail:
	if (need_zeroout ||
	    ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) {
		/* zero out from the end of the write to the end of the block */
		pad = pos & (fs_block_size - 1);
		if (pad)
			iomap_dio_zero(dio, iomap, pos, fs_block_size - pad);
	}
out:
	/* Undo iter limitation to current extent */
	iov_iter_reexpand(dio->submit.iter, orig_count - copied);
	if (copied)
		return copied;
	return ret;
}

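/* Reads from holes (and from unwritten extents) just zero-fill the iter. */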
static loff_t
iomap_dio_hole_actor(loff_t length, struct iomap_dio *dio)
{
	length = iov_iter_zero(length, dio->submit.iter);
	dio->size += length;
	return length;
}

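/*
 * For inline extents the data lives inside the inode, so direct I/O
 * degenerates into a memory copy between iomap->inline_data and the user
 * iov_iter; writes past i_size zero the gap and update the inode size.
 */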
static loff_t
iomap_dio_inline_actor(struct inode *inode, loff_t pos, loff_t length,
		struct iomap_dio *dio, struct iomap *iomap)
{
	struct iov_iter *iter = dio->submit.iter;
	size_t copied;

	BUG_ON(pos + length > PAGE_SIZE - offset_in_page(iomap->inline_data));

	if (dio->flags & IOMAP_DIO_WRITE) {
		loff_t size = inode->i_size;

		if (pos > size)
			memset(iomap->inline_data + size, 0, pos - size);
		copied = copy_from_iter(iomap->inline_data + pos, length, iter);
		if (copied) {
			if (pos + copied > size)
				i_size_write(inode, pos + copied);
			mark_inode_dirty(inode);
		}
	} else {
		copied = copy_to_iter(iomap->inline_data + pos, length, iter);
	}
	dio->size += copied;
	return copied;
}

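/*
 * Dispatch one mapping returned by ->iomap_begin to the matching actor.
 * Unwritten extents read as holes but take the bio path for writes;
 * delalloc extents indicate a collision with buffered writes and fail
 * the I/O.
 */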
static loff_t
iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
		void *data, struct iomap *iomap, struct iomap *srcmap)
{
	struct iomap_dio *dio = data;

	switch (iomap->type) {
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(dio->flags & IOMAP_DIO_WRITE))
			return -EIO;
		return iomap_dio_hole_actor(length, dio);
	case IOMAP_UNWRITTEN:
		if (!(dio->flags & IOMAP_DIO_WRITE))
			return iomap_dio_hole_actor(length, dio);
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_MAPPED:
		return iomap_dio_bio_actor(inode, pos, length, dio, iomap);
	case IOMAP_INLINE:
		return iomap_dio_inline_actor(inode, pos, length, dio, iomap);
	case IOMAP_DELALLOC:
		/*
		 * DIO is not serialised against mmap() access at all, and so
		 * if the page_mkwrite occurs between the writeback and the
		 * iomap_apply() call in the DIO path, then it will see the
		 * DELALLOC block that the page-mkwrite allocated.
		 */
		pr_warn_ratelimited("Direct I/O collision with buffered writes! File: %pD4 Comm: %.20s\n",
				    dio->iocb->ki_filp, current->comm);
		return -EIO;
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

/*
 * iomap_dio_rw() always completes O_[D]SYNC writes regardless of whether the
 * IO is being issued as AIO or not.  This allows us to optimise pure data
 * writes to use REQ_FUA rather than requiring generic_write_sync() to issue
 * a REQ_FLUSH post write. This is slightly tricky because a single request
 * here can be mapped into multiple disjoint IOs and only a subset of the IOs
 * issued may be pure data writes. In that case, we still need to do a full
 * data sync completion.
 *
 * Returns -ENOTBLK in case of a page invalidation failure for writes.  The
 * caller needs to fall back to buffered I/O in this case.
 */
struct iomap_dio *
__iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = file_inode(iocb->ki_filp);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	loff_t end = iocb->ki_pos + count - 1, ret = 0;
	unsigned int flags = IOMAP_DIRECT;
	struct blk_plug plug;
	struct iomap_dio *dio;

	if (!count)
		return NULL;

	if (WARN_ON(is_sync_kiocb(iocb) && !wait_for_completion))
		return ERR_PTR(-EIO);

	dio = kmalloc(sizeof(*dio), GFP_KERNEL);
	if (!dio)
		return ERR_PTR(-ENOMEM);

	dio->iocb = iocb;
	atomic_set(&dio->ref, 1);
	dio->size = 0;
	dio->i_size = i_size_read(inode);
	dio->dops = dops;
	dio->error = 0;
	dio->flags = 0;

	dio->submit.iter = iter;
	dio->submit.waiter = current;
	dio->submit.cookie = BLK_QC_T_NONE;
	dio->submit.last_queue = NULL;

	if (iov_iter_rw(iter) == READ) {
		if (pos >= dio->i_size)
			goto out_free_dio;

		if (iter_is_iovec(iter))
			dio->flags |= IOMAP_DIO_DIRTY;
	} else {
		flags |= IOMAP_WRITE;
		dio->flags |= IOMAP_DIO_WRITE;

		/* for data sync or sync, we need sync completion processing */
		if (iocb->ki_flags & IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_NEED_SYNC;

		/*
		 * For datasync only writes, we optimistically try using FUA for
		 * this IO.  Any non-FUA write that occurs will clear this flag,
		 * hence we know before completion whether a cache flush is
		 * necessary.
		 */
		if ((iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC)) == IOCB_DSYNC)
			dio->flags |= IOMAP_DIO_WRITE_FUA;
	}

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, pos, end)) {
			ret = -EAGAIN;
			goto out_free_dio;
		}
		flags |= IOMAP_NOWAIT;
	}

	ret = filemap_write_and_wait_range(mapping, pos, end);
	if (ret)
		goto out_free_dio;

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * Try to invalidate cache pages for the range we are writing.
		 * If this invalidation fails, let the caller fall back to
		 * buffered I/O.
		 */
		if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
				end >> PAGE_SHIFT)) {
			trace_iomap_dio_invalidate_fail(inode, pos, count);
			ret = -ENOTBLK;
			goto out_free_dio;
		}

		if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) {
			ret = sb_init_dio_done_wq(inode->i_sb);
			if (ret < 0)
				goto out_free_dio;
		}
	}

	inode_dio_begin(inode);

	blk_start_plug(&plug);
	do {
		ret = iomap_apply(inode, pos, count, flags, ops, dio,
				iomap_dio_actor);
		if (ret <= 0) {
			/* magic error code to fall back to buffered I/O */
			if (ret == -ENOTBLK) {
				wait_for_completion = true;
				ret = 0;
			}
			break;
		}
		pos += ret;

		if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
			/*
			 * We only report that we've read data up to i_size.
			 * Revert iter to a state corresponding to that as
			 * some callers (such as splice code) rely on it.
			 */
			iov_iter_revert(iter, pos - dio->i_size);
			break;
		}
	} while ((count = iov_iter_count(iter)) > 0);
	blk_finish_plug(&plug);

	if (ret < 0)
		iomap_dio_set_error(dio, ret);

	/*
	 * If all the writes we issued were FUA, we don't need to flush the
	 * cache on IO completion. Clear the sync flag for this case.
	 */
	if (dio->flags & IOMAP_DIO_WRITE_FUA)
		dio->flags &= ~IOMAP_DIO_NEED_SYNC;

	WRITE_ONCE(iocb->ki_cookie, dio->submit.cookie);
	WRITE_ONCE(iocb->private, dio->submit.last_queue);

	/*
	 * We are about to drop our additional submission reference, which
	 * might be the last reference to the dio.  There are three different
	 * ways we can progress here:
	 *
	 *  (a) If this is the last reference we will always complete and free
	 *	the dio ourselves.
	 *  (b) If this is not the last reference, and we serve an asynchronous
	 *	iocb, we must never touch the dio after the decrement, the
	 *	I/O completion handler will complete and free it.
	 *  (c) If this is not the last reference, but we serve a synchronous
	 *	iocb, the I/O completion handler will wake us up on the drop
	 *	of the final reference, and we will complete and free it here
	 *	after we got woken by the I/O completion handler.
	 */
	dio->wait_for_completion = wait_for_completion;
	if (!atomic_dec_and_test(&dio->ref)) {
		if (!wait_for_completion)
			return ERR_PTR(-EIOCBQUEUED);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!READ_ONCE(dio->submit.waiter))
				break;

			if (!(iocb->ki_flags & IOCB_HIPRI) ||
			    !dio->submit.last_queue ||
			    !blk_poll(dio->submit.last_queue,
					 dio->submit.cookie, true))
				blk_io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}

	return dio;

out_free_dio:
	kfree(dio);
	if (ret)
		return ERR_PTR(ret);
	return NULL;
}
EXPORT_SYMBOL_GPL(__iomap_dio_rw);

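/*
 * Convenience wrapper around __iomap_dio_rw() that performs submission and
 * completion in one call, returning the number of bytes transferred or a
 * negative errno.  The NULL return for a zero-length request maps to 0 via
 * PTR_ERR_OR_ZERO().
 */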
ssize_t
iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
		bool wait_for_completion)
{
	struct iomap_dio *dio;

	dio = __iomap_dio_rw(iocb, iter, ops, dops, wait_for_completion);
	if (IS_ERR_OR_NULL(dio))
		return PTR_ERR_OR_ZERO(dio);
	return iomap_dio_complete(dio);
}
EXPORT_SYMBOL_GPL(iomap_dio_rw);
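/*
 * Usage sketch (illustrative only, not part of this file): a filesystem
 * typically calls iomap_dio_rw() from its ->read_iter()/->write_iter()
 * methods.  "myfs_iomap_ops" below is a hypothetical iomap_ops instance
 * providing the filesystem's iomap_begin/iomap_end callbacks; a NULL
 * iomap_dio_ops means no end_io/submit_io hooks are needed.
 *
 *	static ssize_t myfs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		if (!(iocb->ki_flags & IOCB_DIRECT))
 *			return generic_file_read_iter(iocb, to);
 *		return iomap_dio_rw(iocb, to, &myfs_iomap_ops, NULL,
 *				    is_sync_kiocb(iocb));
 *	}
 *
 * Passing is_sync_kiocb(iocb) as wait_for_completion makes synchronous
 * iocbs block until the I/O is done, while AIO returns -EIOCBQUEUED and
 * completes later via ->ki_complete().
 */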