// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"
#include <trace/events/android_fs.h>

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};

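/*
 * Context attached to bio->bi_private when post-read processing (decryption
 * and/or fs-verity verification) is required.  @cur_step tracks progress
 * through the steps enabled in @enabled_steps, a bitmask of STEP_* values.
 */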
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};

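/*
 * Final completion for a read bio: mark each page up-to-date (or not, if the
 * bio failed or a post-read step set PG_error), unlock it, free any attached
 * post-read context, and drop the bio reference.
 */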
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

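/*
 * Workqueue function for the decryption step: decrypt the bio's pages in
 * place, then advance to the next post-read step.
 */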
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}

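/*
 * Workqueue function for the fs-verity step: verify the bio's pages, then
 * finish the read.
 */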
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readpages() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

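/*
 * Advance to the next enabled post-read step for this bio, dispatching it to
 * the appropriate workqueue, or complete the read once all steps are done.
 */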
static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

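/*
 * Post-read processing is required iff a context was attached at submission
 * time and the bio completed successfully.
 */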
static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

static void
ext4_trace_read_completion(struct bio *bio)
{
	struct page *first_page = bio->bi_io_vec[0].bv_page;

	if (first_page != NULL)
		trace_android_fs_dataread_end(first_page->mapping->host,
					      page_offset(first_page),
					      bio->bi_iter.bi_size);
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled())
		ext4_trace_read_completion(bio);

	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}

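/*
 * fs-verity verification applies only to pages within i_size; pages beyond
 * it (where ext4 stores the verity metadata) are read without verification.
 */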
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

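/*
 * Allocate and attach a post-read context to the bio if decryption or
 * verification will be needed; otherwise leave bio->bi_private NULL.
 */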
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}

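/*
 * Reads must be allowed past i_size while a verity file is being built or
 * read, since the verity metadata lives beyond EOF; otherwise limit reads
 * to i_size.
 */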
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}

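/*
 * Submit a read bio, emitting an android_fs dataread trace event first when
 * that tracepoint is enabled.
 */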
static void
ext4_submit_bio_read(struct bio *bio)
{
	if (trace_android_fs_dataread_start_enabled()) {
		struct page *first_page = bio->bi_io_vec[0].bv_page;

		if (first_page != NULL) {
			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

			path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    first_page->mapping->host);
			trace_android_fs_dataread_start(
				first_page->mapping->host,
				page_offset(first_page),
				bio->bi_iter.bi_size,
				current->pid,
				path,
				current->comm);
		}
	}
	submit_bio(bio);
}

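/*
 * Read pages for an inode: either a single locked page (@page, when @rac is
 * NULL) or the pages described by the readahead control @rac.  Contiguous
 * runs of blocks are batched into large bios; pages that cannot be handled
 * that way fall back to block_read_full_page() via the "confused" path.
 */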
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio_set_op_attrs(bio, REQ_OP_READ,
						rac ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			ext4_submit_bio_read(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		ext4_submit_bio_read(bio);
	return 0;
}

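/*
 * Preallocate a slab cache and mempool of post-read contexts so that reads
 * of encrypted or verity files can always make forward progress.
 */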
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

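/* Tear down the post-read context mempool and slab cache. */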
void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}