// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/iomap.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>

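/*
 * Read completion handler for raw (uncompressed) data bios: it runs once
 * per bio, marks every payload page uptodate (or errored) and unlocks it,
 * then drops the bio reference taken at submission time.
 */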
static void erofs_readendio(struct bio *bio)
{
	struct bio_vec *bvec;
	blk_status_t err = bio->bi_status;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;

		/* page is already locked */
		DBG_BUGON(PageUptodate(page));

		if (err)
			SetPageError(page);
		else
			SetPageUptodate(page);

		unlock_page(page);
		/* page could be reclaimed now */
	}
	bio_put(bio);
}

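/*
 * Metadata is read through the block device's page cache (bd_inode), so
 * repeated lookups of the same meta block hit the cache.  The page comes
 * back uptodate from read_cache_page_gfp() and is returned locked; the
 * caller unlocks and puts it when done.
 */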
struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
{
	struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *page;

	page = read_cache_page_gfp(mapping, blkaddr,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
	/* should already be PageUptodate */
	if (!IS_ERR(page))
		lock_page(page);
	return page;
}

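/*
 * Translate a logical offset (map->m_la) into an on-disk extent for the
 * uncompressed layouts.  A plain flat inode stores all blocks contiguously
 * from vi->raw_blkaddr; EROFS_INODE_FLAT_INLINE additionally packs the
 * tail block right after the inode (and xattrs) in the metadata area, so
 * that last block is mapped separately with EROFS_MAP_META set.  Note the
 * block arithmetic below assumes the block size equals PAGE_SIZE.
 */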
static int erofs_map_blocks_flatmode(struct inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = EROFS_I(inode);
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
	lastblk = nblocks - tailendpacking;

	if (offset >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);

		map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
			erofs_err(inode->i_sb,
				  "inline data cross block boundary @ nid %llu",
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err(inode->i_sb,
			  "internal error @ nid: %llu (size %llu), m_la 0x%llx",
			  vi->nid, inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

out:
	map->m_llen = map->m_plen;

err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

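/*
 * Read one page of raw data, merging physically contiguous pages into a
 * single bio.  The caller threads the bio through successive calls: a
 * non-NULL return is the still-open bio to pass back in, NULL means the
 * page was completed inline (hole, inline tail, or already uptodate) or
 * the bio was submitted, and ERR_PTR() reports an error.
 */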
static inline struct bio *erofs_read_raw_page(struct bio *bio,
					      struct address_space *mapping,
					      struct page *page,
					      erofs_off_t *last_block,
					      unsigned int nblocks,
					      bool ra)
{
	struct inode *const inode = mapping->host;
	struct super_block *const sb = inode->i_sb;
	erofs_off_t current_block = (erofs_off_t)page->index;
	int err;

	DBG_BUGON(!nblocks);

	if (PageUptodate(page)) {
		err = 0;
		goto has_updated;
	}

	/* note that for the readpage case, bio is also NULL */
	if (bio &&
	    /* not continuous */
	    *last_block + 1 != current_block) {
submit_bio_retry:
		submit_bio(bio);
		bio = NULL;
	}

	if (!bio) {
		struct erofs_map_blocks map = {
			.m_la = blknr_to_addr(current_block),
		};
		erofs_blk_t blknr;
		unsigned int blkoff;

		err = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (err)
			goto err_out;

		/* zero out the holed page */
		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			zero_user_segment(page, 0, PAGE_SIZE);
			SetPageUptodate(page);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* for RAW access mode, m_plen must be equal to m_llen */
		DBG_BUGON(map.m_plen != map.m_llen);

		blknr = erofs_blknr(map.m_pa);
		blkoff = erofs_blkoff(map.m_pa);

		/* deal with inline page */
		if (map.m_flags & EROFS_MAP_META) {
			void *vsrc, *vto;
			struct page *ipage;

			DBG_BUGON(map.m_plen > PAGE_SIZE);

			ipage = erofs_get_meta_page(inode->i_sb, blknr);

			if (IS_ERR(ipage)) {
				err = PTR_ERR(ipage);
				goto err_out;
			}

			vsrc = kmap_atomic(ipage);
			vto = kmap_atomic(page);
			memcpy(vto, vsrc + blkoff, map.m_plen);
			memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
			kunmap_atomic(vto);
			kunmap_atomic(vsrc);
			flush_dcache_page(page);

			SetPageUptodate(page);
			/* TODO: could we unlock the page earlier? */
			unlock_page(ipage);
			put_page(ipage);

			/* imply err = 0, see erofs_map_blocks */
			goto has_updated;
		}

		/* pa must be block-aligned for raw reading */
		DBG_BUGON(erofs_blkoff(map.m_pa));

		/* max # of continuous pages */
		if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
			nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
		if (nblocks > BIO_MAX_PAGES)
			nblocks = BIO_MAX_PAGES;

		bio = bio_alloc(GFP_NOIO, nblocks);

		bio->bi_end_io = erofs_readendio;
		bio_set_dev(bio, sb->s_bdev);
		bio->bi_iter.bi_sector = (sector_t)blknr <<
			LOG_SECTORS_PER_BLOCK;
		bio->bi_opf = REQ_OP_READ | (ra ? REQ_RAHEAD : 0);
	}

	err = bio_add_page(bio, page, PAGE_SIZE, 0);
	/* out of the extent or bio is full */
	if (err < PAGE_SIZE)
		goto submit_bio_retry;

	*last_block = current_block;

	/* shift in advance in case it is followed by too many gaps */
	if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
		/* err should be reassigned to 0 after submitting */
		err = 0;
		goto submit_bio_out;
	}

	return bio;

err_out:
	/* for sync reading, set the page error immediately */
	if (!ra) {
		SetPageError(page);
		ClearPageUptodate(page);
	}
has_updated:
	unlock_page(page);

	/* if updated manually, continuous pages have a gap */
	if (bio)
submit_bio_out:
		submit_bio(bio);
	return err ? ERR_PTR(err) : NULL;
}

/*
 * Since we don't have write or truncate flows, no inode
 * locking needs to be held at the moment.
 */
static int erofs_raw_access_readpage(struct file *file, struct page *page)
{
	erofs_off_t last_block;
	struct bio *bio;

	trace_erofs_readpage(page, true);

	bio = erofs_read_raw_page(NULL, page->mapping,
				  page, &last_block, 1, false);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	DBG_BUGON(bio);	/* since we have only one bio -- must be NULL */
	return 0;
}

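/*
 * Readahead walks the requested pages in order, reusing one open bio
 * across physically contiguous pages and letting erofs_read_raw_page()
 * submit and reopen it whenever contiguity breaks.
 */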
static void erofs_raw_access_readahead(struct readahead_control *rac)
{
	erofs_off_t last_block;
	struct bio *bio = NULL;
	struct page *page;

	trace_erofs_readpages(rac->mapping->host, readahead_index(rac),
			readahead_count(rac), true);

	while ((page = readahead_page(rac))) {
		prefetchw(&page->flags);

		bio = erofs_read_raw_page(bio, rac->mapping, page, &last_block,
				readahead_count(rac), true);

		/* all page errors are ignored during readahead */
		if (IS_ERR(bio)) {
			pr_err("%s, readahead error at page %lu of nid %llu\n",
			       __func__, page->index,
			       EROFS_I(rac->mapping->host)->nid);

			bio = NULL;
		}

		put_page(page);
	}

	/* the rare case (end in gaps) */
	if (bio)
		submit_bio(bio);
}

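/*
 * FIBMAP support: flat files map 1:1 onto a contiguous block run, so the
 * physical block number follows directly from the flatmode mapping.  The
 * inline tail of a FLAT_INLINE file lives in the metadata area and has no
 * dedicated block address, hence the early return of 0 for it.
 */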
static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct erofs_map_blocks map = {
		.m_la = blknr_to_addr(block),
	};

	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
		erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;

		if (block >> LOG_SECTORS_PER_BLOCK >= blks)
			return 0;
	}

	if (!erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW))
		return erofs_blknr(map.m_pa);

	return 0;
}

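/*
 * iomap bridge shared by direct I/O and DAX: it converts a flatmode
 * mapping into an iomap extent.  Inline (EROFS_MAP_META) extents are not
 * expected here -- erofs_prepare_dio() already diverts reads touching the
 * inline tail to buffered I/O -- so they are rejected with -ENOTBLK.
 */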
static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map;

	map.m_la = offset;
	map.m_llen = length;

	ret = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->dax_dev = EROFS_I_SB(inode)->dax_dev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	iomap->flags = 0;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		if (!iomap->length)
			iomap->length = length;
		return 0;
	}

	/* that shouldn't happen for now */
	if (map.m_flags & EROFS_MAP_META) {
		DBG_BUGON(1);
		return -ENOTBLK;
	}
	iomap->type = IOMAP_MAPPED;
	iomap->addr = map.m_pa;
	return 0;
}

static const struct iomap_ops erofs_iomap_ops = {
	.iomap_begin = erofs_iomap_begin,
};

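/*
 * Direct I/O requires the position, length and memory of each segment to
 * be logical-block aligned.  Returns 0 to proceed with DIO, a positive
 * value to fall back to buffered I/O, or a negative errno.
 */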
static int erofs_prepare_dio(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t align = iocb->ki_pos | iov_iter_count(to) |
		iov_iter_alignment(to);
	struct block_device *bdev = inode->i_sb->s_bdev;
	unsigned int blksize_mask;

	if (bdev)
		blksize_mask = (1 << ilog2(bdev_logical_block_size(bdev))) - 1;
	else
		blksize_mask = (1 << inode->i_blkbits) - 1;

	if (align & blksize_mask)
		return -EINVAL;

	/*
	 * Temporarily fall back to buffered I/O for tail-packing inline
	 * files, since tail-packing inline support relies on an iomap
	 * core update.
	 */
	if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE &&
	    iocb->ki_pos + iov_iter_count(to) >
			rounddown(inode->i_size, EROFS_BLKSIZ))
		return 1;
	return 0;
}

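/*
 * Read path dispatch: DAX when the inode supports it, iomap direct I/O
 * for IOCB_DIRECT (falling back to buffered I/O when erofs_prepare_dio()
 * returns a positive value), and the generic buffered path otherwise.
 */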
static ssize_t erofs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* no need to take the (shared) inode lock since it's a ro filesystem */
	if (!iov_iter_count(to))
		return 0;

#ifdef CONFIG_FS_DAX
	if (IS_DAX(iocb->ki_filp->f_mapping->host))
		return dax_iomap_rw(iocb, to, &erofs_iomap_ops);
#endif
	if (iocb->ki_flags & IOCB_DIRECT) {
		int err = erofs_prepare_dio(iocb, to);

		if (!err)
			return iomap_dio_rw(iocb, to, &erofs_iomap_ops,
					    NULL, 0);
		if (err < 0)
			return err;
	}
	return generic_file_buffered_read(iocb, to, 0);
}

/* for uncompressed (aligned) files and raw access for other files */
const struct address_space_operations erofs_raw_access_aops = {
	.readpage = erofs_raw_access_readpage,
	.readahead = erofs_raw_access_readahead,
	.bmap = erofs_bmap,
	.direct_IO = noop_direct_IO,
};

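/*
 * DAX fault handling reuses erofs_iomap_ops; since the filesystem is
 * read-only, shared writable mappings are rejected in erofs_file_mmap().
 */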
#ifdef CONFIG_FS_DAX
static vm_fault_t erofs_dax_huge_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size)
{
	return dax_iomap_fault(vmf, pe_size, NULL, NULL, &erofs_iomap_ops);
}

static vm_fault_t erofs_dax_fault(struct vm_fault *vmf)
{
	return erofs_dax_huge_fault(vmf, PE_SIZE_PTE);
}

static const struct vm_operations_struct erofs_dax_vm_ops = {
	.fault		= erofs_dax_fault,
	.huge_fault	= erofs_dax_huge_fault,
};

static int erofs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!IS_DAX(file_inode(file)))
		return generic_file_readonly_mmap(file, vma);

	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

	vma->vm_ops = &erofs_dax_vm_ops;
	vma->vm_flags |= VM_HUGEPAGE;
#if defined(CONFIG_ROCKCHIP_RAMDISK) && defined(CONFIG_ARM)
	vma->vm_flags |= VM_MIXEDMAP;
#endif
	return 0;
}
#else
#define erofs_file_mmap	generic_file_readonly_mmap
#endif

const struct file_operations erofs_file_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= erofs_file_read_iter,
	.mmap		= erofs_file_mmap,
	.splice_read	= generic_file_splice_read,
};