// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>
#include <trace/hooks/mm.h>

#include "internal.h"

#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
#include <linux/fscrypt.h>
#endif

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, or the error returned by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		/* the page cache now holds its own reference; drop ours */
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
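
/*
 * Usage sketch (illustrative only; my_filler() and my_fs_readpage() are
 * hypothetical): a caller with a list of to-be-read pages passes a
 * per-page filler callback, e.g.
 *
 *	static int my_filler(void *data, struct page *page)
 *	{
 *		return my_fs_readpage(data, page);
 *	}
 *
 *	err = read_cache_pages(mapping, &page_list, my_filler, file);
 */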

gfp_t readahead_gfp_mask(struct address_space *x)
{
	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;

	trace_android_rvh_set_readahead_gfp_mask(&mask);
	return mask;
}
EXPORT_SYMBOL_GPL(readahead_gfp_mask);

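/*
 * Submit the pages accumulated so far to the filesystem, preferring the
 * ->readahead() op, then ->readpages(), then per-page ->readpage().
 * If @skip_page is true, the index is advanced past the page that
 * interrupted the batch (e.g. one already present in the cache).
 */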
static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}

/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct page *page = xa_load(&mapping->i_pages, index + i);

		BUG_ON(index + i != ractl->_index + ractl->_nr_pages);

		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		if (mapping->a_ops->readpages) {
			page->index = index + i;
			list_add(&page->lru, &page_pool);
		} else if (add_to_page_cache_lru(page, mapping, index + i,
					gfp_mask) < 0) {
			put_page(page);
			read_pages(ractl, &page_pool, true);
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}
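
/*
 * Illustrative clamp (assuming 4KB pages): for a 10000 byte file,
 * end_index = (10000 - 1) >> PAGE_SHIFT = 2, so a request for 32 pages
 * starting at index 0 is trimmed to 3 pages (indices 0-2).
 */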

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	bool force_lookahead = false;
#endif

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
	/*
	 * For files using fs-layer fscrypt, force lookahead so that the
	 * I/O and the encryption/decryption work can be pipelined.
	 */
	if (nr_to_read > max_pages && fscrypt_inode_uses_fs_layer_crypto(mapping->host))
		force_lookahead = true;
#endif
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
#if defined(CONFIG_ARCH_ROCKCHIP) && defined(CONFIG_NO_GKI)
		if (force_lookahead)
			do_page_cache_ra(ractl, this_chunk, this_chunk / 2);
		else
			do_page_cache_ra(ractl, this_chunk, 0);
#else
		do_page_cache_ra(ractl, this_chunk, 0);
#endif

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}
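
/*
 * Illustrative chunking (assuming 4KB pages): each iteration above issues
 * at most (2 * 1024 * 1024) / 4096 = 512 pages, so a forced read of 1280
 * pages (if permitted by max_pages) is submitted as chunks of 512, 512
 * and 256 pages.
 */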

/*
 * Set the initial readahead window size: round the request size up to
 * the next power of 2, then scale it up (x4 for small requests, x2 for
 * medium ones), capping the result at max.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
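
/*
 * Illustrative values (assuming 4KB pages and the common 128k / 32 page
 * max readahead):
 *	1 page request  -> 4 page (16k) initial window
 *	4 page request  -> 8 page (32k) initial window
 *	8 page request  -> 16 page (64k) initial window
 *	larger requests -> 32 page (128k) full window
 */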

/*
 *  Get the previous window size, ramp it up, and
 *  return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
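
/*
 * Illustrative ramp (assuming max == 32 pages): successive sequential
 * hits grow the window 1 -> 4 -> 8 -> 16 -> 32, after which it stays
 * pinned at max.
 */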

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
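
/*
 * Illustrative trace (a sketch, assuming max == 32 pages): a sync read
 * of 4 pages at index 0 yields start=0, size=8, async_size=4, so pages
 * 0-7 are read and page 4 (start + size - async_size) gets PG_readahead.
 * When the reader reaches page 4, the async path moves the window
 * forward: start=8, size=16, async_size=16, pages 8-23 are read and
 * page 8 is marked, keeping the I/O ahead of the application.
 */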

/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimate of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
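
/*
 * Illustrative case (assuming max == 32): with pages 100-107 already
 * cached, a 2 page read at index 108 finds 8 history pages (> req_size),
 * so readahead resumes with ra->start = 108 and
 * ra->size = min(8 + 2, 32) = 10.
 */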

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		struct file_ra_state *ra, bool hit_readahead_marker,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	trace_android_vh_ra_tuning_max_page(ractl, &max_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history
	 * pages) that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		struct file_ra_state *ra, unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if read-ahead is disabled, issue this request as read-ahead
	 * as we'll need it to satisfy the requested range. The forced
	 * read-ahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, ra, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(ractl, ra, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);
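
/*
 * Typical call path (a sketch; see the page_cache_sync_readahead()
 * wrapper in <linux/pagemap.h>): the generic read code builds a
 * readahead_control and calls this function, roughly:
 *
 *	DEFINE_READAHEAD(ractl, file, mapping, index);
 *	page_cache_sync_ra(&ractl, &file->f_ra, req_count);
 */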

void page_cache_async_ra(struct readahead_control *ractl,
		struct file_ra_state *ra, struct page *page,
		unsigned long req_count)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(ractl, ra, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
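
/*
 * Userspace usage (illustrative), prefetching the first 2MB of a file:
 *
 *	int fd = open("data.bin", O_RDONLY);
 *	if (fd >= 0)
 *		readahead(fd, 0, 2 * 1024 * 1024);
 */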