// SPDX-License-Identifier: GPL-2.0-only
/*
 *	linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/export.h>
#include <linux/compiler.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/error-injection.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/cpuset.h>
#include <linux/hugetlb.h>
#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include <linux/shmem_fs.h>
#include <linux/rmap.h>
#include <linux/delayacct.h>
#include <linux/psi.h>
#include <linux/ramfs.h>
#include <linux/page_idle.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/filemap.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for try_to_free_buffers */

#include <asm/mman.h>

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_rwsem		(truncate_pagecache)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_mutex
 *    ->i_mmap_rwsem		(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->lock_page		(access_process_vm)
 *
 *  ->i_mutex			(generic_perform_write)
 *    ->mmap_lock		(fault_in_pages_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock			(fs/fs-writeback.c)
 *    ->i_pages lock		(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->i_pages lock		(try_to_unmap_one)
 *    ->pgdat->lru_lock		(follow_page->mark_page_accessed)
 *    ->pgdat->lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock		(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock		(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock	(page_remove_rmap->lock_page_memcg)
 *    bdi.wb->list_lock		(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 * ->i_mmap_rwsem
 *   ->tasklist_lock            (memory_failure, collect_procs_ao)
 */

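/*
 * Illustrative sketch (not a literal path in this file): the ordering
 * above means a truncation-style path must take ->i_mmap_rwsem before
 * the ->i_pages lock, never the reverse, e.g.:
 *
 *	i_mmap_lock_write(mapping);		take ->i_mmap_rwsem first
 *	xa_lock_irq(&mapping->i_pages);		then the ->i_pages lock
 *	... operate on the page cache ...
 *	xa_unlock_irq(&mapping->i_pages);
 *	i_mmap_unlock_write(mapping);
 */
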
static void page_cache_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	XA_STATE(xas, &mapping->i_pages, page->index);
	unsigned int nr = 1;

	mapping_set_update(&xas, mapping);

	/* hugetlb pages are represented by a single entry in the xarray */
	if (!PageHuge(page)) {
		xas_set_order(&xas, page->index, compound_order(page));
		nr = compound_nr(page);
	}

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(nr != 1 && shadow, page);

	xas_store(&xas, shadow);
	xas_init_marks(&xas);

	page->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */

	if (shadow) {
		mapping->nrexceptional += nr;
		/*
		 * Make sure the nrexceptional update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages -= nr;
}

static void unaccount_page_cache_page(struct address_space *mapping,
				      struct page *page)
{
	int nr;

	/*
	 * if we're uptodate, flush out into the cleancache, otherwise
	 * invalidate any existing cleancache entries.  We can't leave
	 * stale data around in the cleancache once our page is gone
	 */
	if (PageUptodate(page) && PageMappedToDisk(page))
		cleancache_put_page(page);
	else
		cleancache_invalidate_page(mapping, page);

	VM_BUG_ON_PAGE(PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapped(page), page);
	if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) {
		int mapcount;

		pr_alert("BUG: Bad page cache in process %s  pfn:%05lx\n",
			 current->comm, page_to_pfn(page));
		dump_page(page, "still mapped when deleted");
		dump_stack();
		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

		mapcount = page_mapcount(page);
		if (mapping_exiting(mapping) &&
		    page_count(page) >= mapcount + 2) {
			/*
			 * All vmas have already been torn down, so it's
			 * a good bet that actually the page is unmapped,
			 * and we'd prefer not to leak it: if we're wrong,
			 * some other bad page check should catch it later.
			 */
			page_mapcount_reset(page);
			page_ref_sub(page, mapcount);
		}
	}

	/* hugetlb pages do not participate in page cache accounting. */
	if (PageHuge(page))
		return;

	nr = thp_nr_pages(page);

	__mod_lruvec_page_state(page, NR_FILE_PAGES, -nr);
	if (PageSwapBacked(page)) {
		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
		if (PageTransHuge(page))
			__dec_node_page_state(page, NR_SHMEM_THPS);
	} else if (PageTransHuge(page)) {
		__dec_node_page_state(page, NR_FILE_THPS);
		filemap_nr_thps_dec(mapping);
	}

	/*
	 * At this point page must be either written or cleaned by
	 * truncate.  Dirty page here signals a bug and loss of
	 * unwritten data.
	 *
	 * This fixes dirty accounting after removing the page entirely
	 * but leaves PageDirty set: it has no effect for truncated
	 * page and anyway will be cleared before returning page into
	 * buddy allocator.
	 */
	if (WARN_ON_ONCE(PageDirty(page)))
		account_page_cleaned(page, mapping, inode_to_wb(mapping->host));
}

/*
 * Delete a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold the i_pages lock.
 */
void __delete_from_page_cache(struct page *page, void *shadow)
{
	struct address_space *mapping = page->mapping;

	trace_mm_filemap_delete_from_page_cache(page);

	unaccount_page_cache_page(mapping, page);
	page_cache_delete(mapping, page, shadow);
}

static void page_cache_free_page(struct address_space *mapping,
				struct page *page)
{
	void (*freepage)(struct page *);

	freepage = mapping->a_ops->freepage;
	if (freepage)
		freepage(page);

	if (PageTransHuge(page) && !PageHuge(page)) {
		page_ref_sub(page, thp_nr_pages(page));
		VM_BUG_ON_PAGE(page_count(page) <= 0, page);
	} else {
		put_page(page);
	}
}

/**
 * delete_from_page_cache - delete page from page cache
 * @page: the page which the kernel is trying to remove from page cache
 *
 * This must be called only on pages that have been verified to be in the page
 * cache and locked.  It will never put the page into the free list, the caller
 * has a reference on the page.
 */
void delete_from_page_cache(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long flags;

	BUG_ON(!PageLocked(page));
	xa_lock_irqsave(&mapping->i_pages, flags);
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	page_cache_free_page(mapping, page);
}
EXPORT_SYMBOL(delete_from_page_cache);
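
/*
 * Illustrative sketch of a caller (not code from this file): the page
 * must be locked and the caller must hold its own reference, as the
 * kernel-doc above requires.  The truncation path follows essentially
 * this pattern:
 *
 *	lock_page(page);
 *	if (page->mapping == mapping)
 *		delete_from_page_cache(page);
 *	unlock_page(page);
 *	put_page(page);		then drop the caller's own reference
 */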

/*
 * page_cache_delete_batch - delete several pages from page cache
 * @mapping: the mapping to which pages belong
 * @pvec: pagevec with pages to delete
 *
 * The function walks over mapping->i_pages and removes pages passed in @pvec
 * from the mapping. The function expects @pvec to be sorted by page index
 * and is optimised for it to be dense.
 * It tolerates holes in @pvec (mapping entries at those indices are not
 * modified). The function expects only THP head pages to be present in the
 * @pvec.
 *
 * The function expects the i_pages lock to be held.
 */
static void page_cache_delete_batch(struct address_space *mapping,
			     struct pagevec *pvec)
{
	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
	int total_pages = 0;
	int i = 0;
	struct page *page;

	mapping_set_update(&xas, mapping);
	xas_for_each(&xas, page, ULONG_MAX) {
		if (i >= pagevec_count(pvec))
			break;

		/* A swap/dax/shadow entry got inserted? Skip it. */
		if (xa_is_value(page))
			continue;
		/*
		 * A page got inserted in our range? Skip it. We have our
		 * pages locked so they are protected from being removed.
		 * If we see a page whose index is higher than ours, it
		 * means our page has been removed, which shouldn't be
		 * possible because we're holding the PageLock.
		 */
		if (page != pvec->pages[i]) {
			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
					page);
			continue;
		}

		WARN_ON_ONCE(!PageLocked(page));

		if (page->index == xas.xa_index)
			page->mapping = NULL;
		/* Leave page->index set: truncation lookup relies on it */

		/*
		 * Move to the next page in the vector if this is a regular
		 * page or the index is of the last sub-page of this compound
		 * page.
		 */
		if (page->index + compound_nr(page) - 1 == xas.xa_index)
			i++;
		xas_store(&xas, NULL);
		total_pages++;
	}
	mapping->nrpages -= total_pages;
}

void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec)
{
	int i;
	unsigned long flags;

	if (!pagevec_count(pvec))
		return;

	xa_lock_irqsave(&mapping->i_pages, flags);
	for (i = 0; i < pagevec_count(pvec); i++) {
		trace_mm_filemap_delete_from_page_cache(pvec->pages[i]);

		unaccount_page_cache_page(mapping, pvec->pages[i]);
	}
	page_cache_delete_batch(mapping, pvec);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	for (i = 0; i < pagevec_count(pvec); i++)
		page_cache_free_page(mapping, pvec->pages[i]);
}

int filemap_check_errors(struct address_space *mapping)
{
	int ret = 0;
	/* Check for outstanding write errors */
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
EXPORT_SYMBOL(filemap_check_errors);

static int filemap_check_and_keep_errors(struct address_space *mapping)
{
	/* Check for outstanding write errors */
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}

/**
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends (inclusive)
 * @sync_mode:	enable synchronous operation
 *
 * Start writeback against all of a mapping's dirty pages that lie
 * within the byte offsets <start, end> inclusive.
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = LONG_MAX,
		.range_start = start,
		.range_end = end,
	};

	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	wbc_attach_fdatawrite_inode(&wbc, mapping->host);
	ret = do_writepages(mapping, &wbc);
	wbc_detach_inode(&wbc);
	return ret;
}

static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
}

int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
				loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite_range);
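
/*
 * Illustrative sketch (not code from this file): start data-integrity
 * writeback for the first megabyte of a file without waiting for
 * completion.  The return value only covers submission; completion
 * errors are picked up later by the fdatawait side.
 *
 *	err = filemap_fdatawrite_range(inode->i_mapping, 0, SZ_1M - 1);
 *	if (err)
 *		return err;
 */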

/**
 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 *
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/**
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check
 * @start_byte:        offset in bytes where the range starts
 * @end_byte:          offset in bytes where the range ends (inclusive)
 *
 * Find at least one page in the range supplied, usually used to check if
 * direct writing in this range will trigger a writeback.
 *
 * Return: %true if at least one page exists in the specified range,
 * %false otherwise.
 */
bool filemap_range_has_page(struct address_space *mapping,
			   loff_t start_byte, loff_t end_byte)
{
	struct page *page;
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
	pgoff_t max = end_byte >> PAGE_SHIFT;

	if (end_byte < start_byte)
		return false;

	rcu_read_lock();
	for (;;) {
		page = xas_find(&xas, max);
		if (xas_retry(&xas, page))
			continue;
		/* Shadow entries don't count */
		if (xa_is_value(page))
			continue;
		/*
		 * We don't need to try to pin this page; we're about to
		 * release the RCU lock anyway.  It is enough to know that
		 * there was a page here recently.
		 */
		break;
	}
	rcu_read_unlock();

	return page != NULL;
}
EXPORT_SYMBOL(filemap_range_has_page);
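
/*
 * Illustrative sketch (not code from this file): a direct-write path can
 * use this check to flush only when cached pages actually overlap the
 * I/O range:
 *
 *	if (filemap_range_has_page(mapping, pos, pos + count - 1)) {
 *		err = filemap_write_and_wait_range(mapping, pos,
 *						   pos + count - 1);
 *		if (err)
 *			return err;
 *	}
 */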

static void __filemap_fdatawait_range(struct address_space *mapping,
				     loff_t start_byte, loff_t end_byte)
{
	pgoff_t index = start_byte >> PAGE_SHIFT;
	pgoff_t end = end_byte >> PAGE_SHIFT;
	struct pagevec pvec;
	int nr_pages;

	if (end_byte < start_byte)
		return;

	pagevec_init(&pvec);
	while (index <= end) {
		unsigned i;

		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index,
				end, PAGECACHE_TAG_WRITEBACK);
		if (!nr_pages)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			wait_on_page_writeback(page);
			ClearPageError(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/**
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space
 * in the given range and wait for all of them.  Check error status of
 * the address space and return it.
 *
 * Since the error status of the address space is cleared by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
			    loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range);

/**
 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the given address space in the
 * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
 * this function does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 */
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
		loff_t start_byte, loff_t end_byte)
{
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);

/**
 * file_fdatawait_range - wait for writeback to complete
 * @file:		file pointing to address space structure to wait for
 * @start_byte:		offset in bytes where the range starts
 * @end_byte:		offset in bytes where the range ends (inclusive)
 *
 * Walk the list of under-writeback pages of the address space that file
 * refers to, in the given range and wait for all of them.  Check error
 * status of the address space vs. the file->f_wb_err cursor and return it.
 *
 * Since the error status of the file is advanced by this function,
 * callers are responsible for checking the return value and handling and/or
 * reporting the error.
 *
 * Return: error status of the address space vs. the file->f_wb_err cursor.
 */
int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)
{
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return file_check_and_advance_wb_err(file);
}
EXPORT_SYMBOL(file_fdatawait_range);

/**
 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 *
 * Walk the list of under-writeback pages of the given address space
 * and wait for all of them.  Unlike filemap_fdatawait(), this function
 * does not clear error status of the address space.
 *
 * Use this function if callers don't handle errors themselves.  Expected
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
 * fsfreeze(8)
 *
 * Return: error status of the address space.
 */
int filemap_fdatawait_keep_errors(struct address_space *mapping)
{
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
}
EXPORT_SYMBOL(filemap_fdatawait_keep_errors);

/* Returns true if writeback might be needed or already in progress. */
static bool mapping_needs_writeback(struct address_space *mapping)
{
	if (dax_mapping(mapping))
		return mapping->nrexceptional;

	return mapping->nrpages;
}

/**
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * Return: error status of the address space.
 */
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/*
		 * Even if the above returned an error, the pages may have
		 * been written partially (e.g. -ENOSPC), so we wait for
		 * them.  But -EIO is a special case: it may indicate that
		 * something much worse (e.g. a bug) happened, so we avoid
		 * waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait_range(mapping,
						lstart, lend);
			if (!err)
				err = err2;
		} else {
			/* Clear any previously stored errors */
			filemap_check_errors(mapping);
		}
	} else {
		err = filemap_check_errors(mapping);
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait_range);
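
/*
 * Illustrative sketch (hypothetical names, not code from this file): a
 * typical ->fsync() implementation flushes and waits on the requested
 * range before syncing its own metadata:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		int err;
 *
 *		err = filemap_write_and_wait_range(file->f_mapping,
 *						   start, end);
 *		if (err)
 *			return err;
 *		return myfs_sync_metadata(file_inode(file), datasync);
 *	}
 *
 * myfs_fsync() and myfs_sync_metadata() are made-up names for the sketch.
 */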

void __filemap_set_wb_err(struct address_space *mapping, int err)
{
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
}
EXPORT_SYMBOL(__filemap_set_wb_err);

/**
 * file_check_and_advance_wb_err - report any previously-recorded wb error
 * 				   and advance wb_err to the current one
 * @file: struct file on which the error is being reported
 *
 * When userland calls fsync (or something like nfsd does the equivalent), we
 * want to report any writeback errors that occurred since the last fsync (or
 * since the file was opened if there haven't been any).
 *
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * then just quickly return 0. The file is all caught up.
 *
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * it and try to swap it into place. If it works, or another task beat us
 * to it with the new value, then update the f_wb_err and return the error
 * portion. The error at this point must be reported via proper channels
 * (a la fsync, or an NFS COMMIT operation, etc.).
 *
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
 * value is protected by the f_lock since we must ensure that it reflects
 * the latest value swapped in for this file descriptor.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_check_and_advance_wb_err(struct file *file)
{
	int err = 0;
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	/* Locklessly handle the common case where nothing has changed */
	if (errseq_check(&mapping->wb_err, old)) {
		/* Something changed, must use slow path */
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		trace_file_check_and_advance_wb_err(file, old);
		spin_unlock(&file->f_lock);
	}

	/*
	 * We're mostly using this function as a drop-in replacement for
	 * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect
	 * that the legacy code would have had on these flags.
	 */
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
	return err;
}
EXPORT_SYMBOL(file_check_and_advance_wb_err);
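
/*
 * Illustrative sketch (not code from this file): because the errseq
 * cursor is per-file, each struct file observes a given writeback error
 * exactly once.  Assuming filp1 and filp2 were both open before the
 * error was recorded:
 *
 *	errseq_set(&mapping->wb_err, -EIO);
 *	file_check_and_advance_wb_err(filp1);	returns -EIO
 *	file_check_and_advance_wb_err(filp1);	returns 0 (already seen)
 *	file_check_and_advance_wb_err(filp2);	returns -EIO
 */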

/**
 * file_write_and_wait_range - write out & wait on a file range
 * @file:	file pointing to address_space with pages
 * @lstart:	offset in bytes where the range starts
 * @lend:	offset in bytes where the range ends (inclusive)
 *
 * Write out and wait upon file offsets lstart->lend, inclusive.
 *
 * Note that @lend is inclusive (describes the last byte to be written) so
 * that this function can be used to write to the very end-of-file (end = -1).
 *
 * After writing out and waiting on the data, we check and advance the
 * f_wb_err cursor to the latest value, and return any errors detected there.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)
{
	int err = 0, err2;
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	}
	err2 = file_check_and_advance_wb_err(file);
	if (!err)
		err = err2;
	return err;
}
EXPORT_SYMBOL(file_write_and_wait_range);

/**
 * replace_page_cache_page - replace a pagecache page with a new one
 * @old:	page to be replaced
 * @new:	page to replace with
 * @gfp_mask:	allocation mode
 *
 * This function replaces a page in the pagecache with a new one.  On
 * success it acquires the pagecache reference for the new page and
 * drops it for the old page.  Both the old and new pages must be
 * locked.  This function does not add the new page to the LRU, the
 * caller must do that.
 *
 * The remove + add is atomic.  This function cannot fail.
 *
 * Return: %0
 */
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
{
	struct address_space *mapping = old->mapping;
	void (*freepage)(struct page *) = mapping->a_ops->freepage;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);
	unsigned long flags;

	VM_BUG_ON_PAGE(!PageLocked(old), old);
	VM_BUG_ON_PAGE(!PageLocked(new), new);
	VM_BUG_ON_PAGE(new->mapping, new);

	get_page(new);
	new->mapping = mapping;
	new->index = offset;

	mem_cgroup_migrate(old, new);

	xas_lock_irqsave(&xas, flags);
	xas_store(&xas, new);

	old->mapping = NULL;
	/* hugetlb pages do not participate in page cache accounting. */
	if (!PageHuge(old))
		__dec_lruvec_page_state(old, NR_FILE_PAGES);
	if (!PageHuge(new))
		__inc_lruvec_page_state(new, NR_FILE_PAGES);
	if (PageSwapBacked(old))
		__dec_lruvec_page_state(old, NR_SHMEM);
	if (PageSwapBacked(new))
		__inc_lruvec_page_state(new, NR_SHMEM);
	xas_unlock_irqrestore(&xas, flags);
	if (freepage)
		freepage(old);
	put_page(old);

	return 0;
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
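
/*
 * Illustrative sketch of a caller (not code from this file): both pages
 * are locked across the replacement, and the caller adds the new page
 * to the LRU itself, as the kernel-doc above requires:
 *
 *	lock_page(oldpage);
 *	lock_page(newpage);
 *	replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
 *	lru_cache_add(newpage);
 *	unlock_page(newpage);
 *	unlock_page(oldpage);
 */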

noinline int __add_to_page_cache_locked(struct page *page,
					struct address_space *mapping,
					pgoff_t offset, gfp_t gfp,
					void **shadowp)
{
	XA_STATE(xas, &mapping->i_pages, offset);
	int huge = PageHuge(page);
	int error;
	bool charged = false;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapBacked(page), page);
	mapping_set_update(&xas, mapping);

	get_page(page);
	page->mapping = mapping;
	page->index = offset;

	if (!huge) {
		error = mem_cgroup_charge(page, current->mm, gfp);
		if (error)
			goto error;
		charged = true;
	}

	gfp &= GFP_RECLAIM_MASK;

	do {
		unsigned int order = xa_get_order(xas.xa, xas.xa_index);
		void *entry, *old = NULL;

		if (order > thp_order(page))
			xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index),
					order, gfp);
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			old = entry;
			if (!xa_is_value(entry)) {
				xas_set_err(&xas, -EEXIST);
				goto unlock;
			}
		}

		if (old) {
			if (shadowp)
				*shadowp = old;
			/* entry may have been split before we acquired lock */
			order = xa_get_order(xas.xa, xas.xa_index);
			if (order > thp_order(page)) {
				xas_split(&xas, old, order);
				xas_reset(&xas);
			}
		}

		xas_store(&xas, page);
		if (xas_error(&xas))
			goto unlock;

		if (old)
			mapping->nrexceptional--;
		mapping->nrpages++;

		/* hugetlb pages do not participate in page cache accounting */
		if (!huge)
			__inc_lruvec_page_state(page, NR_FILE_PAGES);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (xas_error(&xas)) {
		error = xas_error(&xas);
		if (charged)
			mem_cgroup_uncharge(page);
		goto error;
	}

	trace_mm_filemap_add_to_page_cache(page);
	return 0;
error:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	put_page(page);
	return error;
}
ALLOW_ERROR_INJECTION(__add_to_page_cache_locked, ERRNO);

/**
 * add_to_page_cache_locked - add a locked page to the pagecache
 * @page:	page to add
 * @mapping:	the page's address_space
 * @offset:	page index
 * @gfp_mask:	page allocation mode
 *
 * This function is used to add a page to the pagecache; the page must be
 * locked.  This function does not add the page to the LRU.  The caller must
 * do that.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	return __add_to_page_cache_locked(page, mapping, offset,
					  gfp_mask, NULL);
}
EXPORT_SYMBOL(add_to_page_cache_locked);

int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	void *shadow = NULL;
	int ret;

	__SetPageLocked(page);
	ret = __add_to_page_cache_locked(page, mapping, offset,
					 gfp_mask, &shadow);
	if (unlikely(ret))
		__ClearPageLocked(page);
	else {
		/*
		 * The page might have been evicted from cache only
		 * recently, in which case it should be activated like
		 * any other repeatedly accessed page.
		 * The exception is pages getting rewritten; evicting other
		 * data from the working set, only to cache data that will
		 * get overwritten with something else, is a waste of memory.
		 */
		WARN_ON_ONCE(PageActive(page));
		if (!(gfp_mask & __GFP_WRITE) && shadow)
			workingset_refault(page, shadow);
		lru_cache_add(page);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
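
/*
 * Illustrative sketch of the common read-side pattern (not code from
 * this file): allocate a page, insert it locked into the cache and LRU,
 * then let the filesystem fill it:
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index,
 *			mapping_gfp_constraint(mapping, GFP_KERNEL));
 *	if (err) {
 *		put_page(page);
 *		return err;
 *	}
 *	err = mapping->a_ops->readpage(file, page);
 */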
969*4882a593Smuzhiyun 
970*4882a593Smuzhiyun #ifdef CONFIG_NUMA
__page_cache_alloc(gfp_t gfp)971*4882a593Smuzhiyun struct page *__page_cache_alloc(gfp_t gfp)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	int n;
974*4882a593Smuzhiyun 	struct page *page;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	if (cpuset_do_page_mem_spread()) {
977*4882a593Smuzhiyun 		unsigned int cpuset_mems_cookie;
978*4882a593Smuzhiyun 		do {
979*4882a593Smuzhiyun 			cpuset_mems_cookie = read_mems_allowed_begin();
980*4882a593Smuzhiyun 			n = cpuset_mem_spread_node();
981*4882a593Smuzhiyun 			page = __alloc_pages_node(n, gfp, 0);
982*4882a593Smuzhiyun 		} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 		return page;
985*4882a593Smuzhiyun 	}
986*4882a593Smuzhiyun 	return alloc_pages(gfp, 0);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun EXPORT_SYMBOL(__page_cache_alloc);
989*4882a593Smuzhiyun #endif
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun /*
992*4882a593Smuzhiyun  * In order to wait for pages to become available there must be
993*4882a593Smuzhiyun  * waitqueues associated with pages. By using a hash table of
994*4882a593Smuzhiyun  * waitqueues where the bucket discipline is to maintain all
995*4882a593Smuzhiyun  * waiters on the same queue and wake all when any of the pages
996*4882a593Smuzhiyun  * become available, and for the woken contexts to check to be
997*4882a593Smuzhiyun  * sure the appropriate page became available, this saves space
998*4882a593Smuzhiyun  * at a cost of "thundering herd" phenomena during rare hash
999*4882a593Smuzhiyun  * collisions.
1000*4882a593Smuzhiyun  */
1001*4882a593Smuzhiyun #define PAGE_WAIT_TABLE_BITS 8
1002*4882a593Smuzhiyun #define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)
1003*4882a593Smuzhiyun static wait_queue_head_t page_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;
1004*4882a593Smuzhiyun 
page_waitqueue(struct page * page)1005*4882a593Smuzhiyun static wait_queue_head_t *page_waitqueue(struct page *page)
1006*4882a593Smuzhiyun {
1007*4882a593Smuzhiyun 	return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)];
1008*4882a593Smuzhiyun }
1009*4882a593Smuzhiyun 
pagecache_init(void)1010*4882a593Smuzhiyun void __init pagecache_init(void)
1011*4882a593Smuzhiyun {
1012*4882a593Smuzhiyun 	int i;
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)
1015*4882a593Smuzhiyun 		init_waitqueue_head(&page_wait_table[i]);
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	page_writeback_init();
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun /*
1021*4882a593Smuzhiyun  * The page wait code treats the "wait->flags" somewhat unusually, because
1022*4882a593Smuzhiyun  * we have multiple different kinds of waits, not just the usual "exclusive"
1023*4882a593Smuzhiyun  * one.
1024*4882a593Smuzhiyun  *
1025*4882a593Smuzhiyun  * We have:
1026*4882a593Smuzhiyun  *
1027*4882a593Smuzhiyun  *  (a) no special bits set:
1028*4882a593Smuzhiyun  *
1029*4882a593Smuzhiyun  *	We're just waiting for the bit to be released, and when a waker
1030*4882a593Smuzhiyun  *	calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,
1031*4882a593Smuzhiyun  *	and remove it from the wait queue.
1032*4882a593Smuzhiyun  *
1033*4882a593Smuzhiyun  *	Simple and straightforward.
1034*4882a593Smuzhiyun  *
1035*4882a593Smuzhiyun  *  (b) WQ_FLAG_EXCLUSIVE:
1036*4882a593Smuzhiyun  *
1037*4882a593Smuzhiyun  *	The waiter is waiting to get the lock, and only one waiter should
1038*4882a593Smuzhiyun  *	be woken up to avoid any thundering herd behavior. We'll set the
1039*4882a593Smuzhiyun  *	WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.
1040*4882a593Smuzhiyun  *
1041*4882a593Smuzhiyun  *	This is the traditional exclusive wait.
1042*4882a593Smuzhiyun  *
1043*4882a593Smuzhiyun  *  (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:
1044*4882a593Smuzhiyun  *
1045*4882a593Smuzhiyun  *	The waiter is waiting to get the bit, and additionally wants the
1046*4882a593Smuzhiyun  *	lock to be transferred to it for fair lock behavior. If the lock
1047*4882a593Smuzhiyun  *	cannot be taken, we stop walking the wait queue without waking
1048*4882a593Smuzhiyun  *	the waiter.
1049*4882a593Smuzhiyun  *
1050*4882a593Smuzhiyun  *	This is the "fair lock handoff" case, and in addition to setting
1051*4882a593Smuzhiyun  *	WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see
1052*4882a593Smuzhiyun  *	that it now has the lock.
1053*4882a593Smuzhiyun  */
wake_page_function(wait_queue_entry_t * wait,unsigned mode,int sync,void * arg)1054*4882a593Smuzhiyun static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)
1055*4882a593Smuzhiyun {
1056*4882a593Smuzhiyun 	unsigned int flags;
1057*4882a593Smuzhiyun 	struct wait_page_key *key = arg;
1058*4882a593Smuzhiyun 	struct wait_page_queue *wait_page
1059*4882a593Smuzhiyun 		= container_of(wait, struct wait_page_queue, wait);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	if (!wake_page_match(wait_page, key))
1062*4882a593Smuzhiyun 		return 0;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	/*
1065*4882a593Smuzhiyun 	 * If it's a lock handoff wait, we get the bit for it, and
1066*4882a593Smuzhiyun 	 * stop walking (and do not wake it up) if we can't.
1067*4882a593Smuzhiyun 	 */
1068*4882a593Smuzhiyun 	flags = wait->flags;
1069*4882a593Smuzhiyun 	if (flags & WQ_FLAG_EXCLUSIVE) {
1070*4882a593Smuzhiyun 		if (test_bit(key->bit_nr, &key->page->flags))
1071*4882a593Smuzhiyun 			return -1;
1072*4882a593Smuzhiyun 		if (flags & WQ_FLAG_CUSTOM) {
1073*4882a593Smuzhiyun 			if (test_and_set_bit(key->bit_nr, &key->page->flags))
1074*4882a593Smuzhiyun 				return -1;
1075*4882a593Smuzhiyun 			flags |= WQ_FLAG_DONE;
1076*4882a593Smuzhiyun 		}
1077*4882a593Smuzhiyun 	}
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun 	/*
1080*4882a593Smuzhiyun 	 * We are holding the wait-queue lock, but the waiter that
1081*4882a593Smuzhiyun 	 * is waiting for this will be checking the flags without
1082*4882a593Smuzhiyun 	 * any locking.
1083*4882a593Smuzhiyun 	 *
1084*4882a593Smuzhiyun 	 * So update the flags atomically, and wake up the waiter
1085*4882a593Smuzhiyun 	 * afterwards to avoid any races. This store-release pairs
1086*4882a593Smuzhiyun 	 * with the load-acquire in wait_on_page_bit_common().
1087*4882a593Smuzhiyun 	 */
1088*4882a593Smuzhiyun 	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
1089*4882a593Smuzhiyun 	wake_up_state(wait->private, mode);
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun 	/*
1092*4882a593Smuzhiyun 	 * Ok, we have successfully done what we're waiting for,
1093*4882a593Smuzhiyun 	 * and we can unconditionally remove the wait entry.
1094*4882a593Smuzhiyun 	 *
1095*4882a593Smuzhiyun 	 * Note that this pairs with the "finish_wait()" in the
1096*4882a593Smuzhiyun 	 * waiter, and has to be the absolute last thing we do.
1097*4882a593Smuzhiyun 	 * After this list_del_init(&wait->entry) the wait entry
1098*4882a593Smuzhiyun 	 * might be de-allocated and the process might even have
1099*4882a593Smuzhiyun 	 * exited.
1100*4882a593Smuzhiyun 	 */
1101*4882a593Smuzhiyun 	list_del_init_careful(&wait->entry);
1102*4882a593Smuzhiyun 	return (flags & WQ_FLAG_EXCLUSIVE) != 0;
1103*4882a593Smuzhiyun }
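
/*
 * Hypothetical helper, for exposition only: names the three waiter
 * kinds described in the comment above in terms of the WQ_FLAG_* bits
 * that wake_page_function() inspects.
 */
static inline const char *page_waiter_kind(unsigned int flags)
{
	if (!(flags & WQ_FLAG_EXCLUSIVE))
		return "non-exclusive";		/* case (a): just waiting */
	if (flags & WQ_FLAG_CUSTOM)
		return "fair handoff";		/* case (c): lock transferred */
	return "exclusive";			/* case (b): woken to take lock */
}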
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun static void wake_up_page_bit(struct page *page, int bit_nr)
1106*4882a593Smuzhiyun {
1107*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1108*4882a593Smuzhiyun 	struct wait_page_key key;
1109*4882a593Smuzhiyun 	unsigned long flags;
1110*4882a593Smuzhiyun 	wait_queue_entry_t bookmark;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	key.page = page;
1113*4882a593Smuzhiyun 	key.bit_nr = bit_nr;
1114*4882a593Smuzhiyun 	key.page_match = 0;
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	bookmark.flags = 0;
1117*4882a593Smuzhiyun 	bookmark.private = NULL;
1118*4882a593Smuzhiyun 	bookmark.func = NULL;
1119*4882a593Smuzhiyun 	INIT_LIST_HEAD(&bookmark.entry);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	spin_lock_irqsave(&q->lock, flags);
1122*4882a593Smuzhiyun 	__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	while (bookmark.flags & WQ_FLAG_BOOKMARK) {
1125*4882a593Smuzhiyun 		/*
1126*4882a593Smuzhiyun 		 * Take a breather from holding the lock so that
1127*4882a593Smuzhiyun 		 * waiters whose wakeups complete asynchronously
1128*4882a593Smuzhiyun 		 * can acquire the lock and remove themselves
1129*4882a593Smuzhiyun 		 * from the wait queue.
1130*4882a593Smuzhiyun 		 */
1131*4882a593Smuzhiyun 		spin_unlock_irqrestore(&q->lock, flags);
1132*4882a593Smuzhiyun 		cpu_relax();
1133*4882a593Smuzhiyun 		spin_lock_irqsave(&q->lock, flags);
1134*4882a593Smuzhiyun 		__wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark);
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	/*
1138*4882a593Smuzhiyun 	 * It is possible for other pages to have collided on the waitqueue
1139*4882a593Smuzhiyun 	 * hash, so in that case check for a page match. That prevents clearing
1140*4882a593Smuzhiyun 	 * PageWaiters while a long-term waiter for this page is still queued.
1141*4882a593Smuzhiyun 	 *
1142*4882a593Smuzhiyun 	 * It is still possible to miss a case here, when we woke page waiters
1143*4882a593Smuzhiyun 	 * and removed them from the waitqueue, but there are still other
1144*4882a593Smuzhiyun 	 * page waiters.
1145*4882a593Smuzhiyun 	 */
1146*4882a593Smuzhiyun 	if (!waitqueue_active(q) || !key.page_match) {
1147*4882a593Smuzhiyun 		ClearPageWaiters(page);
1148*4882a593Smuzhiyun 		/*
1149*4882a593Smuzhiyun 		 * It's possible to miss clearing Waiters here, when we woke
1150*4882a593Smuzhiyun 		 * our page waiters, but the hashed waitqueue has waiters for
1151*4882a593Smuzhiyun 		 * other pages on it.
1152*4882a593Smuzhiyun 		 *
1153*4882a593Smuzhiyun 		 * That's okay, it's a rare case. The next waker will clear it.
1154*4882a593Smuzhiyun 		 */
1155*4882a593Smuzhiyun 	}
1156*4882a593Smuzhiyun 	spin_unlock_irqrestore(&q->lock, flags);
1157*4882a593Smuzhiyun }
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun static void wake_up_page(struct page *page, int bit)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun 	if (!PageWaiters(page))
1162*4882a593Smuzhiyun 		return;
1163*4882a593Smuzhiyun 	wake_up_page_bit(page, bit);
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun /*
1167*4882a593Smuzhiyun  * A choice of three behaviors for wait_on_page_bit_common():
1168*4882a593Smuzhiyun  */
1169*4882a593Smuzhiyun enum behavior {
1170*4882a593Smuzhiyun 	EXCLUSIVE,	/* Hold ref to page and take the bit when woken, like
1171*4882a593Smuzhiyun 			 * __lock_page() waiting on then setting PG_locked.
1172*4882a593Smuzhiyun 			 */
1173*4882a593Smuzhiyun 	SHARED,		/* Hold ref to page and check the bit when woken, like
1174*4882a593Smuzhiyun 			 * wait_on_page_writeback() waiting on PG_writeback.
1175*4882a593Smuzhiyun 			 */
1176*4882a593Smuzhiyun 	DROP,		/* Drop ref to page before wait, no check when woken,
1177*4882a593Smuzhiyun 			 * like put_and_wait_on_page_locked() on PG_locked.
1178*4882a593Smuzhiyun 			 */
1179*4882a593Smuzhiyun };
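
/*
 * Illustrative mapping (drawn from the callers later in this file,
 * not a new API): which behavior each public entry point requests.
 *
 *	EXCLUSIVE  ->  __lock_page(), __lock_page_killable()
 *	SHARED     ->  wait_on_page_bit(), wait_on_page_bit_killable()
 *	DROP       ->  put_and_wait_on_page_locked()
 */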
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun /*
1182*4882a593Smuzhiyun  * Attempt to check (or get) the page bit, and mark us done
1183*4882a593Smuzhiyun  * if successful.
1184*4882a593Smuzhiyun  */
1185*4882a593Smuzhiyun static inline bool trylock_page_bit_common(struct page *page, int bit_nr,
1186*4882a593Smuzhiyun 					struct wait_queue_entry *wait)
1187*4882a593Smuzhiyun {
1188*4882a593Smuzhiyun 	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
1189*4882a593Smuzhiyun 		if (test_and_set_bit(bit_nr, &page->flags))
1190*4882a593Smuzhiyun 			return false;
1191*4882a593Smuzhiyun 	} else if (test_bit(bit_nr, &page->flags))
1192*4882a593Smuzhiyun 		return false;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
1195*4882a593Smuzhiyun 	return true;
1196*4882a593Smuzhiyun }
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun /* How many times do we accept lock stealing from under a waiter? */
1199*4882a593Smuzhiyun int sysctl_page_lock_unfairness = 5;
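
/*
 * Worked example (illustrative): with the default of 5, an EXCLUSIVE
 * waiter tolerates having the lock stolen from under it five times;
 * each trip through the "repeat:" label below decrements the
 * allowance, and once it goes negative the waiter sets WQ_FLAG_CUSTOM
 * to demand a fair handoff from the next unlocker.
 */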
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun static inline __sched int wait_on_page_bit_common(wait_queue_head_t *q,
1202*4882a593Smuzhiyun 	struct page *page, int bit_nr, int state, enum behavior behavior)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun 	int unfairness = sysctl_page_lock_unfairness;
1205*4882a593Smuzhiyun 	struct wait_page_queue wait_page;
1206*4882a593Smuzhiyun 	wait_queue_entry_t *wait = &wait_page.wait;
1207*4882a593Smuzhiyun 	bool thrashing = false;
1208*4882a593Smuzhiyun 	bool delayacct = false;
1209*4882a593Smuzhiyun 	unsigned long pflags;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	if (bit_nr == PG_locked &&
1212*4882a593Smuzhiyun 	    !PageUptodate(page) && PageWorkingset(page)) {
1213*4882a593Smuzhiyun 		if (!PageSwapBacked(page)) {
1214*4882a593Smuzhiyun 			delayacct_thrashing_start();
1215*4882a593Smuzhiyun 			delayacct = true;
1216*4882a593Smuzhiyun 		}
1217*4882a593Smuzhiyun 		psi_memstall_enter(&pflags);
1218*4882a593Smuzhiyun 		thrashing = true;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	init_wait(wait);
1222*4882a593Smuzhiyun 	wait->func = wake_page_function;
1223*4882a593Smuzhiyun 	wait_page.page = page;
1224*4882a593Smuzhiyun 	wait_page.bit_nr = bit_nr;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun repeat:
1227*4882a593Smuzhiyun 	wait->flags = 0;
1228*4882a593Smuzhiyun 	if (behavior == EXCLUSIVE) {
1229*4882a593Smuzhiyun 		wait->flags = WQ_FLAG_EXCLUSIVE;
1230*4882a593Smuzhiyun 		if (--unfairness < 0)
1231*4882a593Smuzhiyun 			wait->flags |= WQ_FLAG_CUSTOM;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	/*
1235*4882a593Smuzhiyun 	 * Do one last check whether we can get the
1236*4882a593Smuzhiyun 	 * page bit synchronously.
1237*4882a593Smuzhiyun 	 *
1238*4882a593Smuzhiyun 	 * Do the SetPageWaiters() marking before that
1239*4882a593Smuzhiyun 	 * to let any waker we _just_ missed know they
1240*4882a593Smuzhiyun 	 * need to wake us up (otherwise they'll never
1241*4882a593Smuzhiyun 	 * even go to the slow case that looks at the
1242*4882a593Smuzhiyun 	 * page queue), and add ourselves to the wait
1243*4882a593Smuzhiyun 	 * queue if we need to sleep.
1244*4882a593Smuzhiyun 	 *
1245*4882a593Smuzhiyun 	 * This part needs to be done under the queue
1246*4882a593Smuzhiyun 	 * lock to avoid races.
1247*4882a593Smuzhiyun 	 */
1248*4882a593Smuzhiyun 	spin_lock_irq(&q->lock);
1249*4882a593Smuzhiyun 	SetPageWaiters(page);
1250*4882a593Smuzhiyun 	if (!trylock_page_bit_common(page, bit_nr, wait))
1251*4882a593Smuzhiyun 		__add_wait_queue_entry_tail(q, wait);
1252*4882a593Smuzhiyun 	spin_unlock_irq(&q->lock);
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	/*
1255*4882a593Smuzhiyun 	 * From now on, all the logic will be based on
1256*4882a593Smuzhiyun 	 * the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to
1257*4882a593Smuzhiyun 	 * see whether the page bit testing has already
1258*4882a593Smuzhiyun 	 * been done by the wake function.
1259*4882a593Smuzhiyun 	 *
1260*4882a593Smuzhiyun 	 * We can drop our reference to the page.
1261*4882a593Smuzhiyun 	 */
1262*4882a593Smuzhiyun 	if (behavior == DROP)
1263*4882a593Smuzhiyun 		put_page(page);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	/*
1266*4882a593Smuzhiyun 	 * Note that until the "finish_wait()", or until
1267*4882a593Smuzhiyun 	 * we see the WQ_FLAG_WOKEN flag, we need to
1268*4882a593Smuzhiyun 	 * be very careful with the 'wait->flags', because
1269*4882a593Smuzhiyun 	 * we may race with a waker that sets them.
1270*4882a593Smuzhiyun 	 */
1271*4882a593Smuzhiyun 	for (;;) {
1272*4882a593Smuzhiyun 		unsigned int flags;
1273*4882a593Smuzhiyun 
1274*4882a593Smuzhiyun 		set_current_state(state);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 		/* Loop until we've been woken or interrupted */
1277*4882a593Smuzhiyun 		flags = smp_load_acquire(&wait->flags);
1278*4882a593Smuzhiyun 		if (!(flags & WQ_FLAG_WOKEN)) {
1279*4882a593Smuzhiyun 			if (signal_pending_state(state, current))
1280*4882a593Smuzhiyun 				break;
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 			io_schedule();
1283*4882a593Smuzhiyun 			continue;
1284*4882a593Smuzhiyun 		}
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 		/* If we were non-exclusive, we're done */
1287*4882a593Smuzhiyun 		if (behavior != EXCLUSIVE)
1288*4882a593Smuzhiyun 			break;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 		/* If the waker got the lock for us, we're done */
1291*4882a593Smuzhiyun 		if (flags & WQ_FLAG_DONE)
1292*4882a593Smuzhiyun 			break;
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 		/*
1295*4882a593Smuzhiyun 		 * Otherwise, if we're getting the lock, we need to
1296*4882a593Smuzhiyun 		 * try to get it ourselves.
1297*4882a593Smuzhiyun 		 *
1298*4882a593Smuzhiyun 		 * And if that fails, we'll have to retry this all.
1299*4882a593Smuzhiyun 		 */
1300*4882a593Smuzhiyun 		if (unlikely(test_and_set_bit(bit_nr, &page->flags)))
1301*4882a593Smuzhiyun 			goto repeat;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 		wait->flags |= WQ_FLAG_DONE;
1304*4882a593Smuzhiyun 		break;
1305*4882a593Smuzhiyun 	}
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	/*
1308*4882a593Smuzhiyun 	 * If a signal happened, this 'finish_wait()' may remove the last
1309*4882a593Smuzhiyun 	 * waiter from the wait-queues, but the PageWaiters bit will remain
1310*4882a593Smuzhiyun 	 * set. That's ok. The next wakeup will take care of it, and trying
1311*4882a593Smuzhiyun 	 * to do it here would be difficult and prone to races.
1312*4882a593Smuzhiyun 	 */
1313*4882a593Smuzhiyun 	finish_wait(q, wait);
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	if (thrashing) {
1316*4882a593Smuzhiyun 		if (delayacct)
1317*4882a593Smuzhiyun 			delayacct_thrashing_end();
1318*4882a593Smuzhiyun 		psi_memstall_leave(&pflags);
1319*4882a593Smuzhiyun 	}
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	/*
1322*4882a593Smuzhiyun 	 * NOTE! The wait->flags weren't stable until we've done the
1323*4882a593Smuzhiyun 	 * 'finish_wait()', and we could have exited the loop above due
1324*4882a593Smuzhiyun 	 * to a signal, and had a wakeup event happen after the signal
1325*4882a593Smuzhiyun 	 * test but before the 'finish_wait()'.
1326*4882a593Smuzhiyun 	 *
1327*4882a593Smuzhiyun 	 * So only after the finish_wait() can we reliably determine
1328*4882a593Smuzhiyun 	 * if we got woken up or not, so we can now figure out the final
1329*4882a593Smuzhiyun 	 * return value based on that state without races.
1330*4882a593Smuzhiyun 	 *
1331*4882a593Smuzhiyun 	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
1332*4882a593Smuzhiyun 	 * waiter, but an exclusive one requires WQ_FLAG_DONE.
1333*4882a593Smuzhiyun 	 */
1334*4882a593Smuzhiyun 	if (behavior == EXCLUSIVE)
1335*4882a593Smuzhiyun 		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
1338*4882a593Smuzhiyun }
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun __sched void wait_on_page_bit(struct page *page, int bit_nr)
1341*4882a593Smuzhiyun {
1342*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1343*4882a593Smuzhiyun 	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
1344*4882a593Smuzhiyun }
1345*4882a593Smuzhiyun EXPORT_SYMBOL(wait_on_page_bit);
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun __sched int wait_on_page_bit_killable(struct page *page, int bit_nr)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1350*4882a593Smuzhiyun 	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
1351*4882a593Smuzhiyun }
1352*4882a593Smuzhiyun EXPORT_SYMBOL(wait_on_page_bit_killable);
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun static int __wait_on_page_locked_async(struct page *page,
1355*4882a593Smuzhiyun 				       struct wait_page_queue *wait, bool set)
1356*4882a593Smuzhiyun {
1357*4882a593Smuzhiyun 	struct wait_queue_head *q = page_waitqueue(page);
1358*4882a593Smuzhiyun 	int ret = 0;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	wait->page = page;
1361*4882a593Smuzhiyun 	wait->bit_nr = PG_locked;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	spin_lock_irq(&q->lock);
1364*4882a593Smuzhiyun 	__add_wait_queue_entry_tail(q, &wait->wait);
1365*4882a593Smuzhiyun 	SetPageWaiters(page);
1366*4882a593Smuzhiyun 	if (set)
1367*4882a593Smuzhiyun 		ret = !trylock_page(page);
1368*4882a593Smuzhiyun 	else
1369*4882a593Smuzhiyun 		ret = PageLocked(page);
1370*4882a593Smuzhiyun 	/*
1371*4882a593Smuzhiyun 	 * If we were successful now, we know we're still on the
1372*4882a593Smuzhiyun 	 * waitqueue as we're still under the lock. This means it's
1373*4882a593Smuzhiyun 	 * safe to remove and return success, we know the callback
1374*4882a593Smuzhiyun 	 * isn't going to trigger.
1375*4882a593Smuzhiyun 	 */
1376*4882a593Smuzhiyun 	if (!ret)
1377*4882a593Smuzhiyun 		__remove_wait_queue(q, &wait->wait);
1378*4882a593Smuzhiyun 	else
1379*4882a593Smuzhiyun 		ret = -EIOCBQUEUED;
1380*4882a593Smuzhiyun 	spin_unlock_irq(&q->lock);
1381*4882a593Smuzhiyun 	return ret;
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun static int wait_on_page_locked_async(struct page *page,
1385*4882a593Smuzhiyun 				     struct wait_page_queue *wait)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	if (!PageLocked(page))
1388*4882a593Smuzhiyun 		return 0;
1389*4882a593Smuzhiyun 	return __wait_on_page_locked_async(compound_head(page), wait, false);
1390*4882a593Smuzhiyun }
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun /**
1393*4882a593Smuzhiyun  * put_and_wait_on_page_locked - Drop a reference and wait for it to be unlocked
1394*4882a593Smuzhiyun  * @page: The page to wait for.
1395*4882a593Smuzhiyun  *
1396*4882a593Smuzhiyun  * The caller should hold a reference on @page.  They expect the page to
1397*4882a593Smuzhiyun  * become unlocked relatively soon, but do not wish to hold up migration
1398*4882a593Smuzhiyun  * (for example) by holding the reference while waiting for the page to
1399*4882a593Smuzhiyun  * come unlocked.  After this function returns, the caller should not
1400*4882a593Smuzhiyun  * dereference @page.
1401*4882a593Smuzhiyun  */
1402*4882a593Smuzhiyun void put_and_wait_on_page_locked(struct page *page)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun 	wait_queue_head_t *q;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	page = compound_head(page);
1407*4882a593Smuzhiyun 	q = page_waitqueue(page);
1408*4882a593Smuzhiyun 	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP);
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun /**
1412*4882a593Smuzhiyun  * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1413*4882a593Smuzhiyun  * @page: Page defining the wait queue of interest
1414*4882a593Smuzhiyun  * @waiter: Waiter to add to the queue
1415*4882a593Smuzhiyun  *
1416*4882a593Smuzhiyun  * Add an arbitrary @waiter to the wait queue for the nominated @page.
1417*4882a593Smuzhiyun  */
1418*4882a593Smuzhiyun void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter)
1419*4882a593Smuzhiyun {
1420*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1421*4882a593Smuzhiyun 	unsigned long flags;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	spin_lock_irqsave(&q->lock, flags);
1424*4882a593Smuzhiyun 	__add_wait_queue_entry_tail(q, waiter);
1425*4882a593Smuzhiyun 	SetPageWaiters(page);
1426*4882a593Smuzhiyun 	spin_unlock_irqrestore(&q->lock, flags);
1427*4882a593Smuzhiyun }
1428*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(add_page_wait_queue);
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun #ifndef clear_bit_unlock_is_negative_byte
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun /*
1433*4882a593Smuzhiyun  * PG_waiters is the high bit in the same byte as PG_locked.
1434*4882a593Smuzhiyun  *
1435*4882a593Smuzhiyun  * On x86 (and on many other architectures), we can clear PG_locked and
1436*4882a593Smuzhiyun  * test the sign bit at the same time. But if the architecture does
1437*4882a593Smuzhiyun  * not support that special operation, we just do this all by hand
1438*4882a593Smuzhiyun  * instead.
1439*4882a593Smuzhiyun  *
1440*4882a593Smuzhiyun  * The read of PG_waiters has to be after (or concurrently with) PG_locked
1441*4882a593Smuzhiyun  * being cleared, but a memory barrier should be unnecessary since it is
1442*4882a593Smuzhiyun  * in the same byte as PG_locked.
1443*4882a593Smuzhiyun  */
1444*4882a593Smuzhiyun static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	clear_bit_unlock(nr, mem);
1447*4882a593Smuzhiyun 	/* smp_mb__after_atomic(); */
1448*4882a593Smuzhiyun 	return test_bit(PG_waiters, mem);
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun #endif
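
/*
 * Illustrative layout (assumes the generic numbering PG_locked == 0 and
 * PG_waiters == 7, which unlock_page() below asserts): both flags live
 * in the lowest byte of page->flags, with PG_waiters as that byte's
 * sign bit, so a single negative-byte test at unlock time observes any
 * queued waiters.
 *
 *	bit 7		bits 6..1	bit 0
 *	PG_waiters	(others)	PG_locked
 */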
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun /**
1454*4882a593Smuzhiyun  * unlock_page - unlock a locked page
1455*4882a593Smuzhiyun  * @page: the page
1456*4882a593Smuzhiyun  *
1457*4882a593Smuzhiyun  * Unlocks the page and wakes up sleepers in wait_on_page_locked().
1458*4882a593Smuzhiyun  * Also wakes sleepers in wait_on_page_writeback() because the wakeup
1459*4882a593Smuzhiyun  * mechanism between PageLocked pages and PageWriteback pages is shared.
1460*4882a593Smuzhiyun  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
1461*4882a593Smuzhiyun  *
1462*4882a593Smuzhiyun  * Note that this depends on PG_waiters being the sign bit in the byte
1463*4882a593Smuzhiyun  * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
1464*4882a593Smuzhiyun  * clear the PG_locked bit and test PG_waiters at the same time fairly
1465*4882a593Smuzhiyun  * portably (architectures that do LL/SC can test any bit, while x86 can
1466*4882a593Smuzhiyun  * test the sign bit).
1467*4882a593Smuzhiyun  */
1468*4882a593Smuzhiyun void unlock_page(struct page *page)
1469*4882a593Smuzhiyun {
1470*4882a593Smuzhiyun 	BUILD_BUG_ON(PG_waiters != 7);
1471*4882a593Smuzhiyun 	page = compound_head(page);
1472*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(!PageLocked(page), page);
1473*4882a593Smuzhiyun 	if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
1474*4882a593Smuzhiyun 		wake_up_page_bit(page, PG_locked);
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun EXPORT_SYMBOL(unlock_page);
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun /**
1479*4882a593Smuzhiyun  * end_page_writeback - end writeback against a page
1480*4882a593Smuzhiyun  * @page: the page
1481*4882a593Smuzhiyun  */
1482*4882a593Smuzhiyun void end_page_writeback(struct page *page)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	/*
1485*4882a593Smuzhiyun 	 * TestClearPageReclaim could be used here but it is an atomic
1486*4882a593Smuzhiyun 	 * operation and overkill in this particular case. Failing to
1487*4882a593Smuzhiyun 	 * shuffle a page marked for immediate reclaim is too mild to
1488*4882a593Smuzhiyun 	 * justify taking an atomic operation penalty at the end of
1489*4882a593Smuzhiyun 	 * ever page writeback.
1490*4882a593Smuzhiyun 	 */
1491*4882a593Smuzhiyun 	if (PageReclaim(page)) {
1492*4882a593Smuzhiyun 		ClearPageReclaim(page);
1493*4882a593Smuzhiyun 		rotate_reclaimable_page(page);
1494*4882a593Smuzhiyun 	}
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	/*
1497*4882a593Smuzhiyun 	 * Writeback does not hold a page reference of its own, relying
1498*4882a593Smuzhiyun 	 * on truncation to wait for the clearing of PG_writeback.
1499*4882a593Smuzhiyun 	 * But here we must make sure that the page is not freed and
1500*4882a593Smuzhiyun 	 * reused before the wake_up_page().
1501*4882a593Smuzhiyun 	 */
1502*4882a593Smuzhiyun 	get_page(page);
1503*4882a593Smuzhiyun 	if (!test_clear_page_writeback(page))
1504*4882a593Smuzhiyun 		BUG();
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 	smp_mb__after_atomic();
1507*4882a593Smuzhiyun 	wake_up_page(page, PG_writeback);
1508*4882a593Smuzhiyun 	put_page(page);
1509*4882a593Smuzhiyun }
1510*4882a593Smuzhiyun EXPORT_SYMBOL(end_page_writeback);
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun /*
1513*4882a593Smuzhiyun  * After completing I/O on a page, call this routine to update the page
1514*4882a593Smuzhiyun  * flags appropriately
1515*4882a593Smuzhiyun  */
1516*4882a593Smuzhiyun void page_endio(struct page *page, bool is_write, int err)
1517*4882a593Smuzhiyun {
1518*4882a593Smuzhiyun 	if (!is_write) {
1519*4882a593Smuzhiyun 		if (!err) {
1520*4882a593Smuzhiyun 			SetPageUptodate(page);
1521*4882a593Smuzhiyun 		} else {
1522*4882a593Smuzhiyun 			ClearPageUptodate(page);
1523*4882a593Smuzhiyun 			SetPageError(page);
1524*4882a593Smuzhiyun 		}
1525*4882a593Smuzhiyun 		unlock_page(page);
1526*4882a593Smuzhiyun 	} else {
1527*4882a593Smuzhiyun 		if (err) {
1528*4882a593Smuzhiyun 			struct address_space *mapping;
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 			SetPageError(page);
1531*4882a593Smuzhiyun 			mapping = page_mapping(page);
1532*4882a593Smuzhiyun 			if (mapping)
1533*4882a593Smuzhiyun 				mapping_set_error(mapping, err);
1534*4882a593Smuzhiyun 		}
1535*4882a593Smuzhiyun 		end_page_writeback(page);
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun }
1538*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(page_endio);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun /**
1541*4882a593Smuzhiyun  * __lock_page - get a lock on the page, assuming we need to sleep to get it
1542*4882a593Smuzhiyun  * @__page: the page to lock
1543*4882a593Smuzhiyun  */
1544*4882a593Smuzhiyun __sched void __lock_page(struct page *__page)
1545*4882a593Smuzhiyun {
1546*4882a593Smuzhiyun 	struct page *page = compound_head(__page);
1547*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1548*4882a593Smuzhiyun 	wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE,
1549*4882a593Smuzhiyun 				EXCLUSIVE);
1550*4882a593Smuzhiyun }
1551*4882a593Smuzhiyun EXPORT_SYMBOL(__lock_page);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun __sched int __lock_page_killable(struct page *__page)
1554*4882a593Smuzhiyun {
1555*4882a593Smuzhiyun 	struct page *page = compound_head(__page);
1556*4882a593Smuzhiyun 	wait_queue_head_t *q = page_waitqueue(page);
1557*4882a593Smuzhiyun 	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
1558*4882a593Smuzhiyun 					EXCLUSIVE);
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(__lock_page_killable);
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun __sched int __lock_page_async(struct page *page, struct wait_page_queue *wait)
1563*4882a593Smuzhiyun {
1564*4882a593Smuzhiyun 	return __wait_on_page_locked_async(page, wait, true);
1565*4882a593Smuzhiyun }
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun /*
1568*4882a593Smuzhiyun  * Return values:
1569*4882a593Smuzhiyun  * 1 - page is locked; mmap_lock is still held.
1570*4882a593Smuzhiyun  * 0 - page is not locked.
1571*4882a593Smuzhiyun  *     mmap_lock has been released (mmap_read_unlock()), unless flags had both
1572*4882a593Smuzhiyun  *     FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in
1573*4882a593Smuzhiyun  *     which case mmap_lock is still held.
1574*4882a593Smuzhiyun  *
1575*4882a593Smuzhiyun  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
1576*4882a593Smuzhiyun  * with the page locked and the mmap_lock unperturbed.
1577*4882a593Smuzhiyun  */
1578*4882a593Smuzhiyun __sched int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
1579*4882a593Smuzhiyun 			 unsigned int flags)
1580*4882a593Smuzhiyun {
1581*4882a593Smuzhiyun 	if (fault_flag_allow_retry_first(flags)) {
1582*4882a593Smuzhiyun 		/*
1583*4882a593Smuzhiyun 		 * CAUTION! In this case, mmap_lock is not released
1584*4882a593Smuzhiyun 		 * even though we return 0.
1585*4882a593Smuzhiyun 		 */
1586*4882a593Smuzhiyun 		if (flags & FAULT_FLAG_RETRY_NOWAIT)
1587*4882a593Smuzhiyun 			return 0;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1590*4882a593Smuzhiyun 		if (flags & FAULT_FLAG_KILLABLE)
1591*4882a593Smuzhiyun 			wait_on_page_locked_killable(page);
1592*4882a593Smuzhiyun 		else
1593*4882a593Smuzhiyun 			wait_on_page_locked(page);
1594*4882a593Smuzhiyun 		return 0;
1595*4882a593Smuzhiyun 	} else {
1596*4882a593Smuzhiyun 		if (flags & FAULT_FLAG_KILLABLE) {
1597*4882a593Smuzhiyun 			int ret;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 			ret = __lock_page_killable(page);
1600*4882a593Smuzhiyun 			if (ret) {
1601*4882a593Smuzhiyun 				mmap_read_unlock(mm);
1602*4882a593Smuzhiyun 				return 0;
1603*4882a593Smuzhiyun 			}
1604*4882a593Smuzhiyun 		} else
1605*4882a593Smuzhiyun 			__lock_page(page);
1606*4882a593Smuzhiyun 		return 1;
1607*4882a593Smuzhiyun 	}
1608*4882a593Smuzhiyun }
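
/*
 * Usage sketch (hypothetical fault-handler fragment, not kernel code):
 * honor the return convention documented above. On 0, the fault must
 * be retried and mmap_lock may already have been dropped.
 */
static vm_fault_t lock_fault_page_sketch(struct page *page,
					 struct vm_fault *vmf)
{
	if (!__lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags))
		return VM_FAULT_RETRY;	/* mmap_lock dropped unless NOWAIT */
	return 0;			/* page locked, mmap_lock still held */
}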
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun /**
1611*4882a593Smuzhiyun  * page_cache_next_miss() - Find the next gap in the page cache.
1612*4882a593Smuzhiyun  * @mapping: Mapping.
1613*4882a593Smuzhiyun  * @index: Index.
1614*4882a593Smuzhiyun  * @max_scan: Maximum range to search.
1615*4882a593Smuzhiyun  *
1616*4882a593Smuzhiyun  * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
1617*4882a593Smuzhiyun  * gap with the lowest index.
1618*4882a593Smuzhiyun  *
1619*4882a593Smuzhiyun  * This function may be called under the rcu_read_lock.  However, this will
1620*4882a593Smuzhiyun  * not atomically search a snapshot of the cache at a single point in time.
1621*4882a593Smuzhiyun  * For example, if a gap is created at index 5, then subsequently a gap is
1622*4882a593Smuzhiyun  * created at index 10, page_cache_next_miss covering both indices may
1623*4882a593Smuzhiyun  * return 10 if called under the rcu_read_lock.
1624*4882a593Smuzhiyun  *
1625*4882a593Smuzhiyun  * Return: The index of the gap if found, otherwise an index outside the
1626*4882a593Smuzhiyun  * range specified (in which case 'return - index >= max_scan' will be true).
1627*4882a593Smuzhiyun  * In the rare case of index wrap-around, 0 will be returned.
1628*4882a593Smuzhiyun  */
1629*4882a593Smuzhiyun pgoff_t page_cache_next_miss(struct address_space *mapping,
1630*4882a593Smuzhiyun 			     pgoff_t index, unsigned long max_scan)
1631*4882a593Smuzhiyun {
1632*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, index);
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	while (max_scan--) {
1635*4882a593Smuzhiyun 		void *entry = xas_next(&xas);
1636*4882a593Smuzhiyun 		if (!entry || xa_is_value(entry))
1637*4882a593Smuzhiyun 			break;
1638*4882a593Smuzhiyun 		if (xas.xa_index == 0)
1639*4882a593Smuzhiyun 			break;
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun 	return xas.xa_index;
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun EXPORT_SYMBOL(page_cache_next_miss);
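
/*
 * Usage sketch (hypothetical caller): per the return convention above,
 * "no gap found" is signalled by the result landing outside the
 * scanned range.
 */
static bool range_fully_cached_sketch(struct address_space *mapping,
				      pgoff_t index, unsigned long nr)
{
	return page_cache_next_miss(mapping, index, nr) - index >= nr;
}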
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun /**
1647*4882a593Smuzhiyun  * page_cache_prev_miss() - Find the previous gap in the page cache.
1648*4882a593Smuzhiyun  * @mapping: Mapping.
1649*4882a593Smuzhiyun  * @index: Index.
1650*4882a593Smuzhiyun  * @max_scan: Maximum range to search.
1651*4882a593Smuzhiyun  *
1652*4882a593Smuzhiyun  * Search the range [max(index - max_scan + 1, 0), index] for the
1653*4882a593Smuzhiyun  * gap with the highest index.
1654*4882a593Smuzhiyun  *
1655*4882a593Smuzhiyun  * This function may be called under the rcu_read_lock.  However, this will
1656*4882a593Smuzhiyun  * not atomically search a snapshot of the cache at a single point in time.
1657*4882a593Smuzhiyun  * For example, if a gap is created at index 10, then subsequently a gap is
1658*4882a593Smuzhiyun  * created at index 5, page_cache_prev_miss() covering both indices may
1659*4882a593Smuzhiyun  * return 5 if called under the rcu_read_lock.
1660*4882a593Smuzhiyun  *
1661*4882a593Smuzhiyun  * Return: The index of the gap if found, otherwise an index outside the
1662*4882a593Smuzhiyun  * range specified (in which case 'index - return >= max_scan' will be true).
1663*4882a593Smuzhiyun  * In the rare case of wrap-around, ULONG_MAX will be returned.
1664*4882a593Smuzhiyun  */
1665*4882a593Smuzhiyun pgoff_t page_cache_prev_miss(struct address_space *mapping,
1666*4882a593Smuzhiyun 			     pgoff_t index, unsigned long max_scan)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, index);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	while (max_scan--) {
1671*4882a593Smuzhiyun 		void *entry = xas_prev(&xas);
1672*4882a593Smuzhiyun 		if (!entry || xa_is_value(entry))
1673*4882a593Smuzhiyun 			break;
1674*4882a593Smuzhiyun 		if (xas.xa_index == ULONG_MAX)
1675*4882a593Smuzhiyun 			break;
1676*4882a593Smuzhiyun 	}
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	return xas.xa_index;
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun EXPORT_SYMBOL(page_cache_prev_miss);
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun /**
1683*4882a593Smuzhiyun  * find_get_entry - find and get a page cache entry
1684*4882a593Smuzhiyun  * @mapping: the address_space to search
1685*4882a593Smuzhiyun  * @index: The page cache index.
1686*4882a593Smuzhiyun  *
1687*4882a593Smuzhiyun  * Looks up the page cache slot at @mapping & @index.  If there is a
1688*4882a593Smuzhiyun  * page cache page, the head page is returned with an increased refcount.
1689*4882a593Smuzhiyun  *
1690*4882a593Smuzhiyun  * If the slot holds a shadow entry of a previously evicted page, or a
1691*4882a593Smuzhiyun  * swap entry from shmem/tmpfs, it is returned.
1692*4882a593Smuzhiyun  *
1693*4882a593Smuzhiyun  * Return: The head page or shadow entry, %NULL if nothing is found.
1694*4882a593Smuzhiyun  */
1695*4882a593Smuzhiyun struct page *find_get_entry(struct address_space *mapping, pgoff_t index)
1696*4882a593Smuzhiyun {
1697*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, index);
1698*4882a593Smuzhiyun 	struct page *page;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	rcu_read_lock();
1701*4882a593Smuzhiyun repeat:
1702*4882a593Smuzhiyun 	xas_reset(&xas);
1703*4882a593Smuzhiyun 	page = xas_load(&xas);
1704*4882a593Smuzhiyun 	if (xas_retry(&xas, page))
1705*4882a593Smuzhiyun 		goto repeat;
1706*4882a593Smuzhiyun 	/*
1707*4882a593Smuzhiyun 	 * A shadow entry of a recently evicted page, or a swap entry from
1708*4882a593Smuzhiyun 	 * shmem/tmpfs.  Return it without attempting to raise page count.
1709*4882a593Smuzhiyun 	 */
1710*4882a593Smuzhiyun 	if (!page || xa_is_value(page))
1711*4882a593Smuzhiyun 		goto out;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	if (!page_cache_get_speculative(page))
1714*4882a593Smuzhiyun 		goto repeat;
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 	/*
1717*4882a593Smuzhiyun 	 * Has the page moved or been split?
1718*4882a593Smuzhiyun 	 * This is part of the lockless pagecache protocol. See
1719*4882a593Smuzhiyun 	 * include/linux/pagemap.h for details.
1720*4882a593Smuzhiyun 	 */
1721*4882a593Smuzhiyun 	if (unlikely(page != xas_reload(&xas))) {
1722*4882a593Smuzhiyun 		put_page(page);
1723*4882a593Smuzhiyun 		goto repeat;
1724*4882a593Smuzhiyun 	}
1725*4882a593Smuzhiyun out:
1726*4882a593Smuzhiyun 	rcu_read_unlock();
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	return page;
1729*4882a593Smuzhiyun }
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun /**
1732*4882a593Smuzhiyun  * find_lock_entry - Locate and lock a page cache entry.
1733*4882a593Smuzhiyun  * @mapping: The address_space to search.
1734*4882a593Smuzhiyun  * @index: The page cache index.
1735*4882a593Smuzhiyun  *
1736*4882a593Smuzhiyun  * Looks up the page at @mapping & @index.  If there is a page in the
1737*4882a593Smuzhiyun  * cache, the head page is returned locked and with an increased refcount.
1738*4882a593Smuzhiyun  *
1739*4882a593Smuzhiyun  * If the slot holds a shadow entry of a previously evicted page, or a
1740*4882a593Smuzhiyun  * swap entry from shmem/tmpfs, it is returned.
1741*4882a593Smuzhiyun  *
1742*4882a593Smuzhiyun  * Context: May sleep.
1743*4882a593Smuzhiyun  * Return: The head page or shadow entry, %NULL if nothing is found.
1744*4882a593Smuzhiyun  */
1745*4882a593Smuzhiyun struct page *find_lock_entry(struct address_space *mapping, pgoff_t index)
1746*4882a593Smuzhiyun {
1747*4882a593Smuzhiyun 	struct page *page;
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun repeat:
1750*4882a593Smuzhiyun 	page = find_get_entry(mapping, index);
1751*4882a593Smuzhiyun 	if (page && !xa_is_value(page)) {
1752*4882a593Smuzhiyun 		lock_page(page);
1753*4882a593Smuzhiyun 		/* Has the page been truncated? */
1754*4882a593Smuzhiyun 		if (unlikely(page->mapping != mapping)) {
1755*4882a593Smuzhiyun 			unlock_page(page);
1756*4882a593Smuzhiyun 			put_page(page);
1757*4882a593Smuzhiyun 			goto repeat;
1758*4882a593Smuzhiyun 		}
1759*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(!thp_contains(page, index), page);
1760*4882a593Smuzhiyun 	}
1761*4882a593Smuzhiyun 	return page;
1762*4882a593Smuzhiyun }
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun /**
1765*4882a593Smuzhiyun  * pagecache_get_page - Find and get a reference to a page.
1766*4882a593Smuzhiyun  * @mapping: The address_space to search.
1767*4882a593Smuzhiyun  * @index: The page index.
1768*4882a593Smuzhiyun  * @fgp_flags: %FGP flags modify how the page is returned.
1769*4882a593Smuzhiyun  * @gfp_mask: Memory allocation flags to use if %FGP_CREAT is specified.
1770*4882a593Smuzhiyun  *
1771*4882a593Smuzhiyun  * Looks up the page cache entry at @mapping & @index.
1772*4882a593Smuzhiyun  *
1773*4882a593Smuzhiyun  * @fgp_flags can be zero or more of these flags:
1774*4882a593Smuzhiyun  *
1775*4882a593Smuzhiyun  * * %FGP_ACCESSED - The page will be marked accessed.
1776*4882a593Smuzhiyun  * * %FGP_LOCK - The page is returned locked.
1777*4882a593Smuzhiyun  * * %FGP_HEAD - If the page is present and a THP, return the head page
1778*4882a593Smuzhiyun  *   rather than the exact page specified by the index.
1779*4882a593Smuzhiyun  * * %FGP_CREAT - If no page is present then a new page is allocated using
1780*4882a593Smuzhiyun  *   @gfp_mask and added to the page cache and the VM's LRU list.
1781*4882a593Smuzhiyun  *   The page is returned locked and with an increased refcount.
1782*4882a593Smuzhiyun  * * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
1783*4882a593Smuzhiyun  *   page is already in cache.  If the page was allocated, unlock it before
1784*4882a593Smuzhiyun  *   returning so the caller can do the same dance.
1785*4882a593Smuzhiyun  * * %FGP_WRITE - The page will be written
1786*4882a593Smuzhiyun  * * %FGP_NOFS - __GFP_FS will get cleared in gfp mask
1787*4882a593Smuzhiyun  * * %FGP_NOWAIT - Don't get blocked by page lock
1788*4882a593Smuzhiyun  *
1789*4882a593Smuzhiyun  * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
1790*4882a593Smuzhiyun  * if the %GFP flags specified for %FGP_CREAT are atomic.
1791*4882a593Smuzhiyun  *
1792*4882a593Smuzhiyun  * If there is a page cache page, it is returned with an increased refcount.
1793*4882a593Smuzhiyun  *
1794*4882a593Smuzhiyun  * Return: The found page or %NULL otherwise.
1795*4882a593Smuzhiyun  */
1796*4882a593Smuzhiyun struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
1797*4882a593Smuzhiyun 		int fgp_flags, gfp_t gfp_mask)
1798*4882a593Smuzhiyun {
1799*4882a593Smuzhiyun 	struct page *page;
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun repeat:
1802*4882a593Smuzhiyun 	page = find_get_entry(mapping, index);
1803*4882a593Smuzhiyun 	if (xa_is_value(page))
1804*4882a593Smuzhiyun 		page = NULL;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	trace_android_vh_pagecache_get_page(mapping, index, fgp_flags,
1807*4882a593Smuzhiyun 					gfp_mask, page);
1808*4882a593Smuzhiyun 	if (!page)
1809*4882a593Smuzhiyun 		goto no_page;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	if (fgp_flags & FGP_LOCK) {
1812*4882a593Smuzhiyun 		if (fgp_flags & FGP_NOWAIT) {
1813*4882a593Smuzhiyun 			if (!trylock_page(page)) {
1814*4882a593Smuzhiyun 				put_page(page);
1815*4882a593Smuzhiyun 				return NULL;
1816*4882a593Smuzhiyun 			}
1817*4882a593Smuzhiyun 		} else {
1818*4882a593Smuzhiyun 			lock_page(page);
1819*4882a593Smuzhiyun 		}
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 		/* Has the page been truncated? */
1822*4882a593Smuzhiyun 		if (unlikely(page->mapping != mapping)) {
1823*4882a593Smuzhiyun 			unlock_page(page);
1824*4882a593Smuzhiyun 			put_page(page);
1825*4882a593Smuzhiyun 			goto repeat;
1826*4882a593Smuzhiyun 		}
1827*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(!thp_contains(page, index), page);
1828*4882a593Smuzhiyun 	}
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	if (fgp_flags & FGP_ACCESSED)
1831*4882a593Smuzhiyun 		mark_page_accessed(page);
1832*4882a593Smuzhiyun 	else if (fgp_flags & FGP_WRITE) {
1833*4882a593Smuzhiyun 		/* Clear idle flag for buffer write */
1834*4882a593Smuzhiyun 		if (page_is_idle(page))
1835*4882a593Smuzhiyun 			clear_page_idle(page);
1836*4882a593Smuzhiyun 	}
1837*4882a593Smuzhiyun 	if (!(fgp_flags & FGP_HEAD))
1838*4882a593Smuzhiyun 		page = find_subpage(page, index);
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun no_page:
1841*4882a593Smuzhiyun 	if (!page && (fgp_flags & FGP_CREAT)) {
1842*4882a593Smuzhiyun 		int err;
1843*4882a593Smuzhiyun 		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
1844*4882a593Smuzhiyun 			gfp_mask |= __GFP_WRITE;
1845*4882a593Smuzhiyun 		if (fgp_flags & FGP_NOFS)
1846*4882a593Smuzhiyun 			gfp_mask &= ~__GFP_FS;
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 		page = __page_cache_alloc(gfp_mask);
1849*4882a593Smuzhiyun 		if (!page)
1850*4882a593Smuzhiyun 			return NULL;
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 		if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))
1853*4882a593Smuzhiyun 			fgp_flags |= FGP_LOCK;
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun 		/* Init accessed so we avoid an atomic mark_page_accessed later */
1856*4882a593Smuzhiyun 		if (fgp_flags & FGP_ACCESSED)
1857*4882a593Smuzhiyun 			__SetPageReferenced(page);
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 		err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
1860*4882a593Smuzhiyun 		if (unlikely(err)) {
1861*4882a593Smuzhiyun 			put_page(page);
1862*4882a593Smuzhiyun 			page = NULL;
1863*4882a593Smuzhiyun 			if (err == -EEXIST)
1864*4882a593Smuzhiyun 				goto repeat;
1865*4882a593Smuzhiyun 		}
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 		/*
1868*4882a593Smuzhiyun 		 * add_to_page_cache_lru locks the page, and for mmap we expect
1869*4882a593Smuzhiyun 		 * an unlocked page.
1870*4882a593Smuzhiyun 		 */
1871*4882a593Smuzhiyun 		if (page && (fgp_flags & FGP_FOR_MMAP))
1872*4882a593Smuzhiyun 			unlock_page(page);
1873*4882a593Smuzhiyun 	}
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	return page;
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun EXPORT_SYMBOL(pagecache_get_page);
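
/*
 * Usage sketch (hypothetical, mirrors the classic grab_cache_page()
 * pattern): find-or-create a locked page for a buffered write. The
 * helper name is illustrative, not an API in this file.
 */
static struct page *grab_locked_page_sketch(struct address_space *mapping,
					    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
}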
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun /**
1880*4882a593Smuzhiyun  * find_get_entries - gang pagecache lookup
1881*4882a593Smuzhiyun  * @mapping:	The address_space to search
1882*4882a593Smuzhiyun  * @start:	The starting page cache index
1883*4882a593Smuzhiyun  * @nr_entries:	The maximum number of entries
1884*4882a593Smuzhiyun  * @entries:	Where the resulting entries are placed
1885*4882a593Smuzhiyun  * @indices:	The cache indices corresponding to the entries in @entries
1886*4882a593Smuzhiyun  *
1887*4882a593Smuzhiyun  * find_get_entries() will search for and return a group of up to
1888*4882a593Smuzhiyun  * @nr_entries entries in the mapping.  The entries are placed at
1889*4882a593Smuzhiyun  * @entries.  find_get_entries() takes a reference against any actual
1890*4882a593Smuzhiyun  * pages it returns.
1891*4882a593Smuzhiyun  *
1892*4882a593Smuzhiyun  * The search returns a group of mapping-contiguous page cache entries
1893*4882a593Smuzhiyun  * with ascending indexes.  There may be holes in the indices due to
1894*4882a593Smuzhiyun  * not-present pages.
1895*4882a593Smuzhiyun  *
1896*4882a593Smuzhiyun  * Any shadow entries of evicted pages, or swap entries from
1897*4882a593Smuzhiyun  * shmem/tmpfs, are included in the returned array.
1898*4882a593Smuzhiyun  *
1899*4882a593Smuzhiyun  * If it finds a Transparent Huge Page, head or tail, find_get_entries()
1900*4882a593Smuzhiyun  * stops at that page: the caller is likely to have a better way to handle
1901*4882a593Smuzhiyun  * the compound page as a whole, and then skip its extent, than repeatedly
1902*4882a593Smuzhiyun  * calling find_get_entries() to return all its tails.
1903*4882a593Smuzhiyun  *
1904*4882a593Smuzhiyun  * Return: the number of pages and shadow entries which were found.
1905*4882a593Smuzhiyun  */
1906*4882a593Smuzhiyun unsigned find_get_entries(struct address_space *mapping,
1907*4882a593Smuzhiyun 			  pgoff_t start, unsigned int nr_entries,
1908*4882a593Smuzhiyun 			  struct page **entries, pgoff_t *indices)
1909*4882a593Smuzhiyun {
1910*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, start);
1911*4882a593Smuzhiyun 	struct page *page;
1912*4882a593Smuzhiyun 	unsigned int ret = 0;
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun 	if (!nr_entries)
1915*4882a593Smuzhiyun 		return 0;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	rcu_read_lock();
1918*4882a593Smuzhiyun 	xas_for_each(&xas, page, ULONG_MAX) {
1919*4882a593Smuzhiyun 		if (xas_retry(&xas, page))
1920*4882a593Smuzhiyun 			continue;
1921*4882a593Smuzhiyun 		/*
1922*4882a593Smuzhiyun 		 * A shadow entry of a recently evicted page, a swap
1923*4882a593Smuzhiyun 		 * entry from shmem/tmpfs or a DAX entry.  Return it
1924*4882a593Smuzhiyun 		 * without attempting to raise page count.
1925*4882a593Smuzhiyun 		 */
1926*4882a593Smuzhiyun 		if (xa_is_value(page))
1927*4882a593Smuzhiyun 			goto export;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 		if (!page_cache_get_speculative(page))
1930*4882a593Smuzhiyun 			goto retry;
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 		/* Has the page moved or been split? */
1933*4882a593Smuzhiyun 		if (unlikely(page != xas_reload(&xas)))
1934*4882a593Smuzhiyun 			goto put_page;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 		/*
1937*4882a593Smuzhiyun 		 * Terminate early on finding a THP, to allow the caller to
1938*4882a593Smuzhiyun 		 * handle it all at once; but continue if this is hugetlbfs.
1939*4882a593Smuzhiyun 		 */
1940*4882a593Smuzhiyun 		if (PageTransHuge(page) && !PageHuge(page)) {
1941*4882a593Smuzhiyun 			page = find_subpage(page, xas.xa_index);
1942*4882a593Smuzhiyun 			nr_entries = ret + 1;
1943*4882a593Smuzhiyun 		}
1944*4882a593Smuzhiyun export:
1945*4882a593Smuzhiyun 		indices[ret] = xas.xa_index;
1946*4882a593Smuzhiyun 		entries[ret] = page;
1947*4882a593Smuzhiyun 		if (++ret == nr_entries)
1948*4882a593Smuzhiyun 			break;
1949*4882a593Smuzhiyun 		continue;
1950*4882a593Smuzhiyun put_page:
1951*4882a593Smuzhiyun 		put_page(page);
1952*4882a593Smuzhiyun retry:
1953*4882a593Smuzhiyun 		xas_reset(&xas);
1954*4882a593Smuzhiyun 	}
1955*4882a593Smuzhiyun 	rcu_read_unlock();
1956*4882a593Smuzhiyun 	return ret;
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun /**
1960*4882a593Smuzhiyun  * find_get_pages_range - gang pagecache lookup
1961*4882a593Smuzhiyun  * @mapping:	The address_space to search
1962*4882a593Smuzhiyun  * @start:	The starting page index
1963*4882a593Smuzhiyun  * @end:	The final page index (inclusive)
1964*4882a593Smuzhiyun  * @nr_pages:	The maximum number of pages
1965*4882a593Smuzhiyun  * @pages:	Where the resulting pages are placed
1966*4882a593Smuzhiyun  *
1967*4882a593Smuzhiyun  * find_get_pages_range() will search for and return a group of up to @nr_pages
1968*4882a593Smuzhiyun  * pages in the mapping starting at index @start and up to index @end
1969*4882a593Smuzhiyun  * (inclusive).  The pages are placed at @pages.  find_get_pages_range() takes
1970*4882a593Smuzhiyun  * a reference against the returned pages.
1971*4882a593Smuzhiyun  *
1972*4882a593Smuzhiyun  * The search returns a group of mapping-contiguous pages with ascending
1973*4882a593Smuzhiyun  * indexes.  There may be holes in the indices due to not-present pages.
1974*4882a593Smuzhiyun  * We also update @start to index the next page for the traversal.
1975*4882a593Smuzhiyun  *
1976*4882a593Smuzhiyun  * Return: the number of pages which were found. If this number is
1977*4882a593Smuzhiyun  * smaller than @nr_pages, the end of specified range has been
1978*4882a593Smuzhiyun  * reached.
1979*4882a593Smuzhiyun  */
1980*4882a593Smuzhiyun unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
1981*4882a593Smuzhiyun 			      pgoff_t end, unsigned int nr_pages,
1982*4882a593Smuzhiyun 			      struct page **pages)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, *start);
1985*4882a593Smuzhiyun 	struct page *page;
1986*4882a593Smuzhiyun 	unsigned ret = 0;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	if (unlikely(!nr_pages))
1989*4882a593Smuzhiyun 		return 0;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	rcu_read_lock();
1992*4882a593Smuzhiyun 	xas_for_each(&xas, page, end) {
1993*4882a593Smuzhiyun 		if (xas_retry(&xas, page))
1994*4882a593Smuzhiyun 			continue;
1995*4882a593Smuzhiyun 		/* Skip over shadow, swap and DAX entries */
1996*4882a593Smuzhiyun 		if (xa_is_value(page))
1997*4882a593Smuzhiyun 			continue;
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 		if (!page_cache_get_speculative(page))
2000*4882a593Smuzhiyun 			goto retry;
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 		/* Has the page moved or been split? */
2003*4882a593Smuzhiyun 		if (unlikely(page != xas_reload(&xas)))
2004*4882a593Smuzhiyun 			goto put_page;
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 		pages[ret] = find_subpage(page, xas.xa_index);
2007*4882a593Smuzhiyun 		if (++ret == nr_pages) {
2008*4882a593Smuzhiyun 			*start = xas.xa_index + 1;
2009*4882a593Smuzhiyun 			goto out;
2010*4882a593Smuzhiyun 		}
2011*4882a593Smuzhiyun 		continue;
2012*4882a593Smuzhiyun put_page:
2013*4882a593Smuzhiyun 		put_page(page);
2014*4882a593Smuzhiyun retry:
2015*4882a593Smuzhiyun 		xas_reset(&xas);
2016*4882a593Smuzhiyun 	}
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	/*
2019*4882a593Smuzhiyun 	 * We come here when there is no page beyond @end. We take care to not
2020*4882a593Smuzhiyun 	 * overflow the index @start as it confuses some of the callers. This
2021*4882a593Smuzhiyun 	 * breaks the iteration when there is a page at index -1 but that is
2022*4882a593Smuzhiyun 	 * already broken anyway.
2023*4882a593Smuzhiyun 	 */
2024*4882a593Smuzhiyun 	if (end == (pgoff_t)-1)
2025*4882a593Smuzhiyun 		*start = (pgoff_t)-1;
2026*4882a593Smuzhiyun 	else
2027*4882a593Smuzhiyun 		*start = end + 1;
2028*4882a593Smuzhiyun out:
2029*4882a593Smuzhiyun 	rcu_read_unlock();
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 	return ret;
2032*4882a593Smuzhiyun }
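
/*
 * Usage sketch (hypothetical): batch-walk every cached page in
 * [start, end], dropping the reference the lookup took on each page.
 * The batch size of 15 echoes PAGEVEC_SIZE-style callers but is an
 * arbitrary choice here.
 */
static void walk_cached_range_sketch(struct address_space *mapping,
				     pgoff_t start, pgoff_t end)
{
	struct page *pages[15];
	unsigned int i, nr;

	while ((nr = find_get_pages_range(mapping, &start, end,
					  ARRAY_SIZE(pages), pages))) {
		for (i = 0; i < nr; i++)
			put_page(pages[i]);
	}
}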
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun /**
2035*4882a593Smuzhiyun  * find_get_pages_contig - gang contiguous pagecache lookup
2036*4882a593Smuzhiyun  * @mapping:	The address_space to search
2037*4882a593Smuzhiyun  * @index:	The starting page index
2038*4882a593Smuzhiyun  * @nr_pages:	The maximum number of pages
2039*4882a593Smuzhiyun  * @pages:	Where the resulting pages are placed
2040*4882a593Smuzhiyun  *
2041*4882a593Smuzhiyun  * find_get_pages_contig() works exactly like find_get_pages(), except
2042*4882a593Smuzhiyun  * that the returned number of pages are guaranteed to be contiguous.
2043*4882a593Smuzhiyun  *
2044*4882a593Smuzhiyun  * Return: the number of pages which were found.
2045*4882a593Smuzhiyun  */
2046*4882a593Smuzhiyun unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
2047*4882a593Smuzhiyun 			       unsigned int nr_pages, struct page **pages)
2048*4882a593Smuzhiyun {
2049*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, index);
2050*4882a593Smuzhiyun 	struct page *page;
2051*4882a593Smuzhiyun 	unsigned int ret = 0;
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	if (unlikely(!nr_pages))
2054*4882a593Smuzhiyun 		return 0;
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	rcu_read_lock();
2057*4882a593Smuzhiyun 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
2058*4882a593Smuzhiyun 		if (xas_retry(&xas, page))
2059*4882a593Smuzhiyun 			continue;
2060*4882a593Smuzhiyun 		/*
2061*4882a593Smuzhiyun 		 * If the entry has been swapped out, we can stop looking.
2062*4882a593Smuzhiyun 		 * No current caller is looking for DAX entries.
2063*4882a593Smuzhiyun 		 */
2064*4882a593Smuzhiyun 		if (xa_is_value(page))
2065*4882a593Smuzhiyun 			break;
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 		if (!page_cache_get_speculative(page))
2068*4882a593Smuzhiyun 			goto retry;
2069*4882a593Smuzhiyun 
2070*4882a593Smuzhiyun 		/* Has the page moved or been split? */
2071*4882a593Smuzhiyun 		if (unlikely(page != xas_reload(&xas)))
2072*4882a593Smuzhiyun 			goto put_page;
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 		pages[ret] = find_subpage(page, xas.xa_index);
2075*4882a593Smuzhiyun 		if (++ret == nr_pages)
2076*4882a593Smuzhiyun 			break;
2077*4882a593Smuzhiyun 		continue;
2078*4882a593Smuzhiyun put_page:
2079*4882a593Smuzhiyun 		put_page(page);
2080*4882a593Smuzhiyun retry:
2081*4882a593Smuzhiyun 		xas_reset(&xas);
2082*4882a593Smuzhiyun 	}
2083*4882a593Smuzhiyun 	rcu_read_unlock();
2084*4882a593Smuzhiyun 	return ret;
2085*4882a593Smuzhiyun }
2086*4882a593Smuzhiyun EXPORT_SYMBOL(find_get_pages_contig);
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun /**
2089*4882a593Smuzhiyun  * find_get_pages_range_tag - find and return pages in given range matching @tag
2090*4882a593Smuzhiyun  * @mapping:	the address_space to search
2091*4882a593Smuzhiyun  * @index:	the starting page index
2092*4882a593Smuzhiyun  * @end:	The final page index (inclusive)
2093*4882a593Smuzhiyun  * @tag:	the tag index
2094*4882a593Smuzhiyun  * @nr_pages:	the maximum number of pages
2095*4882a593Smuzhiyun  * @pages:	where the resulting pages are placed
2096*4882a593Smuzhiyun  *
2097*4882a593Smuzhiyun  * Like find_get_pages, except we only return pages which are tagged with
2098*4882a593Smuzhiyun  * @tag.   We update @index to index the next page for the traversal.
2099*4882a593Smuzhiyun  *
2100*4882a593Smuzhiyun  * Return: the number of pages which were found.
2101*4882a593Smuzhiyun  */
2102*4882a593Smuzhiyun unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
2103*4882a593Smuzhiyun 			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
2104*4882a593Smuzhiyun 			struct page **pages)
2105*4882a593Smuzhiyun {
2106*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, *index);
2107*4882a593Smuzhiyun 	struct page *page;
2108*4882a593Smuzhiyun 	unsigned ret = 0;
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	if (unlikely(!nr_pages))
2111*4882a593Smuzhiyun 		return 0;
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	rcu_read_lock();
2114*4882a593Smuzhiyun 	xas_for_each_marked(&xas, page, end, tag) {
2115*4882a593Smuzhiyun 		if (xas_retry(&xas, page))
2116*4882a593Smuzhiyun 			continue;
2117*4882a593Smuzhiyun 		/*
2118*4882a593Smuzhiyun 		 * Shadow entries should never be tagged, but this iteration
2119*4882a593Smuzhiyun 		 * is lockless so there is a window for page reclaim to evict
2120*4882a593Smuzhiyun 		 * a page we saw tagged.  Skip over it.
2121*4882a593Smuzhiyun 		 */
2122*4882a593Smuzhiyun 		if (xa_is_value(page))
2123*4882a593Smuzhiyun 			continue;
2124*4882a593Smuzhiyun 
2125*4882a593Smuzhiyun 		if (!page_cache_get_speculative(page))
2126*4882a593Smuzhiyun 			goto retry;
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun 		/* Has the page moved or been split? */
2129*4882a593Smuzhiyun 		if (unlikely(page != xas_reload(&xas)))
2130*4882a593Smuzhiyun 			goto put_page;
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 		pages[ret] = find_subpage(page, xas.xa_index);
2133*4882a593Smuzhiyun 		if (++ret == nr_pages) {
2134*4882a593Smuzhiyun 			*index = xas.xa_index + 1;
2135*4882a593Smuzhiyun 			goto out;
2136*4882a593Smuzhiyun 		}
2137*4882a593Smuzhiyun 		continue;
2138*4882a593Smuzhiyun put_page:
2139*4882a593Smuzhiyun 		put_page(page);
2140*4882a593Smuzhiyun retry:
2141*4882a593Smuzhiyun 		xas_reset(&xas);
2142*4882a593Smuzhiyun 	}
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	/*
2145*4882a593Smuzhiyun 	 * We come here when we got to @end. We take care not to overflow
2146*4882a593Smuzhiyun 	 * @index as it confuses some of the callers. This breaks the
2147*4882a593Smuzhiyun 	 * iteration when there is a page at index -1 but that is already
2148*4882a593Smuzhiyun 	 * broken anyway.
2149*4882a593Smuzhiyun 	 */
2150*4882a593Smuzhiyun 	if (end == (pgoff_t)-1)
2151*4882a593Smuzhiyun 		*index = (pgoff_t)-1;
2152*4882a593Smuzhiyun 	else
2153*4882a593Smuzhiyun 		*index = end + 1;
2154*4882a593Smuzhiyun out:
2155*4882a593Smuzhiyun 	rcu_read_unlock();
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 	return ret;
2158*4882a593Smuzhiyun }
2159*4882a593Smuzhiyun EXPORT_SYMBOL(find_get_pages_range_tag);
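
/*
 * Hedged usage sketch (not part of the original file; the helper name
 * example_walk_dirty_pages is invented): this is the calling convention
 * that writeback helpers such as pagevec_lookup_range_tag() build on.
 * Each returned page carries a reference, and @index is advanced past
 * the last page returned, so the loop can simply call again.
 */
static void example_walk_dirty_pages(struct address_space *mapping,
				     pgoff_t start, pgoff_t end)
{
	struct page *pages[PAGEVEC_SIZE];
	pgoff_t index = start;
	unsigned int i, nr;

	while ((nr = find_get_pages_range_tag(mapping, &index, end,
					      PAGECACHE_TAG_DIRTY,
					      PAGEVEC_SIZE, pages)) != 0) {
		for (i = 0; i < nr; i++)
			put_page(pages[i]);	/* drop the gotten reference */
	}
}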
2160*4882a593Smuzhiyun 
2161*4882a593Smuzhiyun /*
2162*4882a593Smuzhiyun  * CD/DVDs are error prone. When a medium error occurs, the driver may fail
2163*4882a593Smuzhiyun  * a _large_ part of the i/o request. Imagine the worst scenario:
2164*4882a593Smuzhiyun  *
2165*4882a593Smuzhiyun  *      ---R__________________________________________B__________
2166*4882a593Smuzhiyun  *         ^ reading here                             ^ bad block (assume 4k)
2167*4882a593Smuzhiyun  *
2168*4882a593Smuzhiyun  * read(R) => miss => readahead(R...B) => media error => frustrating retries
2169*4882a593Smuzhiyun  * => failing the whole request => read(R) => read(R+1) =>
2170*4882a593Smuzhiyun  * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
2171*4882a593Smuzhiyun  * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
2172*4882a593Smuzhiyun  * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
2173*4882a593Smuzhiyun  *
2174*4882a593Smuzhiyun  * It is going insane. Fix it by quickly scaling down the readahead size.
2175*4882a593Smuzhiyun  */
2176*4882a593Smuzhiyun static void shrink_readahead_size_eio(struct file_ra_state *ra)
2177*4882a593Smuzhiyun {
2178*4882a593Smuzhiyun 	ra->ra_pages /= 4;
2179*4882a593Smuzhiyun }
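
/*
 * Illustrative numbers (assuming the common 128KB default window on 4KB
 * pages, i.e. ra_pages == 32): successive media errors quarter the
 * window 32 -> 8 -> 2 -> 0, so a persistently failing device quickly
 * stops triggering readahead altogether.
 */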
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun /**
2182*4882a593Smuzhiyun  * generic_file_buffered_read - generic file read routine
2183*4882a593Smuzhiyun  * @iocb:	the iocb to read
2184*4882a593Smuzhiyun  * @iter:	data destination
2185*4882a593Smuzhiyun  * @written:	already copied
2186*4882a593Smuzhiyun  *
2187*4882a593Smuzhiyun  * This is a generic file read routine, and uses the
2188*4882a593Smuzhiyun  * mapping->a_ops->readpage() function for the actual low-level stuff.
2189*4882a593Smuzhiyun  *
2190*4882a593Smuzhiyun  * This is really ugly. But the goto's actually try to clarify some
2191*4882a593Smuzhiyun  * of the logic when it comes to error handling etc.
2192*4882a593Smuzhiyun  *
2193*4882a593Smuzhiyun  * Return:
2194*4882a593Smuzhiyun  * * total number of bytes copied, including those which were already @written
2195*4882a593Smuzhiyun  * * negative error code if nothing was copied
2196*4882a593Smuzhiyun  */
2197*4882a593Smuzhiyun ssize_t generic_file_buffered_read(struct kiocb *iocb,
2198*4882a593Smuzhiyun 		struct iov_iter *iter, ssize_t written)
2199*4882a593Smuzhiyun {
2200*4882a593Smuzhiyun 	struct file *filp = iocb->ki_filp;
2201*4882a593Smuzhiyun 	struct address_space *mapping = filp->f_mapping;
2202*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2203*4882a593Smuzhiyun 	struct file_ra_state *ra = &filp->f_ra;
2204*4882a593Smuzhiyun 	loff_t *ppos = &iocb->ki_pos;
2205*4882a593Smuzhiyun 	pgoff_t index;
2206*4882a593Smuzhiyun 	pgoff_t last_index;
2207*4882a593Smuzhiyun 	pgoff_t prev_index;
2208*4882a593Smuzhiyun 	unsigned long offset;      /* offset into pagecache page */
2209*4882a593Smuzhiyun 	unsigned int prev_offset;
2210*4882a593Smuzhiyun 	int error = 0;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	if (unlikely(*ppos >= inode->i_sb->s_maxbytes))
2213*4882a593Smuzhiyun 		return 0;
2214*4882a593Smuzhiyun 	iov_iter_truncate(iter, inode->i_sb->s_maxbytes);
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	index = *ppos >> PAGE_SHIFT;
2217*4882a593Smuzhiyun 	prev_index = ra->prev_pos >> PAGE_SHIFT;
2218*4882a593Smuzhiyun 	prev_offset = ra->prev_pos & (PAGE_SIZE-1);
2219*4882a593Smuzhiyun 	last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT;
2220*4882a593Smuzhiyun 	offset = *ppos & ~PAGE_MASK;
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	/*
2223*4882a593Smuzhiyun 	 * If we've already successfully copied some data, then we
2224*4882a593Smuzhiyun 	 * can no longer safely return -EIOCBQUEUED. Hence mark
2225*4882a593Smuzhiyun 	 * an async read NOWAIT at that point.
2226*4882a593Smuzhiyun 	 */
2227*4882a593Smuzhiyun 	if (written && (iocb->ki_flags & IOCB_WAITQ))
2228*4882a593Smuzhiyun 		iocb->ki_flags |= IOCB_NOWAIT;
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 	for (;;) {
2231*4882a593Smuzhiyun 		struct page *page;
2232*4882a593Smuzhiyun 		pgoff_t end_index;
2233*4882a593Smuzhiyun 		loff_t isize;
2234*4882a593Smuzhiyun 		unsigned long nr, ret;
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 		cond_resched();
2237*4882a593Smuzhiyun find_page:
2238*4882a593Smuzhiyun 		if (fatal_signal_pending(current)) {
2239*4882a593Smuzhiyun 			error = -EINTR;
2240*4882a593Smuzhiyun 			goto out;
2241*4882a593Smuzhiyun 		}
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 		page = find_get_page(mapping, index);
2244*4882a593Smuzhiyun 		if (!page) {
2245*4882a593Smuzhiyun 			if (iocb->ki_flags & IOCB_NOIO)
2246*4882a593Smuzhiyun 				goto would_block;
2247*4882a593Smuzhiyun 			page_cache_sync_readahead(mapping,
2248*4882a593Smuzhiyun 					ra, filp,
2249*4882a593Smuzhiyun 					index, last_index - index);
2250*4882a593Smuzhiyun 			page = find_get_page(mapping, index);
2251*4882a593Smuzhiyun 			if (unlikely(page == NULL))
2252*4882a593Smuzhiyun 				goto no_cached_page;
2253*4882a593Smuzhiyun 		}
2254*4882a593Smuzhiyun 		if (PageReadahead(page)) {
2255*4882a593Smuzhiyun 			if (iocb->ki_flags & IOCB_NOIO) {
2256*4882a593Smuzhiyun 				put_page(page);
2257*4882a593Smuzhiyun 				goto out;
2258*4882a593Smuzhiyun 			}
2259*4882a593Smuzhiyun 			page_cache_async_readahead(mapping,
2260*4882a593Smuzhiyun 					ra, filp, page,
2261*4882a593Smuzhiyun 					index, last_index - index);
2262*4882a593Smuzhiyun 		}
2263*4882a593Smuzhiyun 		if (!PageUptodate(page)) {
2264*4882a593Smuzhiyun 			/*
2265*4882a593Smuzhiyun 			 * See comment in do_read_cache_page on why
2266*4882a593Smuzhiyun 			 * wait_on_page_locked is used to avoid unnecessarily
2267*4882a593Smuzhiyun 			 * serialisations and why it's safe.
2268*4882a593Smuzhiyun 			 */
2269*4882a593Smuzhiyun 			if (iocb->ki_flags & IOCB_WAITQ) {
2270*4882a593Smuzhiyun 				if (written) {
2271*4882a593Smuzhiyun 					put_page(page);
2272*4882a593Smuzhiyun 					goto out;
2273*4882a593Smuzhiyun 				}
2274*4882a593Smuzhiyun 				error = wait_on_page_locked_async(page,
2275*4882a593Smuzhiyun 								iocb->ki_waitq);
2276*4882a593Smuzhiyun 			} else {
2277*4882a593Smuzhiyun 				if (iocb->ki_flags & IOCB_NOWAIT) {
2278*4882a593Smuzhiyun 					put_page(page);
2279*4882a593Smuzhiyun 					goto would_block;
2280*4882a593Smuzhiyun 				}
2281*4882a593Smuzhiyun 				error = wait_on_page_locked_killable(page);
2282*4882a593Smuzhiyun 			}
2283*4882a593Smuzhiyun 			if (unlikely(error))
2284*4882a593Smuzhiyun 				goto readpage_error;
2285*4882a593Smuzhiyun 			if (PageUptodate(page))
2286*4882a593Smuzhiyun 				goto page_ok;
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun 			if (inode->i_blkbits == PAGE_SHIFT ||
2289*4882a593Smuzhiyun 					!mapping->a_ops->is_partially_uptodate)
2290*4882a593Smuzhiyun 				goto page_not_up_to_date;
2291*4882a593Smuzhiyun 			/* pipes can't handle partially uptodate pages */
2292*4882a593Smuzhiyun 			if (unlikely(iov_iter_is_pipe(iter)))
2293*4882a593Smuzhiyun 				goto page_not_up_to_date;
2294*4882a593Smuzhiyun 			if (!trylock_page(page))
2295*4882a593Smuzhiyun 				goto page_not_up_to_date;
2296*4882a593Smuzhiyun 			/* Did it get truncated before we got the lock? */
2297*4882a593Smuzhiyun 			if (!page->mapping)
2298*4882a593Smuzhiyun 				goto page_not_up_to_date_locked;
2299*4882a593Smuzhiyun 			if (!mapping->a_ops->is_partially_uptodate(page,
2300*4882a593Smuzhiyun 							offset, iter->count))
2301*4882a593Smuzhiyun 				goto page_not_up_to_date_locked;
2302*4882a593Smuzhiyun 			unlock_page(page);
2303*4882a593Smuzhiyun 		}
2304*4882a593Smuzhiyun page_ok:
2305*4882a593Smuzhiyun 		/*
2306*4882a593Smuzhiyun 		 * i_size must be checked after we know the page is Uptodate.
2307*4882a593Smuzhiyun 		 *
2308*4882a593Smuzhiyun 		 * Checking i_size after the check allows us to calculate
2309*4882a593Smuzhiyun 		 * the correct value for "nr", which means the zero-filled
2310*4882a593Smuzhiyun 		 * part of the page is not copied back to userspace (unless
2311*4882a593Smuzhiyun 		 * another truncate extends the file - this is desired though).
2312*4882a593Smuzhiyun 		 */
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun 		isize = i_size_read(inode);
2315*4882a593Smuzhiyun 		end_index = (isize - 1) >> PAGE_SHIFT;
2316*4882a593Smuzhiyun 		if (unlikely(!isize || index > end_index)) {
2317*4882a593Smuzhiyun 			put_page(page);
2318*4882a593Smuzhiyun 			goto out;
2319*4882a593Smuzhiyun 		}
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 		/* nr is the maximum number of bytes to copy from this page */
2322*4882a593Smuzhiyun 		nr = PAGE_SIZE;
2323*4882a593Smuzhiyun 		if (index == end_index) {
2324*4882a593Smuzhiyun 			nr = ((isize - 1) & ~PAGE_MASK) + 1;
2325*4882a593Smuzhiyun 			if (nr <= offset) {
2326*4882a593Smuzhiyun 				put_page(page);
2327*4882a593Smuzhiyun 				goto out;
2328*4882a593Smuzhiyun 			}
2329*4882a593Smuzhiyun 		}
2330*4882a593Smuzhiyun 		nr = nr - offset;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 		/* If users can be writing to this page using arbitrary
2333*4882a593Smuzhiyun 		 * virtual addresses, take care about potential aliasing
2334*4882a593Smuzhiyun 		 * before reading the page on the kernel side.
2335*4882a593Smuzhiyun 		 */
2336*4882a593Smuzhiyun 		if (mapping_writably_mapped(mapping))
2337*4882a593Smuzhiyun 			flush_dcache_page(page);
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 		/*
2340*4882a593Smuzhiyun 		 * When a sequential read accesses a page several times,
2341*4882a593Smuzhiyun 		 * only mark it as accessed the first time.
2342*4882a593Smuzhiyun 		 */
2343*4882a593Smuzhiyun 		if (prev_index != index || offset != prev_offset)
2344*4882a593Smuzhiyun 			mark_page_accessed(page);
2345*4882a593Smuzhiyun 		prev_index = index;
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 		/*
2348*4882a593Smuzhiyun 		 * Ok, we have the page, and it's up-to-date, so
2349*4882a593Smuzhiyun 		 * now we can copy it to user space...
2350*4882a593Smuzhiyun 		 */
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 		ret = copy_page_to_iter(page, offset, nr, iter);
2353*4882a593Smuzhiyun 		offset += ret;
2354*4882a593Smuzhiyun 		index += offset >> PAGE_SHIFT;
2355*4882a593Smuzhiyun 		offset &= ~PAGE_MASK;
2356*4882a593Smuzhiyun 		prev_offset = offset;
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 		put_page(page);
2359*4882a593Smuzhiyun 		written += ret;
2360*4882a593Smuzhiyun 		if (!iov_iter_count(iter))
2361*4882a593Smuzhiyun 			goto out;
2362*4882a593Smuzhiyun 		if (ret < nr) {
2363*4882a593Smuzhiyun 			error = -EFAULT;
2364*4882a593Smuzhiyun 			goto out;
2365*4882a593Smuzhiyun 		}
2366*4882a593Smuzhiyun 		continue;
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun page_not_up_to_date:
2369*4882a593Smuzhiyun 		/* Get exclusive access to the page ... */
2370*4882a593Smuzhiyun 		if (iocb->ki_flags & IOCB_WAITQ) {
2371*4882a593Smuzhiyun 			if (written) {
2372*4882a593Smuzhiyun 				put_page(page);
2373*4882a593Smuzhiyun 				goto out;
2374*4882a593Smuzhiyun 			}
2375*4882a593Smuzhiyun 			error = lock_page_async(page, iocb->ki_waitq);
2376*4882a593Smuzhiyun 		} else {
2377*4882a593Smuzhiyun 			error = lock_page_killable(page);
2378*4882a593Smuzhiyun 		}
2379*4882a593Smuzhiyun 		if (unlikely(error))
2380*4882a593Smuzhiyun 			goto readpage_error;
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun page_not_up_to_date_locked:
2383*4882a593Smuzhiyun 		/* Did it get truncated before we got the lock? */
2384*4882a593Smuzhiyun 		if (!page->mapping) {
2385*4882a593Smuzhiyun 			unlock_page(page);
2386*4882a593Smuzhiyun 			put_page(page);
2387*4882a593Smuzhiyun 			continue;
2388*4882a593Smuzhiyun 		}
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 		/* Did somebody else fill it already? */
2391*4882a593Smuzhiyun 		if (PageUptodate(page)) {
2392*4882a593Smuzhiyun 			unlock_page(page);
2393*4882a593Smuzhiyun 			goto page_ok;
2394*4882a593Smuzhiyun 		}
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun readpage:
2397*4882a593Smuzhiyun 		if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT)) {
2398*4882a593Smuzhiyun 			unlock_page(page);
2399*4882a593Smuzhiyun 			put_page(page);
2400*4882a593Smuzhiyun 			goto would_block;
2401*4882a593Smuzhiyun 		}
2402*4882a593Smuzhiyun 		/*
2403*4882a593Smuzhiyun 		 * A previous I/O error may have been due to temporary
2404*4882a593Smuzhiyun 		 * failures, e.g. multipath errors.
2405*4882a593Smuzhiyun 		 * PG_error will be set again if readpage fails.
2406*4882a593Smuzhiyun 		 */
2407*4882a593Smuzhiyun 		ClearPageError(page);
2408*4882a593Smuzhiyun 		/* Start the actual read. The read will unlock the page. */
2409*4882a593Smuzhiyun 		error = mapping->a_ops->readpage(filp, page);
2410*4882a593Smuzhiyun 
2411*4882a593Smuzhiyun 		if (unlikely(error)) {
2412*4882a593Smuzhiyun 			if (error == AOP_TRUNCATED_PAGE) {
2413*4882a593Smuzhiyun 				put_page(page);
2414*4882a593Smuzhiyun 				error = 0;
2415*4882a593Smuzhiyun 				goto find_page;
2416*4882a593Smuzhiyun 			}
2417*4882a593Smuzhiyun 			goto readpage_error;
2418*4882a593Smuzhiyun 		}
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun 		if (!PageUptodate(page)) {
2421*4882a593Smuzhiyun 			if (iocb->ki_flags & IOCB_WAITQ) {
2422*4882a593Smuzhiyun 				if (written) {
2423*4882a593Smuzhiyun 					put_page(page);
2424*4882a593Smuzhiyun 					goto out;
2425*4882a593Smuzhiyun 				}
2426*4882a593Smuzhiyun 				error = lock_page_async(page, iocb->ki_waitq);
2427*4882a593Smuzhiyun 			} else {
2428*4882a593Smuzhiyun 				error = lock_page_killable(page);
2429*4882a593Smuzhiyun 			}
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 			if (unlikely(error))
2432*4882a593Smuzhiyun 				goto readpage_error;
2433*4882a593Smuzhiyun 			if (!PageUptodate(page)) {
2434*4882a593Smuzhiyun 				if (page->mapping == NULL) {
2435*4882a593Smuzhiyun 					/*
2436*4882a593Smuzhiyun 					 * invalidate_mapping_pages got it
2437*4882a593Smuzhiyun 					 */
2438*4882a593Smuzhiyun 					unlock_page(page);
2439*4882a593Smuzhiyun 					put_page(page);
2440*4882a593Smuzhiyun 					goto find_page;
2441*4882a593Smuzhiyun 				}
2442*4882a593Smuzhiyun 				unlock_page(page);
2443*4882a593Smuzhiyun 				shrink_readahead_size_eio(ra);
2444*4882a593Smuzhiyun 				error = -EIO;
2445*4882a593Smuzhiyun 				goto readpage_error;
2446*4882a593Smuzhiyun 			}
2447*4882a593Smuzhiyun 			unlock_page(page);
2448*4882a593Smuzhiyun 		}
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 		goto page_ok;
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun readpage_error:
2453*4882a593Smuzhiyun 		/* UHHUH! A synchronous read error occurred. Report it */
2454*4882a593Smuzhiyun 		put_page(page);
2455*4882a593Smuzhiyun 		goto out;
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun no_cached_page:
2458*4882a593Smuzhiyun 		/*
2459*4882a593Smuzhiyun 		 * Ok, it wasn't cached, so we need to create a new
2460*4882a593Smuzhiyun 		 * page.
2461*4882a593Smuzhiyun 		 */
2462*4882a593Smuzhiyun 		page = page_cache_alloc(mapping);
2463*4882a593Smuzhiyun 		if (!page) {
2464*4882a593Smuzhiyun 			error = -ENOMEM;
2465*4882a593Smuzhiyun 			goto out;
2466*4882a593Smuzhiyun 		}
2467*4882a593Smuzhiyun 		error = add_to_page_cache_lru(page, mapping, index,
2468*4882a593Smuzhiyun 				mapping_gfp_constraint(mapping, GFP_KERNEL));
2469*4882a593Smuzhiyun 		if (error) {
2470*4882a593Smuzhiyun 			put_page(page);
2471*4882a593Smuzhiyun 			if (error == -EEXIST) {
2472*4882a593Smuzhiyun 				error = 0;
2473*4882a593Smuzhiyun 				goto find_page;
2474*4882a593Smuzhiyun 			}
2475*4882a593Smuzhiyun 			goto out;
2476*4882a593Smuzhiyun 		}
2477*4882a593Smuzhiyun 		goto readpage;
2478*4882a593Smuzhiyun 	}
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun would_block:
2481*4882a593Smuzhiyun 	error = -EAGAIN;
2482*4882a593Smuzhiyun out:
2483*4882a593Smuzhiyun 	ra->prev_pos = prev_index;
2484*4882a593Smuzhiyun 	ra->prev_pos <<= PAGE_SHIFT;
2485*4882a593Smuzhiyun 	ra->prev_pos |= prev_offset;
2486*4882a593Smuzhiyun 
2487*4882a593Smuzhiyun 	*ppos = ((loff_t)index << PAGE_SHIFT) + offset;
2488*4882a593Smuzhiyun 	file_accessed(filp);
2489*4882a593Smuzhiyun 	return written ? written : error;
2490*4882a593Smuzhiyun }
2491*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(generic_file_buffered_read);
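
/*
 * Hedged sketch (not from this file; example_fs_read_iter and its use of
 * the shared inode lock are illustrative, modelled loosely on callers
 * like XFS): a filesystem's ->read_iter may drive the buffered path
 * directly when it needs its own locking around it.
 */
static ssize_t example_fs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock_shared(inode);	/* e.g. serialise against truncate */
	ret = generic_file_buffered_read(iocb, to, 0);
	inode_unlock_shared(inode);
	return ret;
}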
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun /**
2494*4882a593Smuzhiyun  * generic_file_read_iter - generic filesystem read routine
2495*4882a593Smuzhiyun  * @iocb:	kernel I/O control block
2496*4882a593Smuzhiyun  * @iter:	destination for the data read
2497*4882a593Smuzhiyun  *
2498*4882a593Smuzhiyun  * This is the "read_iter()" routine for all filesystems
2499*4882a593Smuzhiyun  * that can use the page cache directly.
2500*4882a593Smuzhiyun  *
2501*4882a593Smuzhiyun  * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
2502*4882a593Smuzhiyun  * be returned when no data can be read without waiting for I/O requests
2503*4882a593Smuzhiyun  * to complete; it doesn't prevent readahead.
2504*4882a593Smuzhiyun  *
2505*4882a593Smuzhiyun  * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
2506*4882a593Smuzhiyun  * requests shall be made for the read or for readahead.  When no data
2507*4882a593Smuzhiyun  * can be read, -EAGAIN shall be returned.  When readahead would be
2508*4882a593Smuzhiyun  * triggered, a partial, possibly empty read shall be returned.
2509*4882a593Smuzhiyun  *
2510*4882a593Smuzhiyun  * Return:
2511*4882a593Smuzhiyun  * * number of bytes copied, even for partial reads
2512*4882a593Smuzhiyun  * * negative error code (or 0 if IOCB_NOIO) if nothing was read
2513*4882a593Smuzhiyun  */
2514*4882a593Smuzhiyun ssize_t
2515*4882a593Smuzhiyun generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
2516*4882a593Smuzhiyun {
2517*4882a593Smuzhiyun 	size_t count = iov_iter_count(iter);
2518*4882a593Smuzhiyun 	ssize_t retval = 0;
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	if (!count)
2521*4882a593Smuzhiyun 		goto out; /* skip atime */
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	if (iocb->ki_flags & IOCB_DIRECT) {
2524*4882a593Smuzhiyun 		struct file *file = iocb->ki_filp;
2525*4882a593Smuzhiyun 		struct address_space *mapping = file->f_mapping;
2526*4882a593Smuzhiyun 		struct inode *inode = mapping->host;
2527*4882a593Smuzhiyun 		loff_t size;
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 		size = i_size_read(inode);
2530*4882a593Smuzhiyun 		if (iocb->ki_flags & IOCB_NOWAIT) {
2531*4882a593Smuzhiyun 			if (filemap_range_has_page(mapping, iocb->ki_pos,
2532*4882a593Smuzhiyun 						   iocb->ki_pos + count - 1))
2533*4882a593Smuzhiyun 				return -EAGAIN;
2534*4882a593Smuzhiyun 		} else {
2535*4882a593Smuzhiyun 			retval = filemap_write_and_wait_range(mapping,
2536*4882a593Smuzhiyun 						iocb->ki_pos,
2537*4882a593Smuzhiyun 					        iocb->ki_pos + count - 1);
2538*4882a593Smuzhiyun 			if (retval < 0)
2539*4882a593Smuzhiyun 				goto out;
2540*4882a593Smuzhiyun 		}
2541*4882a593Smuzhiyun 
2542*4882a593Smuzhiyun 		file_accessed(file);
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 		retval = mapping->a_ops->direct_IO(iocb, iter);
2545*4882a593Smuzhiyun 		if (retval >= 0) {
2546*4882a593Smuzhiyun 			iocb->ki_pos += retval;
2547*4882a593Smuzhiyun 			count -= retval;
2548*4882a593Smuzhiyun 		}
2549*4882a593Smuzhiyun 		iov_iter_revert(iter, count - iov_iter_count(iter));
2550*4882a593Smuzhiyun 
2551*4882a593Smuzhiyun 		/*
2552*4882a593Smuzhiyun 		 * Btrfs can have a short DIO read if we encounter
2553*4882a593Smuzhiyun 		 * compressed extents, so if there was an error, or if
2554*4882a593Smuzhiyun 		 * we've already read everything we wanted to, or if
2555*4882a593Smuzhiyun 		 * there was a short read because we hit EOF, go ahead
2556*4882a593Smuzhiyun 		 * and return.  Otherwise fall through to buffered I/O for
2557*4882a593Smuzhiyun 		 * the rest of the read.  Buffered reads will not work for
2558*4882a593Smuzhiyun 		 * DAX files, so don't bother trying.
2559*4882a593Smuzhiyun 		 */
2560*4882a593Smuzhiyun 		if (retval < 0 || !count || iocb->ki_pos >= size ||
2561*4882a593Smuzhiyun 		    IS_DAX(inode))
2562*4882a593Smuzhiyun 			goto out;
2563*4882a593Smuzhiyun 	}
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun 	retval = generic_file_buffered_read(iocb, iter, retval);
2566*4882a593Smuzhiyun out:
2567*4882a593Smuzhiyun 	return retval;
2568*4882a593Smuzhiyun }
2569*4882a593Smuzhiyun EXPORT_SYMBOL(generic_file_read_iter);
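
/*
 * Example wiring (sketch; example_fops is not part of this file): a
 * filesystem that reads purely through the page cache just points its
 * file_operations at the generic helpers.
 */
static const struct file_operations example_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.mmap		= generic_file_mmap,
};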
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun #ifdef CONFIG_MMU
2572*4882a593Smuzhiyun #define MMAP_LOTSAMISS  (100)
2573*4882a593Smuzhiyun /*
2574*4882a593Smuzhiyun  * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2575*4882a593Smuzhiyun  * @vmf - the vm_fault for this fault.
2576*4882a593Smuzhiyun  * @page - the page to lock.
2577*4882a593Smuzhiyun  * @fpin - the pointer to the file we may pin (or is already pinned).
2578*4882a593Smuzhiyun  *
2579*4882a593Smuzhiyun  * This works similarly to lock_page_or_retry in that it can drop the mmap_lock.
2580*4882a593Smuzhiyun  * It differs in that it returns 1 with the page actually locked, or 0 if it
2581*4882a593Smuzhiyun  * couldn't lock the page.  If we did have to drop the mmap_lock then fpin
2582*4882a593Smuzhiyun  * will point to the pinned file and needs to be fput()'ed at a later point.
2583*4882a593Smuzhiyun  */
2584*4882a593Smuzhiyun static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
2585*4882a593Smuzhiyun 				     struct file **fpin)
2586*4882a593Smuzhiyun {
2587*4882a593Smuzhiyun 	if (trylock_page(page))
2588*4882a593Smuzhiyun 		return 1;
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 	/*
2591*4882a593Smuzhiyun 	 * NOTE! This will make us return with VM_FAULT_RETRY, but with
2592*4882a593Smuzhiyun 	 * the mmap_lock still held. That's how FAULT_FLAG_RETRY_NOWAIT
2593*4882a593Smuzhiyun 	 * is supposed to work. We have way too many special cases..
2594*4882a593Smuzhiyun 	 */
2595*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
2596*4882a593Smuzhiyun 		return 0;
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
2599*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
2600*4882a593Smuzhiyun 		if (__lock_page_killable(page)) {
2601*4882a593Smuzhiyun 			/*
2602*4882a593Smuzhiyun 			 * We didn't have the right flags to drop the mmap_lock,
2603*4882a593Smuzhiyun 			 * but all fault_handlers only check for fatal signals
2604*4882a593Smuzhiyun 			 * if we return VM_FAULT_RETRY, so we need to drop the
2605*4882a593Smuzhiyun 			 * mmap_lock here and return 0 if we don't have a fpin.
2606*4882a593Smuzhiyun 			 */
2607*4882a593Smuzhiyun 			if (*fpin == NULL)
2608*4882a593Smuzhiyun 				mmap_read_unlock(vmf->vma->vm_mm);
2609*4882a593Smuzhiyun 			return 0;
2610*4882a593Smuzhiyun 		}
2611*4882a593Smuzhiyun 	} else
2612*4882a593Smuzhiyun 		__lock_page(page);
2613*4882a593Smuzhiyun 	return 1;
2614*4882a593Smuzhiyun }
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun /*
2618*4882a593Smuzhiyun  * Synchronous readahead happens when we don't even find a page in the page
2619*4882a593Smuzhiyun  * cache at all.  We don't want to perform IO under the mmap lock, so if we have
2620*4882a593Smuzhiyun  * to drop it we return the file that was pinned in order to do
2621*4882a593Smuzhiyun  * that.  If we didn't pin a file then we return NULL.  The file that is
2622*4882a593Smuzhiyun  * returned needs to be fput()'ed when we're done with it.
2623*4882a593Smuzhiyun  */
2624*4882a593Smuzhiyun static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun 	struct file *file = vmf->vma->vm_file;
2627*4882a593Smuzhiyun 	struct file_ra_state *ra = &file->f_ra;
2628*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
2629*4882a593Smuzhiyun 	DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff);
2630*4882a593Smuzhiyun 	struct file *fpin = NULL;
2631*4882a593Smuzhiyun 	unsigned int mmap_miss;
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun 	/* If we don't want any read-ahead, don't bother */
2634*4882a593Smuzhiyun 	if (vmf->vma->vm_flags & VM_RAND_READ)
2635*4882a593Smuzhiyun 		return fpin;
2636*4882a593Smuzhiyun 	if (!ra->ra_pages)
2637*4882a593Smuzhiyun 		return fpin;
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 	if (vmf->vma->vm_flags & VM_SEQ_READ) {
2640*4882a593Smuzhiyun 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2641*4882a593Smuzhiyun 		page_cache_sync_ra(&ractl, ra, ra->ra_pages);
2642*4882a593Smuzhiyun 		return fpin;
2643*4882a593Smuzhiyun 	}
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 	/* Avoid banging the cache line if not needed */
2646*4882a593Smuzhiyun 	mmap_miss = READ_ONCE(ra->mmap_miss);
2647*4882a593Smuzhiyun 	if (mmap_miss < MMAP_LOTSAMISS * 10)
2648*4882a593Smuzhiyun 		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
2649*4882a593Smuzhiyun 
2650*4882a593Smuzhiyun 	/*
2651*4882a593Smuzhiyun 	 * Do we miss much more than hit in this file? If so,
2652*4882a593Smuzhiyun 	 * stop bothering with read-ahead. It will only hurt.
2653*4882a593Smuzhiyun 	 */
2654*4882a593Smuzhiyun 	if (mmap_miss > MMAP_LOTSAMISS)
2655*4882a593Smuzhiyun 		return fpin;
2656*4882a593Smuzhiyun 
2657*4882a593Smuzhiyun 	/*
2658*4882a593Smuzhiyun 	 * mmap read-around
2659*4882a593Smuzhiyun 	 */
2660*4882a593Smuzhiyun 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2661*4882a593Smuzhiyun 	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
2662*4882a593Smuzhiyun 	ra->size = ra->ra_pages;
2663*4882a593Smuzhiyun 	ra->async_size = ra->ra_pages / 4;
2664*4882a593Smuzhiyun 	ractl._index = ra->start;
2665*4882a593Smuzhiyun 	do_page_cache_ra(&ractl, ra->size, ra->async_size);
2666*4882a593Smuzhiyun 	return fpin;
2667*4882a593Smuzhiyun }
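
/*
 * Worked example for the read-around window above (illustrative
 * numbers): with ra_pages == 32 and a fault at pgoff 100, ra->start
 * becomes max(0, 100 - 16) = 84, ra->size = 32 and ra->async_size = 8,
 * so pages 84..115 are read around the fault with the readahead marker
 * placed near the tail of the window.
 */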
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun /*
2670*4882a593Smuzhiyun  * Asynchronous readahead happens when we find the page with PG_readahead set,
2671*4882a593Smuzhiyun  * so we want to possibly extend the readahead further.  We return the file that
2672*4882a593Smuzhiyun  * was pinned if we have to drop the mmap_lock in order to do IO.
2673*4882a593Smuzhiyun  */
2674*4882a593Smuzhiyun static struct file *do_async_mmap_readahead(struct vm_fault *vmf,
2675*4882a593Smuzhiyun 					    struct page *page)
2676*4882a593Smuzhiyun {
2677*4882a593Smuzhiyun 	struct file *file = vmf->vma->vm_file;
2678*4882a593Smuzhiyun 	struct file_ra_state *ra = &file->f_ra;
2679*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
2680*4882a593Smuzhiyun 	struct file *fpin = NULL;
2681*4882a593Smuzhiyun 	unsigned int mmap_miss;
2682*4882a593Smuzhiyun 	pgoff_t offset = vmf->pgoff;
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	/* If we don't want any read-ahead, don't bother */
2685*4882a593Smuzhiyun 	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
2686*4882a593Smuzhiyun 		return fpin;
2687*4882a593Smuzhiyun 	mmap_miss = READ_ONCE(ra->mmap_miss);
2688*4882a593Smuzhiyun 	if (mmap_miss)
2689*4882a593Smuzhiyun 		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
2690*4882a593Smuzhiyun 	if (PageReadahead(page)) {
2691*4882a593Smuzhiyun 		fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2692*4882a593Smuzhiyun 		page_cache_async_readahead(mapping, ra, file,
2693*4882a593Smuzhiyun 					   page, offset, ra->ra_pages);
2694*4882a593Smuzhiyun 	}
2695*4882a593Smuzhiyun 	return fpin;
2696*4882a593Smuzhiyun }
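
/*
 * Illustrative accounting across the two helpers: each cache-missing
 * fault bumps ra->mmap_miss in do_sync_mmap_readahead() (capped at
 * MMAP_LOTSAMISS * 10 = 1000) and each cache-hitting fault decrements
 * it here, so once misses outnumber hits by more than MMAP_LOTSAMISS
 * (100), read-around is skipped until the access pattern earns it back.
 */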
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun /**
2699*4882a593Smuzhiyun  * filemap_fault - read in file data for page fault handling
2700*4882a593Smuzhiyun  * @vmf:	struct vm_fault containing details of the fault
2701*4882a593Smuzhiyun  *
2702*4882a593Smuzhiyun  * filemap_fault() is invoked via the vma operations vector for a
2703*4882a593Smuzhiyun  * mapped memory region to read in file data during a page fault.
2704*4882a593Smuzhiyun  *
2705*4882a593Smuzhiyun  * The goto's are kind of ugly, but this streamlines the normal case of having
2706*4882a593Smuzhiyun  * it in the page cache, and handles the special cases reasonably without
2707*4882a593Smuzhiyun  * having a lot of duplicated code.
2708*4882a593Smuzhiyun  *
2709*4882a593Smuzhiyun  * If FAULT_FLAG_SPECULATIVE is set, this function runs with elevated vma
2710*4882a593Smuzhiyun  * refcount and with mmap lock not held.
2711*4882a593Smuzhiyun  * Otherwise, vma->vm_mm->mmap_lock must be held on entry.
2712*4882a593Smuzhiyun  *
2713*4882a593Smuzhiyun  * If our return value has VM_FAULT_RETRY set, it's because the mmap_lock
2714*4882a593Smuzhiyun  * may be dropped before doing I/O or by lock_page_maybe_drop_mmap().
2715*4882a593Smuzhiyun  *
2716*4882a593Smuzhiyun  * If our return value does not have VM_FAULT_RETRY set, the mmap_lock
2717*4882a593Smuzhiyun  * has not been released.
2718*4882a593Smuzhiyun  *
2719*4882a593Smuzhiyun  * We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.
2720*4882a593Smuzhiyun  *
2721*4882a593Smuzhiyun  * Return: bitwise-OR of %VM_FAULT_ codes.
2722*4882a593Smuzhiyun  */
2723*4882a593Smuzhiyun vm_fault_t filemap_fault(struct vm_fault *vmf)
2724*4882a593Smuzhiyun {
2725*4882a593Smuzhiyun 	int error;
2726*4882a593Smuzhiyun 	struct file *file = vmf->vma->vm_file;
2727*4882a593Smuzhiyun 	struct file *fpin = NULL;
2728*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
2729*4882a593Smuzhiyun 	struct file_ra_state *ra = &file->f_ra;
2730*4882a593Smuzhiyun 	struct inode *inode = mapping->host;
2731*4882a593Smuzhiyun 	pgoff_t offset = vmf->pgoff;
2732*4882a593Smuzhiyun 	pgoff_t max_off;
2733*4882a593Smuzhiyun 	struct page *page = NULL;
2734*4882a593Smuzhiyun 	vm_fault_t ret = 0;
2735*4882a593Smuzhiyun 	bool retry = false;
2736*4882a593Smuzhiyun 
2737*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
2738*4882a593Smuzhiyun 		page = find_get_page(mapping, offset);
2739*4882a593Smuzhiyun 		if (unlikely(!page) || unlikely(PageReadahead(page)))
2740*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 		if (!trylock_page(page))
2743*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 		if (unlikely(compound_head(page)->mapping != mapping))
2746*4882a593Smuzhiyun 			goto page_unlock;
2747*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
2748*4882a593Smuzhiyun 		if (unlikely(!PageUptodate(page)))
2749*4882a593Smuzhiyun 			goto page_unlock;
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 		max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2752*4882a593Smuzhiyun 		if (unlikely(offset >= max_off))
2753*4882a593Smuzhiyun 			goto page_unlock;
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 		/*
2756*4882a593Smuzhiyun 		 * Update readahead mmap_miss statistic.
2757*4882a593Smuzhiyun 		 *
2758*4882a593Smuzhiyun 		 * Note that we are not sure if finish_fault() will
2759*4882a593Smuzhiyun 		 * manage to complete the transaction. If it fails,
2760*4882a593Smuzhiyun 		 * we'll come back to the filemap_fault() non-speculative
2761*4882a593Smuzhiyun 		 * case, which will update mmap_miss a second time.
2762*4882a593Smuzhiyun 		 * This is not ideal, we would prefer to guarantee the
2763*4882a593Smuzhiyun 		 * update will happen exactly once.
2764*4882a593Smuzhiyun 		 */
2765*4882a593Smuzhiyun 		if (!(vmf->vma->vm_flags & VM_RAND_READ) && ra->ra_pages) {
2766*4882a593Smuzhiyun 			unsigned int mmap_miss = READ_ONCE(ra->mmap_miss);
2767*4882a593Smuzhiyun 			if (mmap_miss)
2768*4882a593Smuzhiyun 				WRITE_ONCE(ra->mmap_miss, --mmap_miss);
2769*4882a593Smuzhiyun 		}
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 		vmf->page = page;
2772*4882a593Smuzhiyun 		return VM_FAULT_LOCKED;
2773*4882a593Smuzhiyun page_unlock:
2774*4882a593Smuzhiyun 		unlock_page(page);
2775*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
2776*4882a593Smuzhiyun 	}
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2779*4882a593Smuzhiyun 	if (unlikely(offset >= max_off))
2780*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	trace_android_vh_filemap_fault_get_page(vmf, &page, &retry);
2783*4882a593Smuzhiyun 	if (unlikely(retry))
2784*4882a593Smuzhiyun 		goto out_retry;
2785*4882a593Smuzhiyun 	if (unlikely(page))
2786*4882a593Smuzhiyun 		goto page_ok;
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	/*
2789*4882a593Smuzhiyun 	 * Do we have something in the page cache already?
2790*4882a593Smuzhiyun 	 */
2791*4882a593Smuzhiyun 	page = find_get_page(mapping, offset);
2792*4882a593Smuzhiyun 	if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) {
2793*4882a593Smuzhiyun 		/*
2794*4882a593Smuzhiyun 		 * We found the page, so try async readahead before
2795*4882a593Smuzhiyun 		 * waiting for the lock.
2796*4882a593Smuzhiyun 		 */
2797*4882a593Smuzhiyun 		fpin = do_async_mmap_readahead(vmf, page);
2798*4882a593Smuzhiyun 	} else if (!page) {
2799*4882a593Smuzhiyun 		/* No page in the page cache at all */
2800*4882a593Smuzhiyun 		count_vm_event(PGMAJFAULT);
2801*4882a593Smuzhiyun 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
2802*4882a593Smuzhiyun 		ret = VM_FAULT_MAJOR;
2803*4882a593Smuzhiyun 		fpin = do_sync_mmap_readahead(vmf);
2804*4882a593Smuzhiyun retry_find:
2805*4882a593Smuzhiyun 		page = pagecache_get_page(mapping, offset,
2806*4882a593Smuzhiyun 					  FGP_CREAT|FGP_FOR_MMAP,
2807*4882a593Smuzhiyun 					  vmf->gfp_mask);
2808*4882a593Smuzhiyun 		if (!page) {
2809*4882a593Smuzhiyun 			if (fpin)
2810*4882a593Smuzhiyun 				goto out_retry;
2811*4882a593Smuzhiyun 			return VM_FAULT_OOM;
2812*4882a593Smuzhiyun 		}
2813*4882a593Smuzhiyun 	}
2814*4882a593Smuzhiyun 
2815*4882a593Smuzhiyun 	if (!lock_page_maybe_drop_mmap(vmf, page, &fpin))
2816*4882a593Smuzhiyun 		goto out_retry;
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	/* Did it get truncated? */
2819*4882a593Smuzhiyun 	if (unlikely(compound_head(page)->mapping != mapping)) {
2820*4882a593Smuzhiyun 		unlock_page(page);
2821*4882a593Smuzhiyun 		put_page(page);
2822*4882a593Smuzhiyun 		goto retry_find;
2823*4882a593Smuzhiyun 	}
2824*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page);
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	/*
2827*4882a593Smuzhiyun 	 * We have a locked page in the page cache, now we need to check
2828*4882a593Smuzhiyun 	 * that it's up-to-date. If not, it is going to be due to an error.
2829*4882a593Smuzhiyun 	 */
2830*4882a593Smuzhiyun 	if (unlikely(!PageUptodate(page)))
2831*4882a593Smuzhiyun 		goto page_not_uptodate;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	/*
2834*4882a593Smuzhiyun 	 * We've made it this far and we had to drop our mmap_lock, now is the
2835*4882a593Smuzhiyun 	 * time to return to the upper layer and have it re-find the vma and
2836*4882a593Smuzhiyun 	 * redo the fault.
2837*4882a593Smuzhiyun 	 */
2838*4882a593Smuzhiyun 	if (fpin) {
2839*4882a593Smuzhiyun 		unlock_page(page);
2840*4882a593Smuzhiyun 		goto out_retry;
2841*4882a593Smuzhiyun 	}
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun page_ok:
2844*4882a593Smuzhiyun 	/*
2845*4882a593Smuzhiyun 	 * Found the page and have a reference on it.
2846*4882a593Smuzhiyun 	 * We must recheck i_size under page lock.
2847*4882a593Smuzhiyun 	 */
2848*4882a593Smuzhiyun 	max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
2849*4882a593Smuzhiyun 	if (unlikely(offset >= max_off)) {
2850*4882a593Smuzhiyun 		unlock_page(page);
2851*4882a593Smuzhiyun 		put_page(page);
2852*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2853*4882a593Smuzhiyun 	}
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 	vmf->page = page;
2856*4882a593Smuzhiyun 	return ret | VM_FAULT_LOCKED;
2857*4882a593Smuzhiyun 
2858*4882a593Smuzhiyun page_not_uptodate:
2859*4882a593Smuzhiyun 	/*
2860*4882a593Smuzhiyun 	 * Umm, take care of errors if the page isn't up-to-date.
2861*4882a593Smuzhiyun 	 * Try to re-read it _once_. We do this synchronously,
2862*4882a593Smuzhiyun 	 * because there really aren't any performance issues here
2863*4882a593Smuzhiyun 	 * and we need to check for errors.
2864*4882a593Smuzhiyun 	 */
2865*4882a593Smuzhiyun 	ClearPageError(page);
2866*4882a593Smuzhiyun 	fpin = maybe_unlock_mmap_for_io(vmf, fpin);
2867*4882a593Smuzhiyun 	error = mapping->a_ops->readpage(file, page);
2868*4882a593Smuzhiyun 	if (!error) {
2869*4882a593Smuzhiyun 		wait_on_page_locked(page);
2870*4882a593Smuzhiyun 		if (!PageUptodate(page))
2871*4882a593Smuzhiyun 			error = -EIO;
2872*4882a593Smuzhiyun 	}
2873*4882a593Smuzhiyun 	if (fpin)
2874*4882a593Smuzhiyun 		goto out_retry;
2875*4882a593Smuzhiyun 	put_page(page);
2876*4882a593Smuzhiyun 
2877*4882a593Smuzhiyun 	if (!error || error == AOP_TRUNCATED_PAGE)
2878*4882a593Smuzhiyun 		goto retry_find;
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun 	shrink_readahead_size_eio(ra);
2881*4882a593Smuzhiyun 	return VM_FAULT_SIGBUS;
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun out_retry:
2884*4882a593Smuzhiyun 	/*
2885*4882a593Smuzhiyun 	 * We dropped the mmap_lock, we need to return to the fault handler to
2886*4882a593Smuzhiyun 	 * re-find the vma and come back and find our hopefully still populated
2887*4882a593Smuzhiyun 	 * page.
2888*4882a593Smuzhiyun 	 */
2889*4882a593Smuzhiyun 	if (page) {
2890*4882a593Smuzhiyun 		trace_android_vh_filemap_fault_cache_page(vmf, page);
2891*4882a593Smuzhiyun 		put_page(page);
2892*4882a593Smuzhiyun 	}
2893*4882a593Smuzhiyun 	if (fpin)
2894*4882a593Smuzhiyun 		fput(fpin);
2895*4882a593Smuzhiyun 	return ret | VM_FAULT_RETRY;
2896*4882a593Smuzhiyun }
2897*4882a593Smuzhiyun EXPORT_SYMBOL(filemap_fault);
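
/*
 * Retry contract, illustrated (descriptive example only): on a cold page
 * cache the first fault typically takes the out_retry path - readahead
 * is kicked off, the mmap_lock is dropped with the file pinned, and
 * VM_FAULT_RETRY is returned.  The page-fault handler then retries with
 * FAULT_FLAG_TRIED set, finds the now-populated page, and the second
 * pass returns VM_FAULT_LOCKED with vmf->page set.
 */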
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page)
2900*4882a593Smuzhiyun {
2901*4882a593Smuzhiyun 	struct mm_struct *mm = vmf->vma->vm_mm;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 	/* Huge page is mapped? No need to proceed. */
2904*4882a593Smuzhiyun 	if (pmd_trans_huge(*vmf->pmd)) {
2905*4882a593Smuzhiyun 		unlock_page(page);
2906*4882a593Smuzhiyun 		put_page(page);
2907*4882a593Smuzhiyun 		return true;
2908*4882a593Smuzhiyun 	}
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 	if (pmd_none(*vmf->pmd) && PageTransHuge(page)) {
2911*4882a593Smuzhiyun 		vm_fault_t ret = do_set_pmd(vmf, page);
2912*4882a593Smuzhiyun 		if (!ret) {
2913*4882a593Smuzhiyun 			/* The page is mapped successfully, reference consumed. */
2914*4882a593Smuzhiyun 			unlock_page(page);
2915*4882a593Smuzhiyun 			return true;
2916*4882a593Smuzhiyun 		}
2917*4882a593Smuzhiyun 	}
2918*4882a593Smuzhiyun 
2919*4882a593Smuzhiyun 	if (pmd_none(*vmf->pmd)) {
2920*4882a593Smuzhiyun 		vmf->ptl = pmd_lock(mm, vmf->pmd);
2921*4882a593Smuzhiyun 		if (likely(pmd_none(*vmf->pmd))) {
2922*4882a593Smuzhiyun 			mm_inc_nr_ptes(mm);
2923*4882a593Smuzhiyun 			pmd_populate(mm, vmf->pmd, vmf->prealloc_pte);
2924*4882a593Smuzhiyun 			vmf->prealloc_pte = NULL;
2925*4882a593Smuzhiyun 		}
2926*4882a593Smuzhiyun 		spin_unlock(vmf->ptl);
2927*4882a593Smuzhiyun 	}
2928*4882a593Smuzhiyun 
2929*4882a593Smuzhiyun 	/* See comment in handle_pte_fault() */
2930*4882a593Smuzhiyun 	if (pmd_devmap_trans_unstable(vmf->pmd)) {
2931*4882a593Smuzhiyun 		unlock_page(page);
2932*4882a593Smuzhiyun 		put_page(page);
2933*4882a593Smuzhiyun 		return true;
2934*4882a593Smuzhiyun 	}
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	return false;
2937*4882a593Smuzhiyun }
2938*4882a593Smuzhiyun 
2939*4882a593Smuzhiyun static struct page *next_uptodate_page(struct page *page,
2940*4882a593Smuzhiyun 				       struct address_space *mapping,
2941*4882a593Smuzhiyun 				       struct xa_state *xas, pgoff_t end_pgoff)
2942*4882a593Smuzhiyun {
2943*4882a593Smuzhiyun 	unsigned long max_idx;
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun 	do {
2946*4882a593Smuzhiyun 		if (!page)
2947*4882a593Smuzhiyun 			return NULL;
2948*4882a593Smuzhiyun 		if (xas_retry(xas, page))
2949*4882a593Smuzhiyun 			continue;
2950*4882a593Smuzhiyun 		if (xa_is_value(page))
2951*4882a593Smuzhiyun 			continue;
2952*4882a593Smuzhiyun 		if (PageLocked(page))
2953*4882a593Smuzhiyun 			continue;
2954*4882a593Smuzhiyun 		if (!page_cache_get_speculative(page))
2955*4882a593Smuzhiyun 			continue;
2956*4882a593Smuzhiyun 		/* Has the page moved or been split? */
2957*4882a593Smuzhiyun 		if (unlikely(page != xas_reload(xas)))
2958*4882a593Smuzhiyun 			goto skip;
2959*4882a593Smuzhiyun 		if (!PageUptodate(page) || PageReadahead(page))
2960*4882a593Smuzhiyun 			goto skip;
2961*4882a593Smuzhiyun 		if (PageHWPoison(page))
2962*4882a593Smuzhiyun 			goto skip;
2963*4882a593Smuzhiyun 		if (!trylock_page(page))
2964*4882a593Smuzhiyun 			goto skip;
2965*4882a593Smuzhiyun 		if (page->mapping != mapping)
2966*4882a593Smuzhiyun 			goto unlock;
2967*4882a593Smuzhiyun 		if (!PageUptodate(page))
2968*4882a593Smuzhiyun 			goto unlock;
2969*4882a593Smuzhiyun 		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
2970*4882a593Smuzhiyun 		if (xas->xa_index >= max_idx)
2971*4882a593Smuzhiyun 			goto unlock;
2972*4882a593Smuzhiyun 		return page;
2973*4882a593Smuzhiyun unlock:
2974*4882a593Smuzhiyun 		unlock_page(page);
2975*4882a593Smuzhiyun skip:
2976*4882a593Smuzhiyun 		put_page(page);
2977*4882a593Smuzhiyun 	} while ((page = xas_next_entry(xas, end_pgoff)) != NULL);
2978*4882a593Smuzhiyun 
2979*4882a593Smuzhiyun 	return NULL;
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun static inline struct page *first_map_page(struct address_space *mapping,
2983*4882a593Smuzhiyun 					  struct xa_state *xas,
2984*4882a593Smuzhiyun 					  pgoff_t end_pgoff)
2985*4882a593Smuzhiyun {
2986*4882a593Smuzhiyun 	return next_uptodate_page(xas_find(xas, end_pgoff),
2987*4882a593Smuzhiyun 				  mapping, xas, end_pgoff);
2988*4882a593Smuzhiyun }
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun static inline struct page *next_map_page(struct address_space *mapping,
2991*4882a593Smuzhiyun 					 struct xa_state *xas,
2992*4882a593Smuzhiyun 					 pgoff_t end_pgoff)
2993*4882a593Smuzhiyun {
2994*4882a593Smuzhiyun 	return next_uptodate_page(xas_next_entry(xas, end_pgoff),
2995*4882a593Smuzhiyun 				  mapping, xas, end_pgoff);
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
2999*4882a593Smuzhiyun bool filemap_allow_speculation(void)
3000*4882a593Smuzhiyun {
3001*4882a593Smuzhiyun 	return true;
3002*4882a593Smuzhiyun }
3003*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(filemap_allow_speculation);
3004*4882a593Smuzhiyun #endif
3005*4882a593Smuzhiyun 
3006*4882a593Smuzhiyun vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3007*4882a593Smuzhiyun 			     pgoff_t start_pgoff, pgoff_t end_pgoff)
3008*4882a593Smuzhiyun {
3009*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3010*4882a593Smuzhiyun 	struct file *file = vma->vm_file;
3011*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
3012*4882a593Smuzhiyun 	pgoff_t last_pgoff = start_pgoff;
3013*4882a593Smuzhiyun 	unsigned long addr;
3014*4882a593Smuzhiyun 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
3015*4882a593Smuzhiyun 	struct page *head, *page;
3016*4882a593Smuzhiyun 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
3017*4882a593Smuzhiyun 	vm_fault_t ret = (vmf->flags & FAULT_FLAG_SPECULATIVE) ?
3018*4882a593Smuzhiyun 		VM_FAULT_RETRY : 0;
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun 	rcu_read_lock();
3021*4882a593Smuzhiyun 	head = first_map_page(mapping, &xas, end_pgoff);
3022*4882a593Smuzhiyun 	if (!head)
3023*4882a593Smuzhiyun 		goto out;
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
3026*4882a593Smuzhiyun 	    filemap_map_pmd(vmf, head)) {
3027*4882a593Smuzhiyun 		ret = VM_FAULT_NOPAGE;
3028*4882a593Smuzhiyun 		goto out;
3029*4882a593Smuzhiyun 	}
3030*4882a593Smuzhiyun 
3031*4882a593Smuzhiyun 	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
3032*4882a593Smuzhiyun 	if (!pte_map_lock_addr(vmf, addr)) {
3033*4882a593Smuzhiyun 		unlock_page(head);
3034*4882a593Smuzhiyun 		put_page(head);
3035*4882a593Smuzhiyun 		goto out;
3036*4882a593Smuzhiyun 	}
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 	do {
3039*4882a593Smuzhiyun 		page = find_subpage(head, xas.xa_index);
3040*4882a593Smuzhiyun 		if (PageHWPoison(page))
3041*4882a593Smuzhiyun 			goto unlock;
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 		if (mmap_miss > 0)
3044*4882a593Smuzhiyun 			mmap_miss--;
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
3047*4882a593Smuzhiyun 		vmf->pte += xas.xa_index - last_pgoff;
3048*4882a593Smuzhiyun 		last_pgoff = xas.xa_index;
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 		if (!pte_none(*vmf->pte))
3051*4882a593Smuzhiyun 			goto unlock;
3052*4882a593Smuzhiyun 
3053*4882a593Smuzhiyun 		/* We're about to handle the fault */
3054*4882a593Smuzhiyun 		if (vmf->address == addr)
3055*4882a593Smuzhiyun 			ret = VM_FAULT_NOPAGE;
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 		do_set_pte(vmf, page, addr);
3058*4882a593Smuzhiyun 		/* no need to invalidate: a not-present page won't be cached */
3059*4882a593Smuzhiyun 		update_mmu_cache(vma, addr, vmf->pte);
3060*4882a593Smuzhiyun 		unlock_page(head);
3061*4882a593Smuzhiyun 		continue;
3062*4882a593Smuzhiyun unlock:
3063*4882a593Smuzhiyun 		unlock_page(head);
3064*4882a593Smuzhiyun 		put_page(head);
3065*4882a593Smuzhiyun 	} while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL);
3066*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3067*4882a593Smuzhiyun out:
3068*4882a593Smuzhiyun 	rcu_read_unlock();
3069*4882a593Smuzhiyun 	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
3070*4882a593Smuzhiyun 	return ret;
3071*4882a593Smuzhiyun }
3072*4882a593Smuzhiyun EXPORT_SYMBOL(filemap_map_pages);
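
/*
 * Context (illustrative; the default may differ per tree): the fault
 * handler's do_fault_around() invokes ->map_pages with a window of page
 * offsets around the faulting address (fault_around_bytes, 64KB i.e.
 * 16 4KB pages by default), so neighbouring pages that are already
 * uptodate in the cache get mapped without taking separate faults.
 */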
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3075*4882a593Smuzhiyun {
3076*4882a593Smuzhiyun 	struct page *page = vmf->page;
3077*4882a593Smuzhiyun 	struct inode *inode = file_inode(vmf->vma->vm_file);
3078*4882a593Smuzhiyun 	vm_fault_t ret = VM_FAULT_LOCKED;
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 	sb_start_pagefault(inode->i_sb);
3081*4882a593Smuzhiyun 	file_update_time(vmf->vma->vm_file);
3082*4882a593Smuzhiyun 	lock_page(page);
3083*4882a593Smuzhiyun 	if (page->mapping != inode->i_mapping) {
3084*4882a593Smuzhiyun 		unlock_page(page);
3085*4882a593Smuzhiyun 		ret = VM_FAULT_NOPAGE;
3086*4882a593Smuzhiyun 		goto out;
3087*4882a593Smuzhiyun 	}
3088*4882a593Smuzhiyun 	/*
3089*4882a593Smuzhiyun 	 * We mark the page dirty already here so that when freeze is in
3090*4882a593Smuzhiyun 	 * progress, we are guaranteed that writeback during freezing will
3091*4882a593Smuzhiyun 	 * see the dirty page and writeprotect it again.
3092*4882a593Smuzhiyun 	 */
3093*4882a593Smuzhiyun 	set_page_dirty(page);
3094*4882a593Smuzhiyun 	wait_for_stable_page(page);
3095*4882a593Smuzhiyun out:
3096*4882a593Smuzhiyun 	sb_end_pagefault(inode->i_sb);
3097*4882a593Smuzhiyun 	return ret;
3098*4882a593Smuzhiyun }
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun const struct vm_operations_struct generic_file_vm_ops = {
3101*4882a593Smuzhiyun 	.fault		= filemap_fault,
3102*4882a593Smuzhiyun 	.map_pages	= filemap_map_pages,
3103*4882a593Smuzhiyun 	.page_mkwrite	= filemap_page_mkwrite,
3104*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
3105*4882a593Smuzhiyun 	.allow_speculation = filemap_allow_speculation,
3106*4882a593Smuzhiyun #endif
3107*4882a593Smuzhiyun };
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun /* This is used for a general mmap of a disk file */
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
3112*4882a593Smuzhiyun {
3113*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	if (!mapping->a_ops->readpage)
3116*4882a593Smuzhiyun 		return -ENOEXEC;
3117*4882a593Smuzhiyun 	file_accessed(file);
3118*4882a593Smuzhiyun 	vma->vm_ops = &generic_file_vm_ops;
3119*4882a593Smuzhiyun 	return 0;
3120*4882a593Smuzhiyun }
3121*4882a593Smuzhiyun 
3122*4882a593Smuzhiyun /*
3123*4882a593Smuzhiyun  * This is for filesystems which do not implement ->writepage.
3124*4882a593Smuzhiyun  */
3125*4882a593Smuzhiyun int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
3126*4882a593Smuzhiyun {
3127*4882a593Smuzhiyun 	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
3128*4882a593Smuzhiyun 		return -EINVAL;
3129*4882a593Smuzhiyun 	return generic_file_mmap(file, vma);
3130*4882a593Smuzhiyun }
3131*4882a593Smuzhiyun #else
3132*4882a593Smuzhiyun vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)
3133*4882a593Smuzhiyun {
3134*4882a593Smuzhiyun 	return VM_FAULT_SIGBUS;
3135*4882a593Smuzhiyun }
3136*4882a593Smuzhiyun int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
3137*4882a593Smuzhiyun {
3138*4882a593Smuzhiyun 	return -ENOSYS;
3139*4882a593Smuzhiyun }
3140*4882a593Smuzhiyun int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
3141*4882a593Smuzhiyun {
3142*4882a593Smuzhiyun 	return -ENOSYS;
3143*4882a593Smuzhiyun }
3144*4882a593Smuzhiyun #endif /* CONFIG_MMU */
3145*4882a593Smuzhiyun 
3146*4882a593Smuzhiyun EXPORT_SYMBOL(filemap_page_mkwrite);
3147*4882a593Smuzhiyun EXPORT_SYMBOL(generic_file_mmap);
3148*4882a593Smuzhiyun EXPORT_SYMBOL(generic_file_readonly_mmap);
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun static struct page *wait_on_page_read(struct page *page)
3151*4882a593Smuzhiyun {
3152*4882a593Smuzhiyun 	if (!IS_ERR(page)) {
3153*4882a593Smuzhiyun 		wait_on_page_locked(page);
3154*4882a593Smuzhiyun 		if (!PageUptodate(page)) {
3155*4882a593Smuzhiyun 			put_page(page);
3156*4882a593Smuzhiyun 			page = ERR_PTR(-EIO);
3157*4882a593Smuzhiyun 		}
3158*4882a593Smuzhiyun 	}
3159*4882a593Smuzhiyun 	return page;
3160*4882a593Smuzhiyun }
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun static struct page *do_read_cache_page(struct address_space *mapping,
3163*4882a593Smuzhiyun 				pgoff_t index,
3164*4882a593Smuzhiyun 				int (*filler)(void *, struct page *),
3165*4882a593Smuzhiyun 				void *data,
3166*4882a593Smuzhiyun 				gfp_t gfp)
3167*4882a593Smuzhiyun {
3168*4882a593Smuzhiyun 	struct page *page;
3169*4882a593Smuzhiyun 	int err;
3170*4882a593Smuzhiyun repeat:
3171*4882a593Smuzhiyun 	page = find_get_page(mapping, index);
3172*4882a593Smuzhiyun 	if (!page) {
3173*4882a593Smuzhiyun 		page = __page_cache_alloc(gfp);
3174*4882a593Smuzhiyun 		if (!page)
3175*4882a593Smuzhiyun 			return ERR_PTR(-ENOMEM);
3176*4882a593Smuzhiyun 		err = add_to_page_cache_lru(page, mapping, index, gfp);
3177*4882a593Smuzhiyun 		if (unlikely(err)) {
3178*4882a593Smuzhiyun 			put_page(page);
3179*4882a593Smuzhiyun 			if (err == -EEXIST)
3180*4882a593Smuzhiyun 				goto repeat;
3181*4882a593Smuzhiyun 			/* Presumably ENOMEM for xarray node */
3182*4882a593Smuzhiyun 			return ERR_PTR(err);
3183*4882a593Smuzhiyun 		}
3184*4882a593Smuzhiyun 
3185*4882a593Smuzhiyun filler:
3186*4882a593Smuzhiyun 		if (filler)
3187*4882a593Smuzhiyun 			err = filler(data, page);
3188*4882a593Smuzhiyun 		else
3189*4882a593Smuzhiyun 			err = mapping->a_ops->readpage(data, page);
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun 		if (err < 0) {
3192*4882a593Smuzhiyun 			put_page(page);
3193*4882a593Smuzhiyun 			return ERR_PTR(err);
3194*4882a593Smuzhiyun 		}
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 		page = wait_on_page_read(page);
3197*4882a593Smuzhiyun 		if (IS_ERR(page))
3198*4882a593Smuzhiyun 			return page;
3199*4882a593Smuzhiyun 		goto out;
3200*4882a593Smuzhiyun 	}
3201*4882a593Smuzhiyun 	if (PageUptodate(page))
3202*4882a593Smuzhiyun 		goto out;
3203*4882a593Smuzhiyun 
3204*4882a593Smuzhiyun 	/*
3205*4882a593Smuzhiyun 	 * Page is not up to date and may be locked due to one of the following
3206*4882a593Smuzhiyun 	 * case a: Page is being filled and the page lock is held
3207*4882a593Smuzhiyun 	 * case b: Read/write error clearing the page uptodate status
3208*4882a593Smuzhiyun 	 * case c: Truncation in progress (page locked)
3209*4882a593Smuzhiyun 	 * case d: Reclaim in progress
3210*4882a593Smuzhiyun 	 *
3211*4882a593Smuzhiyun 	 * Case a, the page will be up to date when the page is unlocked.
3212*4882a593Smuzhiyun 	 *    There is no need to serialise on the page lock here as the page
3213*4882a593Smuzhiyun 	 *    is pinned so the lock gives no additional protection. Even if the
3214*4882a593Smuzhiyun 	 *    page is truncated, the data is still valid if PageUptodate as
3215*4882a593Smuzhiyun 	 *    it's a read vs truncate race.
3216*4882a593Smuzhiyun 	 * Case b, the page will not be up to date
3217*4882a593Smuzhiyun 	 * Case c, the page may be truncated but in itself, the data may still
3218*4882a593Smuzhiyun 	 *    be valid after IO completes as it's a read vs truncate race. The
3219*4882a593Smuzhiyun 	 *    operation must restart if the page is not uptodate on unlock but
3220*4882a593Smuzhiyun 	 *    otherwise serialising on page lock to stabilise the mapping gives
3221*4882a593Smuzhiyun 	 *    no additional guarantees to the caller as the page lock is
3222*4882a593Smuzhiyun 	 *    released before return.
3223*4882a593Smuzhiyun 	 * Case d, similar to truncation. If reclaim holds the page lock, it
3224*4882a593Smuzhiyun 	 *    will be a race with remove_mapping that determines if the mapping
3225*4882a593Smuzhiyun 	 *    is valid on unlock but otherwise the data is valid and there is
3226*4882a593Smuzhiyun 	 *    no need to serialise with page lock.
3227*4882a593Smuzhiyun 	 *
3228*4882a593Smuzhiyun 	 * As the page lock gives no additional guarantee, we optimistically
3229*4882a593Smuzhiyun 	 * wait on the page to be unlocked and check if it's up to date and
3230*4882a593Smuzhiyun 	 * use the page if it is. Otherwise, the page lock is required to
3231*4882a593Smuzhiyun 	 * distinguish between the different cases. The motivation is that we
3232*4882a593Smuzhiyun 	 * avoid spurious serialisations and wakeups when multiple processes
3233*4882a593Smuzhiyun 	 * wait on the same page for IO to complete.
3234*4882a593Smuzhiyun 	 */
3235*4882a593Smuzhiyun 	wait_on_page_locked(page);
3236*4882a593Smuzhiyun 	if (PageUptodate(page))
3237*4882a593Smuzhiyun 		goto out;
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	/* Distinguish between all the cases under the safety of the lock */
3240*4882a593Smuzhiyun 	lock_page(page);
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	/* Case c or d, restart the operation */
3243*4882a593Smuzhiyun 	if (!page->mapping) {
3244*4882a593Smuzhiyun 		unlock_page(page);
3245*4882a593Smuzhiyun 		put_page(page);
3246*4882a593Smuzhiyun 		goto repeat;
3247*4882a593Smuzhiyun 	}
3248*4882a593Smuzhiyun 
3249*4882a593Smuzhiyun 	/* Someone else locked and filled the page in a very small window */
3250*4882a593Smuzhiyun 	if (PageUptodate(page)) {
3251*4882a593Smuzhiyun 		unlock_page(page);
3252*4882a593Smuzhiyun 		goto out;
3253*4882a593Smuzhiyun 	}
3254*4882a593Smuzhiyun 
3255*4882a593Smuzhiyun 	/*
3256*4882a593Smuzhiyun 	 * A previous I/O error may have been due to a temporary
3257*4882a593Smuzhiyun 	 * failure.
3258*4882a593Smuzhiyun 	 * Clear the page error before the actual read; PG_error
3259*4882a593Smuzhiyun 	 * will be set again if the read fails.
3260*4882a593Smuzhiyun 	 */
3261*4882a593Smuzhiyun 	ClearPageError(page);
3262*4882a593Smuzhiyun 	goto filler;
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun out:
3265*4882a593Smuzhiyun 	mark_page_accessed(page);
3266*4882a593Smuzhiyun 	return page;
3267*4882a593Smuzhiyun }
3268*4882a593Smuzhiyun 
3269*4882a593Smuzhiyun /**
3270*4882a593Smuzhiyun  * read_cache_page - read into page cache, fill it if needed
3271*4882a593Smuzhiyun  * @mapping:	the page's address_space
3272*4882a593Smuzhiyun  * @index:	the page index
3273*4882a593Smuzhiyun  * @filler:	function to perform the read
3274*4882a593Smuzhiyun  * @data:	first arg to filler(data, page) function, often left as NULL
3275*4882a593Smuzhiyun  *
3276*4882a593Smuzhiyun  * Read into the page cache. If a page already exists, and PageUptodate() is
3277*4882a593Smuzhiyun  * not set, try to fill the page and wait for it to become unlocked.
3278*4882a593Smuzhiyun  *
3279*4882a593Smuzhiyun  * If the page does not get brought uptodate, return -EIO.
3280*4882a593Smuzhiyun  *
3281*4882a593Smuzhiyun  * Return: up to date page on success, ERR_PTR() on failure.
3282*4882a593Smuzhiyun  */
3283*4882a593Smuzhiyun struct page *read_cache_page(struct address_space *mapping,
3284*4882a593Smuzhiyun 				pgoff_t index,
3285*4882a593Smuzhiyun 				int (*filler)(void *, struct page *),
3286*4882a593Smuzhiyun 				void *data)
3287*4882a593Smuzhiyun {
3288*4882a593Smuzhiyun 	return do_read_cache_page(mapping, index, filler, data,
3289*4882a593Smuzhiyun 			mapping_gfp_mask(mapping));
3290*4882a593Smuzhiyun }
3291*4882a593Smuzhiyun EXPORT_SYMBOL(read_cache_page);
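
/*
 * Illustrative sketch (editorial, not part of this revision): one way a
 * filesystem might read the first page of a file through the helper above.
 * A NULL filler means mapping->a_ops->readpage() performs the actual read.
 * The helper name example_read_first_page() is hypothetical, and
 * page_address() assumes the page is not in highmem, for brevity.
 */
static int __maybe_unused example_read_first_page(struct inode *inode,
						  char *buf, size_t len)
{
	struct page *page;

	page = read_cache_page(inode->i_mapping, 0, NULL, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* The page is uptodate on success; copy out and drop our ref. */
	memcpy(buf, page_address(page), min_t(size_t, len, PAGE_SIZE));
	put_page(page);
	return 0;
}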
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun /**
3294*4882a593Smuzhiyun  * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3295*4882a593Smuzhiyun  * @mapping:	the page's address_space
3296*4882a593Smuzhiyun  * @index:	the page index
3297*4882a593Smuzhiyun  * @gfp:	the page allocator flags to use if allocating
3298*4882a593Smuzhiyun  *
3299*4882a593Smuzhiyun  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3300*4882a593Smuzhiyun  * any new page allocations done using the specified allocation flags.
3301*4882a593Smuzhiyun  *
3302*4882a593Smuzhiyun  * If the page does not get brought uptodate, return -EIO.
3303*4882a593Smuzhiyun  *
3304*4882a593Smuzhiyun  * Return: up to date page on success, ERR_PTR() on failure.
3305*4882a593Smuzhiyun  */
3306*4882a593Smuzhiyun struct page *read_cache_page_gfp(struct address_space *mapping,
3307*4882a593Smuzhiyun 				pgoff_t index,
3308*4882a593Smuzhiyun 				gfp_t gfp)
3309*4882a593Smuzhiyun {
3310*4882a593Smuzhiyun 	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
3311*4882a593Smuzhiyun }
3312*4882a593Smuzhiyun EXPORT_SYMBOL(read_cache_page_gfp);
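
/*
 * Illustrative sketch (editorial): read_cache_page_gfp() suits callers
 * that must constrain the allocation context, e.g. reading cached data
 * while holding locks that make re-entering the filesystem unsafe:
 *
 *	page = read_cache_page_gfp(mapping, index, GFP_NOFS);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *
 * GFP_NOFS keeps any new page allocation from recursing into
 * filesystem reclaim.
 */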
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun int pagecache_write_begin(struct file *file, struct address_space *mapping,
3315*4882a593Smuzhiyun 				loff_t pos, unsigned len, unsigned flags,
3316*4882a593Smuzhiyun 				struct page **pagep, void **fsdata)
3317*4882a593Smuzhiyun {
3318*4882a593Smuzhiyun 	const struct address_space_operations *aops = mapping->a_ops;
3319*4882a593Smuzhiyun 
3320*4882a593Smuzhiyun 	return aops->write_begin(file, mapping, pos, len, flags,
3321*4882a593Smuzhiyun 							pagep, fsdata);
3322*4882a593Smuzhiyun }
3323*4882a593Smuzhiyun EXPORT_SYMBOL(pagecache_write_begin);
3324*4882a593Smuzhiyun 
3325*4882a593Smuzhiyun int pagecache_write_end(struct file *file, struct address_space *mapping,
3326*4882a593Smuzhiyun 				loff_t pos, unsigned len, unsigned copied,
3327*4882a593Smuzhiyun 				struct page *page, void *fsdata)
3328*4882a593Smuzhiyun {
3329*4882a593Smuzhiyun 	const struct address_space_operations *aops = mapping->a_ops;
3330*4882a593Smuzhiyun 
3331*4882a593Smuzhiyun 	return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
3332*4882a593Smuzhiyun }
3333*4882a593Smuzhiyun EXPORT_SYMBOL(pagecache_write_end);
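
/*
 * Illustrative sketch (editorial): the begin/end pair above brackets a
 * copy into the page cache, much as __page_symlink() does.  The helper
 * name example_write_small() is hypothetical; it assumes the range stays
 * within one page and, for brevity, that the page is not in highmem.
 */
static int __maybe_unused example_write_small(struct file *file, loff_t pos,
					      const char *src, unsigned int len)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	void *fsdata = NULL;
	int err;

	err = pagecache_write_begin(file, mapping, pos, len, 0,
				    &page, &fsdata);
	if (err)
		return err;

	/* On success the page is locked; copy the new data in place. */
	memcpy(page_address(page) + offset_in_page(pos), src, len);

	/* write_end unlocks the page and drops the reference. */
	err = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
	return err < 0 ? err : 0;
}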
3334*4882a593Smuzhiyun 
3335*4882a593Smuzhiyun /*
3336*4882a593Smuzhiyun  * Warn about a page cache invalidation failure during a direct I/O write.
3337*4882a593Smuzhiyun  */
3338*4882a593Smuzhiyun void dio_warn_stale_pagecache(struct file *filp)
3339*4882a593Smuzhiyun {
3340*4882a593Smuzhiyun 	static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);
3341*4882a593Smuzhiyun 	char pathname[128];
3342*4882a593Smuzhiyun 	struct inode *inode = file_inode(filp);
3343*4882a593Smuzhiyun 	char *path;
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 	errseq_set(&inode->i_mapping->wb_err, -EIO);
3346*4882a593Smuzhiyun 	if (__ratelimit(&_rs)) {
3347*4882a593Smuzhiyun 		path = file_path(filp, pathname, sizeof(pathname));
3348*4882a593Smuzhiyun 		if (IS_ERR(path))
3349*4882a593Smuzhiyun 			path = "(unknown)";
3350*4882a593Smuzhiyun 		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
3351*4882a593Smuzhiyun 		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
3352*4882a593Smuzhiyun 			current->comm);
3353*4882a593Smuzhiyun 	}
3354*4882a593Smuzhiyun }
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun ssize_t
3357*4882a593Smuzhiyun generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun 	struct file	*file = iocb->ki_filp;
3360*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
3361*4882a593Smuzhiyun 	struct inode	*inode = mapping->host;
3362*4882a593Smuzhiyun 	loff_t		pos = iocb->ki_pos;
3363*4882a593Smuzhiyun 	ssize_t		written;
3364*4882a593Smuzhiyun 	size_t		write_len;
3365*4882a593Smuzhiyun 	pgoff_t		end;
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	write_len = iov_iter_count(from);
3368*4882a593Smuzhiyun 	end = (pos + write_len - 1) >> PAGE_SHIFT;
3369*4882a593Smuzhiyun 
3370*4882a593Smuzhiyun 	if (iocb->ki_flags & IOCB_NOWAIT) {
3371*4882a593Smuzhiyun 		/* If there are pages to write back, bail out with -EAGAIN */
3372*4882a593Smuzhiyun 		if (filemap_range_has_page(inode->i_mapping, pos,
3373*4882a593Smuzhiyun 					   pos + write_len - 1))
3374*4882a593Smuzhiyun 			return -EAGAIN;
3375*4882a593Smuzhiyun 	} else {
3376*4882a593Smuzhiyun 		written = filemap_write_and_wait_range(mapping, pos,
3377*4882a593Smuzhiyun 							pos + write_len - 1);
3378*4882a593Smuzhiyun 		if (written)
3379*4882a593Smuzhiyun 			goto out;
3380*4882a593Smuzhiyun 	}
3381*4882a593Smuzhiyun 
3382*4882a593Smuzhiyun 	/*
3383*4882a593Smuzhiyun 	 * After a write we want buffered reads to be sure to go to disk to get
3384*4882a593Smuzhiyun 	 * the new data.  We invalidate clean cached pages from the region we're
3385*4882a593Smuzhiyun 	 * about to write.  We do this *before* the write so that we can return
3386*4882a593Smuzhiyun 	 * without clobbering -EIOCBQUEUED from ->direct_IO().
3387*4882a593Smuzhiyun 	 */
3388*4882a593Smuzhiyun 	written = invalidate_inode_pages2_range(mapping,
3389*4882a593Smuzhiyun 					pos >> PAGE_SHIFT, end);
3390*4882a593Smuzhiyun 	/*
3391*4882a593Smuzhiyun 	 * If a page cannot be invalidated, return 0 to fall back
3392*4882a593Smuzhiyun 	 * to a buffered write.
3393*4882a593Smuzhiyun 	 */
3394*4882a593Smuzhiyun 	if (written) {
3395*4882a593Smuzhiyun 		if (written == -EBUSY)
3396*4882a593Smuzhiyun 			return 0;
3397*4882a593Smuzhiyun 		goto out;
3398*4882a593Smuzhiyun 	}
3399*4882a593Smuzhiyun 
3400*4882a593Smuzhiyun 	written = mapping->a_ops->direct_IO(iocb, from);
3401*4882a593Smuzhiyun 
3402*4882a593Smuzhiyun 	/*
3403*4882a593Smuzhiyun 	 * Finally, try again to invalidate clean pages which might have been
3404*4882a593Smuzhiyun 	 * cached by non-direct readahead, or faulted in by get_user_pages()
3405*4882a593Smuzhiyun 	 * if the source of the write was an mmap'ed region of the file
3406*4882a593Smuzhiyun 	 * we're writing.  Either one is a pretty crazy thing to do,
3407*4882a593Smuzhiyun 	 * so we don't support it 100%.  If this invalidation
3408*4882a593Smuzhiyun 	 * fails, tough, the write still worked...
3409*4882a593Smuzhiyun 	 *
3410*4882a593Smuzhiyun 	 * Most of the time we do not need this since dio_complete() will do
3411*4882a593Smuzhiyun 	 * the invalidation for us. However there are some file systems that
3412*4882a593Smuzhiyun 	 * do not end up with dio_complete() being called, so let's not break
3413*4882a593Smuzhiyun 	 * them by removing it completely.
3414*4882a593Smuzhiyun 	 *
3415*4882a593Smuzhiyun 	 * A notable example is blkdev_direct_IO().
3416*4882a593Smuzhiyun 	 *
3417*4882a593Smuzhiyun 	 * Skip invalidation for async writes or if mapping has no pages.
3418*4882a593Smuzhiyun 	 */
3419*4882a593Smuzhiyun 	if (written > 0 && mapping->nrpages &&
3420*4882a593Smuzhiyun 	    invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end))
3421*4882a593Smuzhiyun 		dio_warn_stale_pagecache(file);
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun 	if (written > 0) {
3424*4882a593Smuzhiyun 		pos += written;
3425*4882a593Smuzhiyun 		write_len -= written;
3426*4882a593Smuzhiyun 		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
3427*4882a593Smuzhiyun 			i_size_write(inode, pos);
3428*4882a593Smuzhiyun 			mark_inode_dirty(inode);
3429*4882a593Smuzhiyun 		}
3430*4882a593Smuzhiyun 		iocb->ki_pos = pos;
3431*4882a593Smuzhiyun 	}
3432*4882a593Smuzhiyun 	iov_iter_revert(from, write_len - iov_iter_count(from));
3433*4882a593Smuzhiyun out:
3434*4882a593Smuzhiyun 	return written;
3435*4882a593Smuzhiyun }
3436*4882a593Smuzhiyun EXPORT_SYMBOL(generic_file_direct_write);
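
/*
 * Illustrative sketch (editorial): a ->write_iter() would typically use
 * generic_file_direct_write() the way __generic_file_write_iter() below
 * does: attempt the direct write, then finish any remainder through the
 * buffered path:
 *
 *	if (iocb->ki_flags & IOCB_DIRECT) {
 *		written = generic_file_direct_write(iocb, from);
 *		if (written < 0 || !iov_iter_count(from))
 *			return written;
 *		... complete the remainder via buffered writes ...
 *	}
 */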
3437*4882a593Smuzhiyun 
3438*4882a593Smuzhiyun /*
3439*4882a593Smuzhiyun  * Find or create a page at the given pagecache position. Return the locked
3440*4882a593Smuzhiyun  * page. This function is specifically for buffered writes.
3441*4882a593Smuzhiyun  */
3442*4882a593Smuzhiyun struct page *grab_cache_page_write_begin(struct address_space *mapping,
3443*4882a593Smuzhiyun 					pgoff_t index, unsigned flags)
3444*4882a593Smuzhiyun {
3445*4882a593Smuzhiyun 	struct page *page;
3446*4882a593Smuzhiyun 	int fgp_flags = FGP_LOCK|FGP_WRITE|FGP_CREAT;
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun 	if (flags & AOP_FLAG_NOFS)
3449*4882a593Smuzhiyun 		fgp_flags |= FGP_NOFS;
3450*4882a593Smuzhiyun 
3451*4882a593Smuzhiyun 	page = pagecache_get_page(mapping, index, fgp_flags,
3452*4882a593Smuzhiyun 			mapping_gfp_mask(mapping));
3453*4882a593Smuzhiyun 	if (page)
3454*4882a593Smuzhiyun 		wait_for_stable_page(page);
3455*4882a593Smuzhiyun 
3456*4882a593Smuzhiyun 	return page;
3457*4882a593Smuzhiyun }
3458*4882a593Smuzhiyun EXPORT_SYMBOL(grab_cache_page_write_begin);
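
/*
 * Illustrative sketch (editorial): a minimal ->write_begin() can be built
 * directly on grab_cache_page_write_begin(), in the style of
 * simple_write_begin() in fs/libfs.c.  The name example_write_begin() is
 * hypothetical.
 */
static int __maybe_unused example_write_begin(struct file *file,
		struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;	/* hand the locked page back to the caller */
	return 0;
}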
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun ssize_t generic_perform_write(struct file *file,
3461*4882a593Smuzhiyun 				struct iov_iter *i, loff_t pos)
3462*4882a593Smuzhiyun {
3463*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
3464*4882a593Smuzhiyun 	const struct address_space_operations *a_ops = mapping->a_ops;
3465*4882a593Smuzhiyun 	long status = 0;
3466*4882a593Smuzhiyun 	ssize_t written = 0;
3467*4882a593Smuzhiyun 	unsigned int flags = 0;
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun 	do {
3470*4882a593Smuzhiyun 		struct page *page;
3471*4882a593Smuzhiyun 		unsigned long offset;	/* Offset into pagecache page */
3472*4882a593Smuzhiyun 		unsigned long bytes;	/* Bytes to write to page */
3473*4882a593Smuzhiyun 		size_t copied;		/* Bytes copied from user */
3474*4882a593Smuzhiyun 		void *fsdata = NULL;
3475*4882a593Smuzhiyun 
3476*4882a593Smuzhiyun 		offset = (pos & (PAGE_SIZE - 1));
3477*4882a593Smuzhiyun 		bytes = min_t(unsigned long, PAGE_SIZE - offset,
3478*4882a593Smuzhiyun 						iov_iter_count(i));
3479*4882a593Smuzhiyun 
3480*4882a593Smuzhiyun again:
3481*4882a593Smuzhiyun 		/*
3482*4882a593Smuzhiyun 		 * Bring in the user page that we will copy from _first_.
3483*4882a593Smuzhiyun 		 * Otherwise there's a nasty deadlock on copying from the
3484*4882a593Smuzhiyun 		 * same page as we're writing to, without it being marked
3485*4882a593Smuzhiyun 		 * up-to-date.
3486*4882a593Smuzhiyun 		 *
3487*4882a593Smuzhiyun 		 * Not only is this an optimisation, but it is also required
3488*4882a593Smuzhiyun 		 * to check that the address is actually valid, when atomic
3489*4882a593Smuzhiyun 		 * usercopies are used, below.
3490*4882a593Smuzhiyun 		 */
3491*4882a593Smuzhiyun 		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
3492*4882a593Smuzhiyun 			status = -EFAULT;
3493*4882a593Smuzhiyun 			break;
3494*4882a593Smuzhiyun 		}
3495*4882a593Smuzhiyun 
3496*4882a593Smuzhiyun 		if (fatal_signal_pending(current)) {
3497*4882a593Smuzhiyun 			status = -EINTR;
3498*4882a593Smuzhiyun 			break;
3499*4882a593Smuzhiyun 		}
3500*4882a593Smuzhiyun 
3501*4882a593Smuzhiyun 		status = a_ops->write_begin(file, mapping, pos, bytes, flags,
3502*4882a593Smuzhiyun 						&page, &fsdata);
3503*4882a593Smuzhiyun 		if (unlikely(status < 0))
3504*4882a593Smuzhiyun 			break;
3505*4882a593Smuzhiyun 
3506*4882a593Smuzhiyun 		if (mapping_writably_mapped(mapping))
3507*4882a593Smuzhiyun 			flush_dcache_page(page);
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun 		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
3510*4882a593Smuzhiyun 		flush_dcache_page(page);
3511*4882a593Smuzhiyun 
3512*4882a593Smuzhiyun 		status = a_ops->write_end(file, mapping, pos, bytes, copied,
3513*4882a593Smuzhiyun 						page, fsdata);
3514*4882a593Smuzhiyun 		if (unlikely(status < 0))
3515*4882a593Smuzhiyun 			break;
3516*4882a593Smuzhiyun 		copied = status;
3517*4882a593Smuzhiyun 
3518*4882a593Smuzhiyun 		cond_resched();
3519*4882a593Smuzhiyun 
3520*4882a593Smuzhiyun 		iov_iter_advance(i, copied);
3521*4882a593Smuzhiyun 		if (unlikely(copied == 0)) {
3522*4882a593Smuzhiyun 			/*
3523*4882a593Smuzhiyun 			 * If we were unable to copy any data at all, we must
3524*4882a593Smuzhiyun 			 * fall back to a single segment length write.
3525*4882a593Smuzhiyun 			 *
3526*4882a593Smuzhiyun 			 * If we didn't fall back here, we could livelock
3527*4882a593Smuzhiyun 			 * because not all segments in the iov can be copied at
3528*4882a593Smuzhiyun 			 * once without a pagefault.
3529*4882a593Smuzhiyun 			 */
3530*4882a593Smuzhiyun 			bytes = min_t(unsigned long, PAGE_SIZE - offset,
3531*4882a593Smuzhiyun 						iov_iter_single_seg_count(i));
3532*4882a593Smuzhiyun 			goto again;
3533*4882a593Smuzhiyun 		}
3534*4882a593Smuzhiyun 		pos += copied;
3535*4882a593Smuzhiyun 		written += copied;
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 		balance_dirty_pages_ratelimited(mapping);
3538*4882a593Smuzhiyun 	} while (iov_iter_count(i));
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun 	return written ? written : status;
3541*4882a593Smuzhiyun }
3542*4882a593Smuzhiyun EXPORT_SYMBOL(generic_perform_write);
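
/*
 * Illustrative sketch (editorial): a filesystem supporting only buffered
 * writes could drive generic_perform_write() directly, under the locking
 * rules documented for __generic_file_write_iter() below.  The name
 * example_buffered_write_iter() is hypothetical; SUID stripping,
 * timestamp updates and O_SYNC handling are omitted for brevity.
 */
static ssize_t __maybe_unused
example_buffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		ret = generic_perform_write(file, from, iocb->ki_pos);
		if (ret > 0)
			iocb->ki_pos += ret;
	}
	inode_unlock(inode);
	return ret;
}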
3543*4882a593Smuzhiyun 
3544*4882a593Smuzhiyun /**
3545*4882a593Smuzhiyun  * __generic_file_write_iter - write data to a file
3546*4882a593Smuzhiyun  * @iocb:	IO state structure (file, offset, etc.)
3547*4882a593Smuzhiyun  * @from:	iov_iter with data to write
3548*4882a593Smuzhiyun  *
3549*4882a593Smuzhiyun  * This function does all the work needed for actually writing data to a
3550*4882a593Smuzhiyun  * file. It does all basic checks, removes SUID from the file, updates
3551*4882a593Smuzhiyun  * modification times and calls proper subroutines depending on whether we
3552*4882a593Smuzhiyun  * do direct IO or a standard buffered write.
3553*4882a593Smuzhiyun  *
3554*4882a593Smuzhiyun  * It expects i_mutex to be held unless we are working on a block device or
3555*4882a593Smuzhiyun  * similar object that does not need locking at all.
3556*4882a593Smuzhiyun  *
3557*4882a593Smuzhiyun  * This function does *not* take care of syncing data in case of O_SYNC write.
3558*4882a593Smuzhiyun  * A caller has to handle it. This is mainly due to the fact that we want to
3559*4882a593Smuzhiyun  * avoid syncing under i_mutex.
3560*4882a593Smuzhiyun  *
3561*4882a593Smuzhiyun  * Return:
3562*4882a593Smuzhiyun  * * number of bytes written, even for truncated writes
3563*4882a593Smuzhiyun  * * negative error code if no data has been written at all
3564*4882a593Smuzhiyun  */
3565*4882a593Smuzhiyun ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3566*4882a593Smuzhiyun {
3567*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
3568*4882a593Smuzhiyun 	struct address_space *mapping = file->f_mapping;
3569*4882a593Smuzhiyun 	struct inode	*inode = mapping->host;
3570*4882a593Smuzhiyun 	ssize_t		written = 0;
3571*4882a593Smuzhiyun 	ssize_t		err;
3572*4882a593Smuzhiyun 	ssize_t		status;
3573*4882a593Smuzhiyun 
3574*4882a593Smuzhiyun 	/* We can write back this queue in page reclaim */
3575*4882a593Smuzhiyun 	current->backing_dev_info = inode_to_bdi(inode);
3576*4882a593Smuzhiyun 	err = file_remove_privs(file);
3577*4882a593Smuzhiyun 	if (err)
3578*4882a593Smuzhiyun 		goto out;
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun 	err = file_update_time(file);
3581*4882a593Smuzhiyun 	if (err)
3582*4882a593Smuzhiyun 		goto out;
3583*4882a593Smuzhiyun 
3584*4882a593Smuzhiyun 	if (iocb->ki_flags & IOCB_DIRECT) {
3585*4882a593Smuzhiyun 		loff_t pos, endbyte;
3586*4882a593Smuzhiyun 
3587*4882a593Smuzhiyun 		written = generic_file_direct_write(iocb, from);
3588*4882a593Smuzhiyun 		/*
3589*4882a593Smuzhiyun 		 * If the write stopped short of completing, fall back to
3590*4882a593Smuzhiyun 		 * buffered writes.  Some filesystems do this for writes to
3591*4882a593Smuzhiyun 		 * holes, for example.  For DAX files, a buffered write will
3592*4882a593Smuzhiyun 		 * not succeed (even if it did, DAX does not handle dirty
3593*4882a593Smuzhiyun 		 * page-cache pages correctly).
3594*4882a593Smuzhiyun 		 */
3595*4882a593Smuzhiyun 		if (written < 0 || !iov_iter_count(from) || IS_DAX(inode))
3596*4882a593Smuzhiyun 			goto out;
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun 		status = generic_perform_write(file, from, pos = iocb->ki_pos);
3599*4882a593Smuzhiyun 		/*
3600*4882a593Smuzhiyun 		 * If generic_perform_write() returned a synchronous error
3601*4882a593Smuzhiyun 		 * then we want to return the number of bytes which were
3602*4882a593Smuzhiyun 		 * direct-written, or the error code if that was zero.  Note
3603*4882a593Smuzhiyun 		 * that this differs from normal direct-io semantics, which
3604*4882a593Smuzhiyun 		 * will return -EFOO even if some bytes were written.
3605*4882a593Smuzhiyun 		 */
3606*4882a593Smuzhiyun 		if (unlikely(status < 0)) {
3607*4882a593Smuzhiyun 			err = status;
3608*4882a593Smuzhiyun 			goto out;
3609*4882a593Smuzhiyun 		}
3610*4882a593Smuzhiyun 		/*
3611*4882a593Smuzhiyun 		 * We need to ensure that the page cache pages are written to
3612*4882a593Smuzhiyun 		 * disk and invalidated to preserve the expected O_DIRECT
3613*4882a593Smuzhiyun 		 * semantics.
3614*4882a593Smuzhiyun 		 */
3615*4882a593Smuzhiyun 		endbyte = pos + status - 1;
3616*4882a593Smuzhiyun 		err = filemap_write_and_wait_range(mapping, pos, endbyte);
3617*4882a593Smuzhiyun 		if (err == 0) {
3618*4882a593Smuzhiyun 			iocb->ki_pos = endbyte + 1;
3619*4882a593Smuzhiyun 			written += status;
3620*4882a593Smuzhiyun 			invalidate_mapping_pages(mapping,
3621*4882a593Smuzhiyun 						 pos >> PAGE_SHIFT,
3622*4882a593Smuzhiyun 						 endbyte >> PAGE_SHIFT);
3623*4882a593Smuzhiyun 		} else {
3624*4882a593Smuzhiyun 			/*
3625*4882a593Smuzhiyun 			 * We don't know how much we wrote, so just return
3626*4882a593Smuzhiyun 			 * the number of bytes which were direct-written
3627*4882a593Smuzhiyun 			 */
3628*4882a593Smuzhiyun 		}
3629*4882a593Smuzhiyun 	} else {
3630*4882a593Smuzhiyun 		written = generic_perform_write(file, from, iocb->ki_pos);
3631*4882a593Smuzhiyun 		if (likely(written > 0))
3632*4882a593Smuzhiyun 			iocb->ki_pos += written;
3633*4882a593Smuzhiyun 	}
3634*4882a593Smuzhiyun out:
3635*4882a593Smuzhiyun 	current->backing_dev_info = NULL;
3636*4882a593Smuzhiyun 	return written ? written : err;
3637*4882a593Smuzhiyun }
3638*4882a593Smuzhiyun EXPORT_SYMBOL(__generic_file_write_iter);
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun /**
3641*4882a593Smuzhiyun  * generic_file_write_iter - write data to a file
3642*4882a593Smuzhiyun  * @iocb:	IO state structure
3643*4882a593Smuzhiyun  * @from:	iov_iter with data to write
3644*4882a593Smuzhiyun  *
3645*4882a593Smuzhiyun  * This is a wrapper around __generic_file_write_iter() to be used by most
3646*4882a593Smuzhiyun  * filesystems. It takes care of syncing the file in case of O_SYNC file
3647*4882a593Smuzhiyun  * filesystems. It takes care of syncing the file in case of an O_SYNC write
3648*4882a593Smuzhiyun  * Return:
3649*4882a593Smuzhiyun  * * negative error code if no data has been written at all or
3650*4882a593Smuzhiyun  *   vfs_fsync_range() failed for a synchronous write
3651*4882a593Smuzhiyun  * * number of bytes written, even for truncated writes
3652*4882a593Smuzhiyun  */
3653*4882a593Smuzhiyun ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
3654*4882a593Smuzhiyun {
3655*4882a593Smuzhiyun 	struct file *file = iocb->ki_filp;
3656*4882a593Smuzhiyun 	struct inode *inode = file->f_mapping->host;
3657*4882a593Smuzhiyun 	ssize_t ret;
3658*4882a593Smuzhiyun 
3659*4882a593Smuzhiyun 	inode_lock(inode);
3660*4882a593Smuzhiyun 	ret = generic_write_checks(iocb, from);
3661*4882a593Smuzhiyun 	if (ret > 0)
3662*4882a593Smuzhiyun 		ret = __generic_file_write_iter(iocb, from);
3663*4882a593Smuzhiyun 	inode_unlock(inode);
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	if (ret > 0)
3666*4882a593Smuzhiyun 		ret = generic_write_sync(iocb, ret);
3667*4882a593Smuzhiyun 	return ret;
3668*4882a593Smuzhiyun }
3669*4882a593Smuzhiyun EXPORT_SYMBOL(generic_file_write_iter);
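
/*
 * Illustrative sketch (editorial): filesystems that rely on the generic
 * page-cache paths typically just wire these helpers into their
 * file_operations.  example_file_operations is hypothetical.
 */
static const struct file_operations example_file_operations __maybe_unused = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,
};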
3670*4882a593Smuzhiyun 
3671*4882a593Smuzhiyun /**
3672*4882a593Smuzhiyun  * try_to_release_page() - release old fs-specific metadata on a page
3673*4882a593Smuzhiyun  *
3674*4882a593Smuzhiyun  * @page: the page which the kernel is trying to free
3675*4882a593Smuzhiyun  * @gfp_mask: memory allocation flags (and I/O mode)
3676*4882a593Smuzhiyun  *
3677*4882a593Smuzhiyun  * The address_space is asked to release any data held against the page
3678*4882a593Smuzhiyun  * (presumably at page->private).
3679*4882a593Smuzhiyun  *
3680*4882a593Smuzhiyun  * This may also be called if PG_fscache is set on a page, indicating that the
3681*4882a593Smuzhiyun  * page is known to the local caching routines.
3682*4882a593Smuzhiyun  *
3683*4882a593Smuzhiyun  * The @gfp_mask argument specifies whether I/O may be performed to release
3684*4882a593Smuzhiyun  * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3685*4882a593Smuzhiyun  *
3686*4882a593Smuzhiyun  * Return: %1 if the release was successful, otherwise return zero.
3687*4882a593Smuzhiyun  */
3688*4882a593Smuzhiyun int try_to_release_page(struct page *page, gfp_t gfp_mask)
3689*4882a593Smuzhiyun {
3690*4882a593Smuzhiyun 	struct address_space * const mapping = page->mapping;
3691*4882a593Smuzhiyun 
3692*4882a593Smuzhiyun 	BUG_ON(!PageLocked(page));
3693*4882a593Smuzhiyun 	if (PageWriteback(page))
3694*4882a593Smuzhiyun 		return 0;
3695*4882a593Smuzhiyun 
3696*4882a593Smuzhiyun 	if (mapping && mapping->a_ops->releasepage)
3697*4882a593Smuzhiyun 		return mapping->a_ops->releasepage(page, gfp_mask);
3698*4882a593Smuzhiyun 	return try_to_free_buffers(page);
3699*4882a593Smuzhiyun }
3700*4882a593Smuzhiyun 
3701*4882a593Smuzhiyun EXPORT_SYMBOL(try_to_release_page);
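
/*
 * Illustrative sketch (editorial): reclaim-style callers check for
 * private data first and treat a zero return as "the filesystem still
 * pins this page", roughly:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		... cannot free this page yet, keep it ...
 */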