xref: /OK3568_Linux_fs/kernel/mm/memory.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  *  linux/mm/memory.c
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun /*
9*4882a593Smuzhiyun  * demand-loading started 01.12.91 - seems it is high on the list of
10*4882a593Smuzhiyun  * things wanted, and it should be easy to implement. - Linus
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun /*
14*4882a593Smuzhiyun  * Ok, demand-loading was easy, shared pages a little bit tricker. Shared
15*4882a593Smuzhiyun  * pages started 02.12.91, seems to work. - Linus.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
18*4882a593Smuzhiyun  * would have taken more than the 6M I have free, but it worked well as
19*4882a593Smuzhiyun  * far as I could see.
20*4882a593Smuzhiyun  *
21*4882a593Smuzhiyun  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
22*4882a593Smuzhiyun  */
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /*
25*4882a593Smuzhiyun  * Real VM (paging to/from disk) started 18.12.91. Much more work and
26*4882a593Smuzhiyun  * thought has to go into this. Oh, well..
27*4882a593Smuzhiyun  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
28*4882a593Smuzhiyun  *		Found it. Everything seems to work now.
29*4882a593Smuzhiyun  * 20.12.91  -  Ok, making the swap-device changeable like the root.
30*4882a593Smuzhiyun  */
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun /*
33*4882a593Smuzhiyun  * 05.04.94  -  Multi-page memory management added for v1.1.
34*4882a593Smuzhiyun  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
35*4882a593Smuzhiyun  *
36*4882a593Smuzhiyun  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
37*4882a593Smuzhiyun  *		(Gerhard.Wichert@pdb.siemens.de)
38*4882a593Smuzhiyun  *
39*4882a593Smuzhiyun  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
40*4882a593Smuzhiyun  */
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include <linux/kernel_stat.h>
43*4882a593Smuzhiyun #include <linux/mm.h>
44*4882a593Smuzhiyun #include <linux/sched/mm.h>
45*4882a593Smuzhiyun #include <linux/sched/coredump.h>
46*4882a593Smuzhiyun #include <linux/sched/numa_balancing.h>
47*4882a593Smuzhiyun #include <linux/sched/task.h>
48*4882a593Smuzhiyun #include <linux/hugetlb.h>
49*4882a593Smuzhiyun #include <linux/mman.h>
50*4882a593Smuzhiyun #include <linux/swap.h>
51*4882a593Smuzhiyun #include <linux/highmem.h>
52*4882a593Smuzhiyun #include <linux/pagemap.h>
53*4882a593Smuzhiyun #include <linux/memremap.h>
54*4882a593Smuzhiyun #include <linux/ksm.h>
55*4882a593Smuzhiyun #include <linux/rmap.h>
56*4882a593Smuzhiyun #include <linux/export.h>
57*4882a593Smuzhiyun #include <linux/delayacct.h>
58*4882a593Smuzhiyun #include <linux/init.h>
59*4882a593Smuzhiyun #include <linux/pfn_t.h>
60*4882a593Smuzhiyun #include <linux/writeback.h>
61*4882a593Smuzhiyun #include <linux/memcontrol.h>
62*4882a593Smuzhiyun #include <linux/mmu_notifier.h>
63*4882a593Smuzhiyun #include <linux/swapops.h>
64*4882a593Smuzhiyun #include <linux/elf.h>
65*4882a593Smuzhiyun #include <linux/gfp.h>
66*4882a593Smuzhiyun #include <linux/migrate.h>
67*4882a593Smuzhiyun #include <linux/string.h>
68*4882a593Smuzhiyun #include <linux/debugfs.h>
69*4882a593Smuzhiyun #include <linux/userfaultfd_k.h>
70*4882a593Smuzhiyun #include <linux/dax.h>
71*4882a593Smuzhiyun #include <linux/oom.h>
72*4882a593Smuzhiyun #include <linux/numa.h>
73*4882a593Smuzhiyun #include <linux/perf_event.h>
74*4882a593Smuzhiyun #include <linux/ptrace.h>
75*4882a593Smuzhiyun #include <linux/vmalloc.h>
76*4882a593Smuzhiyun #include <trace/hooks/mm.h>
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun #include <trace/events/kmem.h>
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun #include <asm/io.h>
81*4882a593Smuzhiyun #include <asm/mmu_context.h>
82*4882a593Smuzhiyun #include <asm/pgalloc.h>
83*4882a593Smuzhiyun #include <linux/uaccess.h>
84*4882a593Smuzhiyun #include <asm/tlb.h>
85*4882a593Smuzhiyun #include <asm/tlbflush.h>
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun #include "pgalloc-track.h"
88*4882a593Smuzhiyun #include "internal.h"
89*4882a593Smuzhiyun #include <trace/hooks/mm.h>
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun #define CREATE_TRACE_POINTS
92*4882a593Smuzhiyun #include <trace/events/pagefault.h>
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
95*4882a593Smuzhiyun #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid.
96*4882a593Smuzhiyun #endif
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun #ifndef CONFIG_NEED_MULTIPLE_NODES
99*4882a593Smuzhiyun /* use the per-pgdat data instead for discontigmem - mbligh */
100*4882a593Smuzhiyun unsigned long max_mapnr;
101*4882a593Smuzhiyun EXPORT_SYMBOL(max_mapnr);
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun struct page *mem_map;
104*4882a593Smuzhiyun EXPORT_SYMBOL(mem_map);
105*4882a593Smuzhiyun #endif
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /*
108*4882a593Smuzhiyun  * A number of key systems in x86 including ioremap() rely on the assumption
109*4882a593Smuzhiyun  * that high_memory defines the upper bound on direct map memory, then end
110*4882a593Smuzhiyun  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
111*4882a593Smuzhiyun  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
112*4882a593Smuzhiyun  * and ZONE_HIGHMEM.
113*4882a593Smuzhiyun  */
114*4882a593Smuzhiyun void *high_memory;
115*4882a593Smuzhiyun EXPORT_SYMBOL(high_memory);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /*
118*4882a593Smuzhiyun  * Randomize the address space (stacks, mmaps, brk, etc.).
119*4882a593Smuzhiyun  *
120*4882a593Smuzhiyun  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
121*4882a593Smuzhiyun  *   as ancient (libc5 based) binaries can segfault. )
122*4882a593Smuzhiyun  */
123*4882a593Smuzhiyun int randomize_va_space __read_mostly =
124*4882a593Smuzhiyun #ifdef CONFIG_COMPAT_BRK
125*4882a593Smuzhiyun 					1;
126*4882a593Smuzhiyun #else
127*4882a593Smuzhiyun 					2;
128*4882a593Smuzhiyun #endif
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun #ifndef arch_faults_on_old_pte
arch_faults_on_old_pte(void)131*4882a593Smuzhiyun static inline bool arch_faults_on_old_pte(void)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun 	/*
134*4882a593Smuzhiyun 	 * Those arches which don't have hw access flag feature need to
135*4882a593Smuzhiyun 	 * implement their own helper. By default, "true" means pagefault
136*4882a593Smuzhiyun 	 * will be hit on old pte.
137*4882a593Smuzhiyun 	 */
138*4882a593Smuzhiyun 	return true;
139*4882a593Smuzhiyun }
140*4882a593Smuzhiyun #endif
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun #ifndef arch_wants_old_prefaulted_pte
arch_wants_old_prefaulted_pte(void)143*4882a593Smuzhiyun static inline bool arch_wants_old_prefaulted_pte(void)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun 	/*
146*4882a593Smuzhiyun 	 * Transitioning a PTE from 'old' to 'young' can be expensive on
147*4882a593Smuzhiyun 	 * some architectures, even if it's performed in hardware. By
148*4882a593Smuzhiyun 	 * default, "false" means prefaulted entries will be 'young'.
149*4882a593Smuzhiyun 	 */
150*4882a593Smuzhiyun 	return false;
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun #endif
153*4882a593Smuzhiyun 
disable_randmaps(char * s)154*4882a593Smuzhiyun static int __init disable_randmaps(char *s)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun 	randomize_va_space = 0;
157*4882a593Smuzhiyun 	return 1;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun __setup("norandmaps", disable_randmaps);
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun unsigned long zero_pfn __read_mostly;
162*4882a593Smuzhiyun EXPORT_SYMBOL(zero_pfn);
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun unsigned long highest_memmap_pfn __read_mostly;
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun /*
167*4882a593Smuzhiyun  * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
168*4882a593Smuzhiyun  */
init_zero_pfn(void)169*4882a593Smuzhiyun static int __init init_zero_pfn(void)
170*4882a593Smuzhiyun {
171*4882a593Smuzhiyun 	zero_pfn = page_to_pfn(ZERO_PAGE(0));
172*4882a593Smuzhiyun 	return 0;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun early_initcall(init_zero_pfn);
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun /*
177*4882a593Smuzhiyun  * Only trace rss_stat when there is a 512kb cross over.
178*4882a593Smuzhiyun  * Smaller changes may be lost unless every small change is
179*4882a593Smuzhiyun  * crossing into or returning to a 512kb boundary.
180*4882a593Smuzhiyun  */
181*4882a593Smuzhiyun #define TRACE_MM_COUNTER_THRESHOLD 128
182*4882a593Smuzhiyun 
mm_trace_rss_stat(struct mm_struct * mm,int member,long count,long value)183*4882a593Smuzhiyun void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
184*4882a593Smuzhiyun 		       long value)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun 	long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	/* Threshold roll-over, trace it */
189*4882a593Smuzhiyun 	if ((count & thresh_mask) != ((count - value) & thresh_mask))
190*4882a593Smuzhiyun 		trace_rss_stat(mm, member, count);
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
193*4882a593Smuzhiyun 
194*4882a593Smuzhiyun #if defined(SPLIT_RSS_COUNTING)
195*4882a593Smuzhiyun 
sync_mm_rss(struct mm_struct * mm)196*4882a593Smuzhiyun void sync_mm_rss(struct mm_struct *mm)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun 	int i;
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	for (i = 0; i < NR_MM_COUNTERS; i++) {
201*4882a593Smuzhiyun 		if (current->rss_stat.count[i]) {
202*4882a593Smuzhiyun 			add_mm_counter(mm, i, current->rss_stat.count[i]);
203*4882a593Smuzhiyun 			current->rss_stat.count[i] = 0;
204*4882a593Smuzhiyun 		}
205*4882a593Smuzhiyun 	}
206*4882a593Smuzhiyun 	current->rss_stat.events = 0;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
add_mm_counter_fast(struct mm_struct * mm,int member,int val)209*4882a593Smuzhiyun static void add_mm_counter_fast(struct mm_struct *mm, int member, int val)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun 	struct task_struct *task = current;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	if (likely(task->mm == mm))
214*4882a593Smuzhiyun 		task->rss_stat.count[member] += val;
215*4882a593Smuzhiyun 	else
216*4882a593Smuzhiyun 		add_mm_counter(mm, member, val);
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun #define inc_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, 1)
219*4882a593Smuzhiyun #define dec_mm_counter_fast(mm, member) add_mm_counter_fast(mm, member, -1)
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun /* sync counter once per 64 page faults */
222*4882a593Smuzhiyun #define TASK_RSS_EVENTS_THRESH	(64)
check_sync_rss_stat(struct task_struct * task)223*4882a593Smuzhiyun static void check_sync_rss_stat(struct task_struct *task)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun 	if (unlikely(task != current))
226*4882a593Smuzhiyun 		return;
227*4882a593Smuzhiyun 	if (unlikely(task->rss_stat.events++ > TASK_RSS_EVENTS_THRESH))
228*4882a593Smuzhiyun 		sync_mm_rss(task->mm);
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun #else /* SPLIT_RSS_COUNTING */
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun #define inc_mm_counter_fast(mm, member) inc_mm_counter(mm, member)
233*4882a593Smuzhiyun #define dec_mm_counter_fast(mm, member) dec_mm_counter(mm, member)
234*4882a593Smuzhiyun 
check_sync_rss_stat(struct task_struct * task)235*4882a593Smuzhiyun static void check_sync_rss_stat(struct task_struct *task)
236*4882a593Smuzhiyun {
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun #endif /* SPLIT_RSS_COUNTING */
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun /*
242*4882a593Smuzhiyun  * Note: this doesn't free the actual pages themselves. That
243*4882a593Smuzhiyun  * has been handled earlier when unmapping all the memory regions.
244*4882a593Smuzhiyun  */
free_pte_range(struct mmu_gather * tlb,pmd_t * pmd,unsigned long addr)245*4882a593Smuzhiyun static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
246*4882a593Smuzhiyun 			   unsigned long addr)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun 	pgtable_t token = pmd_pgtable(*pmd);
249*4882a593Smuzhiyun 	pmd_clear(pmd);
250*4882a593Smuzhiyun 	pte_free_tlb(tlb, token, addr);
251*4882a593Smuzhiyun 	mm_dec_nr_ptes(tlb->mm);
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun 
free_pmd_range(struct mmu_gather * tlb,pud_t * pud,unsigned long addr,unsigned long end,unsigned long floor,unsigned long ceiling)254*4882a593Smuzhiyun static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
255*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
256*4882a593Smuzhiyun 				unsigned long floor, unsigned long ceiling)
257*4882a593Smuzhiyun {
258*4882a593Smuzhiyun 	pmd_t *pmd;
259*4882a593Smuzhiyun 	unsigned long next;
260*4882a593Smuzhiyun 	unsigned long start;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	start = addr;
263*4882a593Smuzhiyun 	pmd = pmd_offset(pud, addr);
264*4882a593Smuzhiyun 	do {
265*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
266*4882a593Smuzhiyun 		if (pmd_none_or_clear_bad(pmd))
267*4882a593Smuzhiyun 			continue;
268*4882a593Smuzhiyun 		free_pte_range(tlb, pmd, addr);
269*4882a593Smuzhiyun 	} while (pmd++, addr = next, addr != end);
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	start &= PUD_MASK;
272*4882a593Smuzhiyun 	if (start < floor)
273*4882a593Smuzhiyun 		return;
274*4882a593Smuzhiyun 	if (ceiling) {
275*4882a593Smuzhiyun 		ceiling &= PUD_MASK;
276*4882a593Smuzhiyun 		if (!ceiling)
277*4882a593Smuzhiyun 			return;
278*4882a593Smuzhiyun 	}
279*4882a593Smuzhiyun 	if (end - 1 > ceiling - 1)
280*4882a593Smuzhiyun 		return;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	pmd = pmd_offset(pud, start);
283*4882a593Smuzhiyun 	pud_clear(pud);
284*4882a593Smuzhiyun 	pmd_free_tlb(tlb, pmd, start);
285*4882a593Smuzhiyun 	mm_dec_nr_pmds(tlb->mm);
286*4882a593Smuzhiyun }
287*4882a593Smuzhiyun 
free_pud_range(struct mmu_gather * tlb,p4d_t * p4d,unsigned long addr,unsigned long end,unsigned long floor,unsigned long ceiling)288*4882a593Smuzhiyun static inline void free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
289*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
290*4882a593Smuzhiyun 				unsigned long floor, unsigned long ceiling)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun 	pud_t *pud;
293*4882a593Smuzhiyun 	unsigned long next;
294*4882a593Smuzhiyun 	unsigned long start;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	start = addr;
297*4882a593Smuzhiyun 	pud = pud_offset(p4d, addr);
298*4882a593Smuzhiyun 	do {
299*4882a593Smuzhiyun 		next = pud_addr_end(addr, end);
300*4882a593Smuzhiyun 		if (pud_none_or_clear_bad(pud))
301*4882a593Smuzhiyun 			continue;
302*4882a593Smuzhiyun 		free_pmd_range(tlb, pud, addr, next, floor, ceiling);
303*4882a593Smuzhiyun 	} while (pud++, addr = next, addr != end);
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	start &= P4D_MASK;
306*4882a593Smuzhiyun 	if (start < floor)
307*4882a593Smuzhiyun 		return;
308*4882a593Smuzhiyun 	if (ceiling) {
309*4882a593Smuzhiyun 		ceiling &= P4D_MASK;
310*4882a593Smuzhiyun 		if (!ceiling)
311*4882a593Smuzhiyun 			return;
312*4882a593Smuzhiyun 	}
313*4882a593Smuzhiyun 	if (end - 1 > ceiling - 1)
314*4882a593Smuzhiyun 		return;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	pud = pud_offset(p4d, start);
317*4882a593Smuzhiyun 	p4d_clear(p4d);
318*4882a593Smuzhiyun 	pud_free_tlb(tlb, pud, start);
319*4882a593Smuzhiyun 	mm_dec_nr_puds(tlb->mm);
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun 
free_p4d_range(struct mmu_gather * tlb,pgd_t * pgd,unsigned long addr,unsigned long end,unsigned long floor,unsigned long ceiling)322*4882a593Smuzhiyun static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,
323*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
324*4882a593Smuzhiyun 				unsigned long floor, unsigned long ceiling)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun 	p4d_t *p4d;
327*4882a593Smuzhiyun 	unsigned long next;
328*4882a593Smuzhiyun 	unsigned long start;
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	start = addr;
331*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, addr);
332*4882a593Smuzhiyun 	do {
333*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
334*4882a593Smuzhiyun 		if (p4d_none_or_clear_bad(p4d))
335*4882a593Smuzhiyun 			continue;
336*4882a593Smuzhiyun 		free_pud_range(tlb, p4d, addr, next, floor, ceiling);
337*4882a593Smuzhiyun 	} while (p4d++, addr = next, addr != end);
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	start &= PGDIR_MASK;
340*4882a593Smuzhiyun 	if (start < floor)
341*4882a593Smuzhiyun 		return;
342*4882a593Smuzhiyun 	if (ceiling) {
343*4882a593Smuzhiyun 		ceiling &= PGDIR_MASK;
344*4882a593Smuzhiyun 		if (!ceiling)
345*4882a593Smuzhiyun 			return;
346*4882a593Smuzhiyun 	}
347*4882a593Smuzhiyun 	if (end - 1 > ceiling - 1)
348*4882a593Smuzhiyun 		return;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, start);
351*4882a593Smuzhiyun 	pgd_clear(pgd);
352*4882a593Smuzhiyun 	p4d_free_tlb(tlb, p4d, start);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun /*
356*4882a593Smuzhiyun  * This function frees user-level page tables of a process.
357*4882a593Smuzhiyun  */
free_pgd_range(struct mmu_gather * tlb,unsigned long addr,unsigned long end,unsigned long floor,unsigned long ceiling)358*4882a593Smuzhiyun void free_pgd_range(struct mmu_gather *tlb,
359*4882a593Smuzhiyun 			unsigned long addr, unsigned long end,
360*4882a593Smuzhiyun 			unsigned long floor, unsigned long ceiling)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun 	pgd_t *pgd;
363*4882a593Smuzhiyun 	unsigned long next;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	/*
366*4882a593Smuzhiyun 	 * The next few lines have given us lots of grief...
367*4882a593Smuzhiyun 	 *
368*4882a593Smuzhiyun 	 * Why are we testing PMD* at this top level?  Because often
369*4882a593Smuzhiyun 	 * there will be no work to do at all, and we'd prefer not to
370*4882a593Smuzhiyun 	 * go all the way down to the bottom just to discover that.
371*4882a593Smuzhiyun 	 *
372*4882a593Smuzhiyun 	 * Why all these "- 1"s?  Because 0 represents both the bottom
373*4882a593Smuzhiyun 	 * of the address space and the top of it (using -1 for the
374*4882a593Smuzhiyun 	 * top wouldn't help much: the masks would do the wrong thing).
375*4882a593Smuzhiyun 	 * The rule is that addr 0 and floor 0 refer to the bottom of
376*4882a593Smuzhiyun 	 * the address space, but end 0 and ceiling 0 refer to the top
377*4882a593Smuzhiyun 	 * Comparisons need to use "end - 1" and "ceiling - 1" (though
378*4882a593Smuzhiyun 	 * that end 0 case should be mythical).
379*4882a593Smuzhiyun 	 *
380*4882a593Smuzhiyun 	 * Wherever addr is brought up or ceiling brought down, we must
381*4882a593Smuzhiyun 	 * be careful to reject "the opposite 0" before it confuses the
382*4882a593Smuzhiyun 	 * subsequent tests.  But what about where end is brought down
383*4882a593Smuzhiyun 	 * by PMD_SIZE below? no, end can't go down to 0 there.
384*4882a593Smuzhiyun 	 *
385*4882a593Smuzhiyun 	 * Whereas we round start (addr) and ceiling down, by different
386*4882a593Smuzhiyun 	 * masks at different levels, in order to test whether a table
387*4882a593Smuzhiyun 	 * now has no other vmas using it, so can be freed, we don't
388*4882a593Smuzhiyun 	 * bother to round floor or end up - the tests don't need that.
389*4882a593Smuzhiyun 	 */
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	addr &= PMD_MASK;
392*4882a593Smuzhiyun 	if (addr < floor) {
393*4882a593Smuzhiyun 		addr += PMD_SIZE;
394*4882a593Smuzhiyun 		if (!addr)
395*4882a593Smuzhiyun 			return;
396*4882a593Smuzhiyun 	}
397*4882a593Smuzhiyun 	if (ceiling) {
398*4882a593Smuzhiyun 		ceiling &= PMD_MASK;
399*4882a593Smuzhiyun 		if (!ceiling)
400*4882a593Smuzhiyun 			return;
401*4882a593Smuzhiyun 	}
402*4882a593Smuzhiyun 	if (end - 1 > ceiling - 1)
403*4882a593Smuzhiyun 		end -= PMD_SIZE;
404*4882a593Smuzhiyun 	if (addr > end - 1)
405*4882a593Smuzhiyun 		return;
406*4882a593Smuzhiyun 	/*
407*4882a593Smuzhiyun 	 * We add page table cache pages with PAGE_SIZE,
408*4882a593Smuzhiyun 	 * (see pte_free_tlb()), flush the tlb if we need
409*4882a593Smuzhiyun 	 */
410*4882a593Smuzhiyun 	tlb_change_page_size(tlb, PAGE_SIZE);
411*4882a593Smuzhiyun 	pgd = pgd_offset(tlb->mm, addr);
412*4882a593Smuzhiyun 	do {
413*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
414*4882a593Smuzhiyun 		if (pgd_none_or_clear_bad(pgd))
415*4882a593Smuzhiyun 			continue;
416*4882a593Smuzhiyun 		free_p4d_range(tlb, pgd, addr, next, floor, ceiling);
417*4882a593Smuzhiyun 	} while (pgd++, addr = next, addr != end);
418*4882a593Smuzhiyun }
419*4882a593Smuzhiyun 
free_pgtables(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long floor,unsigned long ceiling)420*4882a593Smuzhiyun void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
421*4882a593Smuzhiyun 		unsigned long floor, unsigned long ceiling)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	while (vma) {
424*4882a593Smuzhiyun 		struct vm_area_struct *next = vma->vm_next;
425*4882a593Smuzhiyun 		unsigned long addr = vma->vm_start;
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun 		/*
428*4882a593Smuzhiyun 		 * Hide vma from rmap and truncate_pagecache before freeing
429*4882a593Smuzhiyun 		 * pgtables
430*4882a593Smuzhiyun 		 */
431*4882a593Smuzhiyun 		vm_write_begin(vma);
432*4882a593Smuzhiyun 		unlink_anon_vmas(vma);
433*4882a593Smuzhiyun 		vm_write_end(vma);
434*4882a593Smuzhiyun 		unlink_file_vma(vma);
435*4882a593Smuzhiyun 
436*4882a593Smuzhiyun 		if (is_vm_hugetlb_page(vma)) {
437*4882a593Smuzhiyun 			hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
438*4882a593Smuzhiyun 				floor, next ? next->vm_start : ceiling);
439*4882a593Smuzhiyun 		} else {
440*4882a593Smuzhiyun 			/*
441*4882a593Smuzhiyun 			 * Optimization: gather nearby vmas into one call down
442*4882a593Smuzhiyun 			 */
443*4882a593Smuzhiyun 			while (next && next->vm_start <= vma->vm_end + PMD_SIZE
444*4882a593Smuzhiyun 			       && !is_vm_hugetlb_page(next)) {
445*4882a593Smuzhiyun 				vma = next;
446*4882a593Smuzhiyun 				next = vma->vm_next;
447*4882a593Smuzhiyun 				vm_write_begin(vma);
448*4882a593Smuzhiyun 				unlink_anon_vmas(vma);
449*4882a593Smuzhiyun 				vm_write_end(vma);
450*4882a593Smuzhiyun 				unlink_file_vma(vma);
451*4882a593Smuzhiyun 			}
452*4882a593Smuzhiyun 			free_pgd_range(tlb, addr, vma->vm_end,
453*4882a593Smuzhiyun 				floor, next ? next->vm_start : ceiling);
454*4882a593Smuzhiyun 		}
455*4882a593Smuzhiyun 		vma = next;
456*4882a593Smuzhiyun 	}
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
__pte_alloc(struct mm_struct * mm,pmd_t * pmd)459*4882a593Smuzhiyun int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	spinlock_t *ptl;
462*4882a593Smuzhiyun 	pgtable_t new = pte_alloc_one(mm);
463*4882a593Smuzhiyun 	if (!new)
464*4882a593Smuzhiyun 		return -ENOMEM;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	/*
467*4882a593Smuzhiyun 	 * Ensure all pte setup (eg. pte page lock and page clearing) are
468*4882a593Smuzhiyun 	 * visible before the pte is made visible to other CPUs by being
469*4882a593Smuzhiyun 	 * put into page tables.
470*4882a593Smuzhiyun 	 *
471*4882a593Smuzhiyun 	 * The other side of the story is the pointer chasing in the page
472*4882a593Smuzhiyun 	 * table walking code (when walking the page table without locking;
473*4882a593Smuzhiyun 	 * ie. most of the time). Fortunately, these data accesses consist
474*4882a593Smuzhiyun 	 * of a chain of data-dependent loads, meaning most CPUs (alpha
475*4882a593Smuzhiyun 	 * being the notable exception) will already guarantee loads are
476*4882a593Smuzhiyun 	 * seen in-order. See the alpha page table accessors for the
477*4882a593Smuzhiyun 	 * smp_rmb() barriers in page table walking code.
478*4882a593Smuzhiyun 	 */
479*4882a593Smuzhiyun 	smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	ptl = pmd_lock(mm, pmd);
482*4882a593Smuzhiyun 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
483*4882a593Smuzhiyun 		mm_inc_nr_ptes(mm);
484*4882a593Smuzhiyun 		pmd_populate(mm, pmd, new);
485*4882a593Smuzhiyun 		new = NULL;
486*4882a593Smuzhiyun 	}
487*4882a593Smuzhiyun 	spin_unlock(ptl);
488*4882a593Smuzhiyun 	if (new)
489*4882a593Smuzhiyun 		pte_free(mm, new);
490*4882a593Smuzhiyun 	return 0;
491*4882a593Smuzhiyun }
492*4882a593Smuzhiyun 
__pte_alloc_kernel(pmd_t * pmd)493*4882a593Smuzhiyun int __pte_alloc_kernel(pmd_t *pmd)
494*4882a593Smuzhiyun {
495*4882a593Smuzhiyun 	pte_t *new = pte_alloc_one_kernel(&init_mm);
496*4882a593Smuzhiyun 	if (!new)
497*4882a593Smuzhiyun 		return -ENOMEM;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	smp_wmb(); /* See comment in __pte_alloc */
500*4882a593Smuzhiyun 
501*4882a593Smuzhiyun 	spin_lock(&init_mm.page_table_lock);
502*4882a593Smuzhiyun 	if (likely(pmd_none(*pmd))) {	/* Has another populated it ? */
503*4882a593Smuzhiyun 		pmd_populate_kernel(&init_mm, pmd, new);
504*4882a593Smuzhiyun 		new = NULL;
505*4882a593Smuzhiyun 	}
506*4882a593Smuzhiyun 	spin_unlock(&init_mm.page_table_lock);
507*4882a593Smuzhiyun 	if (new)
508*4882a593Smuzhiyun 		pte_free_kernel(&init_mm, new);
509*4882a593Smuzhiyun 	return 0;
510*4882a593Smuzhiyun }
511*4882a593Smuzhiyun 
init_rss_vec(int * rss)512*4882a593Smuzhiyun static inline void init_rss_vec(int *rss)
513*4882a593Smuzhiyun {
514*4882a593Smuzhiyun 	memset(rss, 0, sizeof(int) * NR_MM_COUNTERS);
515*4882a593Smuzhiyun }
516*4882a593Smuzhiyun 
add_mm_rss_vec(struct mm_struct * mm,int * rss)517*4882a593Smuzhiyun static inline void add_mm_rss_vec(struct mm_struct *mm, int *rss)
518*4882a593Smuzhiyun {
519*4882a593Smuzhiyun 	int i;
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	if (current->mm == mm)
522*4882a593Smuzhiyun 		sync_mm_rss(mm);
523*4882a593Smuzhiyun 	for (i = 0; i < NR_MM_COUNTERS; i++)
524*4882a593Smuzhiyun 		if (rss[i])
525*4882a593Smuzhiyun 			add_mm_counter(mm, i, rss[i]);
526*4882a593Smuzhiyun }
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun /*
529*4882a593Smuzhiyun  * This function is called to print an error when a bad pte
530*4882a593Smuzhiyun  * is found. For example, we might have a PFN-mapped pte in
531*4882a593Smuzhiyun  * a region that doesn't allow it.
532*4882a593Smuzhiyun  *
533*4882a593Smuzhiyun  * The calling function must still handle the error.
534*4882a593Smuzhiyun  */
print_bad_pte(struct vm_area_struct * vma,unsigned long addr,pte_t pte,struct page * page)535*4882a593Smuzhiyun static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
536*4882a593Smuzhiyun 			  pte_t pte, struct page *page)
537*4882a593Smuzhiyun {
538*4882a593Smuzhiyun 	pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
539*4882a593Smuzhiyun 	p4d_t *p4d = p4d_offset(pgd, addr);
540*4882a593Smuzhiyun 	pud_t *pud = pud_offset(p4d, addr);
541*4882a593Smuzhiyun 	pmd_t *pmd = pmd_offset(pud, addr);
542*4882a593Smuzhiyun 	struct address_space *mapping;
543*4882a593Smuzhiyun 	pgoff_t index;
544*4882a593Smuzhiyun 	static unsigned long resume;
545*4882a593Smuzhiyun 	static unsigned long nr_shown;
546*4882a593Smuzhiyun 	static unsigned long nr_unshown;
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	/*
549*4882a593Smuzhiyun 	 * Allow a burst of 60 reports, then keep quiet for that minute;
550*4882a593Smuzhiyun 	 * or allow a steady drip of one report per second.
551*4882a593Smuzhiyun 	 */
552*4882a593Smuzhiyun 	if (nr_shown == 60) {
553*4882a593Smuzhiyun 		if (time_before(jiffies, resume)) {
554*4882a593Smuzhiyun 			nr_unshown++;
555*4882a593Smuzhiyun 			return;
556*4882a593Smuzhiyun 		}
557*4882a593Smuzhiyun 		if (nr_unshown) {
558*4882a593Smuzhiyun 			pr_alert("BUG: Bad page map: %lu messages suppressed\n",
559*4882a593Smuzhiyun 				 nr_unshown);
560*4882a593Smuzhiyun 			nr_unshown = 0;
561*4882a593Smuzhiyun 		}
562*4882a593Smuzhiyun 		nr_shown = 0;
563*4882a593Smuzhiyun 	}
564*4882a593Smuzhiyun 	if (nr_shown++ == 0)
565*4882a593Smuzhiyun 		resume = jiffies + 60 * HZ;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 	mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
568*4882a593Smuzhiyun 	index = linear_page_index(vma, addr);
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	pr_alert("BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
571*4882a593Smuzhiyun 		 current->comm,
572*4882a593Smuzhiyun 		 (long long)pte_val(pte), (long long)pmd_val(*pmd));
573*4882a593Smuzhiyun 	if (page)
574*4882a593Smuzhiyun 		dump_page(page, "bad pte");
575*4882a593Smuzhiyun 	pr_alert("addr:%px vm_flags:%08lx anon_vma:%px mapping:%px index:%lx\n",
576*4882a593Smuzhiyun 		 (void *)addr, READ_ONCE(vma->vm_flags), vma->anon_vma, mapping, index);
577*4882a593Smuzhiyun 	pr_alert("file:%pD fault:%ps mmap:%ps readpage:%ps\n",
578*4882a593Smuzhiyun 		 vma->vm_file,
579*4882a593Smuzhiyun 		 vma->vm_ops ? vma->vm_ops->fault : NULL,
580*4882a593Smuzhiyun 		 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
581*4882a593Smuzhiyun 		 mapping ? mapping->a_ops->readpage : NULL);
582*4882a593Smuzhiyun 	dump_stack();
583*4882a593Smuzhiyun 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
584*4882a593Smuzhiyun }
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun /*
587*4882a593Smuzhiyun  * __vm_normal_page -- This function gets the "struct page" associated with
588*4882a593Smuzhiyun  * a pte.
589*4882a593Smuzhiyun  *
590*4882a593Smuzhiyun  * "Special" mappings do not wish to be associated with a "struct page" (either
591*4882a593Smuzhiyun  * it doesn't exist, or it exists but they don't want to touch it). In this
592*4882a593Smuzhiyun  * case, NULL is returned here. "Normal" mappings do have a struct page.
593*4882a593Smuzhiyun  *
594*4882a593Smuzhiyun  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
595*4882a593Smuzhiyun  * pte bit, in which case this function is trivial. Secondly, an architecture
596*4882a593Smuzhiyun  * may not have a spare pte bit, which requires a more complicated scheme,
597*4882a593Smuzhiyun  * described below.
598*4882a593Smuzhiyun  *
599*4882a593Smuzhiyun  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
600*4882a593Smuzhiyun  * special mapping (even if there are underlying and valid "struct pages").
601*4882a593Smuzhiyun  * COWed pages of a VM_PFNMAP are always normal.
602*4882a593Smuzhiyun  *
603*4882a593Smuzhiyun  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
604*4882a593Smuzhiyun  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
605*4882a593Smuzhiyun  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
606*4882a593Smuzhiyun  * mapping will always honor the rule
607*4882a593Smuzhiyun  *
608*4882a593Smuzhiyun  *	pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
609*4882a593Smuzhiyun  *
610*4882a593Smuzhiyun  * And for normal mappings this is false.
611*4882a593Smuzhiyun  *
612*4882a593Smuzhiyun  * This restricts such mappings to be a linear translation from virtual address
613*4882a593Smuzhiyun  * to pfn. To get around this restriction, we allow arbitrary mappings so long
614*4882a593Smuzhiyun  * as the vma is not a COW mapping; in that case, we know that all ptes are
615*4882a593Smuzhiyun  * special (because none can have been COWed).
616*4882a593Smuzhiyun  *
617*4882a593Smuzhiyun  *
618*4882a593Smuzhiyun  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
619*4882a593Smuzhiyun  *
620*4882a593Smuzhiyun  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
621*4882a593Smuzhiyun  * page" backing, however the difference is that _all_ pages with a struct
622*4882a593Smuzhiyun  * page (that is, those where pfn_valid is true) are refcounted and considered
623*4882a593Smuzhiyun  * normal pages by the VM. The disadvantage is that pages are refcounted
624*4882a593Smuzhiyun  * (which can be slower and simply not an option for some PFNMAP users). The
625*4882a593Smuzhiyun  * advantage is that we don't have to follow the strict linearity rule of
626*4882a593Smuzhiyun  * PFNMAP mappings in order to support COWable mappings.
627*4882a593Smuzhiyun  *
628*4882a593Smuzhiyun  */
_vm_normal_page(struct vm_area_struct * vma,unsigned long addr,pte_t pte,unsigned long vma_flags)629*4882a593Smuzhiyun struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
630*4882a593Smuzhiyun 			      pte_t pte, unsigned long vma_flags)
631*4882a593Smuzhiyun {
632*4882a593Smuzhiyun 	unsigned long pfn = pte_pfn(pte);
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL)) {
635*4882a593Smuzhiyun 		if (likely(!pte_special(pte)))
636*4882a593Smuzhiyun 			goto check_pfn;
637*4882a593Smuzhiyun 		if (vma->vm_ops && vma->vm_ops->find_special_page)
638*4882a593Smuzhiyun 			return vma->vm_ops->find_special_page(vma, addr);
639*4882a593Smuzhiyun 		if (vma_flags & (VM_PFNMAP | VM_MIXEDMAP))
640*4882a593Smuzhiyun 			return NULL;
641*4882a593Smuzhiyun 		if (is_zero_pfn(pfn))
642*4882a593Smuzhiyun 			return NULL;
643*4882a593Smuzhiyun 		if (pte_devmap(pte))
644*4882a593Smuzhiyun 			return NULL;
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 		print_bad_pte(vma, addr, pte, NULL);
647*4882a593Smuzhiyun 		return NULL;
648*4882a593Smuzhiyun 	}
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	/* !CONFIG_ARCH_HAS_PTE_SPECIAL case follows: */
651*4882a593Smuzhiyun 	/*
652*4882a593Smuzhiyun 	 * This part should never get called when CONFIG_SPECULATIVE_PAGE_FAULT
653*4882a593Smuzhiyun 	 * is set. This is mainly because we can't rely on vm_start.
654*4882a593Smuzhiyun 	 */
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	if (unlikely(vma_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
657*4882a593Smuzhiyun 		if (vma_flags & VM_MIXEDMAP) {
658*4882a593Smuzhiyun 			if (!pfn_valid(pfn))
659*4882a593Smuzhiyun 				return NULL;
660*4882a593Smuzhiyun 			goto out;
661*4882a593Smuzhiyun 		} else {
662*4882a593Smuzhiyun 			unsigned long off;
663*4882a593Smuzhiyun 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
664*4882a593Smuzhiyun 			if (pfn == vma->vm_pgoff + off)
665*4882a593Smuzhiyun 				return NULL;
666*4882a593Smuzhiyun 			if (!is_cow_mapping(vma_flags))
667*4882a593Smuzhiyun 				return NULL;
668*4882a593Smuzhiyun 		}
669*4882a593Smuzhiyun 	}
670*4882a593Smuzhiyun 
671*4882a593Smuzhiyun 	if (is_zero_pfn(pfn))
672*4882a593Smuzhiyun 		return NULL;
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun check_pfn:
675*4882a593Smuzhiyun 	if (unlikely(pfn > highest_memmap_pfn)) {
676*4882a593Smuzhiyun 		print_bad_pte(vma, addr, pte, NULL);
677*4882a593Smuzhiyun 		return NULL;
678*4882a593Smuzhiyun 	}
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	/*
681*4882a593Smuzhiyun 	 * NOTE! We still have PageReserved() pages in the page tables.
682*4882a593Smuzhiyun 	 * eg. VDSO mappings can cause them to exist.
683*4882a593Smuzhiyun 	 */
684*4882a593Smuzhiyun out:
685*4882a593Smuzhiyun 	return pfn_to_page(pfn);
686*4882a593Smuzhiyun }
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
vm_normal_page_pmd(struct vm_area_struct * vma,unsigned long addr,pmd_t pmd)689*4882a593Smuzhiyun struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
690*4882a593Smuzhiyun 				pmd_t pmd)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	unsigned long pfn = pmd_pfn(pmd);
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	/*
695*4882a593Smuzhiyun 	 * There is no pmd_special() but there may be special pmds, e.g.
696*4882a593Smuzhiyun 	 * in a direct-access (dax) mapping, so let's just replicate the
697*4882a593Smuzhiyun 	 * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
698*4882a593Smuzhiyun 	 */
699*4882a593Smuzhiyun 	if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
700*4882a593Smuzhiyun 		if (vma->vm_flags & VM_MIXEDMAP) {
701*4882a593Smuzhiyun 			if (!pfn_valid(pfn))
702*4882a593Smuzhiyun 				return NULL;
703*4882a593Smuzhiyun 			goto out;
704*4882a593Smuzhiyun 		} else {
705*4882a593Smuzhiyun 			unsigned long off;
706*4882a593Smuzhiyun 			off = (addr - vma->vm_start) >> PAGE_SHIFT;
707*4882a593Smuzhiyun 			if (pfn == vma->vm_pgoff + off)
708*4882a593Smuzhiyun 				return NULL;
709*4882a593Smuzhiyun 			if (!is_cow_mapping(vma->vm_flags))
710*4882a593Smuzhiyun 				return NULL;
711*4882a593Smuzhiyun 		}
712*4882a593Smuzhiyun 	}
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	if (pmd_devmap(pmd))
715*4882a593Smuzhiyun 		return NULL;
716*4882a593Smuzhiyun 	if (is_huge_zero_pmd(pmd))
717*4882a593Smuzhiyun 		return NULL;
718*4882a593Smuzhiyun 	if (unlikely(pfn > highest_memmap_pfn))
719*4882a593Smuzhiyun 		return NULL;
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	/*
722*4882a593Smuzhiyun 	 * NOTE! We still have PageReserved() pages in the page tables.
723*4882a593Smuzhiyun 	 * eg. VDSO mappings can cause them to exist.
724*4882a593Smuzhiyun 	 */
725*4882a593Smuzhiyun out:
726*4882a593Smuzhiyun 	return pfn_to_page(pfn);
727*4882a593Smuzhiyun }
728*4882a593Smuzhiyun #endif
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun /*
731*4882a593Smuzhiyun  * copy one vm_area from one task to the other. Assumes the page tables
732*4882a593Smuzhiyun  * already present in the new task to be cleared in the whole range
733*4882a593Smuzhiyun  * covered by this vma.
734*4882a593Smuzhiyun  */
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun static unsigned long
copy_nonpresent_pte(struct mm_struct * dst_mm,struct mm_struct * src_mm,pte_t * dst_pte,pte_t * src_pte,struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,unsigned long addr,int * rss)737*4882a593Smuzhiyun copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
738*4882a593Smuzhiyun 		pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *dst_vma,
739*4882a593Smuzhiyun 		struct vm_area_struct *src_vma, unsigned long addr, int *rss)
740*4882a593Smuzhiyun {
741*4882a593Smuzhiyun 	unsigned long vm_flags = dst_vma->vm_flags;
742*4882a593Smuzhiyun 	pte_t pte = *src_pte;
743*4882a593Smuzhiyun 	struct page *page;
744*4882a593Smuzhiyun 	swp_entry_t entry = pte_to_swp_entry(pte);
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun 	if (likely(!non_swap_entry(entry))) {
747*4882a593Smuzhiyun 		if (swap_duplicate(entry) < 0)
748*4882a593Smuzhiyun 			return entry.val;
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun 		/* make sure dst_mm is on swapoff's mmlist. */
751*4882a593Smuzhiyun 		if (unlikely(list_empty(&dst_mm->mmlist))) {
752*4882a593Smuzhiyun 			spin_lock(&mmlist_lock);
753*4882a593Smuzhiyun 			if (list_empty(&dst_mm->mmlist))
754*4882a593Smuzhiyun 				list_add(&dst_mm->mmlist,
755*4882a593Smuzhiyun 						&src_mm->mmlist);
756*4882a593Smuzhiyun 			spin_unlock(&mmlist_lock);
757*4882a593Smuzhiyun 		}
758*4882a593Smuzhiyun 		rss[MM_SWAPENTS]++;
759*4882a593Smuzhiyun 	} else if (is_migration_entry(entry)) {
760*4882a593Smuzhiyun 		page = migration_entry_to_page(entry);
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 		rss[mm_counter(page)]++;
763*4882a593Smuzhiyun 
764*4882a593Smuzhiyun 		if (is_write_migration_entry(entry) &&
765*4882a593Smuzhiyun 				is_cow_mapping(vm_flags)) {
766*4882a593Smuzhiyun 			/*
767*4882a593Smuzhiyun 			 * COW mappings require pages in both
768*4882a593Smuzhiyun 			 * parent and child to be set to read.
769*4882a593Smuzhiyun 			 */
770*4882a593Smuzhiyun 			make_migration_entry_read(&entry);
771*4882a593Smuzhiyun 			pte = swp_entry_to_pte(entry);
772*4882a593Smuzhiyun 			if (pte_swp_soft_dirty(*src_pte))
773*4882a593Smuzhiyun 				pte = pte_swp_mksoft_dirty(pte);
774*4882a593Smuzhiyun 			if (pte_swp_uffd_wp(*src_pte))
775*4882a593Smuzhiyun 				pte = pte_swp_mkuffd_wp(pte);
776*4882a593Smuzhiyun 			set_pte_at(src_mm, addr, src_pte, pte);
777*4882a593Smuzhiyun 		}
778*4882a593Smuzhiyun 	} else if (is_device_private_entry(entry)) {
779*4882a593Smuzhiyun 		page = device_private_entry_to_page(entry);
780*4882a593Smuzhiyun 
781*4882a593Smuzhiyun 		/*
782*4882a593Smuzhiyun 		 * Update rss count even for unaddressable pages, as
783*4882a593Smuzhiyun 		 * they should treated just like normal pages in this
784*4882a593Smuzhiyun 		 * respect.
785*4882a593Smuzhiyun 		 *
786*4882a593Smuzhiyun 		 * We will likely want to have some new rss counters
787*4882a593Smuzhiyun 		 * for unaddressable pages, at some point. But for now
788*4882a593Smuzhiyun 		 * keep things as they are.
789*4882a593Smuzhiyun 		 */
790*4882a593Smuzhiyun 		get_page(page);
791*4882a593Smuzhiyun 		rss[mm_counter(page)]++;
792*4882a593Smuzhiyun 		page_dup_rmap(page, false);
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 		/*
795*4882a593Smuzhiyun 		 * We do not preserve soft-dirty information, because so
796*4882a593Smuzhiyun 		 * far, checkpoint/restore is the only feature that
797*4882a593Smuzhiyun 		 * requires that. And checkpoint/restore does not work
798*4882a593Smuzhiyun 		 * when a device driver is involved (you cannot easily
799*4882a593Smuzhiyun 		 * save and restore device driver state).
800*4882a593Smuzhiyun 		 */
801*4882a593Smuzhiyun 		if (is_write_device_private_entry(entry) &&
802*4882a593Smuzhiyun 		    is_cow_mapping(vm_flags)) {
803*4882a593Smuzhiyun 			make_device_private_entry_read(&entry);
804*4882a593Smuzhiyun 			pte = swp_entry_to_pte(entry);
805*4882a593Smuzhiyun 			if (pte_swp_uffd_wp(*src_pte))
806*4882a593Smuzhiyun 				pte = pte_swp_mkuffd_wp(pte);
807*4882a593Smuzhiyun 			set_pte_at(src_mm, addr, src_pte, pte);
808*4882a593Smuzhiyun 		}
809*4882a593Smuzhiyun 	}
810*4882a593Smuzhiyun 	if (!userfaultfd_wp(dst_vma))
811*4882a593Smuzhiyun 		pte = pte_swp_clear_uffd_wp(pte);
812*4882a593Smuzhiyun 	set_pte_at(dst_mm, addr, dst_pte, pte);
813*4882a593Smuzhiyun 	return 0;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun /*
817*4882a593Smuzhiyun  * Copy a present and normal page if necessary.
818*4882a593Smuzhiyun  *
819*4882a593Smuzhiyun  * NOTE! The usual case is that this doesn't need to do
820*4882a593Smuzhiyun  * anything, and can just return a positive value. That
821*4882a593Smuzhiyun  * will let the caller know that it can just increase
822*4882a593Smuzhiyun  * the page refcount and re-use the pte the traditional
823*4882a593Smuzhiyun  * way.
824*4882a593Smuzhiyun  *
825*4882a593Smuzhiyun  * But _if_ we need to copy it because it needs to be
826*4882a593Smuzhiyun  * pinned in the parent (and the child should get its own
827*4882a593Smuzhiyun  * copy rather than just a reference to the same page),
828*4882a593Smuzhiyun  * we'll do that here and return zero to let the caller
829*4882a593Smuzhiyun  * know we're done.
830*4882a593Smuzhiyun  *
831*4882a593Smuzhiyun  * And if we need a pre-allocated page but don't yet have
832*4882a593Smuzhiyun  * one, return a negative error to let the preallocation
833*4882a593Smuzhiyun  * code know so that it can do so outside the page table
834*4882a593Smuzhiyun  * lock.
835*4882a593Smuzhiyun  */
836*4882a593Smuzhiyun static inline int
copy_present_page(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pte_t * dst_pte,pte_t * src_pte,unsigned long addr,int * rss,struct page ** prealloc,pte_t pte,struct page * page)837*4882a593Smuzhiyun copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
838*4882a593Smuzhiyun 		  pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
839*4882a593Smuzhiyun 		  struct page **prealloc, pte_t pte, struct page *page)
840*4882a593Smuzhiyun {
841*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
842*4882a593Smuzhiyun 	struct page *new_page;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	if (!is_cow_mapping(src_vma->vm_flags))
845*4882a593Smuzhiyun 		return 1;
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	/*
848*4882a593Smuzhiyun 	 * What we want to do is to check whether this page may
849*4882a593Smuzhiyun 	 * have been pinned by the parent process.  If so,
850*4882a593Smuzhiyun 	 * instead of wrprotect the pte on both sides, we copy
851*4882a593Smuzhiyun 	 * the page immediately so that we'll always guarantee
852*4882a593Smuzhiyun 	 * the pinned page won't be randomly replaced in the
853*4882a593Smuzhiyun 	 * future.
854*4882a593Smuzhiyun 	 *
855*4882a593Smuzhiyun 	 * The page pinning checks are just "has this mm ever
856*4882a593Smuzhiyun 	 * seen pinning", along with the (inexact) check of
857*4882a593Smuzhiyun 	 * the page count. That might give false positives for
858*4882a593Smuzhiyun 	 * for pinning, but it will work correctly.
859*4882a593Smuzhiyun 	 */
860*4882a593Smuzhiyun 	if (likely(!atomic_read(&src_mm->has_pinned)))
861*4882a593Smuzhiyun 		return 1;
862*4882a593Smuzhiyun 	if (likely(!page_maybe_dma_pinned(page)))
863*4882a593Smuzhiyun 		return 1;
864*4882a593Smuzhiyun 
865*4882a593Smuzhiyun 	/*
866*4882a593Smuzhiyun 	 * The vma->anon_vma of the child process may be NULL
867*4882a593Smuzhiyun 	 * because the entire vma does not contain anonymous pages.
868*4882a593Smuzhiyun 	 * A BUG will occur when the copy_present_page() passes
869*4882a593Smuzhiyun 	 * a copy of a non-anonymous page of that vma to the
870*4882a593Smuzhiyun 	 * page_add_new_anon_rmap() to set up new anonymous rmap.
871*4882a593Smuzhiyun 	 * Return 1 if the page is not an anonymous page.
872*4882a593Smuzhiyun 	 */
873*4882a593Smuzhiyun 	if (!PageAnon(page))
874*4882a593Smuzhiyun 		return 1;
875*4882a593Smuzhiyun 
876*4882a593Smuzhiyun 	new_page = *prealloc;
877*4882a593Smuzhiyun 	if (!new_page)
878*4882a593Smuzhiyun 		return -EAGAIN;
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	/*
881*4882a593Smuzhiyun 	 * We have a prealloc page, all good!  Take it
882*4882a593Smuzhiyun 	 * over and copy the page & arm it.
883*4882a593Smuzhiyun 	 */
884*4882a593Smuzhiyun 	*prealloc = NULL;
885*4882a593Smuzhiyun 	copy_user_highpage(new_page, page, addr, src_vma);
886*4882a593Smuzhiyun 	__SetPageUptodate(new_page);
887*4882a593Smuzhiyun 	page_add_new_anon_rmap(new_page, dst_vma, addr, false);
888*4882a593Smuzhiyun 	lru_cache_add_inactive_or_unevictable(new_page, dst_vma);
889*4882a593Smuzhiyun 	rss[mm_counter(new_page)]++;
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun 	/* All done, just insert the new page copy in the child */
892*4882a593Smuzhiyun 	pte = mk_pte(new_page, dst_vma->vm_page_prot);
893*4882a593Smuzhiyun 	pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma->vm_flags);
894*4882a593Smuzhiyun 	if (userfaultfd_pte_wp(dst_vma, *src_pte))
895*4882a593Smuzhiyun 		/* Uffd-wp needs to be delivered to dest pte as well */
896*4882a593Smuzhiyun 		pte = pte_wrprotect(pte_mkuffd_wp(pte));
897*4882a593Smuzhiyun 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
898*4882a593Smuzhiyun 	return 0;
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun /*
902*4882a593Smuzhiyun  * Copy one pte.  Returns 0 if succeeded, or -EAGAIN if one preallocated page
903*4882a593Smuzhiyun  * is required to copy this pte.
904*4882a593Smuzhiyun  */
905*4882a593Smuzhiyun static inline int
copy_present_pte(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pte_t * dst_pte,pte_t * src_pte,unsigned long addr,int * rss,struct page ** prealloc)906*4882a593Smuzhiyun copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
907*4882a593Smuzhiyun 		 pte_t *dst_pte, pte_t *src_pte, unsigned long addr, int *rss,
908*4882a593Smuzhiyun 		 struct page **prealloc)
909*4882a593Smuzhiyun {
910*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
911*4882a593Smuzhiyun 	unsigned long vm_flags = src_vma->vm_flags;
912*4882a593Smuzhiyun 	pte_t pte = *src_pte;
913*4882a593Smuzhiyun 	struct page *page;
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	page = vm_normal_page(src_vma, addr, pte);
916*4882a593Smuzhiyun 	if (page) {
917*4882a593Smuzhiyun 		int retval;
918*4882a593Smuzhiyun 
919*4882a593Smuzhiyun 		retval = copy_present_page(dst_vma, src_vma, dst_pte, src_pte,
920*4882a593Smuzhiyun 					   addr, rss, prealloc, pte, page);
921*4882a593Smuzhiyun 		if (retval <= 0)
922*4882a593Smuzhiyun 			return retval;
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 		get_page(page);
925*4882a593Smuzhiyun 		page_dup_rmap(page, false);
926*4882a593Smuzhiyun 		rss[mm_counter(page)]++;
927*4882a593Smuzhiyun 	}
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 	/*
930*4882a593Smuzhiyun 	 * If it's a COW mapping, write protect it both
931*4882a593Smuzhiyun 	 * in the parent and the child
932*4882a593Smuzhiyun 	 */
933*4882a593Smuzhiyun 	if (is_cow_mapping(vm_flags) && pte_write(pte)) {
934*4882a593Smuzhiyun 		ptep_set_wrprotect(src_mm, addr, src_pte);
935*4882a593Smuzhiyun 		pte = pte_wrprotect(pte);
936*4882a593Smuzhiyun 	}
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun 	/*
939*4882a593Smuzhiyun 	 * If it's a shared mapping, mark it clean in
940*4882a593Smuzhiyun 	 * the child
941*4882a593Smuzhiyun 	 */
942*4882a593Smuzhiyun 	if (vm_flags & VM_SHARED)
943*4882a593Smuzhiyun 		pte = pte_mkclean(pte);
944*4882a593Smuzhiyun 	pte = pte_mkold(pte);
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	if (!userfaultfd_wp(dst_vma))
947*4882a593Smuzhiyun 		pte = pte_clear_uffd_wp(pte);
948*4882a593Smuzhiyun 
949*4882a593Smuzhiyun 	set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
950*4882a593Smuzhiyun 	return 0;
951*4882a593Smuzhiyun }
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun static inline struct page *
page_copy_prealloc(struct mm_struct * src_mm,struct vm_area_struct * vma,unsigned long addr)954*4882a593Smuzhiyun page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma,
955*4882a593Smuzhiyun 		   unsigned long addr)
956*4882a593Smuzhiyun {
957*4882a593Smuzhiyun 	struct page *new_page;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
960*4882a593Smuzhiyun 	if (!new_page)
961*4882a593Smuzhiyun 		return NULL;
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun 	if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) {
964*4882a593Smuzhiyun 		put_page(new_page);
965*4882a593Smuzhiyun 		return NULL;
966*4882a593Smuzhiyun 	}
967*4882a593Smuzhiyun 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 	return new_page;
970*4882a593Smuzhiyun }
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun static int
copy_pte_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pmd_t * dst_pmd,pmd_t * src_pmd,unsigned long addr,unsigned long end)973*4882a593Smuzhiyun copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
974*4882a593Smuzhiyun 	       pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
975*4882a593Smuzhiyun 	       unsigned long end)
976*4882a593Smuzhiyun {
977*4882a593Smuzhiyun 	struct mm_struct *dst_mm = dst_vma->vm_mm;
978*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
979*4882a593Smuzhiyun 	pte_t *orig_src_pte, *orig_dst_pte;
980*4882a593Smuzhiyun 	pte_t *src_pte, *dst_pte;
981*4882a593Smuzhiyun 	spinlock_t *src_ptl, *dst_ptl;
982*4882a593Smuzhiyun 	int progress, ret = 0;
983*4882a593Smuzhiyun 	int rss[NR_MM_COUNTERS];
984*4882a593Smuzhiyun 	swp_entry_t entry = (swp_entry_t){0};
985*4882a593Smuzhiyun 	struct page *prealloc = NULL;
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun again:
988*4882a593Smuzhiyun 	progress = 0;
989*4882a593Smuzhiyun 	init_rss_vec(rss);
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
992*4882a593Smuzhiyun 	if (!dst_pte) {
993*4882a593Smuzhiyun 		ret = -ENOMEM;
994*4882a593Smuzhiyun 		goto out;
995*4882a593Smuzhiyun 	}
996*4882a593Smuzhiyun 	src_pte = pte_offset_map(src_pmd, addr);
997*4882a593Smuzhiyun 	src_ptl = pte_lockptr(src_mm, src_pmd);
998*4882a593Smuzhiyun 	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
999*4882a593Smuzhiyun 	orig_src_pte = src_pte;
1000*4882a593Smuzhiyun 	orig_dst_pte = dst_pte;
1001*4882a593Smuzhiyun 	arch_enter_lazy_mmu_mode();
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	do {
1004*4882a593Smuzhiyun 		/*
1005*4882a593Smuzhiyun 		 * We are holding two locks at this point - either of them
1006*4882a593Smuzhiyun 		 * could generate latencies in another task on another CPU.
1007*4882a593Smuzhiyun 		 */
1008*4882a593Smuzhiyun 		if (progress >= 32) {
1009*4882a593Smuzhiyun 			progress = 0;
1010*4882a593Smuzhiyun 			if (need_resched() ||
1011*4882a593Smuzhiyun 			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
1012*4882a593Smuzhiyun 				break;
1013*4882a593Smuzhiyun 		}
1014*4882a593Smuzhiyun 		if (pte_none(*src_pte)) {
1015*4882a593Smuzhiyun 			progress++;
1016*4882a593Smuzhiyun 			continue;
1017*4882a593Smuzhiyun 		}
1018*4882a593Smuzhiyun 		if (unlikely(!pte_present(*src_pte))) {
1019*4882a593Smuzhiyun 			entry.val = copy_nonpresent_pte(dst_mm, src_mm,
1020*4882a593Smuzhiyun 							dst_pte, src_pte,
1021*4882a593Smuzhiyun 							dst_vma, src_vma,
1022*4882a593Smuzhiyun 							addr, rss);
1023*4882a593Smuzhiyun 			if (entry.val)
1024*4882a593Smuzhiyun 				break;
1025*4882a593Smuzhiyun 			progress += 8;
1026*4882a593Smuzhiyun 			continue;
1027*4882a593Smuzhiyun 		}
1028*4882a593Smuzhiyun 		/* copy_present_pte() will clear `*prealloc' if consumed */
1029*4882a593Smuzhiyun 		ret = copy_present_pte(dst_vma, src_vma, dst_pte, src_pte,
1030*4882a593Smuzhiyun 				       addr, rss, &prealloc);
1031*4882a593Smuzhiyun 		/*
1032*4882a593Smuzhiyun 		 * If we need a pre-allocated page for this pte, drop the
1033*4882a593Smuzhiyun 		 * locks, allocate, and try again.
1034*4882a593Smuzhiyun 		 */
1035*4882a593Smuzhiyun 		if (unlikely(ret == -EAGAIN))
1036*4882a593Smuzhiyun 			break;
1037*4882a593Smuzhiyun 		if (unlikely(prealloc)) {
1038*4882a593Smuzhiyun 			/*
1039*4882a593Smuzhiyun 			 * pre-alloc page cannot be reused by next time so as
1040*4882a593Smuzhiyun 			 * to strictly follow mempolicy (e.g., alloc_page_vma()
1041*4882a593Smuzhiyun 			 * will allocate page according to address).  This
1042*4882a593Smuzhiyun 			 * could only happen if one pinned pte changed.
1043*4882a593Smuzhiyun 			 */
1044*4882a593Smuzhiyun 			put_page(prealloc);
1045*4882a593Smuzhiyun 			prealloc = NULL;
1046*4882a593Smuzhiyun 		}
1047*4882a593Smuzhiyun 		progress += 8;
1048*4882a593Smuzhiyun 	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	arch_leave_lazy_mmu_mode();
1051*4882a593Smuzhiyun 	spin_unlock(src_ptl);
1052*4882a593Smuzhiyun 	pte_unmap(orig_src_pte);
1053*4882a593Smuzhiyun 	add_mm_rss_vec(dst_mm, rss);
1054*4882a593Smuzhiyun 	pte_unmap_unlock(orig_dst_pte, dst_ptl);
1055*4882a593Smuzhiyun 	cond_resched();
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	if (entry.val) {
1058*4882a593Smuzhiyun 		if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) {
1059*4882a593Smuzhiyun 			ret = -ENOMEM;
1060*4882a593Smuzhiyun 			goto out;
1061*4882a593Smuzhiyun 		}
1062*4882a593Smuzhiyun 		entry.val = 0;
1063*4882a593Smuzhiyun 	} else if (ret) {
1064*4882a593Smuzhiyun 		WARN_ON_ONCE(ret != -EAGAIN);
1065*4882a593Smuzhiyun 		prealloc = page_copy_prealloc(src_mm, src_vma, addr);
1066*4882a593Smuzhiyun 		if (!prealloc)
1067*4882a593Smuzhiyun 			return -ENOMEM;
1068*4882a593Smuzhiyun 		/* We've captured and resolved the error. Reset, try again. */
1069*4882a593Smuzhiyun 		ret = 0;
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 	if (addr != end)
1072*4882a593Smuzhiyun 		goto again;
1073*4882a593Smuzhiyun out:
1074*4882a593Smuzhiyun 	if (unlikely(prealloc))
1075*4882a593Smuzhiyun 		put_page(prealloc);
1076*4882a593Smuzhiyun 	return ret;
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun 
1079*4882a593Smuzhiyun static inline int
copy_pmd_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pud_t * dst_pud,pud_t * src_pud,unsigned long addr,unsigned long end)1080*4882a593Smuzhiyun copy_pmd_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1081*4882a593Smuzhiyun 	       pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
1082*4882a593Smuzhiyun 	       unsigned long end)
1083*4882a593Smuzhiyun {
1084*4882a593Smuzhiyun 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1085*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
1086*4882a593Smuzhiyun 	pmd_t *src_pmd, *dst_pmd;
1087*4882a593Smuzhiyun 	unsigned long next;
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
1090*4882a593Smuzhiyun 	if (!dst_pmd)
1091*4882a593Smuzhiyun 		return -ENOMEM;
1092*4882a593Smuzhiyun 	src_pmd = pmd_offset(src_pud, addr);
1093*4882a593Smuzhiyun 	do {
1094*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
1095*4882a593Smuzhiyun 		if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1096*4882a593Smuzhiyun 			|| pmd_devmap(*src_pmd)) {
1097*4882a593Smuzhiyun 			int err;
1098*4882a593Smuzhiyun 			VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, src_vma);
1099*4882a593Smuzhiyun 			err = copy_huge_pmd(dst_mm, src_mm, dst_pmd, src_pmd,
1100*4882a593Smuzhiyun 					    addr, dst_vma, src_vma);
1101*4882a593Smuzhiyun 			if (err == -ENOMEM)
1102*4882a593Smuzhiyun 				return -ENOMEM;
1103*4882a593Smuzhiyun 			if (!err)
1104*4882a593Smuzhiyun 				continue;
1105*4882a593Smuzhiyun 			/* fall through */
1106*4882a593Smuzhiyun 		}
1107*4882a593Smuzhiyun 		if (pmd_none_or_clear_bad(src_pmd))
1108*4882a593Smuzhiyun 			continue;
1109*4882a593Smuzhiyun 		if (copy_pte_range(dst_vma, src_vma, dst_pmd, src_pmd,
1110*4882a593Smuzhiyun 				   addr, next))
1111*4882a593Smuzhiyun 			return -ENOMEM;
1112*4882a593Smuzhiyun 	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
1113*4882a593Smuzhiyun 	return 0;
1114*4882a593Smuzhiyun }
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun static inline int
copy_pud_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,p4d_t * dst_p4d,p4d_t * src_p4d,unsigned long addr,unsigned long end)1117*4882a593Smuzhiyun copy_pud_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1118*4882a593Smuzhiyun 	       p4d_t *dst_p4d, p4d_t *src_p4d, unsigned long addr,
1119*4882a593Smuzhiyun 	       unsigned long end)
1120*4882a593Smuzhiyun {
1121*4882a593Smuzhiyun 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1122*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
1123*4882a593Smuzhiyun 	pud_t *src_pud, *dst_pud;
1124*4882a593Smuzhiyun 	unsigned long next;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	dst_pud = pud_alloc(dst_mm, dst_p4d, addr);
1127*4882a593Smuzhiyun 	if (!dst_pud)
1128*4882a593Smuzhiyun 		return -ENOMEM;
1129*4882a593Smuzhiyun 	src_pud = pud_offset(src_p4d, addr);
1130*4882a593Smuzhiyun 	do {
1131*4882a593Smuzhiyun 		next = pud_addr_end(addr, end);
1132*4882a593Smuzhiyun 		if (pud_trans_huge(*src_pud) || pud_devmap(*src_pud)) {
1133*4882a593Smuzhiyun 			int err;
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 			VM_BUG_ON_VMA(next-addr != HPAGE_PUD_SIZE, src_vma);
1136*4882a593Smuzhiyun 			err = copy_huge_pud(dst_mm, src_mm,
1137*4882a593Smuzhiyun 					    dst_pud, src_pud, addr, src_vma);
1138*4882a593Smuzhiyun 			if (err == -ENOMEM)
1139*4882a593Smuzhiyun 				return -ENOMEM;
1140*4882a593Smuzhiyun 			if (!err)
1141*4882a593Smuzhiyun 				continue;
1142*4882a593Smuzhiyun 			/* fall through */
1143*4882a593Smuzhiyun 		}
1144*4882a593Smuzhiyun 		if (pud_none_or_clear_bad(src_pud))
1145*4882a593Smuzhiyun 			continue;
1146*4882a593Smuzhiyun 		if (copy_pmd_range(dst_vma, src_vma, dst_pud, src_pud,
1147*4882a593Smuzhiyun 				   addr, next))
1148*4882a593Smuzhiyun 			return -ENOMEM;
1149*4882a593Smuzhiyun 	} while (dst_pud++, src_pud++, addr = next, addr != end);
1150*4882a593Smuzhiyun 	return 0;
1151*4882a593Smuzhiyun }
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun static inline int
copy_p4d_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma,pgd_t * dst_pgd,pgd_t * src_pgd,unsigned long addr,unsigned long end)1154*4882a593Smuzhiyun copy_p4d_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
1155*4882a593Smuzhiyun 	       pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long addr,
1156*4882a593Smuzhiyun 	       unsigned long end)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1159*4882a593Smuzhiyun 	p4d_t *src_p4d, *dst_p4d;
1160*4882a593Smuzhiyun 	unsigned long next;
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	dst_p4d = p4d_alloc(dst_mm, dst_pgd, addr);
1163*4882a593Smuzhiyun 	if (!dst_p4d)
1164*4882a593Smuzhiyun 		return -ENOMEM;
1165*4882a593Smuzhiyun 	src_p4d = p4d_offset(src_pgd, addr);
1166*4882a593Smuzhiyun 	do {
1167*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
1168*4882a593Smuzhiyun 		if (p4d_none_or_clear_bad(src_p4d))
1169*4882a593Smuzhiyun 			continue;
1170*4882a593Smuzhiyun 		if (copy_pud_range(dst_vma, src_vma, dst_p4d, src_p4d,
1171*4882a593Smuzhiyun 				   addr, next))
1172*4882a593Smuzhiyun 			return -ENOMEM;
1173*4882a593Smuzhiyun 	} while (dst_p4d++, src_p4d++, addr = next, addr != end);
1174*4882a593Smuzhiyun 	return 0;
1175*4882a593Smuzhiyun }
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun int
copy_page_range(struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma)1178*4882a593Smuzhiyun copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
1179*4882a593Smuzhiyun {
1180*4882a593Smuzhiyun 	pgd_t *src_pgd, *dst_pgd;
1181*4882a593Smuzhiyun 	unsigned long next;
1182*4882a593Smuzhiyun 	unsigned long addr = src_vma->vm_start;
1183*4882a593Smuzhiyun 	unsigned long end = src_vma->vm_end;
1184*4882a593Smuzhiyun 	struct mm_struct *dst_mm = dst_vma->vm_mm;
1185*4882a593Smuzhiyun 	struct mm_struct *src_mm = src_vma->vm_mm;
1186*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1187*4882a593Smuzhiyun 	bool is_cow;
1188*4882a593Smuzhiyun 	int ret;
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	/*
1191*4882a593Smuzhiyun 	 * Don't copy ptes where a page fault will fill them correctly.
1192*4882a593Smuzhiyun 	 * Fork becomes much lighter when there are big shared or private
1193*4882a593Smuzhiyun 	 * readonly mappings. The tradeoff is that copy_page_range is more
1194*4882a593Smuzhiyun 	 * efficient than faulting.
1195*4882a593Smuzhiyun 	 */
1196*4882a593Smuzhiyun 	if (!(src_vma->vm_flags & (VM_HUGETLB | VM_PFNMAP | VM_MIXEDMAP)) &&
1197*4882a593Smuzhiyun 	    !src_vma->anon_vma)
1198*4882a593Smuzhiyun 		return 0;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	if (is_vm_hugetlb_page(src_vma))
1201*4882a593Smuzhiyun 		return copy_hugetlb_page_range(dst_mm, src_mm, src_vma);
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	if (unlikely(src_vma->vm_flags & VM_PFNMAP)) {
1204*4882a593Smuzhiyun 		/*
1205*4882a593Smuzhiyun 		 * We do not free on error cases below as remove_vma
1206*4882a593Smuzhiyun 		 * gets called on error from higher level routine
1207*4882a593Smuzhiyun 		 */
1208*4882a593Smuzhiyun 		ret = track_pfn_copy(src_vma);
1209*4882a593Smuzhiyun 		if (ret)
1210*4882a593Smuzhiyun 			return ret;
1211*4882a593Smuzhiyun 	}
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	/*
1214*4882a593Smuzhiyun 	 * We need to invalidate the secondary MMU mappings only when
1215*4882a593Smuzhiyun 	 * there could be a permission downgrade on the ptes of the
1216*4882a593Smuzhiyun 	 * parent mm. And a permission downgrade will only happen if
1217*4882a593Smuzhiyun 	 * is_cow_mapping() returns true.
1218*4882a593Smuzhiyun 	 */
1219*4882a593Smuzhiyun 	is_cow = is_cow_mapping(src_vma->vm_flags);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	if (is_cow) {
1222*4882a593Smuzhiyun 		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
1223*4882a593Smuzhiyun 					0, src_vma, src_mm, addr, end);
1224*4882a593Smuzhiyun 		mmu_notifier_invalidate_range_start(&range);
1225*4882a593Smuzhiyun 		/*
1226*4882a593Smuzhiyun 		 * Disabling preemption is not needed for the write side, as
1227*4882a593Smuzhiyun 		 * the read side doesn't spin, but goes to the mmap_lock.
1228*4882a593Smuzhiyun 		 *
1229*4882a593Smuzhiyun 		 * Use the raw variant of the seqcount_t write API to avoid
1230*4882a593Smuzhiyun 		 * lockdep complaining about preemptibility.
1231*4882a593Smuzhiyun 		 */
1232*4882a593Smuzhiyun 		mmap_assert_write_locked(src_mm);
1233*4882a593Smuzhiyun 		raw_write_seqcount_begin(&src_mm->write_protect_seq);
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	ret = 0;
1237*4882a593Smuzhiyun 	dst_pgd = pgd_offset(dst_mm, addr);
1238*4882a593Smuzhiyun 	src_pgd = pgd_offset(src_mm, addr);
1239*4882a593Smuzhiyun 	do {
1240*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
1241*4882a593Smuzhiyun 		if (pgd_none_or_clear_bad(src_pgd))
1242*4882a593Smuzhiyun 			continue;
1243*4882a593Smuzhiyun 		if (unlikely(copy_p4d_range(dst_vma, src_vma, dst_pgd, src_pgd,
1244*4882a593Smuzhiyun 					    addr, next))) {
1245*4882a593Smuzhiyun 			ret = -ENOMEM;
1246*4882a593Smuzhiyun 			break;
1247*4882a593Smuzhiyun 		}
1248*4882a593Smuzhiyun 	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	if (is_cow) {
1251*4882a593Smuzhiyun 		raw_write_seqcount_end(&src_mm->write_protect_seq);
1252*4882a593Smuzhiyun 		mmu_notifier_invalidate_range_end(&range);
1253*4882a593Smuzhiyun 	}
1254*4882a593Smuzhiyun 	return ret;
1255*4882a593Smuzhiyun }
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun /* Whether we should zap all COWed (private) pages too */
should_zap_cows(struct zap_details * details)1258*4882a593Smuzhiyun static inline bool should_zap_cows(struct zap_details *details)
1259*4882a593Smuzhiyun {
1260*4882a593Smuzhiyun 	/* By default, zap all pages */
1261*4882a593Smuzhiyun 	if (!details)
1262*4882a593Smuzhiyun 		return true;
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	/* Or, we zap COWed pages only if the caller wants to */
1265*4882a593Smuzhiyun 	return !details->check_mapping;
1266*4882a593Smuzhiyun }
1267*4882a593Smuzhiyun 
zap_pte_range(struct mmu_gather * tlb,struct vm_area_struct * vma,pmd_t * pmd,unsigned long addr,unsigned long end,struct zap_details * details)1268*4882a593Smuzhiyun static unsigned long zap_pte_range(struct mmu_gather *tlb,
1269*4882a593Smuzhiyun 				struct vm_area_struct *vma, pmd_t *pmd,
1270*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
1271*4882a593Smuzhiyun 				struct zap_details *details)
1272*4882a593Smuzhiyun {
1273*4882a593Smuzhiyun 	struct mm_struct *mm = tlb->mm;
1274*4882a593Smuzhiyun 	int force_flush = 0;
1275*4882a593Smuzhiyun 	int rss[NR_MM_COUNTERS];
1276*4882a593Smuzhiyun 	spinlock_t *ptl;
1277*4882a593Smuzhiyun 	pte_t *start_pte;
1278*4882a593Smuzhiyun 	pte_t *pte;
1279*4882a593Smuzhiyun 	swp_entry_t entry;
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun 	tlb_change_page_size(tlb, PAGE_SIZE);
1282*4882a593Smuzhiyun again:
1283*4882a593Smuzhiyun 	init_rss_vec(rss);
1284*4882a593Smuzhiyun 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1285*4882a593Smuzhiyun 	pte = start_pte;
1286*4882a593Smuzhiyun 	flush_tlb_batched_pending(mm);
1287*4882a593Smuzhiyun 	arch_enter_lazy_mmu_mode();
1288*4882a593Smuzhiyun 	do {
1289*4882a593Smuzhiyun 		pte_t ptent = *pte;
1290*4882a593Smuzhiyun 		if (pte_none(ptent))
1291*4882a593Smuzhiyun 			continue;
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 		if (need_resched())
1294*4882a593Smuzhiyun 			break;
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun 		if (pte_present(ptent)) {
1297*4882a593Smuzhiyun 			struct page *page;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 			page = vm_normal_page(vma, addr, ptent);
1300*4882a593Smuzhiyun 			if (unlikely(details) && page) {
1301*4882a593Smuzhiyun 				/*
1302*4882a593Smuzhiyun 				 * unmap_shared_mapping_pages() wants to
1303*4882a593Smuzhiyun 				 * invalidate cache without truncating:
1304*4882a593Smuzhiyun 				 * unmap shared but keep private pages.
1305*4882a593Smuzhiyun 				 */
1306*4882a593Smuzhiyun 				if (details->check_mapping &&
1307*4882a593Smuzhiyun 				    details->check_mapping != page_rmapping(page))
1308*4882a593Smuzhiyun 					continue;
1309*4882a593Smuzhiyun 			}
1310*4882a593Smuzhiyun 			ptent = ptep_get_and_clear_full(mm, addr, pte,
1311*4882a593Smuzhiyun 							tlb->fullmm);
1312*4882a593Smuzhiyun 			tlb_remove_tlb_entry(tlb, pte, addr);
1313*4882a593Smuzhiyun 			if (unlikely(!page))
1314*4882a593Smuzhiyun 				continue;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 			if (!PageAnon(page)) {
1317*4882a593Smuzhiyun 				if (pte_dirty(ptent)) {
1318*4882a593Smuzhiyun 					force_flush = 1;
1319*4882a593Smuzhiyun 					set_page_dirty(page);
1320*4882a593Smuzhiyun 				}
1321*4882a593Smuzhiyun 				if (pte_young(ptent) &&
1322*4882a593Smuzhiyun 				    likely(!(vma->vm_flags & VM_SEQ_READ)))
1323*4882a593Smuzhiyun 					mark_page_accessed(page);
1324*4882a593Smuzhiyun 			}
1325*4882a593Smuzhiyun 			rss[mm_counter(page)]--;
1326*4882a593Smuzhiyun 			page_remove_rmap(page, false);
1327*4882a593Smuzhiyun 			if (unlikely(page_mapcount(page) < 0))
1328*4882a593Smuzhiyun 				print_bad_pte(vma, addr, ptent, page);
1329*4882a593Smuzhiyun 			if (unlikely(__tlb_remove_page(tlb, page)) ||
1330*4882a593Smuzhiyun 				     lru_cache_disabled()) {
1331*4882a593Smuzhiyun 				force_flush = 1;
1332*4882a593Smuzhiyun 				addr += PAGE_SIZE;
1333*4882a593Smuzhiyun 				break;
1334*4882a593Smuzhiyun 			}
1335*4882a593Smuzhiyun 			continue;
1336*4882a593Smuzhiyun 		}
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 		entry = pte_to_swp_entry(ptent);
1339*4882a593Smuzhiyun 		if (is_device_private_entry(entry)) {
1340*4882a593Smuzhiyun 			struct page *page = device_private_entry_to_page(entry);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 			if (unlikely(details && details->check_mapping)) {
1343*4882a593Smuzhiyun 				/*
1344*4882a593Smuzhiyun 				 * unmap_shared_mapping_pages() wants to
1345*4882a593Smuzhiyun 				 * invalidate cache without truncating:
1346*4882a593Smuzhiyun 				 * unmap shared but keep private pages.
1347*4882a593Smuzhiyun 				 */
1348*4882a593Smuzhiyun 				if (details->check_mapping !=
1349*4882a593Smuzhiyun 				    page_rmapping(page))
1350*4882a593Smuzhiyun 					continue;
1351*4882a593Smuzhiyun 			}
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 			pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1354*4882a593Smuzhiyun 			rss[mm_counter(page)]--;
1355*4882a593Smuzhiyun 			page_remove_rmap(page, false);
1356*4882a593Smuzhiyun 			put_page(page);
1357*4882a593Smuzhiyun 			continue;
1358*4882a593Smuzhiyun 		}
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 		if (!non_swap_entry(entry)) {
1361*4882a593Smuzhiyun 			/* Genuine swap entry, hence a private anon page */
1362*4882a593Smuzhiyun 			if (!should_zap_cows(details))
1363*4882a593Smuzhiyun 				continue;
1364*4882a593Smuzhiyun 			rss[MM_SWAPENTS]--;
1365*4882a593Smuzhiyun 		} else if (is_migration_entry(entry)) {
1366*4882a593Smuzhiyun 			struct page *page;
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 			page = migration_entry_to_page(entry);
1369*4882a593Smuzhiyun 			if (details && details->check_mapping &&
1370*4882a593Smuzhiyun 			    details->check_mapping != page_rmapping(page))
1371*4882a593Smuzhiyun 				continue;
1372*4882a593Smuzhiyun 			rss[mm_counter(page)]--;
1373*4882a593Smuzhiyun 		}
1374*4882a593Smuzhiyun 		if (unlikely(!free_swap_and_cache(entry)))
1375*4882a593Smuzhiyun 			print_bad_pte(vma, addr, ptent, NULL);
1376*4882a593Smuzhiyun 		pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
1377*4882a593Smuzhiyun 	} while (pte++, addr += PAGE_SIZE, addr != end);
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	add_mm_rss_vec(mm, rss);
1380*4882a593Smuzhiyun 	arch_leave_lazy_mmu_mode();
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	/* Do the actual TLB flush before dropping ptl */
1383*4882a593Smuzhiyun 	if (force_flush)
1384*4882a593Smuzhiyun 		tlb_flush_mmu_tlbonly(tlb);
1385*4882a593Smuzhiyun 	pte_unmap_unlock(start_pte, ptl);
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	/*
1388*4882a593Smuzhiyun 	 * If we forced a TLB flush (either due to running out of
1389*4882a593Smuzhiyun 	 * batch buffers or because we needed to flush dirty TLB
1390*4882a593Smuzhiyun 	 * entries before releasing the ptl), free the batched
1391*4882a593Smuzhiyun 	 * memory too. Restart if we didn't do everything.
1392*4882a593Smuzhiyun 	 */
1393*4882a593Smuzhiyun 	if (force_flush) {
1394*4882a593Smuzhiyun 		force_flush = 0;
1395*4882a593Smuzhiyun 		tlb_flush_mmu(tlb);
1396*4882a593Smuzhiyun 	}
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	if (addr != end) {
1399*4882a593Smuzhiyun 		cond_resched();
1400*4882a593Smuzhiyun 		goto again;
1401*4882a593Smuzhiyun 	}
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	return addr;
1404*4882a593Smuzhiyun }
1405*4882a593Smuzhiyun 
zap_pmd_range(struct mmu_gather * tlb,struct vm_area_struct * vma,pud_t * pud,unsigned long addr,unsigned long end,struct zap_details * details)1406*4882a593Smuzhiyun static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1407*4882a593Smuzhiyun 				struct vm_area_struct *vma, pud_t *pud,
1408*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
1409*4882a593Smuzhiyun 				struct zap_details *details)
1410*4882a593Smuzhiyun {
1411*4882a593Smuzhiyun 	pmd_t *pmd;
1412*4882a593Smuzhiyun 	unsigned long next;
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun 	pmd = pmd_offset(pud, addr);
1415*4882a593Smuzhiyun 	do {
1416*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
1417*4882a593Smuzhiyun 		if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1418*4882a593Smuzhiyun 			if (next - addr != HPAGE_PMD_SIZE)
1419*4882a593Smuzhiyun 				__split_huge_pmd(vma, pmd, addr, false, NULL);
1420*4882a593Smuzhiyun 			else if (zap_huge_pmd(tlb, vma, pmd, addr))
1421*4882a593Smuzhiyun 				goto next;
1422*4882a593Smuzhiyun 			/* fall through */
1423*4882a593Smuzhiyun 		} else if (details && details->single_page &&
1424*4882a593Smuzhiyun 			   PageTransCompound(details->single_page) &&
1425*4882a593Smuzhiyun 			   next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1426*4882a593Smuzhiyun 			spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1427*4882a593Smuzhiyun 			/*
1428*4882a593Smuzhiyun 			 * Take and drop THP pmd lock so that we cannot return
1429*4882a593Smuzhiyun 			 * prematurely, while zap_huge_pmd() has cleared *pmd,
1430*4882a593Smuzhiyun 			 * but not yet decremented compound_mapcount().
1431*4882a593Smuzhiyun 			 */
1432*4882a593Smuzhiyun 			spin_unlock(ptl);
1433*4882a593Smuzhiyun 		}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 		/*
1436*4882a593Smuzhiyun 		 * Here there can be other concurrent MADV_DONTNEED or
1437*4882a593Smuzhiyun 		 * trans huge page faults running, and if the pmd is
1438*4882a593Smuzhiyun 		 * none or trans huge it can change under us. This is
1439*4882a593Smuzhiyun 		 * because MADV_DONTNEED holds the mmap_lock in read
1440*4882a593Smuzhiyun 		 * mode.
1441*4882a593Smuzhiyun 		 */
1442*4882a593Smuzhiyun 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
1443*4882a593Smuzhiyun 			goto next;
1444*4882a593Smuzhiyun 		next = zap_pte_range(tlb, vma, pmd, addr, next, details);
1445*4882a593Smuzhiyun next:
1446*4882a593Smuzhiyun 		cond_resched();
1447*4882a593Smuzhiyun 	} while (pmd++, addr = next, addr != end);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	return addr;
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun 
zap_pud_range(struct mmu_gather * tlb,struct vm_area_struct * vma,p4d_t * p4d,unsigned long addr,unsigned long end,struct zap_details * details)1452*4882a593Smuzhiyun static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
1453*4882a593Smuzhiyun 				struct vm_area_struct *vma, p4d_t *p4d,
1454*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
1455*4882a593Smuzhiyun 				struct zap_details *details)
1456*4882a593Smuzhiyun {
1457*4882a593Smuzhiyun 	pud_t *pud;
1458*4882a593Smuzhiyun 	unsigned long next;
1459*4882a593Smuzhiyun 
1460*4882a593Smuzhiyun 	pud = pud_offset(p4d, addr);
1461*4882a593Smuzhiyun 	do {
1462*4882a593Smuzhiyun 		next = pud_addr_end(addr, end);
1463*4882a593Smuzhiyun 		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
1464*4882a593Smuzhiyun 			if (next - addr != HPAGE_PUD_SIZE) {
1465*4882a593Smuzhiyun 				mmap_assert_locked(tlb->mm);
1466*4882a593Smuzhiyun 				split_huge_pud(vma, pud, addr);
1467*4882a593Smuzhiyun 			} else if (zap_huge_pud(tlb, vma, pud, addr))
1468*4882a593Smuzhiyun 				goto next;
1469*4882a593Smuzhiyun 			/* fall through */
1470*4882a593Smuzhiyun 		}
1471*4882a593Smuzhiyun 		if (pud_none_or_clear_bad(pud))
1472*4882a593Smuzhiyun 			continue;
1473*4882a593Smuzhiyun 		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1474*4882a593Smuzhiyun next:
1475*4882a593Smuzhiyun 		cond_resched();
1476*4882a593Smuzhiyun 	} while (pud++, addr = next, addr != end);
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	return addr;
1479*4882a593Smuzhiyun }
1480*4882a593Smuzhiyun 
zap_p4d_range(struct mmu_gather * tlb,struct vm_area_struct * vma,pgd_t * pgd,unsigned long addr,unsigned long end,struct zap_details * details)1481*4882a593Smuzhiyun static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
1482*4882a593Smuzhiyun 				struct vm_area_struct *vma, pgd_t *pgd,
1483*4882a593Smuzhiyun 				unsigned long addr, unsigned long end,
1484*4882a593Smuzhiyun 				struct zap_details *details)
1485*4882a593Smuzhiyun {
1486*4882a593Smuzhiyun 	p4d_t *p4d;
1487*4882a593Smuzhiyun 	unsigned long next;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, addr);
1490*4882a593Smuzhiyun 	do {
1491*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
1492*4882a593Smuzhiyun 		if (p4d_none_or_clear_bad(p4d))
1493*4882a593Smuzhiyun 			continue;
1494*4882a593Smuzhiyun 		next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1495*4882a593Smuzhiyun 	} while (p4d++, addr = next, addr != end);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	return addr;
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun 
unmap_page_range(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long addr,unsigned long end,struct zap_details * details)1500*4882a593Smuzhiyun void unmap_page_range(struct mmu_gather *tlb,
1501*4882a593Smuzhiyun 			     struct vm_area_struct *vma,
1502*4882a593Smuzhiyun 			     unsigned long addr, unsigned long end,
1503*4882a593Smuzhiyun 			     struct zap_details *details)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun 	pgd_t *pgd;
1506*4882a593Smuzhiyun 	unsigned long next;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	BUG_ON(addr >= end);
1509*4882a593Smuzhiyun 	tlb_start_vma(tlb, vma);
1510*4882a593Smuzhiyun 	pgd = pgd_offset(vma->vm_mm, addr);
1511*4882a593Smuzhiyun 	do {
1512*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
1513*4882a593Smuzhiyun 		if (pgd_none_or_clear_bad(pgd))
1514*4882a593Smuzhiyun 			continue;
1515*4882a593Smuzhiyun 		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1516*4882a593Smuzhiyun 	} while (pgd++, addr = next, addr != end);
1517*4882a593Smuzhiyun 	tlb_end_vma(tlb, vma);
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 
unmap_single_vma(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long start_addr,unsigned long end_addr,struct zap_details * details)1521*4882a593Smuzhiyun static void unmap_single_vma(struct mmu_gather *tlb,
1522*4882a593Smuzhiyun 		struct vm_area_struct *vma, unsigned long start_addr,
1523*4882a593Smuzhiyun 		unsigned long end_addr,
1524*4882a593Smuzhiyun 		struct zap_details *details)
1525*4882a593Smuzhiyun {
1526*4882a593Smuzhiyun 	unsigned long start = max(vma->vm_start, start_addr);
1527*4882a593Smuzhiyun 	unsigned long end;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	if (start >= vma->vm_end)
1530*4882a593Smuzhiyun 		return;
1531*4882a593Smuzhiyun 	end = min(vma->vm_end, end_addr);
1532*4882a593Smuzhiyun 	if (end <= vma->vm_start)
1533*4882a593Smuzhiyun 		return;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	if (vma->vm_file)
1536*4882a593Smuzhiyun 		uprobe_munmap(vma, start, end);
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	if (unlikely(vma->vm_flags & VM_PFNMAP))
1539*4882a593Smuzhiyun 		untrack_pfn(vma, 0, 0);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	if (start != end) {
1542*4882a593Smuzhiyun 		if (unlikely(is_vm_hugetlb_page(vma))) {
1543*4882a593Smuzhiyun 			/*
1544*4882a593Smuzhiyun 			 * It is undesirable to test vma->vm_file as it
1545*4882a593Smuzhiyun 			 * should be non-null for valid hugetlb area.
1546*4882a593Smuzhiyun 			 * However, vm_file will be NULL in the error
1547*4882a593Smuzhiyun 			 * cleanup path of mmap_region. When
1548*4882a593Smuzhiyun 			 * hugetlbfs ->mmap method fails,
1549*4882a593Smuzhiyun 			 * mmap_region() nullifies vma->vm_file
1550*4882a593Smuzhiyun 			 * before calling this function to clean up.
1551*4882a593Smuzhiyun 			 * Since no pte has actually been setup, it is
1552*4882a593Smuzhiyun 			 * safe to do nothing in this case.
1553*4882a593Smuzhiyun 			 */
1554*4882a593Smuzhiyun 			if (vma->vm_file) {
1555*4882a593Smuzhiyun 				i_mmap_lock_write(vma->vm_file->f_mapping);
1556*4882a593Smuzhiyun 				__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
1557*4882a593Smuzhiyun 				i_mmap_unlock_write(vma->vm_file->f_mapping);
1558*4882a593Smuzhiyun 			}
1559*4882a593Smuzhiyun 		} else
1560*4882a593Smuzhiyun 			unmap_page_range(tlb, vma, start, end, details);
1561*4882a593Smuzhiyun 	}
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun /**
1565*4882a593Smuzhiyun  * unmap_vmas - unmap a range of memory covered by a list of vma's
1566*4882a593Smuzhiyun  * @tlb: address of the caller's struct mmu_gather
1567*4882a593Smuzhiyun  * @vma: the starting vma
1568*4882a593Smuzhiyun  * @start_addr: virtual address at which to start unmapping
1569*4882a593Smuzhiyun  * @end_addr: virtual address at which to end unmapping
1570*4882a593Smuzhiyun  *
1571*4882a593Smuzhiyun  * Unmap all pages in the vma list.
1572*4882a593Smuzhiyun  *
1573*4882a593Smuzhiyun  * Only addresses between `start' and `end' will be unmapped.
1574*4882a593Smuzhiyun  *
1575*4882a593Smuzhiyun  * The VMA list must be sorted in ascending virtual address order.
1576*4882a593Smuzhiyun  *
1577*4882a593Smuzhiyun  * unmap_vmas() assumes that the caller will flush the whole unmapped address
1578*4882a593Smuzhiyun  * range after unmap_vmas() returns.  So the only responsibility here is to
1579*4882a593Smuzhiyun  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
1580*4882a593Smuzhiyun  * drops the lock and schedules.
1581*4882a593Smuzhiyun  */
unmap_vmas(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long start_addr,unsigned long end_addr)1582*4882a593Smuzhiyun void unmap_vmas(struct mmu_gather *tlb,
1583*4882a593Smuzhiyun 		struct vm_area_struct *vma, unsigned long start_addr,
1584*4882a593Smuzhiyun 		unsigned long end_addr)
1585*4882a593Smuzhiyun {
1586*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1587*4882a593Smuzhiyun 
1588*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1589*4882a593Smuzhiyun 				start_addr, end_addr);
1590*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
1591*4882a593Smuzhiyun 	for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
1592*4882a593Smuzhiyun 		unmap_single_vma(tlb, vma, start_addr, end_addr, NULL);
1593*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_end(&range);
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun /**
1597*4882a593Smuzhiyun  * zap_page_range - remove user pages in a given range
1598*4882a593Smuzhiyun  * @vma: vm_area_struct holding the applicable pages
1599*4882a593Smuzhiyun  * @start: starting address of pages to zap
1600*4882a593Smuzhiyun  * @size: number of bytes to zap
1601*4882a593Smuzhiyun  *
1602*4882a593Smuzhiyun  * Caller must protect the VMA list
1603*4882a593Smuzhiyun  */
zap_page_range(struct vm_area_struct * vma,unsigned long start,unsigned long size)1604*4882a593Smuzhiyun void zap_page_range(struct vm_area_struct *vma, unsigned long start,
1605*4882a593Smuzhiyun 		unsigned long size)
1606*4882a593Smuzhiyun {
1607*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1608*4882a593Smuzhiyun 	struct mmu_gather tlb;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	lru_add_drain();
1611*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1612*4882a593Smuzhiyun 				start, start + size);
1613*4882a593Smuzhiyun 	tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1614*4882a593Smuzhiyun 	update_hiwater_rss(vma->vm_mm);
1615*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
1616*4882a593Smuzhiyun 	for ( ; vma && vma->vm_start < range.end; vma = vma->vm_next)
1617*4882a593Smuzhiyun 		unmap_single_vma(&tlb, vma, start, range.end, NULL);
1618*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_end(&range);
1619*4882a593Smuzhiyun 	tlb_finish_mmu(&tlb, start, range.end);
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun /**
1623*4882a593Smuzhiyun  * zap_page_range_single - remove user pages in a given range
1624*4882a593Smuzhiyun  * @vma: vm_area_struct holding the applicable pages
1625*4882a593Smuzhiyun  * @address: starting address of pages to zap
1626*4882a593Smuzhiyun  * @size: number of bytes to zap
1627*4882a593Smuzhiyun  * @details: details of shared cache invalidation
1628*4882a593Smuzhiyun  *
1629*4882a593Smuzhiyun  * The range must fit into one VMA.
1630*4882a593Smuzhiyun  */
zap_page_range_single(struct vm_area_struct * vma,unsigned long address,unsigned long size,struct zap_details * details)1631*4882a593Smuzhiyun static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1632*4882a593Smuzhiyun 		unsigned long size, struct zap_details *details)
1633*4882a593Smuzhiyun {
1634*4882a593Smuzhiyun 	struct mmu_notifier_range range;
1635*4882a593Smuzhiyun 	struct mmu_gather tlb;
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	lru_add_drain();
1638*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1639*4882a593Smuzhiyun 				address, address + size);
1640*4882a593Smuzhiyun 	tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1641*4882a593Smuzhiyun 	update_hiwater_rss(vma->vm_mm);
1642*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
1643*4882a593Smuzhiyun 	unmap_single_vma(&tlb, vma, address, range.end, details);
1644*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_end(&range);
1645*4882a593Smuzhiyun 	tlb_finish_mmu(&tlb, address, range.end);
1646*4882a593Smuzhiyun }
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun /**
1649*4882a593Smuzhiyun  * zap_vma_ptes - remove ptes mapping the vma
1650*4882a593Smuzhiyun  * @vma: vm_area_struct holding ptes to be zapped
1651*4882a593Smuzhiyun  * @address: starting address of pages to zap
1652*4882a593Smuzhiyun  * @size: number of bytes to zap
1653*4882a593Smuzhiyun  *
1654*4882a593Smuzhiyun  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1655*4882a593Smuzhiyun  *
1656*4882a593Smuzhiyun  * The entire address range must be fully contained within the vma.
1657*4882a593Smuzhiyun  *
1658*4882a593Smuzhiyun  */
zap_vma_ptes(struct vm_area_struct * vma,unsigned long address,unsigned long size)1659*4882a593Smuzhiyun void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1660*4882a593Smuzhiyun 		unsigned long size)
1661*4882a593Smuzhiyun {
1662*4882a593Smuzhiyun 	if (address < vma->vm_start || address + size > vma->vm_end ||
1663*4882a593Smuzhiyun 	    		!(vma->vm_flags & VM_PFNMAP))
1664*4882a593Smuzhiyun 		return;
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	zap_page_range_single(vma, address, size, NULL);
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(zap_vma_ptes);
1669*4882a593Smuzhiyun 
walk_to_pmd(struct mm_struct * mm,unsigned long addr)1670*4882a593Smuzhiyun static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	pgd_t *pgd;
1673*4882a593Smuzhiyun 	p4d_t *p4d;
1674*4882a593Smuzhiyun 	pud_t *pud;
1675*4882a593Smuzhiyun 	pmd_t *pmd;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	pgd = pgd_offset(mm, addr);
1678*4882a593Smuzhiyun 	p4d = p4d_alloc(mm, pgd, addr);
1679*4882a593Smuzhiyun 	if (!p4d)
1680*4882a593Smuzhiyun 		return NULL;
1681*4882a593Smuzhiyun 	pud = pud_alloc(mm, p4d, addr);
1682*4882a593Smuzhiyun 	if (!pud)
1683*4882a593Smuzhiyun 		return NULL;
1684*4882a593Smuzhiyun 	pmd = pmd_alloc(mm, pud, addr);
1685*4882a593Smuzhiyun 	if (!pmd)
1686*4882a593Smuzhiyun 		return NULL;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	VM_BUG_ON(pmd_trans_huge(*pmd));
1689*4882a593Smuzhiyun 	return pmd;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun 
__get_locked_pte(struct mm_struct * mm,unsigned long addr,spinlock_t ** ptl)1692*4882a593Smuzhiyun pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1693*4882a593Smuzhiyun 			spinlock_t **ptl)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	pmd_t *pmd = walk_to_pmd(mm, addr);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	if (!pmd)
1698*4882a593Smuzhiyun 		return NULL;
1699*4882a593Smuzhiyun 	return pte_alloc_map_lock(mm, pmd, addr, ptl);
1700*4882a593Smuzhiyun }
1701*4882a593Smuzhiyun 
validate_page_before_insert(struct page * page)1702*4882a593Smuzhiyun static int validate_page_before_insert(struct page *page)
1703*4882a593Smuzhiyun {
1704*4882a593Smuzhiyun 	if (PageAnon(page) || PageSlab(page) || page_has_type(page))
1705*4882a593Smuzhiyun 		return -EINVAL;
1706*4882a593Smuzhiyun 	flush_dcache_page(page);
1707*4882a593Smuzhiyun 	return 0;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun 
insert_page_into_pte_locked(struct mm_struct * mm,pte_t * pte,unsigned long addr,struct page * page,pgprot_t prot)1710*4882a593Smuzhiyun static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
1711*4882a593Smuzhiyun 			unsigned long addr, struct page *page, pgprot_t prot)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun 	if (!pte_none(*pte))
1714*4882a593Smuzhiyun 		return -EBUSY;
1715*4882a593Smuzhiyun 	/* Ok, finally just insert the thing.. */
1716*4882a593Smuzhiyun 	get_page(page);
1717*4882a593Smuzhiyun 	inc_mm_counter_fast(mm, mm_counter_file(page));
1718*4882a593Smuzhiyun 	page_add_file_rmap(page, false);
1719*4882a593Smuzhiyun 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
1720*4882a593Smuzhiyun 	return 0;
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun 
1723*4882a593Smuzhiyun /*
1724*4882a593Smuzhiyun  * This is the old fallback for page remapping.
1725*4882a593Smuzhiyun  *
1726*4882a593Smuzhiyun  * For historical reasons, it only allows reserved pages. Only
1727*4882a593Smuzhiyun  * old drivers should use this, and they needed to mark their
1728*4882a593Smuzhiyun  * pages reserved for the old functions anyway.
1729*4882a593Smuzhiyun  */
insert_page(struct vm_area_struct * vma,unsigned long addr,struct page * page,pgprot_t prot)1730*4882a593Smuzhiyun static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1731*4882a593Smuzhiyun 			struct page *page, pgprot_t prot)
1732*4882a593Smuzhiyun {
1733*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1734*4882a593Smuzhiyun 	int retval;
1735*4882a593Smuzhiyun 	pte_t *pte;
1736*4882a593Smuzhiyun 	spinlock_t *ptl;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	retval = validate_page_before_insert(page);
1739*4882a593Smuzhiyun 	if (retval)
1740*4882a593Smuzhiyun 		goto out;
1741*4882a593Smuzhiyun 	retval = -ENOMEM;
1742*4882a593Smuzhiyun 	pte = get_locked_pte(mm, addr, &ptl);
1743*4882a593Smuzhiyun 	if (!pte)
1744*4882a593Smuzhiyun 		goto out;
1745*4882a593Smuzhiyun 	retval = insert_page_into_pte_locked(mm, pte, addr, page, prot);
1746*4882a593Smuzhiyun 	pte_unmap_unlock(pte, ptl);
1747*4882a593Smuzhiyun out:
1748*4882a593Smuzhiyun 	return retval;
1749*4882a593Smuzhiyun }
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun #ifdef pte_index
insert_page_in_batch_locked(struct mm_struct * mm,pte_t * pte,unsigned long addr,struct page * page,pgprot_t prot)1752*4882a593Smuzhiyun static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
1753*4882a593Smuzhiyun 			unsigned long addr, struct page *page, pgprot_t prot)
1754*4882a593Smuzhiyun {
1755*4882a593Smuzhiyun 	int err;
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	if (!page_count(page))
1758*4882a593Smuzhiyun 		return -EINVAL;
1759*4882a593Smuzhiyun 	err = validate_page_before_insert(page);
1760*4882a593Smuzhiyun 	if (err)
1761*4882a593Smuzhiyun 		return err;
1762*4882a593Smuzhiyun 	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
1763*4882a593Smuzhiyun }
1764*4882a593Smuzhiyun 
1765*4882a593Smuzhiyun /* insert_pages() amortizes the cost of spinlock operations
1766*4882a593Smuzhiyun  * when inserting pages in a loop. Arch *must* define pte_index.
1767*4882a593Smuzhiyun  */
insert_pages(struct vm_area_struct * vma,unsigned long addr,struct page ** pages,unsigned long * num,pgprot_t prot)1768*4882a593Smuzhiyun static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1769*4882a593Smuzhiyun 			struct page **pages, unsigned long *num, pgprot_t prot)
1770*4882a593Smuzhiyun {
1771*4882a593Smuzhiyun 	pmd_t *pmd = NULL;
1772*4882a593Smuzhiyun 	pte_t *start_pte, *pte;
1773*4882a593Smuzhiyun 	spinlock_t *pte_lock;
1774*4882a593Smuzhiyun 	struct mm_struct *const mm = vma->vm_mm;
1775*4882a593Smuzhiyun 	unsigned long curr_page_idx = 0;
1776*4882a593Smuzhiyun 	unsigned long remaining_pages_total = *num;
1777*4882a593Smuzhiyun 	unsigned long pages_to_write_in_pmd;
1778*4882a593Smuzhiyun 	int ret;
1779*4882a593Smuzhiyun more:
1780*4882a593Smuzhiyun 	ret = -EFAULT;
1781*4882a593Smuzhiyun 	pmd = walk_to_pmd(mm, addr);
1782*4882a593Smuzhiyun 	if (!pmd)
1783*4882a593Smuzhiyun 		goto out;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	pages_to_write_in_pmd = min_t(unsigned long,
1786*4882a593Smuzhiyun 		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
1787*4882a593Smuzhiyun 
1788*4882a593Smuzhiyun 	/* Allocate the PTE if necessary; takes PMD lock once only. */
1789*4882a593Smuzhiyun 	ret = -ENOMEM;
1790*4882a593Smuzhiyun 	if (pte_alloc(mm, pmd))
1791*4882a593Smuzhiyun 		goto out;
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	while (pages_to_write_in_pmd) {
1794*4882a593Smuzhiyun 		int pte_idx = 0;
1795*4882a593Smuzhiyun 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
1798*4882a593Smuzhiyun 		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
1799*4882a593Smuzhiyun 			int err = insert_page_in_batch_locked(mm, pte,
1800*4882a593Smuzhiyun 				addr, pages[curr_page_idx], prot);
1801*4882a593Smuzhiyun 			if (unlikely(err)) {
1802*4882a593Smuzhiyun 				pte_unmap_unlock(start_pte, pte_lock);
1803*4882a593Smuzhiyun 				ret = err;
1804*4882a593Smuzhiyun 				remaining_pages_total -= pte_idx;
1805*4882a593Smuzhiyun 				goto out;
1806*4882a593Smuzhiyun 			}
1807*4882a593Smuzhiyun 			addr += PAGE_SIZE;
1808*4882a593Smuzhiyun 			++curr_page_idx;
1809*4882a593Smuzhiyun 		}
1810*4882a593Smuzhiyun 		pte_unmap_unlock(start_pte, pte_lock);
1811*4882a593Smuzhiyun 		pages_to_write_in_pmd -= batch_size;
1812*4882a593Smuzhiyun 		remaining_pages_total -= batch_size;
1813*4882a593Smuzhiyun 	}
1814*4882a593Smuzhiyun 	if (remaining_pages_total)
1815*4882a593Smuzhiyun 		goto more;
1816*4882a593Smuzhiyun 	ret = 0;
1817*4882a593Smuzhiyun out:
1818*4882a593Smuzhiyun 	*num = remaining_pages_total;
1819*4882a593Smuzhiyun 	return ret;
1820*4882a593Smuzhiyun }
1821*4882a593Smuzhiyun #endif  /* ifdef pte_index */
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun /**
1824*4882a593Smuzhiyun  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1825*4882a593Smuzhiyun  * @vma: user vma to map to
1826*4882a593Smuzhiyun  * @addr: target start user address of these pages
1827*4882a593Smuzhiyun  * @pages: source kernel pages
1828*4882a593Smuzhiyun  * @num: in: number of pages to map. out: number of pages that were *not*
1829*4882a593Smuzhiyun  * mapped. (0 means all pages were successfully mapped).
1830*4882a593Smuzhiyun  *
1831*4882a593Smuzhiyun  * Preferred over vm_insert_page() when inserting multiple pages.
1832*4882a593Smuzhiyun  *
1833*4882a593Smuzhiyun  * In case of error, we may have mapped a subset of the provided
1834*4882a593Smuzhiyun  * pages. It is the caller's responsibility to account for this case.
1835*4882a593Smuzhiyun  *
1836*4882a593Smuzhiyun  * The same restrictions apply as in vm_insert_page().
1837*4882a593Smuzhiyun  */
vm_insert_pages(struct vm_area_struct * vma,unsigned long addr,struct page ** pages,unsigned long * num)1838*4882a593Smuzhiyun int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1839*4882a593Smuzhiyun 			struct page **pages, unsigned long *num)
1840*4882a593Smuzhiyun {
1841*4882a593Smuzhiyun #ifdef pte_index
1842*4882a593Smuzhiyun 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
1845*4882a593Smuzhiyun 		return -EFAULT;
1846*4882a593Smuzhiyun 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1847*4882a593Smuzhiyun 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1848*4882a593Smuzhiyun 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1849*4882a593Smuzhiyun 		vma->vm_flags |= VM_MIXEDMAP;
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 	/* Defer page refcount checking till we're about to map that page. */
1852*4882a593Smuzhiyun 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
1853*4882a593Smuzhiyun #else
1854*4882a593Smuzhiyun 	unsigned long idx = 0, pgcount = *num;
1855*4882a593Smuzhiyun 	int err = -EINVAL;
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	for (; idx < pgcount; ++idx) {
1858*4882a593Smuzhiyun 		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
1859*4882a593Smuzhiyun 		if (err)
1860*4882a593Smuzhiyun 			break;
1861*4882a593Smuzhiyun 	}
1862*4882a593Smuzhiyun 	*num = pgcount - idx;
1863*4882a593Smuzhiyun 	return err;
1864*4882a593Smuzhiyun #endif  /* ifdef pte_index */
1865*4882a593Smuzhiyun }
1866*4882a593Smuzhiyun EXPORT_SYMBOL(vm_insert_pages);
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun /**
1869*4882a593Smuzhiyun  * vm_insert_page - insert single page into user vma
1870*4882a593Smuzhiyun  * @vma: user vma to map to
1871*4882a593Smuzhiyun  * @addr: target user address of this page
1872*4882a593Smuzhiyun  * @page: source kernel page
1873*4882a593Smuzhiyun  *
1874*4882a593Smuzhiyun  * This allows drivers to insert individual pages they've allocated
1875*4882a593Smuzhiyun  * into a user vma.
1876*4882a593Smuzhiyun  *
1877*4882a593Smuzhiyun  * The page has to be a nice clean _individual_ kernel allocation.
1878*4882a593Smuzhiyun  * If you allocate a compound page, you need to have marked it as
1879*4882a593Smuzhiyun  * such (__GFP_COMP), or manually just split the page up yourself
1880*4882a593Smuzhiyun  * (see split_page()).
1881*4882a593Smuzhiyun  *
1882*4882a593Smuzhiyun  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1883*4882a593Smuzhiyun  * took an arbitrary page protection parameter. This doesn't allow
1884*4882a593Smuzhiyun  * that. Your vma protection will have to be set up correctly, which
1885*4882a593Smuzhiyun  * means that if you want a shared writable mapping, you'd better
1886*4882a593Smuzhiyun  * ask for a shared writable mapping!
1887*4882a593Smuzhiyun  *
1888*4882a593Smuzhiyun  * The page does not need to be reserved.
1889*4882a593Smuzhiyun  *
1890*4882a593Smuzhiyun  * Usually this function is called from f_op->mmap() handler
1891*4882a593Smuzhiyun  * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
1892*4882a593Smuzhiyun  * Caller must set VM_MIXEDMAP on vma if it wants to call this
1893*4882a593Smuzhiyun  * function from other places, for example from page-fault handler.
1894*4882a593Smuzhiyun  *
1895*4882a593Smuzhiyun  * Return: %0 on success, negative error code otherwise.
1896*4882a593Smuzhiyun  */
vm_insert_page(struct vm_area_struct * vma,unsigned long addr,struct page * page)1897*4882a593Smuzhiyun int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1898*4882a593Smuzhiyun 			struct page *page)
1899*4882a593Smuzhiyun {
1900*4882a593Smuzhiyun 	if (addr < vma->vm_start || addr >= vma->vm_end)
1901*4882a593Smuzhiyun 		return -EFAULT;
1902*4882a593Smuzhiyun 	if (!page_count(page))
1903*4882a593Smuzhiyun 		return -EINVAL;
1904*4882a593Smuzhiyun 	if (!(vma->vm_flags & VM_MIXEDMAP)) {
1905*4882a593Smuzhiyun 		BUG_ON(mmap_read_trylock(vma->vm_mm));
1906*4882a593Smuzhiyun 		BUG_ON(vma->vm_flags & VM_PFNMAP);
1907*4882a593Smuzhiyun 		vma->vm_flags |= VM_MIXEDMAP;
1908*4882a593Smuzhiyun 	}
1909*4882a593Smuzhiyun 	return insert_page(vma, addr, page, vma->vm_page_prot);
1910*4882a593Smuzhiyun }
1911*4882a593Smuzhiyun EXPORT_SYMBOL(vm_insert_page);
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun /*
1914*4882a593Smuzhiyun  * __vm_map_pages - maps range of kernel pages into user vma
1915*4882a593Smuzhiyun  * @vma: user vma to map to
1916*4882a593Smuzhiyun  * @pages: pointer to array of source kernel pages
1917*4882a593Smuzhiyun  * @num: number of pages in page array
1918*4882a593Smuzhiyun  * @offset: user's requested vm_pgoff
1919*4882a593Smuzhiyun  *
1920*4882a593Smuzhiyun  * This allows drivers to map range of kernel pages into a user vma.
1921*4882a593Smuzhiyun  *
1922*4882a593Smuzhiyun  * Return: 0 on success and error code otherwise.
1923*4882a593Smuzhiyun  */
__vm_map_pages(struct vm_area_struct * vma,struct page ** pages,unsigned long num,unsigned long offset)1924*4882a593Smuzhiyun static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1925*4882a593Smuzhiyun 				unsigned long num, unsigned long offset)
1926*4882a593Smuzhiyun {
1927*4882a593Smuzhiyun 	unsigned long count = vma_pages(vma);
1928*4882a593Smuzhiyun 	unsigned long uaddr = vma->vm_start;
1929*4882a593Smuzhiyun 	int ret, i;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	/* Fail if the user requested offset is beyond the end of the object */
1932*4882a593Smuzhiyun 	if (offset >= num)
1933*4882a593Smuzhiyun 		return -ENXIO;
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	/* Fail if the user requested size exceeds available object size */
1936*4882a593Smuzhiyun 	if (count > num - offset)
1937*4882a593Smuzhiyun 		return -ENXIO;
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	for (i = 0; i < count; i++) {
1940*4882a593Smuzhiyun 		ret = vm_insert_page(vma, uaddr, pages[offset + i]);
1941*4882a593Smuzhiyun 		if (ret < 0)
1942*4882a593Smuzhiyun 			return ret;
1943*4882a593Smuzhiyun 		uaddr += PAGE_SIZE;
1944*4882a593Smuzhiyun 	}
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	return 0;
1947*4882a593Smuzhiyun }
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun /**
1950*4882a593Smuzhiyun  * vm_map_pages - maps range of kernel pages starts with non zero offset
1951*4882a593Smuzhiyun  * @vma: user vma to map to
1952*4882a593Smuzhiyun  * @pages: pointer to array of source kernel pages
1953*4882a593Smuzhiyun  * @num: number of pages in page array
1954*4882a593Smuzhiyun  *
1955*4882a593Smuzhiyun  * Maps an object consisting of @num pages, catering for the user's
1956*4882a593Smuzhiyun  * requested vm_pgoff
1957*4882a593Smuzhiyun  *
1958*4882a593Smuzhiyun  * If we fail to insert any page into the vma, the function will return
1959*4882a593Smuzhiyun  * immediately leaving any previously inserted pages present.  Callers
1960*4882a593Smuzhiyun  * from the mmap handler may immediately return the error as their caller
1961*4882a593Smuzhiyun  * will destroy the vma, removing any successfully inserted pages. Other
1962*4882a593Smuzhiyun  * callers should make their own arrangements for calling unmap_region().
1963*4882a593Smuzhiyun  *
1964*4882a593Smuzhiyun  * Context: Process context. Called by mmap handlers.
1965*4882a593Smuzhiyun  * Return: 0 on success and error code otherwise.
1966*4882a593Smuzhiyun  */
vm_map_pages(struct vm_area_struct * vma,struct page ** pages,unsigned long num)1967*4882a593Smuzhiyun int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
1968*4882a593Smuzhiyun 				unsigned long num)
1969*4882a593Smuzhiyun {
1970*4882a593Smuzhiyun 	return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
1971*4882a593Smuzhiyun }
1972*4882a593Smuzhiyun EXPORT_SYMBOL(vm_map_pages);
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun /**
1975*4882a593Smuzhiyun  * vm_map_pages_zero - map range of kernel pages starts with zero offset
1976*4882a593Smuzhiyun  * @vma: user vma to map to
1977*4882a593Smuzhiyun  * @pages: pointer to array of source kernel pages
1978*4882a593Smuzhiyun  * @num: number of pages in page array
1979*4882a593Smuzhiyun  *
1980*4882a593Smuzhiyun  * Similar to vm_map_pages(), except that it explicitly sets the offset
1981*4882a593Smuzhiyun  * to 0. This function is intended for the drivers that did not consider
1982*4882a593Smuzhiyun  * vm_pgoff.
1983*4882a593Smuzhiyun  *
1984*4882a593Smuzhiyun  * Context: Process context. Called by mmap handlers.
1985*4882a593Smuzhiyun  * Return: 0 on success and error code otherwise.
1986*4882a593Smuzhiyun  */
vm_map_pages_zero(struct vm_area_struct * vma,struct page ** pages,unsigned long num)1987*4882a593Smuzhiyun int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
1988*4882a593Smuzhiyun 				unsigned long num)
1989*4882a593Smuzhiyun {
1990*4882a593Smuzhiyun 	return __vm_map_pages(vma, pages, num, 0);
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun EXPORT_SYMBOL(vm_map_pages_zero);
1993*4882a593Smuzhiyun 
insert_pfn(struct vm_area_struct * vma,unsigned long addr,pfn_t pfn,pgprot_t prot,bool mkwrite)1994*4882a593Smuzhiyun static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1995*4882a593Smuzhiyun 			pfn_t pfn, pgprot_t prot, bool mkwrite)
1996*4882a593Smuzhiyun {
1997*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1998*4882a593Smuzhiyun 	pte_t *pte, entry;
1999*4882a593Smuzhiyun 	spinlock_t *ptl;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	pte = get_locked_pte(mm, addr, &ptl);
2002*4882a593Smuzhiyun 	if (!pte)
2003*4882a593Smuzhiyun 		return VM_FAULT_OOM;
2004*4882a593Smuzhiyun 	if (!pte_none(*pte)) {
2005*4882a593Smuzhiyun 		if (mkwrite) {
2006*4882a593Smuzhiyun 			/*
2007*4882a593Smuzhiyun 			 * For read faults on private mappings the PFN passed
2008*4882a593Smuzhiyun 			 * in may not match the PFN we have mapped if the
2009*4882a593Smuzhiyun 			 * mapped PFN is a writeable COW page.  In the mkwrite
2010*4882a593Smuzhiyun 			 * case we are creating a writable PTE for a shared
2011*4882a593Smuzhiyun 			 * mapping and we expect the PFNs to match. If they
2012*4882a593Smuzhiyun 			 * don't match, we are likely racing with block
2013*4882a593Smuzhiyun 			 * allocation and mapping invalidation so just skip the
2014*4882a593Smuzhiyun 			 * update.
2015*4882a593Smuzhiyun 			 */
2016*4882a593Smuzhiyun 			if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) {
2017*4882a593Smuzhiyun 				WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
2018*4882a593Smuzhiyun 				goto out_unlock;
2019*4882a593Smuzhiyun 			}
2020*4882a593Smuzhiyun 			entry = pte_mkyoung(*pte);
2021*4882a593Smuzhiyun 			entry = maybe_mkwrite(pte_mkdirty(entry),
2022*4882a593Smuzhiyun 							vma->vm_flags);
2023*4882a593Smuzhiyun 			if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2024*4882a593Smuzhiyun 				update_mmu_cache(vma, addr, pte);
2025*4882a593Smuzhiyun 		}
2026*4882a593Smuzhiyun 		goto out_unlock;
2027*4882a593Smuzhiyun 	}
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	/* Ok, finally just insert the thing.. */
2030*4882a593Smuzhiyun 	if (pfn_t_devmap(pfn))
2031*4882a593Smuzhiyun 		entry = pte_mkdevmap(pfn_t_pte(pfn, prot));
2032*4882a593Smuzhiyun 	else
2033*4882a593Smuzhiyun 		entry = pte_mkspecial(pfn_t_pte(pfn, prot));
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 	if (mkwrite) {
2036*4882a593Smuzhiyun 		entry = pte_mkyoung(entry);
2037*4882a593Smuzhiyun 		entry = maybe_mkwrite(pte_mkdirty(entry), vma->vm_flags);
2038*4882a593Smuzhiyun 	}
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun 	set_pte_at(mm, addr, pte, entry);
2041*4882a593Smuzhiyun 	update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun out_unlock:
2044*4882a593Smuzhiyun 	pte_unmap_unlock(pte, ptl);
2045*4882a593Smuzhiyun 	return VM_FAULT_NOPAGE;
2046*4882a593Smuzhiyun }
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun /**
2049*4882a593Smuzhiyun  * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2050*4882a593Smuzhiyun  * @vma: user vma to map to
2051*4882a593Smuzhiyun  * @addr: target user address of this page
2052*4882a593Smuzhiyun  * @pfn: source kernel pfn
2053*4882a593Smuzhiyun  * @pgprot: pgprot flags for the inserted page
2054*4882a593Smuzhiyun  *
2055*4882a593Smuzhiyun  * This is exactly like vmf_insert_pfn(), except that it allows drivers
2056*4882a593Smuzhiyun  * to override pgprot on a per-page basis.
2057*4882a593Smuzhiyun  *
2058*4882a593Smuzhiyun  * This only makes sense for IO mappings, and it makes no sense for
2059*4882a593Smuzhiyun  * COW mappings.  In general, using multiple vmas is preferable;
2060*4882a593Smuzhiyun  * vmf_insert_pfn_prot should only be used if using multiple VMAs is
2061*4882a593Smuzhiyun  * impractical.
2062*4882a593Smuzhiyun  *
2063*4882a593Smuzhiyun  * See vmf_insert_mixed_prot() for a discussion of the implication of using
2064*4882a593Smuzhiyun  * a value of @pgprot different from that of @vma->vm_page_prot.
2065*4882a593Smuzhiyun  *
2066*4882a593Smuzhiyun  * Context: Process context.  May allocate using %GFP_KERNEL.
2067*4882a593Smuzhiyun  * Return: vm_fault_t value.
2068*4882a593Smuzhiyun  */
vmf_insert_pfn_prot(struct vm_area_struct * vma,unsigned long addr,unsigned long pfn,pgprot_t pgprot)2069*4882a593Smuzhiyun vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2070*4882a593Smuzhiyun 			unsigned long pfn, pgprot_t pgprot)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun 	/*
2073*4882a593Smuzhiyun 	 * Technically, architectures with pte_special can avoid all these
2074*4882a593Smuzhiyun 	 * restrictions (same for remap_pfn_range).  However we would like
2075*4882a593Smuzhiyun 	 * consistency in testing and feature parity among all, so we should
2076*4882a593Smuzhiyun 	 * try to keep these invariants in place for everybody.
2077*4882a593Smuzhiyun 	 */
2078*4882a593Smuzhiyun 	BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2079*4882a593Smuzhiyun 	BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2080*4882a593Smuzhiyun 						(VM_PFNMAP|VM_MIXEDMAP));
2081*4882a593Smuzhiyun 	BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2082*4882a593Smuzhiyun 	BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	if (addr < vma->vm_start || addr >= vma->vm_end)
2085*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	if (!pfn_modify_allowed(pfn, pgprot))
2088*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2093*4882a593Smuzhiyun 			false);
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun EXPORT_SYMBOL(vmf_insert_pfn_prot);
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun /**
2098*4882a593Smuzhiyun  * vmf_insert_pfn - insert single pfn into user vma
2099*4882a593Smuzhiyun  * @vma: user vma to map to
2100*4882a593Smuzhiyun  * @addr: target user address of this page
2101*4882a593Smuzhiyun  * @pfn: source kernel pfn
2102*4882a593Smuzhiyun  *
2103*4882a593Smuzhiyun  * Similar to vm_insert_page, this allows drivers to insert individual pages
2104*4882a593Smuzhiyun  * they've allocated into a user vma. Same comments apply.
2105*4882a593Smuzhiyun  *
2106*4882a593Smuzhiyun  * This function should only be called from a vm_ops->fault handler, and
2107*4882a593Smuzhiyun  * in that case the handler should return the result of this function.
2108*4882a593Smuzhiyun  *
2109*4882a593Smuzhiyun  * vma cannot be a COW mapping.
2110*4882a593Smuzhiyun  *
2111*4882a593Smuzhiyun  * As this is called only for pages that do not currently exist, we
2112*4882a593Smuzhiyun  * do not need to flush old virtual caches or the TLB.
2113*4882a593Smuzhiyun  *
2114*4882a593Smuzhiyun  * Context: Process context.  May allocate using %GFP_KERNEL.
2115*4882a593Smuzhiyun  * Return: vm_fault_t value.
2116*4882a593Smuzhiyun  */
vmf_insert_pfn(struct vm_area_struct * vma,unsigned long addr,unsigned long pfn)2117*4882a593Smuzhiyun vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2118*4882a593Smuzhiyun 			unsigned long pfn)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun 	return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
2121*4882a593Smuzhiyun }
2122*4882a593Smuzhiyun EXPORT_SYMBOL(vmf_insert_pfn);
2123*4882a593Smuzhiyun 
vm_mixed_ok(struct vm_area_struct * vma,pfn_t pfn)2124*4882a593Smuzhiyun static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2125*4882a593Smuzhiyun {
2126*4882a593Smuzhiyun 	/* these checks mirror the abort conditions in vm_normal_page */
2127*4882a593Smuzhiyun 	if (vma->vm_flags & VM_MIXEDMAP)
2128*4882a593Smuzhiyun 		return true;
2129*4882a593Smuzhiyun 	if (pfn_t_devmap(pfn))
2130*4882a593Smuzhiyun 		return true;
2131*4882a593Smuzhiyun 	if (pfn_t_special(pfn))
2132*4882a593Smuzhiyun 		return true;
2133*4882a593Smuzhiyun 	if (is_zero_pfn(pfn_t_to_pfn(pfn)))
2134*4882a593Smuzhiyun 		return true;
2135*4882a593Smuzhiyun 	return false;
2136*4882a593Smuzhiyun }
2137*4882a593Smuzhiyun 
__vm_insert_mixed(struct vm_area_struct * vma,unsigned long addr,pfn_t pfn,pgprot_t pgprot,bool mkwrite)2138*4882a593Smuzhiyun static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2139*4882a593Smuzhiyun 		unsigned long addr, pfn_t pfn, pgprot_t pgprot,
2140*4882a593Smuzhiyun 		bool mkwrite)
2141*4882a593Smuzhiyun {
2142*4882a593Smuzhiyun 	int err;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	BUG_ON(!vm_mixed_ok(vma, pfn));
2145*4882a593Smuzhiyun 
2146*4882a593Smuzhiyun 	if (addr < vma->vm_start || addr >= vma->vm_end)
2147*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2148*4882a593Smuzhiyun 
2149*4882a593Smuzhiyun 	track_pfn_insert(vma, &pgprot, pfn);
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	if (!pfn_modify_allowed(pfn_t_to_pfn(pfn), pgprot))
2152*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun 	/*
2155*4882a593Smuzhiyun 	 * If we don't have pte special, then we have to use the pfn_valid()
2156*4882a593Smuzhiyun 	 * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
2157*4882a593Smuzhiyun 	 * refcount the page if pfn_valid is true (hence insert_page rather
2158*4882a593Smuzhiyun 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
2159*4882a593Smuzhiyun 	 * without pte special, it would there be refcounted as a normal page.
2160*4882a593Smuzhiyun 	 */
2161*4882a593Smuzhiyun 	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) &&
2162*4882a593Smuzhiyun 	    !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
2163*4882a593Smuzhiyun 		struct page *page;
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun 		/*
2166*4882a593Smuzhiyun 		 * At this point we are committed to insert_page()
2167*4882a593Smuzhiyun 		 * regardless of whether the caller specified flags that
2168*4882a593Smuzhiyun 		 * result in pfn_t_has_page() == false.
2169*4882a593Smuzhiyun 		 */
2170*4882a593Smuzhiyun 		page = pfn_to_page(pfn_t_to_pfn(pfn));
2171*4882a593Smuzhiyun 		err = insert_page(vma, addr, page, pgprot);
2172*4882a593Smuzhiyun 	} else {
2173*4882a593Smuzhiyun 		return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2174*4882a593Smuzhiyun 	}
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	if (err == -ENOMEM)
2177*4882a593Smuzhiyun 		return VM_FAULT_OOM;
2178*4882a593Smuzhiyun 	if (err < 0 && err != -EBUSY)
2179*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	return VM_FAULT_NOPAGE;
2182*4882a593Smuzhiyun }
2183*4882a593Smuzhiyun 
2184*4882a593Smuzhiyun /**
2185*4882a593Smuzhiyun  * vmf_insert_mixed_prot - insert single pfn into user vma with specified pgprot
2186*4882a593Smuzhiyun  * @vma: user vma to map to
2187*4882a593Smuzhiyun  * @addr: target user address of this page
2188*4882a593Smuzhiyun  * @pfn: source kernel pfn
2189*4882a593Smuzhiyun  * @pgprot: pgprot flags for the inserted page
2190*4882a593Smuzhiyun  *
2191*4882a593Smuzhiyun  * This is exactly like vmf_insert_mixed(), except that it allows drivers
2192*4882a593Smuzhiyun  * to override pgprot on a per-page basis.
2193*4882a593Smuzhiyun  *
2194*4882a593Smuzhiyun  * Typically this function should be used by drivers to set caching- and
2195*4882a593Smuzhiyun  * encryption bits different than those of @vma->vm_page_prot, because
2196*4882a593Smuzhiyun  * the caching- or encryption mode may not be known at mmap() time.
2197*4882a593Smuzhiyun  * This is ok as long as @vma->vm_page_prot is not used by the core vm
2198*4882a593Smuzhiyun  * to set caching and encryption bits for those vmas (except for COW pages).
2199*4882a593Smuzhiyun  * This is ensured by core vm only modifying these page table entries using
2200*4882a593Smuzhiyun  * functions that don't touch caching- or encryption bits, using pte_modify()
2201*4882a593Smuzhiyun  * if needed. (See for example mprotect()).
2202*4882a593Smuzhiyun  * Also when new page-table entries are created, this is only done using the
2203*4882a593Smuzhiyun  * fault() callback, and never using the value of vma->vm_page_prot,
2204*4882a593Smuzhiyun  * except for page-table entries that point to anonymous pages as the result
2205*4882a593Smuzhiyun  * of COW.
2206*4882a593Smuzhiyun  *
2207*4882a593Smuzhiyun  * Context: Process context.  May allocate using %GFP_KERNEL.
2208*4882a593Smuzhiyun  * Return: vm_fault_t value.
2209*4882a593Smuzhiyun  */
vmf_insert_mixed_prot(struct vm_area_struct * vma,unsigned long addr,pfn_t pfn,pgprot_t pgprot)2210*4882a593Smuzhiyun vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
2211*4882a593Smuzhiyun 				 pfn_t pfn, pgprot_t pgprot)
2212*4882a593Smuzhiyun {
2213*4882a593Smuzhiyun 	return __vm_insert_mixed(vma, addr, pfn, pgprot, false);
2214*4882a593Smuzhiyun }
2215*4882a593Smuzhiyun EXPORT_SYMBOL(vmf_insert_mixed_prot);
2216*4882a593Smuzhiyun 
vmf_insert_mixed(struct vm_area_struct * vma,unsigned long addr,pfn_t pfn)2217*4882a593Smuzhiyun vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2218*4882a593Smuzhiyun 		pfn_t pfn)
2219*4882a593Smuzhiyun {
2220*4882a593Smuzhiyun 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, false);
2221*4882a593Smuzhiyun }
2222*4882a593Smuzhiyun EXPORT_SYMBOL(vmf_insert_mixed);
2223*4882a593Smuzhiyun 
2224*4882a593Smuzhiyun /*
2225*4882a593Smuzhiyun  *  If the insertion of PTE failed because someone else already added a
2226*4882a593Smuzhiyun  *  different entry in the mean time, we treat that as success as we assume
2227*4882a593Smuzhiyun  *  the same entry was actually inserted.
2228*4882a593Smuzhiyun  */
vmf_insert_mixed_mkwrite(struct vm_area_struct * vma,unsigned long addr,pfn_t pfn)2229*4882a593Smuzhiyun vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2230*4882a593Smuzhiyun 		unsigned long addr, pfn_t pfn)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun 	return __vm_insert_mixed(vma, addr, pfn, vma->vm_page_prot, true);
2233*4882a593Smuzhiyun }
2234*4882a593Smuzhiyun EXPORT_SYMBOL(vmf_insert_mixed_mkwrite);
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun /*
2237*4882a593Smuzhiyun  * maps a range of physical memory into the requested pages. the old
2238*4882a593Smuzhiyun  * mappings are removed. any references to nonexistent pages results
2239*4882a593Smuzhiyun  * in null mappings (currently treated as "copy-on-access")
2240*4882a593Smuzhiyun  */
remap_pte_range(struct mm_struct * mm,pmd_t * pmd,unsigned long addr,unsigned long end,unsigned long pfn,pgprot_t prot)2241*4882a593Smuzhiyun static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
2242*4882a593Smuzhiyun 			unsigned long addr, unsigned long end,
2243*4882a593Smuzhiyun 			unsigned long pfn, pgprot_t prot)
2244*4882a593Smuzhiyun {
2245*4882a593Smuzhiyun 	pte_t *pte, *mapped_pte;
2246*4882a593Smuzhiyun 	spinlock_t *ptl;
2247*4882a593Smuzhiyun 	int err = 0;
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
2250*4882a593Smuzhiyun 	if (!pte)
2251*4882a593Smuzhiyun 		return -ENOMEM;
2252*4882a593Smuzhiyun 	arch_enter_lazy_mmu_mode();
2253*4882a593Smuzhiyun 	do {
2254*4882a593Smuzhiyun 		BUG_ON(!pte_none(*pte));
2255*4882a593Smuzhiyun 		if (!pfn_modify_allowed(pfn, prot)) {
2256*4882a593Smuzhiyun 			err = -EACCES;
2257*4882a593Smuzhiyun 			break;
2258*4882a593Smuzhiyun 		}
2259*4882a593Smuzhiyun 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
2260*4882a593Smuzhiyun 		pfn++;
2261*4882a593Smuzhiyun 	} while (pte++, addr += PAGE_SIZE, addr != end);
2262*4882a593Smuzhiyun 	arch_leave_lazy_mmu_mode();
2263*4882a593Smuzhiyun 	pte_unmap_unlock(mapped_pte, ptl);
2264*4882a593Smuzhiyun 	return err;
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun 
remap_pmd_range(struct mm_struct * mm,pud_t * pud,unsigned long addr,unsigned long end,unsigned long pfn,pgprot_t prot)2267*4882a593Smuzhiyun static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
2268*4882a593Smuzhiyun 			unsigned long addr, unsigned long end,
2269*4882a593Smuzhiyun 			unsigned long pfn, pgprot_t prot)
2270*4882a593Smuzhiyun {
2271*4882a593Smuzhiyun 	pmd_t *pmd;
2272*4882a593Smuzhiyun 	unsigned long next;
2273*4882a593Smuzhiyun 	int err;
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 	pfn -= addr >> PAGE_SHIFT;
2276*4882a593Smuzhiyun 	pmd = pmd_alloc(mm, pud, addr);
2277*4882a593Smuzhiyun 	if (!pmd)
2278*4882a593Smuzhiyun 		return -ENOMEM;
2279*4882a593Smuzhiyun 	VM_BUG_ON(pmd_trans_huge(*pmd));
2280*4882a593Smuzhiyun 	do {
2281*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
2282*4882a593Smuzhiyun 		err = remap_pte_range(mm, pmd, addr, next,
2283*4882a593Smuzhiyun 				pfn + (addr >> PAGE_SHIFT), prot);
2284*4882a593Smuzhiyun 		if (err)
2285*4882a593Smuzhiyun 			return err;
2286*4882a593Smuzhiyun 	} while (pmd++, addr = next, addr != end);
2287*4882a593Smuzhiyun 	return 0;
2288*4882a593Smuzhiyun }
2289*4882a593Smuzhiyun 
remap_pud_range(struct mm_struct * mm,p4d_t * p4d,unsigned long addr,unsigned long end,unsigned long pfn,pgprot_t prot)2290*4882a593Smuzhiyun static inline int remap_pud_range(struct mm_struct *mm, p4d_t *p4d,
2291*4882a593Smuzhiyun 			unsigned long addr, unsigned long end,
2292*4882a593Smuzhiyun 			unsigned long pfn, pgprot_t prot)
2293*4882a593Smuzhiyun {
2294*4882a593Smuzhiyun 	pud_t *pud;
2295*4882a593Smuzhiyun 	unsigned long next;
2296*4882a593Smuzhiyun 	int err;
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 	pfn -= addr >> PAGE_SHIFT;
2299*4882a593Smuzhiyun 	pud = pud_alloc(mm, p4d, addr);
2300*4882a593Smuzhiyun 	if (!pud)
2301*4882a593Smuzhiyun 		return -ENOMEM;
2302*4882a593Smuzhiyun 	do {
2303*4882a593Smuzhiyun 		next = pud_addr_end(addr, end);
2304*4882a593Smuzhiyun 		err = remap_pmd_range(mm, pud, addr, next,
2305*4882a593Smuzhiyun 				pfn + (addr >> PAGE_SHIFT), prot);
2306*4882a593Smuzhiyun 		if (err)
2307*4882a593Smuzhiyun 			return err;
2308*4882a593Smuzhiyun 	} while (pud++, addr = next, addr != end);
2309*4882a593Smuzhiyun 	return 0;
2310*4882a593Smuzhiyun }
2311*4882a593Smuzhiyun 
remap_p4d_range(struct mm_struct * mm,pgd_t * pgd,unsigned long addr,unsigned long end,unsigned long pfn,pgprot_t prot)2312*4882a593Smuzhiyun static inline int remap_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2313*4882a593Smuzhiyun 			unsigned long addr, unsigned long end,
2314*4882a593Smuzhiyun 			unsigned long pfn, pgprot_t prot)
2315*4882a593Smuzhiyun {
2316*4882a593Smuzhiyun 	p4d_t *p4d;
2317*4882a593Smuzhiyun 	unsigned long next;
2318*4882a593Smuzhiyun 	int err;
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 	pfn -= addr >> PAGE_SHIFT;
2321*4882a593Smuzhiyun 	p4d = p4d_alloc(mm, pgd, addr);
2322*4882a593Smuzhiyun 	if (!p4d)
2323*4882a593Smuzhiyun 		return -ENOMEM;
2324*4882a593Smuzhiyun 	do {
2325*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
2326*4882a593Smuzhiyun 		err = remap_pud_range(mm, p4d, addr, next,
2327*4882a593Smuzhiyun 				pfn + (addr >> PAGE_SHIFT), prot);
2328*4882a593Smuzhiyun 		if (err)
2329*4882a593Smuzhiyun 			return err;
2330*4882a593Smuzhiyun 	} while (p4d++, addr = next, addr != end);
2331*4882a593Smuzhiyun 	return 0;
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun /**
2335*4882a593Smuzhiyun  * remap_pfn_range - remap kernel memory to userspace
2336*4882a593Smuzhiyun  * @vma: user vma to map to
2337*4882a593Smuzhiyun  * @addr: target page aligned user address to start at
2338*4882a593Smuzhiyun  * @pfn: page frame number of kernel physical memory address
2339*4882a593Smuzhiyun  * @size: size of mapping area
2340*4882a593Smuzhiyun  * @prot: page protection flags for this mapping
2341*4882a593Smuzhiyun  *
2342*4882a593Smuzhiyun  * Note: this is only safe if the mm semaphore is held when called.
2343*4882a593Smuzhiyun  *
2344*4882a593Smuzhiyun  * Return: %0 on success, negative error code otherwise.
2345*4882a593Smuzhiyun  */
remap_pfn_range(struct vm_area_struct * vma,unsigned long addr,unsigned long pfn,unsigned long size,pgprot_t prot)2346*4882a593Smuzhiyun int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2347*4882a593Smuzhiyun 		    unsigned long pfn, unsigned long size, pgprot_t prot)
2348*4882a593Smuzhiyun {
2349*4882a593Smuzhiyun 	pgd_t *pgd;
2350*4882a593Smuzhiyun 	unsigned long next;
2351*4882a593Smuzhiyun 	unsigned long end = addr + PAGE_ALIGN(size);
2352*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
2353*4882a593Smuzhiyun 	unsigned long remap_pfn = pfn;
2354*4882a593Smuzhiyun 	int err;
2355*4882a593Smuzhiyun 
2356*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!PAGE_ALIGNED(addr)))
2357*4882a593Smuzhiyun 		return -EINVAL;
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	/*
2360*4882a593Smuzhiyun 	 * Physically remapped pages are special. Tell the
2361*4882a593Smuzhiyun 	 * rest of the world about it:
2362*4882a593Smuzhiyun 	 *   VM_IO tells people not to look at these pages
2363*4882a593Smuzhiyun 	 *	(accesses can have side effects).
2364*4882a593Smuzhiyun 	 *   VM_PFNMAP tells the core MM that the base pages are just
2365*4882a593Smuzhiyun 	 *	raw PFN mappings, and do not have a "struct page" associated
2366*4882a593Smuzhiyun 	 *	with them.
2367*4882a593Smuzhiyun 	 *   VM_DONTEXPAND
2368*4882a593Smuzhiyun 	 *      Disable vma merging and expanding with mremap().
2369*4882a593Smuzhiyun 	 *   VM_DONTDUMP
2370*4882a593Smuzhiyun 	 *      Omit vma from core dump, even when VM_IO turned off.
2371*4882a593Smuzhiyun 	 *
2372*4882a593Smuzhiyun 	 * There's a horrible special case to handle copy-on-write
2373*4882a593Smuzhiyun 	 * behaviour that some programs depend on. We mark the "original"
2374*4882a593Smuzhiyun 	 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2375*4882a593Smuzhiyun 	 * See vm_normal_page() for details.
2376*4882a593Smuzhiyun 	 */
2377*4882a593Smuzhiyun 	if (is_cow_mapping(vma->vm_flags)) {
2378*4882a593Smuzhiyun 		if (addr != vma->vm_start || end != vma->vm_end)
2379*4882a593Smuzhiyun 			return -EINVAL;
2380*4882a593Smuzhiyun 		vma->vm_pgoff = pfn;
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 	err = track_pfn_remap(vma, &prot, remap_pfn, addr, PAGE_ALIGN(size));
2384*4882a593Smuzhiyun 	if (err)
2385*4882a593Smuzhiyun 		return -EINVAL;
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
2388*4882a593Smuzhiyun 
2389*4882a593Smuzhiyun 	BUG_ON(addr >= end);
2390*4882a593Smuzhiyun 	pfn -= addr >> PAGE_SHIFT;
2391*4882a593Smuzhiyun 	pgd = pgd_offset(mm, addr);
2392*4882a593Smuzhiyun 	flush_cache_range(vma, addr, end);
2393*4882a593Smuzhiyun 	do {
2394*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
2395*4882a593Smuzhiyun 		err = remap_p4d_range(mm, pgd, addr, next,
2396*4882a593Smuzhiyun 				pfn + (addr >> PAGE_SHIFT), prot);
2397*4882a593Smuzhiyun 		if (err)
2398*4882a593Smuzhiyun 			break;
2399*4882a593Smuzhiyun 	} while (pgd++, addr = next, addr != end);
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	if (err)
2402*4882a593Smuzhiyun 		untrack_pfn(vma, remap_pfn, PAGE_ALIGN(size));
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 	return err;
2405*4882a593Smuzhiyun }
2406*4882a593Smuzhiyun EXPORT_SYMBOL(remap_pfn_range);
2407*4882a593Smuzhiyun 
2408*4882a593Smuzhiyun /**
2409*4882a593Smuzhiyun  * vm_iomap_memory - remap memory to userspace
2410*4882a593Smuzhiyun  * @vma: user vma to map to
2411*4882a593Smuzhiyun  * @start: start of the physical memory to be mapped
2412*4882a593Smuzhiyun  * @len: size of area
2413*4882a593Smuzhiyun  *
2414*4882a593Smuzhiyun  * This is a simplified io_remap_pfn_range() for common driver use. The
2415*4882a593Smuzhiyun  * driver just needs to give us the physical memory range to be mapped,
2416*4882a593Smuzhiyun  * we'll figure out the rest from the vma information.
2417*4882a593Smuzhiyun  *
2418*4882a593Smuzhiyun  * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2419*4882a593Smuzhiyun  * whatever write-combining details or similar.
2420*4882a593Smuzhiyun  *
2421*4882a593Smuzhiyun  * Return: %0 on success, negative error code otherwise.
2422*4882a593Smuzhiyun  */
vm_iomap_memory(struct vm_area_struct * vma,phys_addr_t start,unsigned long len)2423*4882a593Smuzhiyun int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2424*4882a593Smuzhiyun {
2425*4882a593Smuzhiyun 	unsigned long vm_len, pfn, pages;
2426*4882a593Smuzhiyun 
2427*4882a593Smuzhiyun 	/* Check that the physical memory area passed in looks valid */
2428*4882a593Smuzhiyun 	if (start + len < start)
2429*4882a593Smuzhiyun 		return -EINVAL;
2430*4882a593Smuzhiyun 	/*
2431*4882a593Smuzhiyun 	 * You *really* shouldn't map things that aren't page-aligned,
2432*4882a593Smuzhiyun 	 * but we've historically allowed it because IO memory might
2433*4882a593Smuzhiyun 	 * just have smaller alignment.
2434*4882a593Smuzhiyun 	 */
2435*4882a593Smuzhiyun 	len += start & ~PAGE_MASK;
2436*4882a593Smuzhiyun 	pfn = start >> PAGE_SHIFT;
2437*4882a593Smuzhiyun 	pages = (len + ~PAGE_MASK) >> PAGE_SHIFT;
2438*4882a593Smuzhiyun 	if (pfn + pages < pfn)
2439*4882a593Smuzhiyun 		return -EINVAL;
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	/* We start the mapping 'vm_pgoff' pages into the area */
2442*4882a593Smuzhiyun 	if (vma->vm_pgoff > pages)
2443*4882a593Smuzhiyun 		return -EINVAL;
2444*4882a593Smuzhiyun 	pfn += vma->vm_pgoff;
2445*4882a593Smuzhiyun 	pages -= vma->vm_pgoff;
2446*4882a593Smuzhiyun 
2447*4882a593Smuzhiyun 	/* Can we fit all of the mapping? */
2448*4882a593Smuzhiyun 	vm_len = vma->vm_end - vma->vm_start;
2449*4882a593Smuzhiyun 	if (vm_len >> PAGE_SHIFT > pages)
2450*4882a593Smuzhiyun 		return -EINVAL;
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	/* Ok, let it rip */
2453*4882a593Smuzhiyun 	return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun EXPORT_SYMBOL(vm_iomap_memory);
2456*4882a593Smuzhiyun 
apply_to_pte_range(struct mm_struct * mm,pmd_t * pmd,unsigned long addr,unsigned long end,pte_fn_t fn,void * data,bool create,pgtbl_mod_mask * mask)2457*4882a593Smuzhiyun static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
2458*4882a593Smuzhiyun 				     unsigned long addr, unsigned long end,
2459*4882a593Smuzhiyun 				     pte_fn_t fn, void *data, bool create,
2460*4882a593Smuzhiyun 				     pgtbl_mod_mask *mask)
2461*4882a593Smuzhiyun {
2462*4882a593Smuzhiyun 	pte_t *pte;
2463*4882a593Smuzhiyun 	int err = 0;
2464*4882a593Smuzhiyun 	spinlock_t *ptl;
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun 	if (create) {
2467*4882a593Smuzhiyun 		pte = (mm == &init_mm) ?
2468*4882a593Smuzhiyun 			pte_alloc_kernel_track(pmd, addr, mask) :
2469*4882a593Smuzhiyun 			pte_alloc_map_lock(mm, pmd, addr, &ptl);
2470*4882a593Smuzhiyun 		if (!pte)
2471*4882a593Smuzhiyun 			return -ENOMEM;
2472*4882a593Smuzhiyun 	} else {
2473*4882a593Smuzhiyun 		pte = (mm == &init_mm) ?
2474*4882a593Smuzhiyun 			pte_offset_kernel(pmd, addr) :
2475*4882a593Smuzhiyun 			pte_offset_map_lock(mm, pmd, addr, &ptl);
2476*4882a593Smuzhiyun 	}
2477*4882a593Smuzhiyun 
2478*4882a593Smuzhiyun 	BUG_ON(pmd_huge(*pmd));
2479*4882a593Smuzhiyun 
2480*4882a593Smuzhiyun 	arch_enter_lazy_mmu_mode();
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	if (fn) {
2483*4882a593Smuzhiyun 		do {
2484*4882a593Smuzhiyun 			if (create || !pte_none(*pte)) {
2485*4882a593Smuzhiyun 				err = fn(pte++, addr, data);
2486*4882a593Smuzhiyun 				if (err)
2487*4882a593Smuzhiyun 					break;
2488*4882a593Smuzhiyun 			}
2489*4882a593Smuzhiyun 		} while (addr += PAGE_SIZE, addr != end);
2490*4882a593Smuzhiyun 	}
2491*4882a593Smuzhiyun 	*mask |= PGTBL_PTE_MODIFIED;
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	arch_leave_lazy_mmu_mode();
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	if (mm != &init_mm)
2496*4882a593Smuzhiyun 		pte_unmap_unlock(pte-1, ptl);
2497*4882a593Smuzhiyun 	return err;
2498*4882a593Smuzhiyun }
2499*4882a593Smuzhiyun 
apply_to_pmd_range(struct mm_struct * mm,pud_t * pud,unsigned long addr,unsigned long end,pte_fn_t fn,void * data,bool create,pgtbl_mod_mask * mask)2500*4882a593Smuzhiyun static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
2501*4882a593Smuzhiyun 				     unsigned long addr, unsigned long end,
2502*4882a593Smuzhiyun 				     pte_fn_t fn, void *data, bool create,
2503*4882a593Smuzhiyun 				     pgtbl_mod_mask *mask)
2504*4882a593Smuzhiyun {
2505*4882a593Smuzhiyun 	pmd_t *pmd;
2506*4882a593Smuzhiyun 	unsigned long next;
2507*4882a593Smuzhiyun 	int err = 0;
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 	BUG_ON(pud_huge(*pud));
2510*4882a593Smuzhiyun 
2511*4882a593Smuzhiyun 	if (create) {
2512*4882a593Smuzhiyun 		pmd = pmd_alloc_track(mm, pud, addr, mask);
2513*4882a593Smuzhiyun 		if (!pmd)
2514*4882a593Smuzhiyun 			return -ENOMEM;
2515*4882a593Smuzhiyun 	} else {
2516*4882a593Smuzhiyun 		pmd = pmd_offset(pud, addr);
2517*4882a593Smuzhiyun 	}
2518*4882a593Smuzhiyun 	do {
2519*4882a593Smuzhiyun 		next = pmd_addr_end(addr, end);
2520*4882a593Smuzhiyun 		if (create || !pmd_none_or_clear_bad(pmd)) {
2521*4882a593Smuzhiyun 			err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
2522*4882a593Smuzhiyun 						 create, mask);
2523*4882a593Smuzhiyun 			if (err)
2524*4882a593Smuzhiyun 				break;
2525*4882a593Smuzhiyun 		}
2526*4882a593Smuzhiyun 	} while (pmd++, addr = next, addr != end);
2527*4882a593Smuzhiyun 	return err;
2528*4882a593Smuzhiyun }
2529*4882a593Smuzhiyun 
apply_to_pud_range(struct mm_struct * mm,p4d_t * p4d,unsigned long addr,unsigned long end,pte_fn_t fn,void * data,bool create,pgtbl_mod_mask * mask)2530*4882a593Smuzhiyun static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
2531*4882a593Smuzhiyun 				     unsigned long addr, unsigned long end,
2532*4882a593Smuzhiyun 				     pte_fn_t fn, void *data, bool create,
2533*4882a593Smuzhiyun 				     pgtbl_mod_mask *mask)
2534*4882a593Smuzhiyun {
2535*4882a593Smuzhiyun 	pud_t *pud;
2536*4882a593Smuzhiyun 	unsigned long next;
2537*4882a593Smuzhiyun 	int err = 0;
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	if (create) {
2540*4882a593Smuzhiyun 		pud = pud_alloc_track(mm, p4d, addr, mask);
2541*4882a593Smuzhiyun 		if (!pud)
2542*4882a593Smuzhiyun 			return -ENOMEM;
2543*4882a593Smuzhiyun 	} else {
2544*4882a593Smuzhiyun 		pud = pud_offset(p4d, addr);
2545*4882a593Smuzhiyun 	}
2546*4882a593Smuzhiyun 	do {
2547*4882a593Smuzhiyun 		next = pud_addr_end(addr, end);
2548*4882a593Smuzhiyun 		if (create || !pud_none_or_clear_bad(pud)) {
2549*4882a593Smuzhiyun 			err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
2550*4882a593Smuzhiyun 						 create, mask);
2551*4882a593Smuzhiyun 			if (err)
2552*4882a593Smuzhiyun 				break;
2553*4882a593Smuzhiyun 		}
2554*4882a593Smuzhiyun 	} while (pud++, addr = next, addr != end);
2555*4882a593Smuzhiyun 	return err;
2556*4882a593Smuzhiyun }
2557*4882a593Smuzhiyun 
apply_to_p4d_range(struct mm_struct * mm,pgd_t * pgd,unsigned long addr,unsigned long end,pte_fn_t fn,void * data,bool create,pgtbl_mod_mask * mask)2558*4882a593Smuzhiyun static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
2559*4882a593Smuzhiyun 				     unsigned long addr, unsigned long end,
2560*4882a593Smuzhiyun 				     pte_fn_t fn, void *data, bool create,
2561*4882a593Smuzhiyun 				     pgtbl_mod_mask *mask)
2562*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun 	p4d_t *p4d;
2564*4882a593Smuzhiyun 	unsigned long next;
2565*4882a593Smuzhiyun 	int err = 0;
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 	if (create) {
2568*4882a593Smuzhiyun 		p4d = p4d_alloc_track(mm, pgd, addr, mask);
2569*4882a593Smuzhiyun 		if (!p4d)
2570*4882a593Smuzhiyun 			return -ENOMEM;
2571*4882a593Smuzhiyun 	} else {
2572*4882a593Smuzhiyun 		p4d = p4d_offset(pgd, addr);
2573*4882a593Smuzhiyun 	}
2574*4882a593Smuzhiyun 	do {
2575*4882a593Smuzhiyun 		next = p4d_addr_end(addr, end);
2576*4882a593Smuzhiyun 		if (create || !p4d_none_or_clear_bad(p4d)) {
2577*4882a593Smuzhiyun 			err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
2578*4882a593Smuzhiyun 						 create, mask);
2579*4882a593Smuzhiyun 			if (err)
2580*4882a593Smuzhiyun 				break;
2581*4882a593Smuzhiyun 		}
2582*4882a593Smuzhiyun 	} while (p4d++, addr = next, addr != end);
2583*4882a593Smuzhiyun 	return err;
2584*4882a593Smuzhiyun }
2585*4882a593Smuzhiyun 
__apply_to_page_range(struct mm_struct * mm,unsigned long addr,unsigned long size,pte_fn_t fn,void * data,bool create)2586*4882a593Smuzhiyun static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2587*4882a593Smuzhiyun 				 unsigned long size, pte_fn_t fn,
2588*4882a593Smuzhiyun 				 void *data, bool create)
2589*4882a593Smuzhiyun {
2590*4882a593Smuzhiyun 	pgd_t *pgd;
2591*4882a593Smuzhiyun 	unsigned long start = addr, next;
2592*4882a593Smuzhiyun 	unsigned long end = addr + size;
2593*4882a593Smuzhiyun 	pgtbl_mod_mask mask = 0;
2594*4882a593Smuzhiyun 	int err = 0;
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun 	if (WARN_ON(addr >= end))
2597*4882a593Smuzhiyun 		return -EINVAL;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	pgd = pgd_offset(mm, addr);
2600*4882a593Smuzhiyun 	do {
2601*4882a593Smuzhiyun 		next = pgd_addr_end(addr, end);
2602*4882a593Smuzhiyun 		if (!create && pgd_none_or_clear_bad(pgd))
2603*4882a593Smuzhiyun 			continue;
2604*4882a593Smuzhiyun 		err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
2605*4882a593Smuzhiyun 		if (err)
2606*4882a593Smuzhiyun 			break;
2607*4882a593Smuzhiyun 	} while (pgd++, addr = next, addr != end);
2608*4882a593Smuzhiyun 
2609*4882a593Smuzhiyun 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
2610*4882a593Smuzhiyun 		arch_sync_kernel_mappings(start, start + size);
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	return err;
2613*4882a593Smuzhiyun }
2614*4882a593Smuzhiyun 
2615*4882a593Smuzhiyun /*
2616*4882a593Smuzhiyun  * Scan a region of virtual memory, filling in page tables as necessary
2617*4882a593Smuzhiyun  * and calling a provided function on each leaf page table.
2618*4882a593Smuzhiyun  */
apply_to_page_range(struct mm_struct * mm,unsigned long addr,unsigned long size,pte_fn_t fn,void * data)2619*4882a593Smuzhiyun int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
2620*4882a593Smuzhiyun 			unsigned long size, pte_fn_t fn, void *data)
2621*4882a593Smuzhiyun {
2622*4882a593Smuzhiyun 	return __apply_to_page_range(mm, addr, size, fn, data, true);
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(apply_to_page_range);
2625*4882a593Smuzhiyun 
2626*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
pte_spinlock(struct vm_fault * vmf)2627*4882a593Smuzhiyun static bool pte_spinlock(struct vm_fault *vmf)
2628*4882a593Smuzhiyun {
2629*4882a593Smuzhiyun 	bool ret = false;
2630*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2631*4882a593Smuzhiyun 	pmd_t pmdval;
2632*4882a593Smuzhiyun #endif
2633*4882a593Smuzhiyun 
2634*4882a593Smuzhiyun 	/* Check if vma is still valid */
2635*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
2636*4882a593Smuzhiyun 		vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2637*4882a593Smuzhiyun 		spin_lock(vmf->ptl);
2638*4882a593Smuzhiyun 		return true;
2639*4882a593Smuzhiyun 	}
2640*4882a593Smuzhiyun 
2641*4882a593Smuzhiyun 	local_irq_disable();
2642*4882a593Smuzhiyun 	if (vma_has_changed(vmf)) {
2643*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
2644*4882a593Smuzhiyun 		goto out;
2645*4882a593Smuzhiyun 	}
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2648*4882a593Smuzhiyun 	/*
2649*4882a593Smuzhiyun 	 * We check if the pmd value is still the same to ensure that there
2650*4882a593Smuzhiyun 	 * is not a huge collapse operation in progress in our back.
2651*4882a593Smuzhiyun 	 */
2652*4882a593Smuzhiyun 	pmdval = READ_ONCE(*vmf->pmd);
2653*4882a593Smuzhiyun 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
2654*4882a593Smuzhiyun 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, vmf->address);
2655*4882a593Smuzhiyun 		goto out;
2656*4882a593Smuzhiyun 	}
2657*4882a593Smuzhiyun #endif
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2660*4882a593Smuzhiyun 	if (unlikely(!spin_trylock(vmf->ptl))) {
2661*4882a593Smuzhiyun 		trace_spf_pte_lock(_RET_IP_, vmf->vma, vmf->address);
2662*4882a593Smuzhiyun 		goto out;
2663*4882a593Smuzhiyun 	}
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	if (vma_has_changed(vmf)) {
2666*4882a593Smuzhiyun 		spin_unlock(vmf->ptl);
2667*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf->vma, vmf->address);
2668*4882a593Smuzhiyun 		goto out;
2669*4882a593Smuzhiyun 	}
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	ret = true;
2672*4882a593Smuzhiyun out:
2673*4882a593Smuzhiyun 	local_irq_enable();
2674*4882a593Smuzhiyun 	return ret;
2675*4882a593Smuzhiyun }
2676*4882a593Smuzhiyun 
__pte_map_lock_speculative(struct vm_fault * vmf,unsigned long addr)2677*4882a593Smuzhiyun static bool __pte_map_lock_speculative(struct vm_fault *vmf, unsigned long addr)
2678*4882a593Smuzhiyun {
2679*4882a593Smuzhiyun 	bool ret = false;
2680*4882a593Smuzhiyun 	pte_t *pte;
2681*4882a593Smuzhiyun 	spinlock_t *ptl;
2682*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2683*4882a593Smuzhiyun 	pmd_t pmdval;
2684*4882a593Smuzhiyun #endif
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun 	/*
2687*4882a593Smuzhiyun 	 * The first vma_has_changed() guarantees the page-tables are still
2688*4882a593Smuzhiyun 	 * valid, having IRQs disabled ensures they stay around, hence the
2689*4882a593Smuzhiyun 	 * second vma_has_changed() to make sure they are still valid once
2690*4882a593Smuzhiyun 	 * we've got the lock. After that a concurrent zap_pte_range() will
2691*4882a593Smuzhiyun 	 * block on the PTL and thus we're safe.
2692*4882a593Smuzhiyun 	 */
2693*4882a593Smuzhiyun 	local_irq_disable();
2694*4882a593Smuzhiyun 	if (vma_has_changed(vmf)) {
2695*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
2696*4882a593Smuzhiyun 		goto out;
2697*4882a593Smuzhiyun 	}
2698*4882a593Smuzhiyun 
2699*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2700*4882a593Smuzhiyun 	/*
2701*4882a593Smuzhiyun 	 * We check if the pmd value is still the same to ensure that there
2702*4882a593Smuzhiyun 	 * is not a huge collapse operation in progress in our back.
2703*4882a593Smuzhiyun 	 */
2704*4882a593Smuzhiyun 	pmdval = READ_ONCE(*vmf->pmd);
2705*4882a593Smuzhiyun 	if (!pmd_same(pmdval, vmf->orig_pmd)) {
2706*4882a593Smuzhiyun 		trace_spf_pmd_changed(_RET_IP_, vmf->vma, addr);
2707*4882a593Smuzhiyun 		goto out;
2708*4882a593Smuzhiyun 	}
2709*4882a593Smuzhiyun #endif
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 	/*
2712*4882a593Smuzhiyun 	 * Same as pte_offset_map_lock() except that we call
2713*4882a593Smuzhiyun 	 * spin_trylock() in place of spin_lock() to avoid race with
2714*4882a593Smuzhiyun 	 * unmap path which may have the lock and wait for this CPU
2715*4882a593Smuzhiyun 	 * to invalidate TLB but this CPU has irq disabled.
2716*4882a593Smuzhiyun 	 * Since we are in a speculative patch, accept it could fail
2717*4882a593Smuzhiyun 	 */
2718*4882a593Smuzhiyun 	ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2719*4882a593Smuzhiyun 	pte = pte_offset_map(vmf->pmd, addr);
2720*4882a593Smuzhiyun 	if (unlikely(!spin_trylock(ptl))) {
2721*4882a593Smuzhiyun 		pte_unmap(pte);
2722*4882a593Smuzhiyun 		trace_spf_pte_lock(_RET_IP_, vmf->vma, addr);
2723*4882a593Smuzhiyun 		goto out;
2724*4882a593Smuzhiyun 	}
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 	if (vma_has_changed(vmf)) {
2727*4882a593Smuzhiyun 		pte_unmap_unlock(pte, ptl);
2728*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf->vma, addr);
2729*4882a593Smuzhiyun 		goto out;
2730*4882a593Smuzhiyun 	}
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 	vmf->pte = pte;
2733*4882a593Smuzhiyun 	vmf->ptl = ptl;
2734*4882a593Smuzhiyun 	ret = true;
2735*4882a593Smuzhiyun out:
2736*4882a593Smuzhiyun 	local_irq_enable();
2737*4882a593Smuzhiyun 	return ret;
2738*4882a593Smuzhiyun }
2739*4882a593Smuzhiyun 
pte_map_lock(struct vm_fault * vmf)2740*4882a593Smuzhiyun static bool pte_map_lock(struct vm_fault *vmf)
2741*4882a593Smuzhiyun {
2742*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
2743*4882a593Smuzhiyun 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
2744*4882a593Smuzhiyun 					       vmf->address, &vmf->ptl);
2745*4882a593Smuzhiyun 		return true;
2746*4882a593Smuzhiyun 	}
2747*4882a593Smuzhiyun 
2748*4882a593Smuzhiyun 	return __pte_map_lock_speculative(vmf, vmf->address);
2749*4882a593Smuzhiyun }
2750*4882a593Smuzhiyun 
pte_map_lock_addr(struct vm_fault * vmf,unsigned long addr)2751*4882a593Smuzhiyun bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr)
2752*4882a593Smuzhiyun {
2753*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE)) {
2754*4882a593Smuzhiyun 		vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
2755*4882a593Smuzhiyun 					       addr, &vmf->ptl);
2756*4882a593Smuzhiyun 		return true;
2757*4882a593Smuzhiyun 	}
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	return __pte_map_lock_speculative(vmf, addr);
2760*4882a593Smuzhiyun }
2761*4882a593Smuzhiyun 
2762*4882a593Smuzhiyun static bool __read_mostly allow_file_spec_access;
allow_file_spec_access_setup(char * str)2763*4882a593Smuzhiyun static int __init allow_file_spec_access_setup(char *str)
2764*4882a593Smuzhiyun {
2765*4882a593Smuzhiyun 	allow_file_spec_access = true;
2766*4882a593Smuzhiyun 	return 1;
2767*4882a593Smuzhiyun }
2768*4882a593Smuzhiyun __setup("allow_file_spec_access", allow_file_spec_access_setup);
2769*4882a593Smuzhiyun 
vmf_allows_speculation(struct vm_fault * vmf)2770*4882a593Smuzhiyun static bool vmf_allows_speculation(struct vm_fault *vmf)
2771*4882a593Smuzhiyun {
2772*4882a593Smuzhiyun 	if (vma_is_anonymous(vmf->vma)) {
2773*4882a593Smuzhiyun 		/*
2774*4882a593Smuzhiyun 		 * __anon_vma_prepare() requires the mmap_sem to be held
2775*4882a593Smuzhiyun 		 * because vm_next and vm_prev must be safe. This can't be
2776*4882a593Smuzhiyun 		 * guaranteed in the speculative path.
2777*4882a593Smuzhiyun 		 */
2778*4882a593Smuzhiyun 		if (!vmf->vma->anon_vma) {
2779*4882a593Smuzhiyun 			trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
2780*4882a593Smuzhiyun 			return false;
2781*4882a593Smuzhiyun 		}
2782*4882a593Smuzhiyun 		return true;
2783*4882a593Smuzhiyun 	}
2784*4882a593Smuzhiyun 
2785*4882a593Smuzhiyun 	if (!allow_file_spec_access) {
2786*4882a593Smuzhiyun 		/*
2787*4882a593Smuzhiyun 		 * Can't call vm_ops service has we don't know what they would
2788*4882a593Smuzhiyun 		 * do with the VMA.
2789*4882a593Smuzhiyun 		 * This include huge page from hugetlbfs.
2790*4882a593Smuzhiyun 		 */
2791*4882a593Smuzhiyun 		trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
2792*4882a593Smuzhiyun 		return false;
2793*4882a593Smuzhiyun 	}
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	if (!(vmf->vma->vm_flags & VM_SHARED) &&
2796*4882a593Smuzhiyun 		(vmf->flags & FAULT_FLAG_WRITE) &&
2797*4882a593Smuzhiyun 		!vmf->vma->anon_vma) {
2798*4882a593Smuzhiyun 		/*
2799*4882a593Smuzhiyun 		 * non-anonymous private COW without anon_vma.
2800*4882a593Smuzhiyun 		 * See above.
2801*4882a593Smuzhiyun 		 */
2802*4882a593Smuzhiyun 		trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
2803*4882a593Smuzhiyun 		return false;
2804*4882a593Smuzhiyun 	}
2805*4882a593Smuzhiyun 
2806*4882a593Smuzhiyun 	if (vmf->vma->vm_ops->allow_speculation &&
2807*4882a593Smuzhiyun 		vmf->vma->vm_ops->allow_speculation()) {
2808*4882a593Smuzhiyun 		return true;
2809*4882a593Smuzhiyun 	}
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun 	trace_spf_vma_notsup(_RET_IP_, vmf->vma, vmf->address);
2812*4882a593Smuzhiyun 	return false;
2813*4882a593Smuzhiyun }
2814*4882a593Smuzhiyun 
2815*4882a593Smuzhiyun #else
pte_spinlock(struct vm_fault * vmf)2816*4882a593Smuzhiyun static inline bool pte_spinlock(struct vm_fault *vmf)
2817*4882a593Smuzhiyun {
2818*4882a593Smuzhiyun 	vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
2819*4882a593Smuzhiyun 	spin_lock(vmf->ptl);
2820*4882a593Smuzhiyun 	return true;
2821*4882a593Smuzhiyun }
2822*4882a593Smuzhiyun 
pte_map_lock(struct vm_fault * vmf)2823*4882a593Smuzhiyun static inline bool pte_map_lock(struct vm_fault *vmf)
2824*4882a593Smuzhiyun {
2825*4882a593Smuzhiyun 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
2826*4882a593Smuzhiyun 				       vmf->address, &vmf->ptl);
2827*4882a593Smuzhiyun 	return true;
2828*4882a593Smuzhiyun }
2829*4882a593Smuzhiyun 
pte_map_lock_addr(struct vm_fault * vmf,unsigned long addr)2830*4882a593Smuzhiyun inline bool pte_map_lock_addr(struct vm_fault *vmf, unsigned long addr)
2831*4882a593Smuzhiyun {
2832*4882a593Smuzhiyun 	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
2833*4882a593Smuzhiyun 					addr, &vmf->ptl);
2834*4882a593Smuzhiyun 	return true;
2835*4882a593Smuzhiyun }
2836*4882a593Smuzhiyun 
vmf_allows_speculation(struct vm_fault * vmf)2837*4882a593Smuzhiyun static inline bool vmf_allows_speculation(struct vm_fault *vmf)
2838*4882a593Smuzhiyun {
2839*4882a593Smuzhiyun 	return false;
2840*4882a593Smuzhiyun }
2841*4882a593Smuzhiyun #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
2842*4882a593Smuzhiyun 
2843*4882a593Smuzhiyun /*
2844*4882a593Smuzhiyun  * Scan a region of virtual memory, calling a provided function on
2845*4882a593Smuzhiyun  * each leaf page table where it exists.
2846*4882a593Smuzhiyun  *
2847*4882a593Smuzhiyun  * Unlike apply_to_page_range, this does _not_ fill in page tables
2848*4882a593Smuzhiyun  * where they are absent.
2849*4882a593Smuzhiyun  */
apply_to_existing_page_range(struct mm_struct * mm,unsigned long addr,unsigned long size,pte_fn_t fn,void * data)2850*4882a593Smuzhiyun int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr,
2851*4882a593Smuzhiyun 				 unsigned long size, pte_fn_t fn, void *data)
2852*4882a593Smuzhiyun {
2853*4882a593Smuzhiyun 	return __apply_to_page_range(mm, addr, size, fn, data, false);
2854*4882a593Smuzhiyun }
2855*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(apply_to_existing_page_range);
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun /*
2858*4882a593Smuzhiyun  * handle_pte_fault chooses page fault handler according to an entry which was
2859*4882a593Smuzhiyun  * read non-atomically.  Before making any commitment, on those architectures
2860*4882a593Smuzhiyun  * or configurations (e.g. i386 with PAE) which might give a mix of unmatched
2861*4882a593Smuzhiyun  * parts, do_swap_page must check under lock before unmapping the pte and
2862*4882a593Smuzhiyun  * proceeding (but do_wp_page is only called after already making such a check;
2863*4882a593Smuzhiyun  * and do_anonymous_page can safely check later on).
2864*4882a593Smuzhiyun  *
2865*4882a593Smuzhiyun  * pte_unmap_same() returns:
2866*4882a593Smuzhiyun  *	0			if the PTE are the same
2867*4882a593Smuzhiyun  *	VM_FAULT_PTNOTSAME	if the PTE are different
2868*4882a593Smuzhiyun  *	VM_FAULT_RETRY		if the VMA has changed in our back during
2869*4882a593Smuzhiyun  *				a speculative page fault handling.
2870*4882a593Smuzhiyun  */
pte_unmap_same(struct vm_fault * vmf)2871*4882a593Smuzhiyun static inline int pte_unmap_same(struct vm_fault *vmf)
2872*4882a593Smuzhiyun {
2873*4882a593Smuzhiyun 	int ret = 0;
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2876*4882a593Smuzhiyun 	if (sizeof(pte_t) > sizeof(unsigned long)) {
2877*4882a593Smuzhiyun 		if (pte_spinlock(vmf)) {
2878*4882a593Smuzhiyun 			if (!pte_same(*vmf->pte, vmf->orig_pte))
2879*4882a593Smuzhiyun 				ret = VM_FAULT_PTNOTSAME;
2880*4882a593Smuzhiyun 			spin_unlock(vmf->ptl);
2881*4882a593Smuzhiyun 		} else
2882*4882a593Smuzhiyun 			ret = VM_FAULT_RETRY;
2883*4882a593Smuzhiyun 	}
2884*4882a593Smuzhiyun #endif
2885*4882a593Smuzhiyun 	pte_unmap(vmf->pte);
2886*4882a593Smuzhiyun 	return ret;
2887*4882a593Smuzhiyun }
2888*4882a593Smuzhiyun 
cow_user_page(struct page * dst,struct page * src,struct vm_fault * vmf)2889*4882a593Smuzhiyun static inline bool cow_user_page(struct page *dst, struct page *src,
2890*4882a593Smuzhiyun 				 struct vm_fault *vmf)
2891*4882a593Smuzhiyun {
2892*4882a593Smuzhiyun 	bool ret;
2893*4882a593Smuzhiyun 	void *kaddr;
2894*4882a593Smuzhiyun 	void __user *uaddr;
2895*4882a593Smuzhiyun 	bool locked = false;
2896*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
2897*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
2898*4882a593Smuzhiyun 	unsigned long addr = vmf->address;
2899*4882a593Smuzhiyun 
2900*4882a593Smuzhiyun 	if (likely(src)) {
2901*4882a593Smuzhiyun 		copy_user_highpage(dst, src, addr, vma);
2902*4882a593Smuzhiyun 		return true;
2903*4882a593Smuzhiyun 	}
2904*4882a593Smuzhiyun 
2905*4882a593Smuzhiyun 	/*
2906*4882a593Smuzhiyun 	 * If the source page was a PFN mapping, we don't have
2907*4882a593Smuzhiyun 	 * a "struct page" for it. We do a best-effort copy by
2908*4882a593Smuzhiyun 	 * just copying from the original user address. If that
2909*4882a593Smuzhiyun 	 * fails, we just zero-fill it. Live with it.
2910*4882a593Smuzhiyun 	 */
2911*4882a593Smuzhiyun 	kaddr = kmap_atomic(dst);
2912*4882a593Smuzhiyun 	uaddr = (void __user *)(addr & PAGE_MASK);
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	/*
2915*4882a593Smuzhiyun 	 * On architectures with software "accessed" bits, we would
2916*4882a593Smuzhiyun 	 * take a double page fault, so mark it accessed here.
2917*4882a593Smuzhiyun 	 */
2918*4882a593Smuzhiyun 	if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2919*4882a593Smuzhiyun 		pte_t entry;
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2922*4882a593Smuzhiyun 		locked = true;
2923*4882a593Smuzhiyun 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2924*4882a593Smuzhiyun 			/*
2925*4882a593Smuzhiyun 			 * Other thread has already handled the fault
2926*4882a593Smuzhiyun 			 * and update local tlb only
2927*4882a593Smuzhiyun 			 */
2928*4882a593Smuzhiyun 			update_mmu_tlb(vma, addr, vmf->pte);
2929*4882a593Smuzhiyun 			ret = false;
2930*4882a593Smuzhiyun 			goto pte_unlock;
2931*4882a593Smuzhiyun 		}
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun 		entry = pte_mkyoung(vmf->orig_pte);
2934*4882a593Smuzhiyun 		if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2935*4882a593Smuzhiyun 			update_mmu_cache(vma, addr, vmf->pte);
2936*4882a593Smuzhiyun 	}
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun 	/*
2939*4882a593Smuzhiyun 	 * This really shouldn't fail, because the page is there
2940*4882a593Smuzhiyun 	 * in the page tables. But it might just be unreadable,
2941*4882a593Smuzhiyun 	 * in which case we just give up and fill the result with
2942*4882a593Smuzhiyun 	 * zeroes.
2943*4882a593Smuzhiyun 	 */
2944*4882a593Smuzhiyun 	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2945*4882a593Smuzhiyun 		if (locked)
2946*4882a593Smuzhiyun 			goto warn;
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 		/* Re-validate under PTL if the page is still mapped */
2949*4882a593Smuzhiyun 		vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2950*4882a593Smuzhiyun 		locked = true;
2951*4882a593Smuzhiyun 		if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2952*4882a593Smuzhiyun 			/* The PTE changed under us, update local tlb */
2953*4882a593Smuzhiyun 			update_mmu_tlb(vma, addr, vmf->pte);
2954*4882a593Smuzhiyun 			ret = false;
2955*4882a593Smuzhiyun 			goto pte_unlock;
2956*4882a593Smuzhiyun 		}
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 		/*
2959*4882a593Smuzhiyun 		 * The same page can be mapped back since last copy attempt.
2960*4882a593Smuzhiyun 		 * Try to copy again under PTL.
2961*4882a593Smuzhiyun 		 */
2962*4882a593Smuzhiyun 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
2963*4882a593Smuzhiyun 			/*
2964*4882a593Smuzhiyun 			 * Give a warn in case there can be some obscure
2965*4882a593Smuzhiyun 			 * use-case
2966*4882a593Smuzhiyun 			 */
2967*4882a593Smuzhiyun warn:
2968*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
2969*4882a593Smuzhiyun 			clear_page(kaddr);
2970*4882a593Smuzhiyun 		}
2971*4882a593Smuzhiyun 	}
2972*4882a593Smuzhiyun 
2973*4882a593Smuzhiyun 	ret = true;
2974*4882a593Smuzhiyun 
2975*4882a593Smuzhiyun pte_unlock:
2976*4882a593Smuzhiyun 	if (locked)
2977*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
2978*4882a593Smuzhiyun 	kunmap_atomic(kaddr);
2979*4882a593Smuzhiyun 	flush_dcache_page(dst);
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun 	return ret;
2982*4882a593Smuzhiyun }
2983*4882a593Smuzhiyun 
__get_fault_gfp_mask(struct vm_area_struct * vma)2984*4882a593Smuzhiyun static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2985*4882a593Smuzhiyun {
2986*4882a593Smuzhiyun 	struct file *vm_file = vma->vm_file;
2987*4882a593Smuzhiyun 
2988*4882a593Smuzhiyun 	if (vm_file)
2989*4882a593Smuzhiyun 		return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO;
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun 	/*
2992*4882a593Smuzhiyun 	 * Special mappings (e.g. VDSO) do not have any file so fake
2993*4882a593Smuzhiyun 	 * a default GFP_KERNEL for them.
2994*4882a593Smuzhiyun 	 */
2995*4882a593Smuzhiyun 	return GFP_KERNEL;
2996*4882a593Smuzhiyun }
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun /*
2999*4882a593Smuzhiyun  * Notify the address space that the page is about to become writable so that
3000*4882a593Smuzhiyun  * it can prohibit this or wait for the page to get into an appropriate state.
3001*4882a593Smuzhiyun  *
3002*4882a593Smuzhiyun  * We do this without the lock held, so that it can sleep if it needs to.
3003*4882a593Smuzhiyun  */
do_page_mkwrite(struct vm_fault * vmf)3004*4882a593Smuzhiyun static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
3005*4882a593Smuzhiyun {
3006*4882a593Smuzhiyun 	vm_fault_t ret;
3007*4882a593Smuzhiyun 	struct page *page = vmf->page;
3008*4882a593Smuzhiyun 	unsigned int old_flags = vmf->flags;
3009*4882a593Smuzhiyun 
3010*4882a593Smuzhiyun 	vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	if (vmf->vma->vm_file &&
3013*4882a593Smuzhiyun 	    IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
3014*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
3015*4882a593Smuzhiyun 
3016*4882a593Smuzhiyun 	ret = vmf->vma->vm_ops->page_mkwrite(vmf);
3017*4882a593Smuzhiyun 	/* Restore original flags so that caller is not surprised */
3018*4882a593Smuzhiyun 	vmf->flags = old_flags;
3019*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
3020*4882a593Smuzhiyun 		return ret;
3021*4882a593Smuzhiyun 	if (unlikely(!(ret & VM_FAULT_LOCKED))) {
3022*4882a593Smuzhiyun 		lock_page(page);
3023*4882a593Smuzhiyun 		if (!page->mapping) {
3024*4882a593Smuzhiyun 			unlock_page(page);
3025*4882a593Smuzhiyun 			return 0; /* retry */
3026*4882a593Smuzhiyun 		}
3027*4882a593Smuzhiyun 		ret |= VM_FAULT_LOCKED;
3028*4882a593Smuzhiyun 	} else
3029*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(!PageLocked(page), page);
3030*4882a593Smuzhiyun 	return ret;
3031*4882a593Smuzhiyun }
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun /*
3034*4882a593Smuzhiyun  * Handle dirtying of a page in shared file mapping on a write fault.
3035*4882a593Smuzhiyun  *
3036*4882a593Smuzhiyun  * The function expects the page to be locked and unlocks it.
3037*4882a593Smuzhiyun  */
fault_dirty_shared_page(struct vm_fault * vmf)3038*4882a593Smuzhiyun static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
3039*4882a593Smuzhiyun {
3040*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3041*4882a593Smuzhiyun 	struct address_space *mapping;
3042*4882a593Smuzhiyun 	struct page *page = vmf->page;
3043*4882a593Smuzhiyun 	bool dirtied;
3044*4882a593Smuzhiyun 	bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 	dirtied = set_page_dirty(page);
3047*4882a593Smuzhiyun 	VM_BUG_ON_PAGE(PageAnon(page), page);
3048*4882a593Smuzhiyun 	/*
3049*4882a593Smuzhiyun 	 * Take a local copy of the address_space - page.mapping may be zeroed
3050*4882a593Smuzhiyun 	 * by truncate after unlock_page().   The address_space itself remains
3051*4882a593Smuzhiyun 	 * pinned by vma->vm_file's reference.  We rely on unlock_page()'s
3052*4882a593Smuzhiyun 	 * release semantics to prevent the compiler from undoing this copying.
3053*4882a593Smuzhiyun 	 */
3054*4882a593Smuzhiyun 	mapping = page_rmapping(page);
3055*4882a593Smuzhiyun 	unlock_page(page);
3056*4882a593Smuzhiyun 
3057*4882a593Smuzhiyun 	if (!page_mkwrite)
3058*4882a593Smuzhiyun 		file_update_time(vma->vm_file);
3059*4882a593Smuzhiyun 
3060*4882a593Smuzhiyun 	/*
3061*4882a593Smuzhiyun 	 * Throttle page dirtying rate down to writeback speed.
3062*4882a593Smuzhiyun 	 *
3063*4882a593Smuzhiyun 	 * mapping may be NULL here because some device drivers do not
3064*4882a593Smuzhiyun 	 * set page.mapping but still dirty their pages
3065*4882a593Smuzhiyun 	 *
3066*4882a593Smuzhiyun 	 * Drop the mmap_lock before waiting on IO, if we can. The file
3067*4882a593Smuzhiyun 	 * is pinning the mapping, as per above.
3068*4882a593Smuzhiyun 	 */
3069*4882a593Smuzhiyun 	if ((dirtied || page_mkwrite) && mapping) {
3070*4882a593Smuzhiyun 		struct file *fpin;
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun 		fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3073*4882a593Smuzhiyun 		balance_dirty_pages_ratelimited(mapping);
3074*4882a593Smuzhiyun 		if (fpin) {
3075*4882a593Smuzhiyun 			fput(fpin);
3076*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
3077*4882a593Smuzhiyun 		}
3078*4882a593Smuzhiyun 	}
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 	return 0;
3081*4882a593Smuzhiyun }
3082*4882a593Smuzhiyun 
3083*4882a593Smuzhiyun /*
3084*4882a593Smuzhiyun  * Handle write page faults for pages that can be reused in the current vma
3085*4882a593Smuzhiyun  *
3086*4882a593Smuzhiyun  * This can happen either due to the mapping being with the VM_SHARED flag,
3087*4882a593Smuzhiyun  * or due to us being the last reference standing to the page. In either
3088*4882a593Smuzhiyun  * case, all we need to do here is to mark the page as writable and update
3089*4882a593Smuzhiyun  * any related book-keeping.
3090*4882a593Smuzhiyun  */
wp_page_reuse(struct vm_fault * vmf)3091*4882a593Smuzhiyun static inline void wp_page_reuse(struct vm_fault *vmf)
3092*4882a593Smuzhiyun 	__releases(vmf->ptl)
3093*4882a593Smuzhiyun {
3094*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3095*4882a593Smuzhiyun 	struct page *page = vmf->page;
3096*4882a593Smuzhiyun 	pte_t entry;
3097*4882a593Smuzhiyun 	/*
3098*4882a593Smuzhiyun 	 * Clear the pages cpupid information as the existing
3099*4882a593Smuzhiyun 	 * information potentially belongs to a now completely
3100*4882a593Smuzhiyun 	 * unrelated process.
3101*4882a593Smuzhiyun 	 */
3102*4882a593Smuzhiyun 	if (page)
3103*4882a593Smuzhiyun 		page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1);
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun 	flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3106*4882a593Smuzhiyun 	entry = pte_mkyoung(vmf->orig_pte);
3107*4882a593Smuzhiyun 	entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
3108*4882a593Smuzhiyun 	if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3109*4882a593Smuzhiyun 		update_mmu_cache(vma, vmf->address, vmf->pte);
3110*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3111*4882a593Smuzhiyun 	count_vm_event(PGREUSE);
3112*4882a593Smuzhiyun }
3113*4882a593Smuzhiyun 
3114*4882a593Smuzhiyun /*
3115*4882a593Smuzhiyun  * Handle the case of a page which we actually need to copy to a new page.
3116*4882a593Smuzhiyun  *
3117*4882a593Smuzhiyun  * Called with mmap_lock locked and the old page referenced, but
3118*4882a593Smuzhiyun  * without the ptl held.
3119*4882a593Smuzhiyun  *
3120*4882a593Smuzhiyun  * High level logic flow:
3121*4882a593Smuzhiyun  *
3122*4882a593Smuzhiyun  * - Allocate a page, copy the content of the old page to the new one.
3123*4882a593Smuzhiyun  * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc.
3124*4882a593Smuzhiyun  * - Take the PTL. If the pte changed, bail out and release the allocated page
3125*4882a593Smuzhiyun  * - If the pte is still the way we remember it, update the page table and all
3126*4882a593Smuzhiyun  *   relevant references. This includes dropping the reference the page-table
3127*4882a593Smuzhiyun  *   held to the old page, as well as updating the rmap.
3128*4882a593Smuzhiyun  * - In any case, unlock the PTL and drop the reference we took to the old page.
3129*4882a593Smuzhiyun  */
wp_page_copy(struct vm_fault * vmf)3130*4882a593Smuzhiyun static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3131*4882a593Smuzhiyun {
3132*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3133*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
3134*4882a593Smuzhiyun 	struct page *old_page = vmf->page;
3135*4882a593Smuzhiyun 	struct page *new_page = NULL;
3136*4882a593Smuzhiyun 	pte_t entry;
3137*4882a593Smuzhiyun 	int page_copied = 0;
3138*4882a593Smuzhiyun 	struct mmu_notifier_range range;
3139*4882a593Smuzhiyun 	vm_fault_t ret = VM_FAULT_OOM;
3140*4882a593Smuzhiyun 
3141*4882a593Smuzhiyun 	if (unlikely(anon_vma_prepare(vma)))
3142*4882a593Smuzhiyun 		goto out;
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun 	if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3145*4882a593Smuzhiyun 		new_page = alloc_zeroed_user_highpage_movable(vma,
3146*4882a593Smuzhiyun 							      vmf->address);
3147*4882a593Smuzhiyun 		if (!new_page)
3148*4882a593Smuzhiyun 			goto out;
3149*4882a593Smuzhiyun 	} else {
3150*4882a593Smuzhiyun 		new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
3151*4882a593Smuzhiyun 				vmf->address);
3152*4882a593Smuzhiyun 		if (!new_page)
3153*4882a593Smuzhiyun 			goto out;
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 		if (!cow_user_page(new_page, old_page, vmf)) {
3156*4882a593Smuzhiyun 			/*
3157*4882a593Smuzhiyun 			 * COW failed, if the fault was solved by other,
3158*4882a593Smuzhiyun 			 * it's fine. If not, userspace would re-fault on
3159*4882a593Smuzhiyun 			 * the same address and we will handle the fault
3160*4882a593Smuzhiyun 			 * from the second attempt.
3161*4882a593Smuzhiyun 			 */
3162*4882a593Smuzhiyun 			put_page(new_page);
3163*4882a593Smuzhiyun 			if (old_page)
3164*4882a593Smuzhiyun 				put_page(old_page);
3165*4882a593Smuzhiyun 			return 0;
3166*4882a593Smuzhiyun 		}
3167*4882a593Smuzhiyun 		trace_android_vh_cow_user_page(vmf, new_page);
3168*4882a593Smuzhiyun 	}
3169*4882a593Smuzhiyun 
3170*4882a593Smuzhiyun 	if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
3171*4882a593Smuzhiyun 		goto out_free_new;
3172*4882a593Smuzhiyun 	cgroup_throttle_swaprate(new_page, GFP_KERNEL);
3173*4882a593Smuzhiyun 
3174*4882a593Smuzhiyun 	__SetPageUptodate(new_page);
3175*4882a593Smuzhiyun 
3176*4882a593Smuzhiyun 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
3177*4882a593Smuzhiyun 				vmf->address & PAGE_MASK,
3178*4882a593Smuzhiyun 				(vmf->address & PAGE_MASK) + PAGE_SIZE);
3179*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_start(&range);
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun 	/*
3182*4882a593Smuzhiyun 	 * Re-check the pte - we dropped the lock
3183*4882a593Smuzhiyun 	 */
3184*4882a593Smuzhiyun 	if (!pte_map_lock(vmf)) {
3185*4882a593Smuzhiyun 		ret = VM_FAULT_RETRY;
3186*4882a593Smuzhiyun 		goto out_invalidate_end;
3187*4882a593Smuzhiyun 	}
3188*4882a593Smuzhiyun 	if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
3189*4882a593Smuzhiyun 		if (old_page) {
3190*4882a593Smuzhiyun 			if (!PageAnon(old_page)) {
3191*4882a593Smuzhiyun 				dec_mm_counter_fast(mm,
3192*4882a593Smuzhiyun 						mm_counter_file(old_page));
3193*4882a593Smuzhiyun 				inc_mm_counter_fast(mm, MM_ANONPAGES);
3194*4882a593Smuzhiyun 			}
3195*4882a593Smuzhiyun 		} else {
3196*4882a593Smuzhiyun 			inc_mm_counter_fast(mm, MM_ANONPAGES);
3197*4882a593Smuzhiyun 		}
3198*4882a593Smuzhiyun 		flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3199*4882a593Smuzhiyun 		entry = mk_pte(new_page, vmf->vma_page_prot);
3200*4882a593Smuzhiyun 		entry = pte_sw_mkyoung(entry);
3201*4882a593Smuzhiyun 		entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
3202*4882a593Smuzhiyun 		/*
3203*4882a593Smuzhiyun 		 * Clear the pte entry and flush it first, before updating the
3204*4882a593Smuzhiyun 		 * pte with the new entry. This will avoid a race condition
3205*4882a593Smuzhiyun 		 * seen in the presence of one thread doing SMC and another
3206*4882a593Smuzhiyun 		 * thread doing COW.
3207*4882a593Smuzhiyun 		 */
3208*4882a593Smuzhiyun 		ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
3209*4882a593Smuzhiyun 		__page_add_new_anon_rmap(new_page, vma, vmf->address, false);
3210*4882a593Smuzhiyun 		__lru_cache_add_inactive_or_unevictable(new_page, vmf->vma_flags);
3211*4882a593Smuzhiyun 		/*
3212*4882a593Smuzhiyun 		 * We call the notify macro here because, when using secondary
3213*4882a593Smuzhiyun 		 * mmu page tables (such as kvm shadow page tables), we want the
3214*4882a593Smuzhiyun 		 * new page to be mapped directly into the secondary page table.
3215*4882a593Smuzhiyun 		 */
3216*4882a593Smuzhiyun 		set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3217*4882a593Smuzhiyun 		update_mmu_cache(vma, vmf->address, vmf->pte);
3218*4882a593Smuzhiyun 		if (old_page) {
3219*4882a593Smuzhiyun 			/*
3220*4882a593Smuzhiyun 			 * Only after switching the pte to the new page may
3221*4882a593Smuzhiyun 			 * we remove the mapcount here. Otherwise another
3222*4882a593Smuzhiyun 			 * process may come and find the rmap count decremented
3223*4882a593Smuzhiyun 			 * before the pte is switched to the new page, and
3224*4882a593Smuzhiyun 			 * "reuse" the old page writing into it while our pte
3225*4882a593Smuzhiyun 			 * here still points into it and can be read by other
3226*4882a593Smuzhiyun 			 * threads.
3227*4882a593Smuzhiyun 			 *
3228*4882a593Smuzhiyun 			 * The critical issue is to order this
3229*4882a593Smuzhiyun 			 * page_remove_rmap with the ptp_clear_flush above.
3230*4882a593Smuzhiyun 			 * Those stores are ordered by (if nothing else,)
3231*4882a593Smuzhiyun 			 * the barrier present in the atomic_add_negative
3232*4882a593Smuzhiyun 			 * in page_remove_rmap.
3233*4882a593Smuzhiyun 			 *
3234*4882a593Smuzhiyun 			 * Then the TLB flush in ptep_clear_flush ensures that
3235*4882a593Smuzhiyun 			 * no process can access the old page before the
3236*4882a593Smuzhiyun 			 * decremented mapcount is visible. And the old page
3237*4882a593Smuzhiyun 			 * cannot be reused until after the decremented
3238*4882a593Smuzhiyun 			 * mapcount is visible. So transitively, TLBs to
3239*4882a593Smuzhiyun 			 * old page will be flushed before it can be reused.
3240*4882a593Smuzhiyun 			 */
3241*4882a593Smuzhiyun 			page_remove_rmap(old_page, false);
3242*4882a593Smuzhiyun 		}
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun 		/* Free the old page.. */
3245*4882a593Smuzhiyun 		new_page = old_page;
3246*4882a593Smuzhiyun 		page_copied = 1;
3247*4882a593Smuzhiyun 	} else {
3248*4882a593Smuzhiyun 		update_mmu_tlb(vma, vmf->address, vmf->pte);
3249*4882a593Smuzhiyun 	}
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun 	if (new_page)
3252*4882a593Smuzhiyun 		put_page(new_page);
3253*4882a593Smuzhiyun 
3254*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3255*4882a593Smuzhiyun 	/*
3256*4882a593Smuzhiyun 	 * No need to double call mmu_notifier->invalidate_range() callback as
3257*4882a593Smuzhiyun 	 * the above ptep_clear_flush_notify() did already call it.
3258*4882a593Smuzhiyun 	 */
3259*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_only_end(&range);
3260*4882a593Smuzhiyun 	if (old_page) {
3261*4882a593Smuzhiyun 		/*
3262*4882a593Smuzhiyun 		 * Don't let another task, with possibly unlocked vma,
3263*4882a593Smuzhiyun 		 * keep the mlocked page.
3264*4882a593Smuzhiyun 		 */
3265*4882a593Smuzhiyun 		if (page_copied && (vmf->vma_flags & VM_LOCKED)) {
3266*4882a593Smuzhiyun 			lock_page(old_page);	/* LRU manipulation */
3267*4882a593Smuzhiyun 			if (PageMlocked(old_page))
3268*4882a593Smuzhiyun 				munlock_vma_page(old_page);
3269*4882a593Smuzhiyun 			unlock_page(old_page);
3270*4882a593Smuzhiyun 		}
3271*4882a593Smuzhiyun 		put_page(old_page);
3272*4882a593Smuzhiyun 	}
3273*4882a593Smuzhiyun 	return page_copied ? VM_FAULT_WRITE : 0;
3274*4882a593Smuzhiyun out_invalidate_end:
3275*4882a593Smuzhiyun 	mmu_notifier_invalidate_range_only_end(&range);
3276*4882a593Smuzhiyun out_free_new:
3277*4882a593Smuzhiyun 	put_page(new_page);
3278*4882a593Smuzhiyun out:
3279*4882a593Smuzhiyun 	if (old_page)
3280*4882a593Smuzhiyun 		put_page(old_page);
3281*4882a593Smuzhiyun 	return ret;
3282*4882a593Smuzhiyun }
3283*4882a593Smuzhiyun 
3284*4882a593Smuzhiyun /**
3285*4882a593Smuzhiyun  * finish_mkwrite_fault - finish page fault for a shared mapping, making PTE
3286*4882a593Smuzhiyun  *			  writeable once the page is prepared
3287*4882a593Smuzhiyun  *
3288*4882a593Smuzhiyun  * @vmf: structure describing the fault
3289*4882a593Smuzhiyun  *
3290*4882a593Smuzhiyun  * This function handles all that is needed to finish a write page fault in a
3291*4882a593Smuzhiyun  * shared mapping due to PTE being read-only once the mapped page is prepared.
3292*4882a593Smuzhiyun  * It handles locking of PTE and modifying it.
3293*4882a593Smuzhiyun  *
3294*4882a593Smuzhiyun  * The function expects the page to be locked or other protection against
3295*4882a593Smuzhiyun  * concurrent faults / writeback (such as DAX radix tree locks).
3296*4882a593Smuzhiyun  *
3297*4882a593Smuzhiyun  * Return: %VM_FAULT_WRITE on success, %0 when PTE got changed before
3298*4882a593Smuzhiyun  * we acquired PTE lock.
3299*4882a593Smuzhiyun  */
finish_mkwrite_fault(struct vm_fault * vmf)3300*4882a593Smuzhiyun vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3301*4882a593Smuzhiyun {
3302*4882a593Smuzhiyun 	WARN_ON_ONCE(!(vmf->vma_flags & VM_SHARED));
3303*4882a593Smuzhiyun 	if (!pte_map_lock(vmf))
3304*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
3305*4882a593Smuzhiyun 	/*
3306*4882a593Smuzhiyun 	 * We might have raced with another page fault while we released the
3307*4882a593Smuzhiyun 	 * pte_offset_map_lock.
3308*4882a593Smuzhiyun 	 */
3309*4882a593Smuzhiyun 	if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3310*4882a593Smuzhiyun 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3311*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3312*4882a593Smuzhiyun 		return VM_FAULT_NOPAGE;
3313*4882a593Smuzhiyun 	}
3314*4882a593Smuzhiyun 	wp_page_reuse(vmf);
3315*4882a593Smuzhiyun 	return 0;
3316*4882a593Smuzhiyun }
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun /*
3319*4882a593Smuzhiyun  * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED
3320*4882a593Smuzhiyun  * mapping
3321*4882a593Smuzhiyun  */
wp_pfn_shared(struct vm_fault * vmf)3322*4882a593Smuzhiyun static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3323*4882a593Smuzhiyun {
3324*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 	if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3327*4882a593Smuzhiyun 		vm_fault_t ret;
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3330*4882a593Smuzhiyun 		vmf->flags |= FAULT_FLAG_MKWRITE;
3331*4882a593Smuzhiyun 		ret = vma->vm_ops->pfn_mkwrite(vmf);
3332*4882a593Smuzhiyun 		if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))
3333*4882a593Smuzhiyun 			return ret;
3334*4882a593Smuzhiyun 		return finish_mkwrite_fault(vmf);
3335*4882a593Smuzhiyun 	}
3336*4882a593Smuzhiyun 	wp_page_reuse(vmf);
3337*4882a593Smuzhiyun 	return VM_FAULT_WRITE;
3338*4882a593Smuzhiyun }
3339*4882a593Smuzhiyun 
wp_page_shared(struct vm_fault * vmf)3340*4882a593Smuzhiyun static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3341*4882a593Smuzhiyun 	__releases(vmf->ptl)
3342*4882a593Smuzhiyun {
3343*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3344*4882a593Smuzhiyun 	vm_fault_t ret = VM_FAULT_WRITE;
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	get_page(vmf->page);
3347*4882a593Smuzhiyun 
3348*4882a593Smuzhiyun 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3349*4882a593Smuzhiyun 		vm_fault_t tmp;
3350*4882a593Smuzhiyun 
3351*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3352*4882a593Smuzhiyun 		tmp = do_page_mkwrite(vmf);
3353*4882a593Smuzhiyun 		if (unlikely(!tmp || (tmp &
3354*4882a593Smuzhiyun 				      (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
3355*4882a593Smuzhiyun 			put_page(vmf->page);
3356*4882a593Smuzhiyun 			return tmp;
3357*4882a593Smuzhiyun 		}
3358*4882a593Smuzhiyun 		tmp = finish_mkwrite_fault(vmf);
3359*4882a593Smuzhiyun 		if (unlikely(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
3360*4882a593Smuzhiyun 			unlock_page(vmf->page);
3361*4882a593Smuzhiyun 			put_page(vmf->page);
3362*4882a593Smuzhiyun 			return tmp;
3363*4882a593Smuzhiyun 		}
3364*4882a593Smuzhiyun 	} else {
3365*4882a593Smuzhiyun 		wp_page_reuse(vmf);
3366*4882a593Smuzhiyun 		lock_page(vmf->page);
3367*4882a593Smuzhiyun 	}
3368*4882a593Smuzhiyun 	ret |= fault_dirty_shared_page(vmf);
3369*4882a593Smuzhiyun 	put_page(vmf->page);
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 	return ret;
3372*4882a593Smuzhiyun }
3373*4882a593Smuzhiyun 
3374*4882a593Smuzhiyun /*
3375*4882a593Smuzhiyun  * This routine handles present pages, when users try to write
3376*4882a593Smuzhiyun  * to a shared page. It is done by copying the page to a new address
3377*4882a593Smuzhiyun  * and decrementing the shared-page counter for the old page.
3378*4882a593Smuzhiyun  *
3379*4882a593Smuzhiyun  * Note that this routine assumes that the protection checks have been
3380*4882a593Smuzhiyun  * done by the caller (the low-level page fault routine in most cases).
3381*4882a593Smuzhiyun  * Thus we can safely just mark it writable once we've done any necessary
3382*4882a593Smuzhiyun  * COW.
3383*4882a593Smuzhiyun  *
3384*4882a593Smuzhiyun  * We also mark the page dirty at this point even though the page will
3385*4882a593Smuzhiyun  * change only once the write actually happens. This avoids a few races,
3386*4882a593Smuzhiyun  * and potentially makes it more efficient.
3387*4882a593Smuzhiyun  *
3388*4882a593Smuzhiyun  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3389*4882a593Smuzhiyun  * but allow concurrent faults), with pte both mapped and locked.
3390*4882a593Smuzhiyun  * We return with mmap_lock still held, but pte unmapped and unlocked.
3391*4882a593Smuzhiyun  */
do_wp_page(struct vm_fault * vmf)3392*4882a593Smuzhiyun static vm_fault_t do_wp_page(struct vm_fault *vmf)
3393*4882a593Smuzhiyun 	__releases(vmf->ptl)
3394*4882a593Smuzhiyun {
3395*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3396*4882a593Smuzhiyun 
3397*4882a593Smuzhiyun 	if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3398*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3399*4882a593Smuzhiyun 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3400*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
3401*4882a593Smuzhiyun 		return handle_userfault(vmf, VM_UFFD_WP);
3402*4882a593Smuzhiyun 	}
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun 	/*
3405*4882a593Smuzhiyun 	 * Userfaultfd write-protect can defer flushes. Ensure the TLB
3406*4882a593Smuzhiyun 	 * is flushed in this case before copying.
3407*4882a593Smuzhiyun 	 */
3408*4882a593Smuzhiyun 	if (unlikely(userfaultfd_wp(vmf->vma) &&
3409*4882a593Smuzhiyun 		     mm_tlb_flush_pending(vmf->vma->vm_mm)))
3410*4882a593Smuzhiyun 		flush_tlb_page(vmf->vma, vmf->address);
3411*4882a593Smuzhiyun 
3412*4882a593Smuzhiyun 	vmf->page = _vm_normal_page(vma, vmf->address, vmf->orig_pte,
3413*4882a593Smuzhiyun 					vmf->vma_flags);
3414*4882a593Smuzhiyun 	if (!vmf->page) {
3415*4882a593Smuzhiyun 		/*
3416*4882a593Smuzhiyun 		 * VM_MIXEDMAP !pfn_valid() case, or VM_SOFTDIRTY clear on a
3417*4882a593Smuzhiyun 		 * VM_PFNMAP VMA.
3418*4882a593Smuzhiyun 		 *
3419*4882a593Smuzhiyun 		 * We should not cow pages in a shared writeable mapping.
3420*4882a593Smuzhiyun 		 * Just mark the pages writable and/or call ops->pfn_mkwrite.
3421*4882a593Smuzhiyun 		 */
3422*4882a593Smuzhiyun 		if ((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
3423*4882a593Smuzhiyun 				     (VM_WRITE|VM_SHARED))
3424*4882a593Smuzhiyun 			return wp_pfn_shared(vmf);
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3427*4882a593Smuzhiyun 		return wp_page_copy(vmf);
3428*4882a593Smuzhiyun 	}
3429*4882a593Smuzhiyun 
3430*4882a593Smuzhiyun 	/*
3431*4882a593Smuzhiyun 	 * Take out anonymous pages first, anonymous shared vmas are
3432*4882a593Smuzhiyun 	 * not dirty accountable.
3433*4882a593Smuzhiyun 	 */
3434*4882a593Smuzhiyun 	if (PageAnon(vmf->page)) {
3435*4882a593Smuzhiyun 		struct page *page = vmf->page;
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 		/* PageKsm() doesn't necessarily raise the page refcount */
3438*4882a593Smuzhiyun 		if (PageKsm(page) || page_count(page) != 1)
3439*4882a593Smuzhiyun 			goto copy;
3440*4882a593Smuzhiyun 		if (!trylock_page(page))
3441*4882a593Smuzhiyun 			goto copy;
3442*4882a593Smuzhiyun 		if (PageKsm(page) || page_mapcount(page) != 1 || page_count(page) != 1) {
3443*4882a593Smuzhiyun 			unlock_page(page);
3444*4882a593Smuzhiyun 			goto copy;
3445*4882a593Smuzhiyun 		}
3446*4882a593Smuzhiyun 		/*
3447*4882a593Smuzhiyun 		 * Ok, we've got the only map reference, and the only
3448*4882a593Smuzhiyun 		 * page count reference, and the page is locked,
3449*4882a593Smuzhiyun 		 * it's dark out, and we're wearing sunglasses. Hit it.
3450*4882a593Smuzhiyun 		 */
3451*4882a593Smuzhiyun 		unlock_page(page);
3452*4882a593Smuzhiyun 		wp_page_reuse(vmf);
3453*4882a593Smuzhiyun 		return VM_FAULT_WRITE;
3454*4882a593Smuzhiyun 	} else if (unlikely((vmf->vma_flags & (VM_WRITE|VM_SHARED)) ==
3455*4882a593Smuzhiyun 					(VM_WRITE|VM_SHARED))) {
3456*4882a593Smuzhiyun 		return wp_page_shared(vmf);
3457*4882a593Smuzhiyun 	}
3458*4882a593Smuzhiyun copy:
3459*4882a593Smuzhiyun 	/*
3460*4882a593Smuzhiyun 	 * Ok, we need to copy. Oh, well..
3461*4882a593Smuzhiyun 	 */
3462*4882a593Smuzhiyun 	get_page(vmf->page);
3463*4882a593Smuzhiyun 
3464*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3465*4882a593Smuzhiyun 	return wp_page_copy(vmf);
3466*4882a593Smuzhiyun }
3467*4882a593Smuzhiyun 
unmap_mapping_range_vma(struct vm_area_struct * vma,unsigned long start_addr,unsigned long end_addr,struct zap_details * details)3468*4882a593Smuzhiyun static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3469*4882a593Smuzhiyun 		unsigned long start_addr, unsigned long end_addr,
3470*4882a593Smuzhiyun 		struct zap_details *details)
3471*4882a593Smuzhiyun {
3472*4882a593Smuzhiyun 	zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3473*4882a593Smuzhiyun }
3474*4882a593Smuzhiyun 
unmap_mapping_range_tree(struct rb_root_cached * root,struct zap_details * details)3475*4882a593Smuzhiyun static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
3476*4882a593Smuzhiyun 					    struct zap_details *details)
3477*4882a593Smuzhiyun {
3478*4882a593Smuzhiyun 	struct vm_area_struct *vma;
3479*4882a593Smuzhiyun 	pgoff_t vba, vea, zba, zea;
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun 	vma_interval_tree_foreach(vma, root,
3482*4882a593Smuzhiyun 			details->first_index, details->last_index) {
3483*4882a593Smuzhiyun 
3484*4882a593Smuzhiyun 		vba = vma->vm_pgoff;
3485*4882a593Smuzhiyun 		vea = vba + vma_pages(vma) - 1;
3486*4882a593Smuzhiyun 		zba = details->first_index;
3487*4882a593Smuzhiyun 		if (zba < vba)
3488*4882a593Smuzhiyun 			zba = vba;
3489*4882a593Smuzhiyun 		zea = details->last_index;
3490*4882a593Smuzhiyun 		if (zea > vea)
3491*4882a593Smuzhiyun 			zea = vea;
3492*4882a593Smuzhiyun 
3493*4882a593Smuzhiyun 		unmap_mapping_range_vma(vma,
3494*4882a593Smuzhiyun 			((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3495*4882a593Smuzhiyun 			((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3496*4882a593Smuzhiyun 				details);
3497*4882a593Smuzhiyun 	}
3498*4882a593Smuzhiyun }
3499*4882a593Smuzhiyun 
3500*4882a593Smuzhiyun /**
3501*4882a593Smuzhiyun  * unmap_mapping_page() - Unmap single page from processes.
3502*4882a593Smuzhiyun  * @page: The locked page to be unmapped.
3503*4882a593Smuzhiyun  *
3504*4882a593Smuzhiyun  * Unmap this page from any userspace process which still has it mmaped.
3505*4882a593Smuzhiyun  * Typically, for efficiency, the range of nearby pages has already been
3506*4882a593Smuzhiyun  * unmapped by unmap_mapping_pages() or unmap_mapping_range().  But once
3507*4882a593Smuzhiyun  * truncation or invalidation holds the lock on a page, it may find that
3508*4882a593Smuzhiyun  * the page has been remapped again: and then uses unmap_mapping_page()
3509*4882a593Smuzhiyun  * to unmap it finally.
3510*4882a593Smuzhiyun  */
unmap_mapping_page(struct page * page)3511*4882a593Smuzhiyun void unmap_mapping_page(struct page *page)
3512*4882a593Smuzhiyun {
3513*4882a593Smuzhiyun 	struct address_space *mapping = page->mapping;
3514*4882a593Smuzhiyun 	struct zap_details details = { };
3515*4882a593Smuzhiyun 
3516*4882a593Smuzhiyun 	VM_BUG_ON(!PageLocked(page));
3517*4882a593Smuzhiyun 	VM_BUG_ON(PageTail(page));
3518*4882a593Smuzhiyun 
3519*4882a593Smuzhiyun 	details.check_mapping = mapping;
3520*4882a593Smuzhiyun 	details.first_index = page->index;
3521*4882a593Smuzhiyun 	details.last_index = page->index + thp_nr_pages(page) - 1;
3522*4882a593Smuzhiyun 	details.single_page = page;
3523*4882a593Smuzhiyun 
3524*4882a593Smuzhiyun 	i_mmap_lock_write(mapping);
3525*4882a593Smuzhiyun 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3526*4882a593Smuzhiyun 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3527*4882a593Smuzhiyun 	i_mmap_unlock_write(mapping);
3528*4882a593Smuzhiyun }
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun /**
3531*4882a593Smuzhiyun  * unmap_mapping_pages() - Unmap pages from processes.
3532*4882a593Smuzhiyun  * @mapping: The address space containing pages to be unmapped.
3533*4882a593Smuzhiyun  * @start: Index of first page to be unmapped.
3534*4882a593Smuzhiyun  * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
3535*4882a593Smuzhiyun  * @even_cows: Whether to unmap even private COWed pages.
3536*4882a593Smuzhiyun  *
3537*4882a593Smuzhiyun  * Unmap the pages in this address space from any userspace process which
3538*4882a593Smuzhiyun  * has them mmaped.  Generally, you want to remove COWed pages as well when
3539*4882a593Smuzhiyun  * a file is being truncated, but not when invalidating pages from the page
3540*4882a593Smuzhiyun  * cache.
3541*4882a593Smuzhiyun  */
unmap_mapping_pages(struct address_space * mapping,pgoff_t start,pgoff_t nr,bool even_cows)3542*4882a593Smuzhiyun void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
3543*4882a593Smuzhiyun 		pgoff_t nr, bool even_cows)
3544*4882a593Smuzhiyun {
3545*4882a593Smuzhiyun 	struct zap_details details = { };
3546*4882a593Smuzhiyun 
3547*4882a593Smuzhiyun 	details.check_mapping = even_cows ? NULL : mapping;
3548*4882a593Smuzhiyun 	details.first_index = start;
3549*4882a593Smuzhiyun 	details.last_index = start + nr - 1;
3550*4882a593Smuzhiyun 	if (details.last_index < details.first_index)
3551*4882a593Smuzhiyun 		details.last_index = ULONG_MAX;
3552*4882a593Smuzhiyun 
3553*4882a593Smuzhiyun 	i_mmap_lock_write(mapping);
3554*4882a593Smuzhiyun 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
3555*4882a593Smuzhiyun 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
3556*4882a593Smuzhiyun 	i_mmap_unlock_write(mapping);
3557*4882a593Smuzhiyun }
3558*4882a593Smuzhiyun 
3559*4882a593Smuzhiyun /**
3560*4882a593Smuzhiyun  * unmap_mapping_range - unmap the portion of all mmaps in the specified
3561*4882a593Smuzhiyun  * address_space corresponding to the specified byte range in the underlying
3562*4882a593Smuzhiyun  * file.
3563*4882a593Smuzhiyun  *
3564*4882a593Smuzhiyun  * @mapping: the address space containing mmaps to be unmapped.
3565*4882a593Smuzhiyun  * @holebegin: byte in first page to unmap, relative to the start of
3566*4882a593Smuzhiyun  * the underlying file.  This will be rounded down to a PAGE_SIZE
3567*4882a593Smuzhiyun  * boundary.  Note that this is different from truncate_pagecache(), which
3568*4882a593Smuzhiyun  * must keep the partial page.  In contrast, we must get rid of
3569*4882a593Smuzhiyun  * partial pages.
3570*4882a593Smuzhiyun  * @holelen: size of prospective hole in bytes.  This will be rounded
3571*4882a593Smuzhiyun  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
3572*4882a593Smuzhiyun  * end of the file.
3573*4882a593Smuzhiyun  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
3574*4882a593Smuzhiyun  * but 0 when invalidating pagecache, don't throw away private data.
3575*4882a593Smuzhiyun  */
unmap_mapping_range(struct address_space * mapping,loff_t const holebegin,loff_t const holelen,int even_cows)3576*4882a593Smuzhiyun void unmap_mapping_range(struct address_space *mapping,
3577*4882a593Smuzhiyun 		loff_t const holebegin, loff_t const holelen, int even_cows)
3578*4882a593Smuzhiyun {
3579*4882a593Smuzhiyun 	pgoff_t hba = holebegin >> PAGE_SHIFT;
3580*4882a593Smuzhiyun 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3581*4882a593Smuzhiyun 
3582*4882a593Smuzhiyun 	/* Check for overflow. */
3583*4882a593Smuzhiyun 	if (sizeof(holelen) > sizeof(hlen)) {
3584*4882a593Smuzhiyun 		long long holeend =
3585*4882a593Smuzhiyun 			(holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
3586*4882a593Smuzhiyun 		if (holeend & ~(long long)ULONG_MAX)
3587*4882a593Smuzhiyun 			hlen = ULONG_MAX - hba + 1;
3588*4882a593Smuzhiyun 	}
3589*4882a593Smuzhiyun 
3590*4882a593Smuzhiyun 	unmap_mapping_pages(mapping, hba, hlen, even_cows);
3591*4882a593Smuzhiyun }
3592*4882a593Smuzhiyun EXPORT_SYMBOL(unmap_mapping_range);
3593*4882a593Smuzhiyun 
3594*4882a593Smuzhiyun /*
3595*4882a593Smuzhiyun  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3596*4882a593Smuzhiyun  * but allow concurrent faults), and pte mapped but not yet locked.
3597*4882a593Smuzhiyun  * We return with pte unmapped and unlocked.
3598*4882a593Smuzhiyun  *
3599*4882a593Smuzhiyun  * We return with the mmap_lock locked or unlocked in the same cases
3600*4882a593Smuzhiyun  * as does filemap_fault().
3601*4882a593Smuzhiyun  */
do_swap_page(struct vm_fault * vmf)3602*4882a593Smuzhiyun vm_fault_t do_swap_page(struct vm_fault *vmf)
3603*4882a593Smuzhiyun {
3604*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3605*4882a593Smuzhiyun 	struct page *page = NULL, *swapcache;
3606*4882a593Smuzhiyun 	swp_entry_t entry;
3607*4882a593Smuzhiyun 	pte_t pte;
3608*4882a593Smuzhiyun 	int locked;
3609*4882a593Smuzhiyun 	int exclusive = 0;
3610*4882a593Smuzhiyun 	vm_fault_t ret;
3611*4882a593Smuzhiyun 	void *shadow = NULL;
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3614*4882a593Smuzhiyun 		pte_unmap(vmf->pte);
3615*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
3616*4882a593Smuzhiyun 	}
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 	ret = pte_unmap_same(vmf);
3619*4882a593Smuzhiyun 	if (ret) {
3620*4882a593Smuzhiyun 		/*
3621*4882a593Smuzhiyun 		 * If pte != orig_pte, this means another thread did the
3622*4882a593Smuzhiyun 		 * swap operation in our back.
3623*4882a593Smuzhiyun 		 * So nothing else to do.
3624*4882a593Smuzhiyun 		 */
3625*4882a593Smuzhiyun 		if (ret == VM_FAULT_PTNOTSAME)
3626*4882a593Smuzhiyun 			ret = 0;
3627*4882a593Smuzhiyun 		goto out;
3628*4882a593Smuzhiyun 	}
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 	entry = pte_to_swp_entry(vmf->orig_pte);
3631*4882a593Smuzhiyun 	if (unlikely(non_swap_entry(entry))) {
3632*4882a593Smuzhiyun 		if (is_migration_entry(entry)) {
3633*4882a593Smuzhiyun 			migration_entry_wait(vma->vm_mm, vmf->pmd,
3634*4882a593Smuzhiyun 					     vmf->address);
3635*4882a593Smuzhiyun 		} else if (is_device_private_entry(entry)) {
3636*4882a593Smuzhiyun 			vmf->page = device_private_entry_to_page(entry);
3637*4882a593Smuzhiyun 			ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3638*4882a593Smuzhiyun 		} else if (is_hwpoison_entry(entry)) {
3639*4882a593Smuzhiyun 			ret = VM_FAULT_HWPOISON;
3640*4882a593Smuzhiyun 		} else {
3641*4882a593Smuzhiyun 			print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3642*4882a593Smuzhiyun 			ret = VM_FAULT_SIGBUS;
3643*4882a593Smuzhiyun 		}
3644*4882a593Smuzhiyun 		goto out;
3645*4882a593Smuzhiyun 	}
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun 
3648*4882a593Smuzhiyun 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
3649*4882a593Smuzhiyun 	page = lookup_swap_cache(entry, vma, vmf->address);
3650*4882a593Smuzhiyun 	swapcache = page;
3651*4882a593Smuzhiyun 
3652*4882a593Smuzhiyun 	if (!page) {
3653*4882a593Smuzhiyun 		struct swap_info_struct *si = swp_swap_info(entry);
3654*4882a593Smuzhiyun 
3655*4882a593Smuzhiyun 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
3656*4882a593Smuzhiyun 		    __swap_count(entry) == 1) {
3657*4882a593Smuzhiyun 			/* skip swapcache */
3658*4882a593Smuzhiyun 			gfp_t flags = GFP_HIGHUSER_MOVABLE;
3659*4882a593Smuzhiyun 
3660*4882a593Smuzhiyun 			trace_android_rvh_set_skip_swapcache_flags(&flags);
3661*4882a593Smuzhiyun 			page = alloc_page_vma(flags, vma, vmf->address);
3662*4882a593Smuzhiyun 			if (page) {
3663*4882a593Smuzhiyun 				int err;
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 				__SetPageLocked(page);
3666*4882a593Smuzhiyun 				__SetPageSwapBacked(page);
3667*4882a593Smuzhiyun 				set_page_private(page, entry.val);
3668*4882a593Smuzhiyun 
3669*4882a593Smuzhiyun 				/* Tell memcg to use swap ownership records */
3670*4882a593Smuzhiyun 				SetPageSwapCache(page);
3671*4882a593Smuzhiyun 				err = mem_cgroup_charge(page, vma->vm_mm,
3672*4882a593Smuzhiyun 							GFP_KERNEL);
3673*4882a593Smuzhiyun 				ClearPageSwapCache(page);
3674*4882a593Smuzhiyun 				if (err) {
3675*4882a593Smuzhiyun 					ret = VM_FAULT_OOM;
3676*4882a593Smuzhiyun 					goto out_page;
3677*4882a593Smuzhiyun 				}
3678*4882a593Smuzhiyun 
3679*4882a593Smuzhiyun 				shadow = get_shadow_from_swap_cache(entry);
3680*4882a593Smuzhiyun 				if (shadow)
3681*4882a593Smuzhiyun 					workingset_refault(page, shadow);
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun 				lru_cache_add(page);
3684*4882a593Smuzhiyun 				swap_readpage(page, true);
3685*4882a593Smuzhiyun 			}
3686*4882a593Smuzhiyun 		} else if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
3687*4882a593Smuzhiyun 			/*
3688*4882a593Smuzhiyun 			 * Don't try readahead during a speculative page fault
3689*4882a593Smuzhiyun 			 * as the VMA's boundaries may change in our back.
3690*4882a593Smuzhiyun 			 * If the page is not in the swap cache and synchronous
3691*4882a593Smuzhiyun 			 * read is disabled, fall back to the regular page fault
3692*4882a593Smuzhiyun 			 * mechanism.
3693*4882a593Smuzhiyun 			 */
3694*4882a593Smuzhiyun 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3695*4882a593Smuzhiyun 			ret = VM_FAULT_RETRY;
3696*4882a593Smuzhiyun 			goto out;
3697*4882a593Smuzhiyun 		} else {
3698*4882a593Smuzhiyun 			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
3699*4882a593Smuzhiyun 						vmf);
3700*4882a593Smuzhiyun 			swapcache = page;
3701*4882a593Smuzhiyun 		}
3702*4882a593Smuzhiyun 
3703*4882a593Smuzhiyun 		if (!page) {
3704*4882a593Smuzhiyun 			/*
3705*4882a593Smuzhiyun 			 * Back out if the VMA has changed in our back during
3706*4882a593Smuzhiyun 			 * a speculative page fault or if somebody else
3707*4882a593Smuzhiyun 			 * faulted in this pte while we released the pte lock.
3708*4882a593Smuzhiyun 			 */
3709*4882a593Smuzhiyun 			if (!pte_map_lock(vmf)) {
3710*4882a593Smuzhiyun 				delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3711*4882a593Smuzhiyun 				ret = VM_FAULT_RETRY;
3712*4882a593Smuzhiyun 				goto out;
3713*4882a593Smuzhiyun 			}
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun 			if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3716*4882a593Smuzhiyun 				ret = VM_FAULT_OOM;
3717*4882a593Smuzhiyun 			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3718*4882a593Smuzhiyun 			goto unlock;
3719*4882a593Smuzhiyun 		}
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun 		/* Had to read the page from swap area: Major fault */
3722*4882a593Smuzhiyun 		ret = VM_FAULT_MAJOR;
3723*4882a593Smuzhiyun 		count_vm_event(PGMAJFAULT);
3724*4882a593Smuzhiyun 		count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3725*4882a593Smuzhiyun 	} else if (PageHWPoison(page)) {
3726*4882a593Smuzhiyun 		/*
3727*4882a593Smuzhiyun 		 * hwpoisoned dirty swapcache pages are kept for killing
3728*4882a593Smuzhiyun 		 * owner processes (which may be unknown at hwpoison time)
3729*4882a593Smuzhiyun 		 */
3730*4882a593Smuzhiyun 		ret = VM_FAULT_HWPOISON;
3731*4882a593Smuzhiyun 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3732*4882a593Smuzhiyun 		goto out_release;
3733*4882a593Smuzhiyun 	}
3734*4882a593Smuzhiyun 
3735*4882a593Smuzhiyun 	locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
3738*4882a593Smuzhiyun 	if (!locked) {
3739*4882a593Smuzhiyun 		ret |= VM_FAULT_RETRY;
3740*4882a593Smuzhiyun 		goto out_release;
3741*4882a593Smuzhiyun 	}
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	/*
3744*4882a593Smuzhiyun 	 * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
3745*4882a593Smuzhiyun 	 * release the swapcache from under us.  The page pin, and pte_same
3746*4882a593Smuzhiyun 	 * test below, are not enough to exclude that.  Even if it is still
3747*4882a593Smuzhiyun 	 * swapcache, we need to check that the page's swap has not changed.
3748*4882a593Smuzhiyun 	 */
3749*4882a593Smuzhiyun 	if (unlikely((!PageSwapCache(page) ||
3750*4882a593Smuzhiyun 			page_private(page) != entry.val)) && swapcache)
3751*4882a593Smuzhiyun 		goto out_page;
3752*4882a593Smuzhiyun 
3753*4882a593Smuzhiyun 	page = ksm_might_need_to_copy(page, vma, vmf->address);
3754*4882a593Smuzhiyun 	if (unlikely(!page)) {
3755*4882a593Smuzhiyun 		ret = VM_FAULT_OOM;
3756*4882a593Smuzhiyun 		page = swapcache;
3757*4882a593Smuzhiyun 		goto out_page;
3758*4882a593Smuzhiyun 	}
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	/*
3763*4882a593Smuzhiyun 	 * Back out if the VMA has changed in our back during a speculative
3764*4882a593Smuzhiyun 	 * page fault or if somebody else already faulted in this pte.
3765*4882a593Smuzhiyun 	 */
3766*4882a593Smuzhiyun 	if (!pte_map_lock(vmf)) {
3767*4882a593Smuzhiyun 		ret = VM_FAULT_RETRY;
3768*4882a593Smuzhiyun 		goto out_page;
3769*4882a593Smuzhiyun 	}
3770*4882a593Smuzhiyun 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3771*4882a593Smuzhiyun 		goto out_nomap;
3772*4882a593Smuzhiyun 
3773*4882a593Smuzhiyun 	if (unlikely(!PageUptodate(page))) {
3774*4882a593Smuzhiyun 		ret = VM_FAULT_SIGBUS;
3775*4882a593Smuzhiyun 		goto out_nomap;
3776*4882a593Smuzhiyun 	}
3777*4882a593Smuzhiyun 
3778*4882a593Smuzhiyun 	/*
3779*4882a593Smuzhiyun 	 * The page isn't present yet, go ahead with the fault.
3780*4882a593Smuzhiyun 	 *
3781*4882a593Smuzhiyun 	 * Be careful about the sequence of operations here.
3782*4882a593Smuzhiyun 	 * To get its accounting right, reuse_swap_page() must be called
3783*4882a593Smuzhiyun 	 * while the page is counted on swap but not yet in mapcount i.e.
3784*4882a593Smuzhiyun 	 * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
3785*4882a593Smuzhiyun 	 * must be called after the swap_free(), or it will never succeed.
3786*4882a593Smuzhiyun 	 */
3787*4882a593Smuzhiyun 
3788*4882a593Smuzhiyun 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3789*4882a593Smuzhiyun 	dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3790*4882a593Smuzhiyun 	pte = mk_pte(page, vmf->vma_page_prot);
3791*4882a593Smuzhiyun 	if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3792*4882a593Smuzhiyun 		pte = maybe_mkwrite(pte_mkdirty(pte), vmf->vma_flags);
3793*4882a593Smuzhiyun 		vmf->flags &= ~FAULT_FLAG_WRITE;
3794*4882a593Smuzhiyun 		ret |= VM_FAULT_WRITE;
3795*4882a593Smuzhiyun 		exclusive = RMAP_EXCLUSIVE;
3796*4882a593Smuzhiyun 	}
3797*4882a593Smuzhiyun 	flush_icache_page(vma, page);
3798*4882a593Smuzhiyun 	if (pte_swp_soft_dirty(vmf->orig_pte))
3799*4882a593Smuzhiyun 		pte = pte_mksoft_dirty(pte);
3800*4882a593Smuzhiyun 	if (pte_swp_uffd_wp(vmf->orig_pte)) {
3801*4882a593Smuzhiyun 		pte = pte_mkuffd_wp(pte);
3802*4882a593Smuzhiyun 		pte = pte_wrprotect(pte);
3803*4882a593Smuzhiyun 	}
3804*4882a593Smuzhiyun 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3805*4882a593Smuzhiyun 	arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3806*4882a593Smuzhiyun 	vmf->orig_pte = pte;
3807*4882a593Smuzhiyun 
3808*4882a593Smuzhiyun 	/* ksm created a completely new copy */
3809*4882a593Smuzhiyun 	if (unlikely(page != swapcache && swapcache)) {
3810*4882a593Smuzhiyun 		__page_add_new_anon_rmap(page, vma, vmf->address, false);
3811*4882a593Smuzhiyun 		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
3812*4882a593Smuzhiyun 	} else {
3813*4882a593Smuzhiyun 		do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3814*4882a593Smuzhiyun 	}
3815*4882a593Smuzhiyun 
3816*4882a593Smuzhiyun 	trace_android_vh_swapin_add_anon_rmap(vmf, page);
3817*4882a593Smuzhiyun 	swap_free(entry);
3818*4882a593Smuzhiyun 	if (mem_cgroup_swap_full(page) ||
3819*4882a593Smuzhiyun 	    (vmf->vma_flags & VM_LOCKED) || PageMlocked(page))
3820*4882a593Smuzhiyun 		try_to_free_swap(page);
3821*4882a593Smuzhiyun 	unlock_page(page);
3822*4882a593Smuzhiyun 	if (page != swapcache && swapcache) {
3823*4882a593Smuzhiyun 		/*
3824*4882a593Smuzhiyun 		 * Hold the lock to avoid the swap entry to be reused
3825*4882a593Smuzhiyun 		 * until we take the PT lock for the pte_same() check
3826*4882a593Smuzhiyun 		 * (to avoid false positives from pte_same). For
3827*4882a593Smuzhiyun 		 * further safety release the lock after the swap_free
3828*4882a593Smuzhiyun 		 * so that the swap count won't change under a
3829*4882a593Smuzhiyun 		 * parallel locked swapcache.
3830*4882a593Smuzhiyun 		 */
3831*4882a593Smuzhiyun 		unlock_page(swapcache);
3832*4882a593Smuzhiyun 		put_page(swapcache);
3833*4882a593Smuzhiyun 	}
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_WRITE) {
3836*4882a593Smuzhiyun 		ret |= do_wp_page(vmf);
3837*4882a593Smuzhiyun 		if (ret & VM_FAULT_ERROR)
3838*4882a593Smuzhiyun 			ret &= VM_FAULT_ERROR;
3839*4882a593Smuzhiyun 		goto out;
3840*4882a593Smuzhiyun 	}
3841*4882a593Smuzhiyun 
3842*4882a593Smuzhiyun 	/* No need to invalidate - it was non-present before */
3843*4882a593Smuzhiyun 	update_mmu_cache(vma, vmf->address, vmf->pte);
3844*4882a593Smuzhiyun unlock:
3845*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3846*4882a593Smuzhiyun out:
3847*4882a593Smuzhiyun 	return ret;
3848*4882a593Smuzhiyun out_nomap:
3849*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3850*4882a593Smuzhiyun out_page:
3851*4882a593Smuzhiyun 	unlock_page(page);
3852*4882a593Smuzhiyun out_release:
3853*4882a593Smuzhiyun 	put_page(page);
3854*4882a593Smuzhiyun 	if (page != swapcache && swapcache) {
3855*4882a593Smuzhiyun 		unlock_page(swapcache);
3856*4882a593Smuzhiyun 		put_page(swapcache);
3857*4882a593Smuzhiyun 	}
3858*4882a593Smuzhiyun 	return ret;
3859*4882a593Smuzhiyun }
3860*4882a593Smuzhiyun 
3861*4882a593Smuzhiyun /*
3862*4882a593Smuzhiyun  * We enter with non-exclusive mmap_lock (to exclude vma changes,
3863*4882a593Smuzhiyun  * but allow concurrent faults), and pte mapped but not yet locked.
3864*4882a593Smuzhiyun  * We return with mmap_lock still held, but pte unmapped and unlocked.
3865*4882a593Smuzhiyun  */
do_anonymous_page(struct vm_fault * vmf)3866*4882a593Smuzhiyun static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3867*4882a593Smuzhiyun {
3868*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
3869*4882a593Smuzhiyun 	struct page *page;
3870*4882a593Smuzhiyun 	vm_fault_t ret = 0;
3871*4882a593Smuzhiyun 	pte_t entry;
3872*4882a593Smuzhiyun 
3873*4882a593Smuzhiyun 	/* File mapping without ->vm_ops ? */
3874*4882a593Smuzhiyun 	if (vmf->vma_flags & VM_SHARED)
3875*4882a593Smuzhiyun 		return VM_FAULT_SIGBUS;
3876*4882a593Smuzhiyun 
3877*4882a593Smuzhiyun 	/* Do not check unstable pmd, if it's changed will retry later */
3878*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3879*4882a593Smuzhiyun 		goto skip_pmd_checks;
3880*4882a593Smuzhiyun 
3881*4882a593Smuzhiyun 	/*
3882*4882a593Smuzhiyun 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
3883*4882a593Smuzhiyun 	 * pte_offset_map() on pmds where a huge pmd might be created
3884*4882a593Smuzhiyun 	 * from a different thread.
3885*4882a593Smuzhiyun 	 *
3886*4882a593Smuzhiyun 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
3887*4882a593Smuzhiyun 	 * parallel threads are excluded by other means.
3888*4882a593Smuzhiyun 	 *
3889*4882a593Smuzhiyun 	 * Here we only have mmap_read_lock(mm).
3890*4882a593Smuzhiyun 	 */
3891*4882a593Smuzhiyun 	if (pte_alloc(vma->vm_mm, vmf->pmd))
3892*4882a593Smuzhiyun 		return VM_FAULT_OOM;
3893*4882a593Smuzhiyun 
3894*4882a593Smuzhiyun 	/* See comment in handle_pte_fault() */
3895*4882a593Smuzhiyun 	if (unlikely(pmd_trans_unstable(vmf->pmd)))
3896*4882a593Smuzhiyun 		return 0;
3897*4882a593Smuzhiyun 
3898*4882a593Smuzhiyun skip_pmd_checks:
3899*4882a593Smuzhiyun 	/* Use the zero-page for reads */
3900*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3901*4882a593Smuzhiyun 			!mm_forbids_zeropage(vma->vm_mm)) {
3902*4882a593Smuzhiyun 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3903*4882a593Smuzhiyun 						vmf->vma_page_prot));
3904*4882a593Smuzhiyun 		if (!pte_map_lock(vmf))
3905*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
3906*4882a593Smuzhiyun 		if (!pte_none(*vmf->pte)) {
3907*4882a593Smuzhiyun 			update_mmu_tlb(vma, vmf->address, vmf->pte);
3908*4882a593Smuzhiyun 			goto unlock;
3909*4882a593Smuzhiyun 		}
3910*4882a593Smuzhiyun 		ret = check_stable_address_space(vma->vm_mm);
3911*4882a593Smuzhiyun 		if (ret)
3912*4882a593Smuzhiyun 			goto unlock;
3913*4882a593Smuzhiyun 		/*
3914*4882a593Smuzhiyun 		 * Don't call the userfaultfd during the speculative path.
3915*4882a593Smuzhiyun 		 * We already checked for the VMA to not be managed through
3916*4882a593Smuzhiyun 		 * userfaultfd, but it may be set in our back once we have lock
3917*4882a593Smuzhiyun 		 * the pte. In such a case we can ignore it this time.
3918*4882a593Smuzhiyun 		 */
3919*4882a593Smuzhiyun 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
3920*4882a593Smuzhiyun 			goto setpte;
3921*4882a593Smuzhiyun 		/* Deliver the page fault to userland, check inside PT lock */
3922*4882a593Smuzhiyun 		if (userfaultfd_missing(vma)) {
3923*4882a593Smuzhiyun 			pte_unmap_unlock(vmf->pte, vmf->ptl);
3924*4882a593Smuzhiyun 			return handle_userfault(vmf, VM_UFFD_MISSING);
3925*4882a593Smuzhiyun 		}
3926*4882a593Smuzhiyun 		goto setpte;
3927*4882a593Smuzhiyun 	}
3928*4882a593Smuzhiyun 
3929*4882a593Smuzhiyun 	/* Allocate our own private page. */
3930*4882a593Smuzhiyun 	if (unlikely(anon_vma_prepare(vma)))
3931*4882a593Smuzhiyun 		goto oom;
3932*4882a593Smuzhiyun 	page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3933*4882a593Smuzhiyun 	if (!page)
3934*4882a593Smuzhiyun 		goto oom;
3935*4882a593Smuzhiyun 
3936*4882a593Smuzhiyun 	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3937*4882a593Smuzhiyun 		goto oom_free_page;
3938*4882a593Smuzhiyun 	cgroup_throttle_swaprate(page, GFP_KERNEL);
3939*4882a593Smuzhiyun 
3940*4882a593Smuzhiyun 	/*
3941*4882a593Smuzhiyun 	 * The memory barrier inside __SetPageUptodate makes sure that
3942*4882a593Smuzhiyun 	 * preceding stores to the page contents become visible before
3943*4882a593Smuzhiyun 	 * the set_pte_at() write.
3944*4882a593Smuzhiyun 	 */
3945*4882a593Smuzhiyun 	__SetPageUptodate(page);
3946*4882a593Smuzhiyun 
3947*4882a593Smuzhiyun 	entry = mk_pte(page, vmf->vma_page_prot);
3948*4882a593Smuzhiyun 	entry = pte_sw_mkyoung(entry);
3949*4882a593Smuzhiyun 	if (vmf->vma_flags & VM_WRITE)
3950*4882a593Smuzhiyun 		entry = pte_mkwrite(pte_mkdirty(entry));
3951*4882a593Smuzhiyun 
3952*4882a593Smuzhiyun 	if (!pte_map_lock(vmf)) {
3953*4882a593Smuzhiyun 		ret = VM_FAULT_RETRY;
3954*4882a593Smuzhiyun 		goto release;
3955*4882a593Smuzhiyun 	}
3956*4882a593Smuzhiyun 
3957*4882a593Smuzhiyun 	if (!pte_none(*vmf->pte)) {
3958*4882a593Smuzhiyun 		update_mmu_cache(vma, vmf->address, vmf->pte);
3959*4882a593Smuzhiyun 		goto unlock_and_release;
3960*4882a593Smuzhiyun 	}
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun 	ret = check_stable_address_space(vma->vm_mm);
3963*4882a593Smuzhiyun 	if (ret)
3964*4882a593Smuzhiyun 		goto unlock_and_release;
3965*4882a593Smuzhiyun 
3966*4882a593Smuzhiyun 	/* Deliver the page fault to userland, check inside PT lock */
3967*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
3968*4882a593Smuzhiyun 				userfaultfd_missing(vma)) {
3969*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
3970*4882a593Smuzhiyun 		put_page(page);
3971*4882a593Smuzhiyun 		return handle_userfault(vmf, VM_UFFD_MISSING);
3972*4882a593Smuzhiyun 	}
3973*4882a593Smuzhiyun 
3974*4882a593Smuzhiyun 	inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3975*4882a593Smuzhiyun 	__page_add_new_anon_rmap(page, vma, vmf->address, false);
3976*4882a593Smuzhiyun 	__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
3977*4882a593Smuzhiyun setpte:
3978*4882a593Smuzhiyun 	set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3979*4882a593Smuzhiyun 
3980*4882a593Smuzhiyun 	/* No need to invalidate - it was non-present before */
3981*4882a593Smuzhiyun 	update_mmu_cache(vma, vmf->address, vmf->pte);
3982*4882a593Smuzhiyun unlock:
3983*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3984*4882a593Smuzhiyun 	return ret;
3985*4882a593Smuzhiyun unlock_and_release:
3986*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
3987*4882a593Smuzhiyun release:
3988*4882a593Smuzhiyun 	put_page(page);
3989*4882a593Smuzhiyun 	return ret;
3990*4882a593Smuzhiyun oom_free_page:
3991*4882a593Smuzhiyun 	put_page(page);
3992*4882a593Smuzhiyun oom:
3993*4882a593Smuzhiyun 	return VM_FAULT_OOM;
3994*4882a593Smuzhiyun }
3995*4882a593Smuzhiyun 
3996*4882a593Smuzhiyun /*
3997*4882a593Smuzhiyun  * The mmap_lock must have been held on entry, and may have been
3998*4882a593Smuzhiyun  * released depending on flags and vma->vm_ops->fault() return value.
3999*4882a593Smuzhiyun  * See filemap_fault() and __lock_page_retry().
4000*4882a593Smuzhiyun  */
__do_fault(struct vm_fault * vmf)4001*4882a593Smuzhiyun static vm_fault_t __do_fault(struct vm_fault *vmf)
4002*4882a593Smuzhiyun {
4003*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4004*4882a593Smuzhiyun 	vm_fault_t ret;
4005*4882a593Smuzhiyun 
4006*4882a593Smuzhiyun 	/* Do not check unstable pmd, if it's changed will retry later */
4007*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4008*4882a593Smuzhiyun 		goto skip_pmd_checks;
4009*4882a593Smuzhiyun 
4010*4882a593Smuzhiyun 	/*
4011*4882a593Smuzhiyun 	 * Preallocate pte before we take page_lock because this might lead to
4012*4882a593Smuzhiyun 	 * deadlocks for memcg reclaim which waits for pages under writeback:
4013*4882a593Smuzhiyun 	 *				lock_page(A)
4014*4882a593Smuzhiyun 	 *				SetPageWriteback(A)
4015*4882a593Smuzhiyun 	 *				unlock_page(A)
4016*4882a593Smuzhiyun 	 * lock_page(B)
4017*4882a593Smuzhiyun 	 *				lock_page(B)
4018*4882a593Smuzhiyun 	 * pte_alloc_one
4019*4882a593Smuzhiyun 	 *   shrink_page_list
4020*4882a593Smuzhiyun 	 *     wait_on_page_writeback(A)
4021*4882a593Smuzhiyun 	 *				SetPageWriteback(B)
4022*4882a593Smuzhiyun 	 *				unlock_page(B)
4023*4882a593Smuzhiyun 	 *				# flush A, B to clear the writeback
4024*4882a593Smuzhiyun 	 */
4025*4882a593Smuzhiyun 	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4026*4882a593Smuzhiyun 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4027*4882a593Smuzhiyun 		if (!vmf->prealloc_pte)
4028*4882a593Smuzhiyun 			return VM_FAULT_OOM;
4029*4882a593Smuzhiyun 		smp_wmb(); /* See comment in __pte_alloc() */
4030*4882a593Smuzhiyun 	}
4031*4882a593Smuzhiyun 
4032*4882a593Smuzhiyun skip_pmd_checks:
4033*4882a593Smuzhiyun 	ret = vma->vm_ops->fault(vmf);
4034*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
4035*4882a593Smuzhiyun 			    VM_FAULT_DONE_COW)))
4036*4882a593Smuzhiyun 		return ret;
4037*4882a593Smuzhiyun 
4038*4882a593Smuzhiyun 	if (unlikely(PageHWPoison(vmf->page))) {
4039*4882a593Smuzhiyun 		struct page *page = vmf->page;
4040*4882a593Smuzhiyun 		vm_fault_t poisonret = VM_FAULT_HWPOISON;
4041*4882a593Smuzhiyun 		if (ret & VM_FAULT_LOCKED) {
4042*4882a593Smuzhiyun 			if (page_mapped(page))
4043*4882a593Smuzhiyun 				unmap_mapping_pages(page_mapping(page),
4044*4882a593Smuzhiyun 						    page->index, 1, false);
4045*4882a593Smuzhiyun 			/* Retry if a clean page was removed from the cache. */
4046*4882a593Smuzhiyun 			if (invalidate_inode_page(page))
4047*4882a593Smuzhiyun 				poisonret = VM_FAULT_NOPAGE;
4048*4882a593Smuzhiyun 			unlock_page(page);
4049*4882a593Smuzhiyun 		}
4050*4882a593Smuzhiyun 		put_page(page);
4051*4882a593Smuzhiyun 		vmf->page = NULL;
4052*4882a593Smuzhiyun 		return poisonret;
4053*4882a593Smuzhiyun 	}
4054*4882a593Smuzhiyun 
4055*4882a593Smuzhiyun 	if (unlikely(!(ret & VM_FAULT_LOCKED)))
4056*4882a593Smuzhiyun 		lock_page(vmf->page);
4057*4882a593Smuzhiyun 	else
4058*4882a593Smuzhiyun 		VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4059*4882a593Smuzhiyun 
4060*4882a593Smuzhiyun 	return ret;
4061*4882a593Smuzhiyun }
4062*4882a593Smuzhiyun 
4063*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
deposit_prealloc_pte(struct vm_fault * vmf)4064*4882a593Smuzhiyun static void deposit_prealloc_pte(struct vm_fault *vmf)
4065*4882a593Smuzhiyun {
4066*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4067*4882a593Smuzhiyun 
4068*4882a593Smuzhiyun 	pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4069*4882a593Smuzhiyun 	/*
4070*4882a593Smuzhiyun 	 * We are going to consume the prealloc table,
4071*4882a593Smuzhiyun 	 * count that as nr_ptes.
4072*4882a593Smuzhiyun 	 */
4073*4882a593Smuzhiyun 	mm_inc_nr_ptes(vma->vm_mm);
4074*4882a593Smuzhiyun 	vmf->prealloc_pte = NULL;
4075*4882a593Smuzhiyun }
4076*4882a593Smuzhiyun 
do_set_pmd(struct vm_fault * vmf,struct page * page)4077*4882a593Smuzhiyun vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4078*4882a593Smuzhiyun {
4079*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4080*4882a593Smuzhiyun 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4081*4882a593Smuzhiyun 	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4082*4882a593Smuzhiyun 	pmd_t entry;
4083*4882a593Smuzhiyun 	int i;
4084*4882a593Smuzhiyun 	vm_fault_t ret = VM_FAULT_FALLBACK;
4085*4882a593Smuzhiyun 
4086*4882a593Smuzhiyun 	if (!transhuge_vma_suitable(vma, haddr))
4087*4882a593Smuzhiyun 		return ret;
4088*4882a593Smuzhiyun 
4089*4882a593Smuzhiyun 	page = compound_head(page);
4090*4882a593Smuzhiyun 	if (compound_order(page) != HPAGE_PMD_ORDER)
4091*4882a593Smuzhiyun 		return ret;
4092*4882a593Smuzhiyun 
4093*4882a593Smuzhiyun 	/*
4094*4882a593Smuzhiyun 	 * Archs like ppc64 need additonal space to store information
4095*4882a593Smuzhiyun 	 * related to pte entry. Use the preallocated table for that.
4096*4882a593Smuzhiyun 	 */
4097*4882a593Smuzhiyun 	if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4098*4882a593Smuzhiyun 		vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4099*4882a593Smuzhiyun 		if (!vmf->prealloc_pte)
4100*4882a593Smuzhiyun 			return VM_FAULT_OOM;
4101*4882a593Smuzhiyun 		smp_wmb(); /* See comment in __pte_alloc() */
4102*4882a593Smuzhiyun 	}
4103*4882a593Smuzhiyun 
4104*4882a593Smuzhiyun 	vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4105*4882a593Smuzhiyun 	if (unlikely(!pmd_none(*vmf->pmd)))
4106*4882a593Smuzhiyun 		goto out;
4107*4882a593Smuzhiyun 
4108*4882a593Smuzhiyun 	for (i = 0; i < HPAGE_PMD_NR; i++)
4109*4882a593Smuzhiyun 		flush_icache_page(vma, page + i);
4110*4882a593Smuzhiyun 
4111*4882a593Smuzhiyun 	entry = mk_huge_pmd(page, vmf->vma_page_prot);
4112*4882a593Smuzhiyun 	if (write)
4113*4882a593Smuzhiyun 		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4114*4882a593Smuzhiyun 
4115*4882a593Smuzhiyun 	add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4116*4882a593Smuzhiyun 	page_add_file_rmap(page, true);
4117*4882a593Smuzhiyun 	/*
4118*4882a593Smuzhiyun 	 * deposit and withdraw with pmd lock held
4119*4882a593Smuzhiyun 	 */
4120*4882a593Smuzhiyun 	if (arch_needs_pgtable_deposit())
4121*4882a593Smuzhiyun 		deposit_prealloc_pte(vmf);
4122*4882a593Smuzhiyun 
4123*4882a593Smuzhiyun 	set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4124*4882a593Smuzhiyun 
4125*4882a593Smuzhiyun 	update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4126*4882a593Smuzhiyun 
4127*4882a593Smuzhiyun 	/* fault is handled */
4128*4882a593Smuzhiyun 	ret = 0;
4129*4882a593Smuzhiyun 	count_vm_event(THP_FILE_MAPPED);
4130*4882a593Smuzhiyun out:
4131*4882a593Smuzhiyun 	spin_unlock(vmf->ptl);
4132*4882a593Smuzhiyun 	return ret;
4133*4882a593Smuzhiyun }
4134*4882a593Smuzhiyun #else
do_set_pmd(struct vm_fault * vmf,struct page * page)4135*4882a593Smuzhiyun vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4136*4882a593Smuzhiyun {
4137*4882a593Smuzhiyun 	return VM_FAULT_FALLBACK;
4138*4882a593Smuzhiyun }
4139*4882a593Smuzhiyun #endif
4140*4882a593Smuzhiyun 
do_set_pte(struct vm_fault * vmf,struct page * page,unsigned long addr)4141*4882a593Smuzhiyun void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr)
4142*4882a593Smuzhiyun {
4143*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4144*4882a593Smuzhiyun 	bool write = vmf->flags & FAULT_FLAG_WRITE;
4145*4882a593Smuzhiyun 	bool prefault = vmf->address != addr;
4146*4882a593Smuzhiyun 	pte_t entry;
4147*4882a593Smuzhiyun 
4148*4882a593Smuzhiyun 	flush_icache_page(vma, page);
4149*4882a593Smuzhiyun 	entry = mk_pte(page, vmf->vma_page_prot);
4150*4882a593Smuzhiyun 
4151*4882a593Smuzhiyun 	if (prefault && arch_wants_old_prefaulted_pte())
4152*4882a593Smuzhiyun 		entry = pte_mkold(entry);
4153*4882a593Smuzhiyun 	else
4154*4882a593Smuzhiyun 		entry = pte_sw_mkyoung(entry);
4155*4882a593Smuzhiyun 
4156*4882a593Smuzhiyun 	if (write)
4157*4882a593Smuzhiyun 		entry = maybe_mkwrite(pte_mkdirty(entry), vmf->vma_flags);
4158*4882a593Smuzhiyun 	/* copy-on-write page */
4159*4882a593Smuzhiyun 	if (write && !(vmf->vma_flags & VM_SHARED)) {
4160*4882a593Smuzhiyun 		inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
4161*4882a593Smuzhiyun 		__page_add_new_anon_rmap(page, vma, addr, false);
4162*4882a593Smuzhiyun 		__lru_cache_add_inactive_or_unevictable(page, vmf->vma_flags);
4163*4882a593Smuzhiyun 	} else {
4164*4882a593Smuzhiyun 		inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
4165*4882a593Smuzhiyun 		page_add_file_rmap(page, false);
4166*4882a593Smuzhiyun 	}
4167*4882a593Smuzhiyun 	set_pte_at(vma->vm_mm, addr, vmf->pte, entry);
4168*4882a593Smuzhiyun }
4169*4882a593Smuzhiyun 
4170*4882a593Smuzhiyun /**
4171*4882a593Smuzhiyun  * finish_fault - finish page fault once we have prepared the page to fault
4172*4882a593Smuzhiyun  *
4173*4882a593Smuzhiyun  * @vmf: structure describing the fault
4174*4882a593Smuzhiyun  *
4175*4882a593Smuzhiyun  * This function handles all that is needed to finish a page fault once the
4176*4882a593Smuzhiyun  * page to fault in is prepared. It handles locking of PTEs, inserts PTE for
4177*4882a593Smuzhiyun  * given page, adds reverse page mapping, handles memcg charges and LRU
4178*4882a593Smuzhiyun  * addition.
4179*4882a593Smuzhiyun  *
4180*4882a593Smuzhiyun  * The function expects the page to be locked and on success it consumes a
4181*4882a593Smuzhiyun  * reference of a page being mapped (for the PTE which maps it).
4182*4882a593Smuzhiyun  *
4183*4882a593Smuzhiyun  * Return: %0 on success, %VM_FAULT_ code in case of error.
4184*4882a593Smuzhiyun  */
finish_fault(struct vm_fault * vmf)4185*4882a593Smuzhiyun vm_fault_t finish_fault(struct vm_fault *vmf)
4186*4882a593Smuzhiyun {
4187*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4188*4882a593Smuzhiyun 	struct page *page;
4189*4882a593Smuzhiyun 	vm_fault_t ret;
4190*4882a593Smuzhiyun 
4191*4882a593Smuzhiyun 	/* Did we COW the page? */
4192*4882a593Smuzhiyun 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
4193*4882a593Smuzhiyun 	    !(vmf->vma_flags & VM_SHARED))
4194*4882a593Smuzhiyun 		page = vmf->cow_page;
4195*4882a593Smuzhiyun 	else
4196*4882a593Smuzhiyun 		page = vmf->page;
4197*4882a593Smuzhiyun 
4198*4882a593Smuzhiyun 	/*
4199*4882a593Smuzhiyun 	 * check even for read faults because we might have lost our CoWed
4200*4882a593Smuzhiyun 	 * page
4201*4882a593Smuzhiyun 	 */
4202*4882a593Smuzhiyun 	if (!(vma->vm_flags & VM_SHARED)) {
4203*4882a593Smuzhiyun 		ret = check_stable_address_space(vma->vm_mm);
4204*4882a593Smuzhiyun 		if (ret)
4205*4882a593Smuzhiyun 			return ret;
4206*4882a593Smuzhiyun 	}
4207*4882a593Smuzhiyun 
4208*4882a593Smuzhiyun 	/* Do not check unstable pmd, if it's changed will retry later */
4209*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4210*4882a593Smuzhiyun 		goto skip_pmd_checks;
4211*4882a593Smuzhiyun 
4212*4882a593Smuzhiyun 	if (pmd_none(*vmf->pmd)) {
4213*4882a593Smuzhiyun 		if (PageTransCompound(page)) {
4214*4882a593Smuzhiyun 			ret = do_set_pmd(vmf, page);
4215*4882a593Smuzhiyun 			if (ret != VM_FAULT_FALLBACK)
4216*4882a593Smuzhiyun 				return ret;
4217*4882a593Smuzhiyun 		}
4218*4882a593Smuzhiyun 
4219*4882a593Smuzhiyun 		if (vmf->prealloc_pte) {
4220*4882a593Smuzhiyun 			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4221*4882a593Smuzhiyun 			if (likely(pmd_none(*vmf->pmd))) {
4222*4882a593Smuzhiyun 				mm_inc_nr_ptes(vma->vm_mm);
4223*4882a593Smuzhiyun 				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4224*4882a593Smuzhiyun 				vmf->prealloc_pte = NULL;
4225*4882a593Smuzhiyun 			}
4226*4882a593Smuzhiyun 			spin_unlock(vmf->ptl);
4227*4882a593Smuzhiyun 		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
4228*4882a593Smuzhiyun 			return VM_FAULT_OOM;
4229*4882a593Smuzhiyun 		}
4230*4882a593Smuzhiyun 	}
4231*4882a593Smuzhiyun 
4232*4882a593Smuzhiyun 	/*
4233*4882a593Smuzhiyun 	 * See comment in handle_pte_fault() for how this scenario happens, we
4234*4882a593Smuzhiyun 	 * need to return NOPAGE so that we drop this page.
4235*4882a593Smuzhiyun 	 */
4236*4882a593Smuzhiyun 	if (pmd_devmap_trans_unstable(vmf->pmd))
4237*4882a593Smuzhiyun 		return VM_FAULT_NOPAGE;
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun skip_pmd_checks:
4240*4882a593Smuzhiyun 	if (!pte_map_lock(vmf))
4241*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun 	ret = 0;
4244*4882a593Smuzhiyun 	/* Re-check under ptl */
4245*4882a593Smuzhiyun 	if (likely(pte_none(*vmf->pte)))
4246*4882a593Smuzhiyun 		do_set_pte(vmf, page, vmf->address);
4247*4882a593Smuzhiyun 	else
4248*4882a593Smuzhiyun 		ret = VM_FAULT_NOPAGE;
4249*4882a593Smuzhiyun 
4250*4882a593Smuzhiyun 	update_mmu_tlb(vma, vmf->address, vmf->pte);
4251*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4252*4882a593Smuzhiyun 	return ret;
4253*4882a593Smuzhiyun }
4254*4882a593Smuzhiyun 
4255*4882a593Smuzhiyun static unsigned long fault_around_bytes __read_mostly =
4256*4882a593Smuzhiyun 	rounddown_pow_of_two(65536);
4257*4882a593Smuzhiyun 
4258*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_FS
fault_around_bytes_get(void * data,u64 * val)4259*4882a593Smuzhiyun static int fault_around_bytes_get(void *data, u64 *val)
4260*4882a593Smuzhiyun {
4261*4882a593Smuzhiyun 	*val = fault_around_bytes;
4262*4882a593Smuzhiyun 	return 0;
4263*4882a593Smuzhiyun }
4264*4882a593Smuzhiyun 
4265*4882a593Smuzhiyun /*
4266*4882a593Smuzhiyun  * fault_around_bytes must be rounded down to the nearest page order as it's
4267*4882a593Smuzhiyun  * what do_fault_around() expects to see.
4268*4882a593Smuzhiyun  */
fault_around_bytes_set(void * data,u64 val)4269*4882a593Smuzhiyun static int fault_around_bytes_set(void *data, u64 val)
4270*4882a593Smuzhiyun {
4271*4882a593Smuzhiyun 	if (val / PAGE_SIZE > PTRS_PER_PTE)
4272*4882a593Smuzhiyun 		return -EINVAL;
4273*4882a593Smuzhiyun 	if (val > PAGE_SIZE)
4274*4882a593Smuzhiyun 		fault_around_bytes = rounddown_pow_of_two(val);
4275*4882a593Smuzhiyun 	else
4276*4882a593Smuzhiyun 		fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
4277*4882a593Smuzhiyun 	return 0;
4278*4882a593Smuzhiyun }
4279*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(fault_around_bytes_fops,
4280*4882a593Smuzhiyun 		fault_around_bytes_get, fault_around_bytes_set, "%llu\n");
4281*4882a593Smuzhiyun 
fault_around_debugfs(void)4282*4882a593Smuzhiyun static int __init fault_around_debugfs(void)
4283*4882a593Smuzhiyun {
4284*4882a593Smuzhiyun 	debugfs_create_file_unsafe("fault_around_bytes", 0644, NULL, NULL,
4285*4882a593Smuzhiyun 				   &fault_around_bytes_fops);
4286*4882a593Smuzhiyun 	return 0;
4287*4882a593Smuzhiyun }
4288*4882a593Smuzhiyun late_initcall(fault_around_debugfs);
4289*4882a593Smuzhiyun #endif
4290*4882a593Smuzhiyun 
4291*4882a593Smuzhiyun /*
4292*4882a593Smuzhiyun  * do_fault_around() tries to map few pages around the fault address. The hope
4293*4882a593Smuzhiyun  * is that the pages will be needed soon and this will lower the number of
4294*4882a593Smuzhiyun  * faults to handle.
4295*4882a593Smuzhiyun  *
4296*4882a593Smuzhiyun  * It uses vm_ops->map_pages() to map the pages, which skips the page if it's
4297*4882a593Smuzhiyun  * not ready to be mapped: not up-to-date, locked, etc.
4298*4882a593Smuzhiyun  *
4299*4882a593Smuzhiyun  * This function is called with the page table lock taken. In the split ptlock
4300*4882a593Smuzhiyun  * case the page table lock only protects only those entries which belong to
4301*4882a593Smuzhiyun  * the page table corresponding to the fault address.
4302*4882a593Smuzhiyun  *
4303*4882a593Smuzhiyun  * This function doesn't cross the VMA boundaries, in order to call map_pages()
4304*4882a593Smuzhiyun  * only once.
4305*4882a593Smuzhiyun  *
4306*4882a593Smuzhiyun  * fault_around_bytes defines how many bytes we'll try to map.
4307*4882a593Smuzhiyun  * do_fault_around() expects it to be set to a power of two less than or equal
4308*4882a593Smuzhiyun  * to PTRS_PER_PTE.
4309*4882a593Smuzhiyun  *
4310*4882a593Smuzhiyun  * The virtual address of the area that we map is naturally aligned to
4311*4882a593Smuzhiyun  * fault_around_bytes rounded down to the machine page size
4312*4882a593Smuzhiyun  * (and therefore to page order).  This way it's easier to guarantee
4313*4882a593Smuzhiyun  * that we don't cross page table boundaries.
4314*4882a593Smuzhiyun  */
do_fault_around(struct vm_fault * vmf)4315*4882a593Smuzhiyun static vm_fault_t do_fault_around(struct vm_fault *vmf)
4316*4882a593Smuzhiyun {
4317*4882a593Smuzhiyun 	unsigned long address = vmf->address, nr_pages, mask;
4318*4882a593Smuzhiyun 	pgoff_t start_pgoff = vmf->pgoff;
4319*4882a593Smuzhiyun 	pgoff_t end_pgoff;
4320*4882a593Smuzhiyun 	int off;
4321*4882a593Smuzhiyun 
4322*4882a593Smuzhiyun 	nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT;
4323*4882a593Smuzhiyun 	mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
4324*4882a593Smuzhiyun 
4325*4882a593Smuzhiyun 	address = max(address & mask, vmf->vma->vm_start);
4326*4882a593Smuzhiyun 	off = ((vmf->address - address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4327*4882a593Smuzhiyun 	start_pgoff -= off;
4328*4882a593Smuzhiyun 
4329*4882a593Smuzhiyun 	/*
4330*4882a593Smuzhiyun 	 *  end_pgoff is either the end of the page table, the end of
4331*4882a593Smuzhiyun 	 *  the vma or nr_pages from start_pgoff, depending what is nearest.
4332*4882a593Smuzhiyun 	 */
4333*4882a593Smuzhiyun 	end_pgoff = start_pgoff -
4334*4882a593Smuzhiyun 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4335*4882a593Smuzhiyun 		PTRS_PER_PTE - 1;
4336*4882a593Smuzhiyun 	end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4337*4882a593Smuzhiyun 			start_pgoff + nr_pages - 1);
4338*4882a593Smuzhiyun 
4339*4882a593Smuzhiyun 	if (!(vmf->flags & FAULT_FLAG_SPECULATIVE) &&
4340*4882a593Smuzhiyun 	    pmd_none(*vmf->pmd)) {
4341*4882a593Smuzhiyun 		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4342*4882a593Smuzhiyun 		if (!vmf->prealloc_pte)
4343*4882a593Smuzhiyun 			return VM_FAULT_OOM;
4344*4882a593Smuzhiyun 		smp_wmb(); /* See comment in __pte_alloc() */
4345*4882a593Smuzhiyun 	}
4346*4882a593Smuzhiyun 
4347*4882a593Smuzhiyun 	return vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4348*4882a593Smuzhiyun }
4349*4882a593Smuzhiyun 
do_read_fault(struct vm_fault * vmf)4350*4882a593Smuzhiyun static vm_fault_t do_read_fault(struct vm_fault *vmf)
4351*4882a593Smuzhiyun {
4352*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4353*4882a593Smuzhiyun 	vm_fault_t ret = 0;
4354*4882a593Smuzhiyun 
4355*4882a593Smuzhiyun 	/*
4356*4882a593Smuzhiyun 	 * Let's call ->map_pages() first and use ->fault() as fallback
4357*4882a593Smuzhiyun 	 * if page by the offset is not ready to be mapped (cold cache or
4358*4882a593Smuzhiyun 	 * something).
4359*4882a593Smuzhiyun 	 */
4360*4882a593Smuzhiyun 	if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
4361*4882a593Smuzhiyun 		if (likely(!userfaultfd_minor(vmf->vma))) {
4362*4882a593Smuzhiyun 			ret = do_fault_around(vmf);
4363*4882a593Smuzhiyun 			if (ret)
4364*4882a593Smuzhiyun 				return ret;
4365*4882a593Smuzhiyun 		}
4366*4882a593Smuzhiyun 	}
4367*4882a593Smuzhiyun 
4368*4882a593Smuzhiyun 	ret = __do_fault(vmf);
4369*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4370*4882a593Smuzhiyun 		return ret;
4371*4882a593Smuzhiyun 
4372*4882a593Smuzhiyun 	ret |= finish_fault(vmf);
4373*4882a593Smuzhiyun 	unlock_page(vmf->page);
4374*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4375*4882a593Smuzhiyun 		put_page(vmf->page);
4376*4882a593Smuzhiyun 	return ret;
4377*4882a593Smuzhiyun }
4378*4882a593Smuzhiyun 
do_cow_fault(struct vm_fault * vmf)4379*4882a593Smuzhiyun static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4380*4882a593Smuzhiyun {
4381*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4382*4882a593Smuzhiyun 	vm_fault_t ret;
4383*4882a593Smuzhiyun 
4384*4882a593Smuzhiyun 	if (unlikely(anon_vma_prepare(vma)))
4385*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4386*4882a593Smuzhiyun 
4387*4882a593Smuzhiyun 	vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4388*4882a593Smuzhiyun 	if (!vmf->cow_page)
4389*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4390*4882a593Smuzhiyun 
4391*4882a593Smuzhiyun 	if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4392*4882a593Smuzhiyun 		put_page(vmf->cow_page);
4393*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4394*4882a593Smuzhiyun 	}
4395*4882a593Smuzhiyun 	cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4396*4882a593Smuzhiyun 
4397*4882a593Smuzhiyun 	ret = __do_fault(vmf);
4398*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4399*4882a593Smuzhiyun 		goto uncharge_out;
4400*4882a593Smuzhiyun 	if (ret & VM_FAULT_DONE_COW)
4401*4882a593Smuzhiyun 		return ret;
4402*4882a593Smuzhiyun 
4403*4882a593Smuzhiyun 	copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4404*4882a593Smuzhiyun 	__SetPageUptodate(vmf->cow_page);
4405*4882a593Smuzhiyun 
4406*4882a593Smuzhiyun 	ret |= finish_fault(vmf);
4407*4882a593Smuzhiyun 	unlock_page(vmf->page);
4408*4882a593Smuzhiyun 	put_page(vmf->page);
4409*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4410*4882a593Smuzhiyun 		goto uncharge_out;
4411*4882a593Smuzhiyun 	return ret;
4412*4882a593Smuzhiyun uncharge_out:
4413*4882a593Smuzhiyun 	put_page(vmf->cow_page);
4414*4882a593Smuzhiyun 	return ret;
4415*4882a593Smuzhiyun }
4416*4882a593Smuzhiyun 
do_shared_fault(struct vm_fault * vmf)4417*4882a593Smuzhiyun static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4418*4882a593Smuzhiyun {
4419*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4420*4882a593Smuzhiyun 	vm_fault_t ret, tmp;
4421*4882a593Smuzhiyun 
4422*4882a593Smuzhiyun 	ret = __do_fault(vmf);
4423*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
4424*4882a593Smuzhiyun 		return ret;
4425*4882a593Smuzhiyun 
4426*4882a593Smuzhiyun 	/*
4427*4882a593Smuzhiyun 	 * Check if the backing address space wants to know that the page is
4428*4882a593Smuzhiyun 	 * about to become writable
4429*4882a593Smuzhiyun 	 */
4430*4882a593Smuzhiyun 	if (vma->vm_ops->page_mkwrite) {
4431*4882a593Smuzhiyun 		unlock_page(vmf->page);
4432*4882a593Smuzhiyun 		tmp = do_page_mkwrite(vmf);
4433*4882a593Smuzhiyun 		if (unlikely(!tmp ||
4434*4882a593Smuzhiyun 				(tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) {
4435*4882a593Smuzhiyun 			put_page(vmf->page);
4436*4882a593Smuzhiyun 			return tmp;
4437*4882a593Smuzhiyun 		}
4438*4882a593Smuzhiyun 	}
4439*4882a593Smuzhiyun 
4440*4882a593Smuzhiyun 	ret |= finish_fault(vmf);
4441*4882a593Smuzhiyun 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
4442*4882a593Smuzhiyun 					VM_FAULT_RETRY))) {
4443*4882a593Smuzhiyun 		unlock_page(vmf->page);
4444*4882a593Smuzhiyun 		put_page(vmf->page);
4445*4882a593Smuzhiyun 		return ret;
4446*4882a593Smuzhiyun 	}
4447*4882a593Smuzhiyun 
4448*4882a593Smuzhiyun 	ret |= fault_dirty_shared_page(vmf);
4449*4882a593Smuzhiyun 	return ret;
4450*4882a593Smuzhiyun }
4451*4882a593Smuzhiyun 
4452*4882a593Smuzhiyun /*
4453*4882a593Smuzhiyun  * We enter with non-exclusive mmap_lock (to exclude vma changes,
4454*4882a593Smuzhiyun  * but allow concurrent faults).
4455*4882a593Smuzhiyun  * The mmap_lock may have been released depending on flags and our
4456*4882a593Smuzhiyun  * return value.  See filemap_fault() and __lock_page_or_retry().
4457*4882a593Smuzhiyun  * If mmap_lock is released, vma may become invalid (for example
4458*4882a593Smuzhiyun  * by other thread calling munmap()).
4459*4882a593Smuzhiyun  */
do_fault(struct vm_fault * vmf)4460*4882a593Smuzhiyun static vm_fault_t do_fault(struct vm_fault *vmf)
4461*4882a593Smuzhiyun {
4462*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4463*4882a593Smuzhiyun 	struct mm_struct *vm_mm = vma->vm_mm;
4464*4882a593Smuzhiyun 	vm_fault_t ret;
4465*4882a593Smuzhiyun 
4466*4882a593Smuzhiyun 	/*
4467*4882a593Smuzhiyun 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
4468*4882a593Smuzhiyun 	 */
4469*4882a593Smuzhiyun 	if (!vma->vm_ops->fault) {
4470*4882a593Smuzhiyun 		/*
4471*4882a593Smuzhiyun 		 * If we find a migration pmd entry or a none pmd entry, which
4472*4882a593Smuzhiyun 		 * should never happen, return SIGBUS
4473*4882a593Smuzhiyun 		 */
4474*4882a593Smuzhiyun 		if (unlikely(!pmd_present(*vmf->pmd)))
4475*4882a593Smuzhiyun 			ret = VM_FAULT_SIGBUS;
4476*4882a593Smuzhiyun 		else {
4477*4882a593Smuzhiyun 			vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4478*4882a593Smuzhiyun 						       vmf->pmd,
4479*4882a593Smuzhiyun 						       vmf->address,
4480*4882a593Smuzhiyun 						       &vmf->ptl);
4481*4882a593Smuzhiyun 			/*
4482*4882a593Smuzhiyun 			 * Make sure this is not a temporary clearing of pte
4483*4882a593Smuzhiyun 			 * by holding ptl and checking again. A R/M/W update
4484*4882a593Smuzhiyun 			 * of pte involves: take ptl, clearing the pte so that
4485*4882a593Smuzhiyun 			 * we don't have concurrent modification by hardware
4486*4882a593Smuzhiyun 			 * followed by an update.
4487*4882a593Smuzhiyun 			 */
4488*4882a593Smuzhiyun 			if (unlikely(pte_none(*vmf->pte)))
4489*4882a593Smuzhiyun 				ret = VM_FAULT_SIGBUS;
4490*4882a593Smuzhiyun 			else
4491*4882a593Smuzhiyun 				ret = VM_FAULT_NOPAGE;
4492*4882a593Smuzhiyun 
4493*4882a593Smuzhiyun 			pte_unmap_unlock(vmf->pte, vmf->ptl);
4494*4882a593Smuzhiyun 		}
4495*4882a593Smuzhiyun 	} else if (!(vmf->flags & FAULT_FLAG_WRITE))
4496*4882a593Smuzhiyun 		ret = do_read_fault(vmf);
4497*4882a593Smuzhiyun 	else if (!(vmf->vma_flags & VM_SHARED))
4498*4882a593Smuzhiyun 		ret = do_cow_fault(vmf);
4499*4882a593Smuzhiyun 	else
4500*4882a593Smuzhiyun 		ret = do_shared_fault(vmf);
4501*4882a593Smuzhiyun 
4502*4882a593Smuzhiyun 	/* preallocated pagetable is unused: free it */
4503*4882a593Smuzhiyun 	if (vmf->prealloc_pte) {
4504*4882a593Smuzhiyun 		pte_free(vm_mm, vmf->prealloc_pte);
4505*4882a593Smuzhiyun 		vmf->prealloc_pte = NULL;
4506*4882a593Smuzhiyun 	}
4507*4882a593Smuzhiyun 	return ret;
4508*4882a593Smuzhiyun }
4509*4882a593Smuzhiyun 
numa_migrate_prep(struct page * page,struct vm_area_struct * vma,unsigned long addr,int page_nid,int * flags)4510*4882a593Smuzhiyun static int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4511*4882a593Smuzhiyun 				unsigned long addr, int page_nid,
4512*4882a593Smuzhiyun 				int *flags)
4513*4882a593Smuzhiyun {
4514*4882a593Smuzhiyun 	get_page(page);
4515*4882a593Smuzhiyun 
4516*4882a593Smuzhiyun 	count_vm_numa_event(NUMA_HINT_FAULTS);
4517*4882a593Smuzhiyun 	if (page_nid == numa_node_id()) {
4518*4882a593Smuzhiyun 		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
4519*4882a593Smuzhiyun 		*flags |= TNF_FAULT_LOCAL;
4520*4882a593Smuzhiyun 	}
4521*4882a593Smuzhiyun 
4522*4882a593Smuzhiyun 	return mpol_misplaced(page, vma, addr);
4523*4882a593Smuzhiyun }
4524*4882a593Smuzhiyun 
do_numa_page(struct vm_fault * vmf)4525*4882a593Smuzhiyun static vm_fault_t do_numa_page(struct vm_fault *vmf)
4526*4882a593Smuzhiyun {
4527*4882a593Smuzhiyun 	struct vm_area_struct *vma = vmf->vma;
4528*4882a593Smuzhiyun 	struct page *page = NULL;
4529*4882a593Smuzhiyun 	int page_nid = NUMA_NO_NODE;
4530*4882a593Smuzhiyun 	int last_cpupid;
4531*4882a593Smuzhiyun 	int target_nid;
4532*4882a593Smuzhiyun 	bool migrated = false;
4533*4882a593Smuzhiyun 	pte_t pte, old_pte;
4534*4882a593Smuzhiyun 	bool was_writable = pte_savedwrite(vmf->orig_pte);
4535*4882a593Smuzhiyun 	int flags = 0;
4536*4882a593Smuzhiyun 
4537*4882a593Smuzhiyun 	/*
4538*4882a593Smuzhiyun 	 * The "pte" at this point cannot be used safely without
4539*4882a593Smuzhiyun 	 * validation through pte_unmap_same(). It's of NUMA type but
4540*4882a593Smuzhiyun 	 * the pfn may be screwed if the read is non atomic.
4541*4882a593Smuzhiyun 	 */
4542*4882a593Smuzhiyun 	if (!pte_spinlock(vmf))
4543*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
4544*4882a593Smuzhiyun 	if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4545*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4546*4882a593Smuzhiyun 		goto out;
4547*4882a593Smuzhiyun 	}
4548*4882a593Smuzhiyun 
4549*4882a593Smuzhiyun 	/*
4550*4882a593Smuzhiyun 	 * Make it present again, Depending on how arch implementes non
4551*4882a593Smuzhiyun 	 * accessible ptes, some can allow access by kernel mode.
4552*4882a593Smuzhiyun 	 */
4553*4882a593Smuzhiyun 	old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4554*4882a593Smuzhiyun 	pte = pte_modify(old_pte, vmf->vma_page_prot);
4555*4882a593Smuzhiyun 	pte = pte_mkyoung(pte);
4556*4882a593Smuzhiyun 	if (was_writable)
4557*4882a593Smuzhiyun 		pte = pte_mkwrite(pte);
4558*4882a593Smuzhiyun 	ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4559*4882a593Smuzhiyun 	update_mmu_cache(vma, vmf->address, vmf->pte);
4560*4882a593Smuzhiyun 
4561*4882a593Smuzhiyun 	page = _vm_normal_page(vma, vmf->address, pte, vmf->vma_flags);
4562*4882a593Smuzhiyun 	if (!page) {
4563*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4564*4882a593Smuzhiyun 		return 0;
4565*4882a593Smuzhiyun 	}
4566*4882a593Smuzhiyun 
4567*4882a593Smuzhiyun 	/* TODO: handle PTE-mapped THP */
4568*4882a593Smuzhiyun 	if (PageCompound(page)) {
4569*4882a593Smuzhiyun 		pte_unmap_unlock(vmf->pte, vmf->ptl);
4570*4882a593Smuzhiyun 		return 0;
4571*4882a593Smuzhiyun 	}
4572*4882a593Smuzhiyun 
4573*4882a593Smuzhiyun 	/*
4574*4882a593Smuzhiyun 	 * Avoid grouping on RO pages in general. RO pages shouldn't hurt as
4575*4882a593Smuzhiyun 	 * much anyway since they can be in shared cache state. This misses
4576*4882a593Smuzhiyun 	 * the case where a mapping is writable but the process never writes
4577*4882a593Smuzhiyun 	 * to it but pte_write gets cleared during protection updates and
4578*4882a593Smuzhiyun 	 * pte_dirty has unpredictable behaviour between PTE scan updates,
4579*4882a593Smuzhiyun 	 * background writeback, dirty balancing and application behaviour.
4580*4882a593Smuzhiyun 	 */
4581*4882a593Smuzhiyun 	if (!pte_write(pte))
4582*4882a593Smuzhiyun 		flags |= TNF_NO_GROUP;
4583*4882a593Smuzhiyun 
4584*4882a593Smuzhiyun 	/*
4585*4882a593Smuzhiyun 	 * Flag if the page is shared between multiple address spaces. This
4586*4882a593Smuzhiyun 	 * is later used when determining whether to group tasks together
4587*4882a593Smuzhiyun 	 */
4588*4882a593Smuzhiyun 	if (page_mapcount(page) > 1 && (vmf->vma_flags & VM_SHARED))
4589*4882a593Smuzhiyun 		flags |= TNF_SHARED;
4590*4882a593Smuzhiyun 
4591*4882a593Smuzhiyun 	last_cpupid = page_cpupid_last(page);
4592*4882a593Smuzhiyun 	page_nid = page_to_nid(page);
4593*4882a593Smuzhiyun 	target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4594*4882a593Smuzhiyun 			&flags);
4595*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4596*4882a593Smuzhiyun 	if (target_nid == NUMA_NO_NODE) {
4597*4882a593Smuzhiyun 		put_page(page);
4598*4882a593Smuzhiyun 		goto out;
4599*4882a593Smuzhiyun 	}
4600*4882a593Smuzhiyun 
4601*4882a593Smuzhiyun 	/* Migrate to the requested node */
4602*4882a593Smuzhiyun 	migrated = migrate_misplaced_page(page, vmf, target_nid);
4603*4882a593Smuzhiyun 	if (migrated) {
4604*4882a593Smuzhiyun 		page_nid = target_nid;
4605*4882a593Smuzhiyun 		flags |= TNF_MIGRATED;
4606*4882a593Smuzhiyun 	} else
4607*4882a593Smuzhiyun 		flags |= TNF_MIGRATE_FAIL;
4608*4882a593Smuzhiyun 
4609*4882a593Smuzhiyun out:
4610*4882a593Smuzhiyun 	if (page_nid != NUMA_NO_NODE)
4611*4882a593Smuzhiyun 		task_numa_fault(last_cpupid, page_nid, 1, flags);
4612*4882a593Smuzhiyun 	return 0;
4613*4882a593Smuzhiyun }
4614*4882a593Smuzhiyun 
create_huge_pmd(struct vm_fault * vmf)4615*4882a593Smuzhiyun static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4616*4882a593Smuzhiyun {
4617*4882a593Smuzhiyun 	if (vma_is_anonymous(vmf->vma))
4618*4882a593Smuzhiyun 		return do_huge_pmd_anonymous_page(vmf);
4619*4882a593Smuzhiyun 	if (vmf->vma->vm_ops->huge_fault)
4620*4882a593Smuzhiyun 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4621*4882a593Smuzhiyun 	return VM_FAULT_FALLBACK;
4622*4882a593Smuzhiyun }
4623*4882a593Smuzhiyun 
4624*4882a593Smuzhiyun /* `inline' is required to avoid gcc 4.1.2 build error */
wp_huge_pmd(struct vm_fault * vmf,pmd_t orig_pmd)4625*4882a593Smuzhiyun static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
4626*4882a593Smuzhiyun {
4627*4882a593Smuzhiyun 	if (vma_is_anonymous(vmf->vma)) {
4628*4882a593Smuzhiyun 		if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
4629*4882a593Smuzhiyun 			return handle_userfault(vmf, VM_UFFD_WP);
4630*4882a593Smuzhiyun 		return do_huge_pmd_wp_page(vmf, orig_pmd);
4631*4882a593Smuzhiyun 	}
4632*4882a593Smuzhiyun 	if (vmf->vma->vm_ops->huge_fault) {
4633*4882a593Smuzhiyun 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4634*4882a593Smuzhiyun 
4635*4882a593Smuzhiyun 		if (!(ret & VM_FAULT_FALLBACK))
4636*4882a593Smuzhiyun 			return ret;
4637*4882a593Smuzhiyun 	}
4638*4882a593Smuzhiyun 
4639*4882a593Smuzhiyun 	/* COW or write-notify handled on pte level: split pmd. */
4640*4882a593Smuzhiyun 	__split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4641*4882a593Smuzhiyun 
4642*4882a593Smuzhiyun 	return VM_FAULT_FALLBACK;
4643*4882a593Smuzhiyun }
4644*4882a593Smuzhiyun 
create_huge_pud(struct vm_fault * vmf)4645*4882a593Smuzhiyun static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4646*4882a593Smuzhiyun {
4647*4882a593Smuzhiyun #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4648*4882a593Smuzhiyun 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4649*4882a593Smuzhiyun 	/* No support for anonymous transparent PUD pages yet */
4650*4882a593Smuzhiyun 	if (vma_is_anonymous(vmf->vma))
4651*4882a593Smuzhiyun 		return VM_FAULT_FALLBACK;
4652*4882a593Smuzhiyun 	if (vmf->vma->vm_ops->huge_fault)
4653*4882a593Smuzhiyun 		return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4654*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
4655*4882a593Smuzhiyun 	return VM_FAULT_FALLBACK;
4656*4882a593Smuzhiyun }
4657*4882a593Smuzhiyun 
wp_huge_pud(struct vm_fault * vmf,pud_t orig_pud)4658*4882a593Smuzhiyun static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4659*4882a593Smuzhiyun {
4660*4882a593Smuzhiyun #if defined(CONFIG_TRANSPARENT_HUGEPAGE) &&			\
4661*4882a593Smuzhiyun 	defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
4662*4882a593Smuzhiyun 	/* No support for anonymous transparent PUD pages yet */
4663*4882a593Smuzhiyun 	if (vma_is_anonymous(vmf->vma))
4664*4882a593Smuzhiyun 		goto split;
4665*4882a593Smuzhiyun 	if (vmf->vma->vm_ops->huge_fault) {
4666*4882a593Smuzhiyun 		vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4667*4882a593Smuzhiyun 
4668*4882a593Smuzhiyun 		if (!(ret & VM_FAULT_FALLBACK))
4669*4882a593Smuzhiyun 			return ret;
4670*4882a593Smuzhiyun 	}
4671*4882a593Smuzhiyun split:
4672*4882a593Smuzhiyun 	/* COW or write-notify not handled on PUD level: split pud.*/
4673*4882a593Smuzhiyun 	__split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4674*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
4675*4882a593Smuzhiyun 	return VM_FAULT_FALLBACK;
4676*4882a593Smuzhiyun }
4677*4882a593Smuzhiyun 
4678*4882a593Smuzhiyun /*
4679*4882a593Smuzhiyun  * These routines also need to handle stuff like marking pages dirty
4680*4882a593Smuzhiyun  * and/or accessed for architectures that don't do it in hardware (most
4681*4882a593Smuzhiyun  * RISC architectures).  The early dirtying is also good on the i386.
4682*4882a593Smuzhiyun  *
4683*4882a593Smuzhiyun  * There is also a hook called "update_mmu_cache()" that architectures
4684*4882a593Smuzhiyun  * with external mmu caches can use to update those (ie the Sparc or
4685*4882a593Smuzhiyun  * PowerPC hashed page tables that act as extended TLBs).
4686*4882a593Smuzhiyun  *
4687*4882a593Smuzhiyun  * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
4688*4882a593Smuzhiyun  * concurrent faults).
4689*4882a593Smuzhiyun  *
4690*4882a593Smuzhiyun  * The mmap_lock may have been released depending on flags and our return value.
4691*4882a593Smuzhiyun  * See filemap_fault() and __lock_page_or_retry().
4692*4882a593Smuzhiyun  */
handle_pte_fault(struct vm_fault * vmf)4693*4882a593Smuzhiyun static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4694*4882a593Smuzhiyun {
4695*4882a593Smuzhiyun 	pte_t entry;
4696*4882a593Smuzhiyun 	vm_fault_t ret = 0;
4697*4882a593Smuzhiyun 
4698*4882a593Smuzhiyun 	/* Do not check unstable pmd, if it's changed will retry later */
4699*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4700*4882a593Smuzhiyun 		goto skip_pmd_checks;
4701*4882a593Smuzhiyun 
4702*4882a593Smuzhiyun 	if (unlikely(pmd_none(*vmf->pmd))) {
4703*4882a593Smuzhiyun 		/*
4704*4882a593Smuzhiyun 		 * Leave __pte_alloc() until later: because vm_ops->fault may
4705*4882a593Smuzhiyun 		 * want to allocate huge page, and if we expose page table
4706*4882a593Smuzhiyun 		 * for an instant, it will be difficult to retract from
4707*4882a593Smuzhiyun 		 * concurrent faults and from rmap lookups.
4708*4882a593Smuzhiyun 		 */
4709*4882a593Smuzhiyun 		vmf->pte = NULL;
4710*4882a593Smuzhiyun 	} else {
4711*4882a593Smuzhiyun 		/*
4712*4882a593Smuzhiyun 		 * If a huge pmd materialized under us just retry later.  Use
4713*4882a593Smuzhiyun 		 * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead
4714*4882a593Smuzhiyun 		 * of pmd_trans_huge() to ensure the pmd didn't become
4715*4882a593Smuzhiyun 		 * pmd_trans_huge under us and then back to pmd_none, as a
4716*4882a593Smuzhiyun 		 * result of MADV_DONTNEED running immediately after a huge pmd
4717*4882a593Smuzhiyun 		 * fault in a different thread of this mm, in turn leading to a
4718*4882a593Smuzhiyun 		 * misleading pmd_trans_huge() retval. All we have to ensure is
4719*4882a593Smuzhiyun 		 * that it is a regular pmd that we can walk with
4720*4882a593Smuzhiyun 		 * pte_offset_map() and we can do that through an atomic read
4721*4882a593Smuzhiyun 		 * in C, which is what pmd_trans_unstable() provides.
4722*4882a593Smuzhiyun 		 */
4723*4882a593Smuzhiyun 		if (pmd_devmap_trans_unstable(vmf->pmd))
4724*4882a593Smuzhiyun 			return 0;
4725*4882a593Smuzhiyun 		/*
4726*4882a593Smuzhiyun 		 * A regular pmd is established and it can't morph into a huge
4727*4882a593Smuzhiyun 		 * pmd from under us anymore at this point because we hold the
4728*4882a593Smuzhiyun 		 * mmap_lock read mode and khugepaged takes it in write mode.
4729*4882a593Smuzhiyun 		 * So now it's safe to run pte_offset_map().
4730*4882a593Smuzhiyun 		 * This is not applicable to the speculative page fault handler
4731*4882a593Smuzhiyun 		 * but in that case, the pte is fetched earlier in
4732*4882a593Smuzhiyun 		 * handle_speculative_fault().
4733*4882a593Smuzhiyun 		 */
4734*4882a593Smuzhiyun 		vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4735*4882a593Smuzhiyun 		vmf->orig_pte = *vmf->pte;
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 		/*
4738*4882a593Smuzhiyun 		 * some architectures can have larger ptes than wordsize,
4739*4882a593Smuzhiyun 		 * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
4740*4882a593Smuzhiyun 		 * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
4741*4882a593Smuzhiyun 		 * accesses.  The code below just needs a consistent view
4742*4882a593Smuzhiyun 		 * for the ifs and we later double check anyway with the
4743*4882a593Smuzhiyun 		 * ptl lock held. So here a barrier will do.
4744*4882a593Smuzhiyun 		 */
4745*4882a593Smuzhiyun 		barrier();
4746*4882a593Smuzhiyun 		if (pte_none(vmf->orig_pte)) {
4747*4882a593Smuzhiyun 			pte_unmap(vmf->pte);
4748*4882a593Smuzhiyun 			vmf->pte = NULL;
4749*4882a593Smuzhiyun 		}
4750*4882a593Smuzhiyun 	}
4751*4882a593Smuzhiyun 
4752*4882a593Smuzhiyun skip_pmd_checks:
4753*4882a593Smuzhiyun 	if (!vmf->pte) {
4754*4882a593Smuzhiyun 		if (vma_is_anonymous(vmf->vma))
4755*4882a593Smuzhiyun 			return do_anonymous_page(vmf);
4756*4882a593Smuzhiyun 		else if ((vmf->flags & FAULT_FLAG_SPECULATIVE) &&
4757*4882a593Smuzhiyun 				!vmf_allows_speculation(vmf))
4758*4882a593Smuzhiyun 			return VM_FAULT_RETRY;
4759*4882a593Smuzhiyun 		else
4760*4882a593Smuzhiyun 			return do_fault(vmf);
4761*4882a593Smuzhiyun 	}
4762*4882a593Smuzhiyun 
4763*4882a593Smuzhiyun 	if (!pte_present(vmf->orig_pte))
4764*4882a593Smuzhiyun 		return do_swap_page(vmf);
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 	if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4767*4882a593Smuzhiyun 		return do_numa_page(vmf);
4768*4882a593Smuzhiyun 
4769*4882a593Smuzhiyun 	if (!pte_spinlock(vmf))
4770*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
4771*4882a593Smuzhiyun 	entry = vmf->orig_pte;
4772*4882a593Smuzhiyun 	if (unlikely(!pte_same(*vmf->pte, entry))) {
4773*4882a593Smuzhiyun 		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4774*4882a593Smuzhiyun 		goto unlock;
4775*4882a593Smuzhiyun 	}
4776*4882a593Smuzhiyun 	if (vmf->flags & FAULT_FLAG_WRITE) {
4777*4882a593Smuzhiyun 		if (!pte_write(entry)) {
4778*4882a593Smuzhiyun 			if (!(vmf->flags & FAULT_FLAG_SPECULATIVE))
4779*4882a593Smuzhiyun 				return do_wp_page(vmf);
4780*4882a593Smuzhiyun 
4781*4882a593Smuzhiyun 			if (!mmu_notifier_trylock(vmf->vma->vm_mm)) {
4782*4882a593Smuzhiyun 				ret = VM_FAULT_RETRY;
4783*4882a593Smuzhiyun 				goto unlock;
4784*4882a593Smuzhiyun 			}
4785*4882a593Smuzhiyun 
4786*4882a593Smuzhiyun 			ret = do_wp_page(vmf);
4787*4882a593Smuzhiyun 			mmu_notifier_unlock(vmf->vma->vm_mm);
4788*4882a593Smuzhiyun 			return ret;
4789*4882a593Smuzhiyun 		}
4790*4882a593Smuzhiyun 		entry = pte_mkdirty(entry);
4791*4882a593Smuzhiyun 	}
4792*4882a593Smuzhiyun 	entry = pte_mkyoung(entry);
4793*4882a593Smuzhiyun 	if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4794*4882a593Smuzhiyun 				vmf->flags & FAULT_FLAG_WRITE)) {
4795*4882a593Smuzhiyun 		update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4796*4882a593Smuzhiyun 	} else {
4797*4882a593Smuzhiyun 		/* Skip spurious TLB flush for retried page fault */
4798*4882a593Smuzhiyun 		if (vmf->flags & FAULT_FLAG_TRIED)
4799*4882a593Smuzhiyun 			goto unlock;
4800*4882a593Smuzhiyun 		if (vmf->flags & FAULT_FLAG_SPECULATIVE)
4801*4882a593Smuzhiyun 			ret = VM_FAULT_RETRY;
4802*4882a593Smuzhiyun 		/*
4803*4882a593Smuzhiyun 		 * This is needed only for protection faults but the arch code
4804*4882a593Smuzhiyun 		 * is not yet telling us if this is a protection fault or not.
4805*4882a593Smuzhiyun 		 * This still avoids useless tlb flushes for .text page faults
4806*4882a593Smuzhiyun 		 * with threads.
4807*4882a593Smuzhiyun 		 */
4808*4882a593Smuzhiyun 		if (vmf->flags & FAULT_FLAG_WRITE)
4809*4882a593Smuzhiyun 			flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4810*4882a593Smuzhiyun 	}
4811*4882a593Smuzhiyun 	trace_android_rvh_handle_pte_fault_end(vmf, highest_memmap_pfn);
4812*4882a593Smuzhiyun 	trace_android_vh_handle_pte_fault_end(vmf, highest_memmap_pfn);
4813*4882a593Smuzhiyun unlock:
4814*4882a593Smuzhiyun 	pte_unmap_unlock(vmf->pte, vmf->ptl);
4815*4882a593Smuzhiyun 	return ret;
4816*4882a593Smuzhiyun }
4817*4882a593Smuzhiyun 
4818*4882a593Smuzhiyun /*
4819*4882a593Smuzhiyun  * By the time we get here, we already hold the mm semaphore
4820*4882a593Smuzhiyun  *
4821*4882a593Smuzhiyun  * The mmap_lock may have been released depending on flags and our
4822*4882a593Smuzhiyun  * return value.  See filemap_fault() and __lock_page_or_retry().
4823*4882a593Smuzhiyun  */
__handle_mm_fault(struct vm_area_struct * vma,unsigned long address,unsigned int flags)4824*4882a593Smuzhiyun static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
4825*4882a593Smuzhiyun 		unsigned long address, unsigned int flags)
4826*4882a593Smuzhiyun {
4827*4882a593Smuzhiyun 	struct vm_fault vmf = {
4828*4882a593Smuzhiyun 		.vma = vma,
4829*4882a593Smuzhiyun 		.address = address & PAGE_MASK,
4830*4882a593Smuzhiyun 		.flags = flags,
4831*4882a593Smuzhiyun 		.pgoff = linear_page_index(vma, address),
4832*4882a593Smuzhiyun 		.gfp_mask = __get_fault_gfp_mask(vma),
4833*4882a593Smuzhiyun 		.vma_flags = vma->vm_flags,
4834*4882a593Smuzhiyun 		.vma_page_prot = vma->vm_page_prot,
4835*4882a593Smuzhiyun 	};
4836*4882a593Smuzhiyun 	unsigned int dirty = flags & FAULT_FLAG_WRITE;
4837*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
4838*4882a593Smuzhiyun 	pgd_t *pgd;
4839*4882a593Smuzhiyun 	p4d_t *p4d;
4840*4882a593Smuzhiyun 	vm_fault_t ret;
4841*4882a593Smuzhiyun 
4842*4882a593Smuzhiyun 	pgd = pgd_offset(mm, address);
4843*4882a593Smuzhiyun 	p4d = p4d_alloc(mm, pgd, address);
4844*4882a593Smuzhiyun 	if (!p4d)
4845*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4846*4882a593Smuzhiyun 
4847*4882a593Smuzhiyun 	vmf.pud = pud_alloc(mm, p4d, address);
4848*4882a593Smuzhiyun 	if (!vmf.pud)
4849*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4850*4882a593Smuzhiyun retry_pud:
4851*4882a593Smuzhiyun 	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4852*4882a593Smuzhiyun 		ret = create_huge_pud(&vmf);
4853*4882a593Smuzhiyun 		if (!(ret & VM_FAULT_FALLBACK))
4854*4882a593Smuzhiyun 			return ret;
4855*4882a593Smuzhiyun 	} else {
4856*4882a593Smuzhiyun 		pud_t orig_pud = *vmf.pud;
4857*4882a593Smuzhiyun 
4858*4882a593Smuzhiyun 		barrier();
4859*4882a593Smuzhiyun 		if (pud_trans_huge(orig_pud) || pud_devmap(orig_pud)) {
4860*4882a593Smuzhiyun 
4861*4882a593Smuzhiyun 			/* NUMA case for anonymous PUDs would go here */
4862*4882a593Smuzhiyun 
4863*4882a593Smuzhiyun 			if (dirty && !pud_write(orig_pud)) {
4864*4882a593Smuzhiyun 				ret = wp_huge_pud(&vmf, orig_pud);
4865*4882a593Smuzhiyun 				if (!(ret & VM_FAULT_FALLBACK))
4866*4882a593Smuzhiyun 					return ret;
4867*4882a593Smuzhiyun 			} else {
4868*4882a593Smuzhiyun 				huge_pud_set_accessed(&vmf, orig_pud);
4869*4882a593Smuzhiyun 				return 0;
4870*4882a593Smuzhiyun 			}
4871*4882a593Smuzhiyun 		}
4872*4882a593Smuzhiyun 	}
4873*4882a593Smuzhiyun 
4874*4882a593Smuzhiyun 	vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4875*4882a593Smuzhiyun 	if (!vmf.pmd)
4876*4882a593Smuzhiyun 		return VM_FAULT_OOM;
4877*4882a593Smuzhiyun 
4878*4882a593Smuzhiyun 	/* Huge pud page fault raced with pmd_alloc? */
4879*4882a593Smuzhiyun 	if (pud_trans_unstable(vmf.pud))
4880*4882a593Smuzhiyun 		goto retry_pud;
4881*4882a593Smuzhiyun 
4882*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4883*4882a593Smuzhiyun 	vmf.sequence = raw_read_seqcount(&vma->vm_sequence);
4884*4882a593Smuzhiyun #endif
4885*4882a593Smuzhiyun 	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4886*4882a593Smuzhiyun 		ret = create_huge_pmd(&vmf);
4887*4882a593Smuzhiyun 		if (!(ret & VM_FAULT_FALLBACK))
4888*4882a593Smuzhiyun 			return ret;
4889*4882a593Smuzhiyun 	} else {
4890*4882a593Smuzhiyun 		pmd_t orig_pmd = *vmf.pmd;
4891*4882a593Smuzhiyun 
4892*4882a593Smuzhiyun 		barrier();
4893*4882a593Smuzhiyun 		if (unlikely(is_swap_pmd(orig_pmd))) {
4894*4882a593Smuzhiyun 			VM_BUG_ON(thp_migration_supported() &&
4895*4882a593Smuzhiyun 					  !is_pmd_migration_entry(orig_pmd));
4896*4882a593Smuzhiyun 			if (is_pmd_migration_entry(orig_pmd))
4897*4882a593Smuzhiyun 				pmd_migration_entry_wait(mm, vmf.pmd);
4898*4882a593Smuzhiyun 			return 0;
4899*4882a593Smuzhiyun 		}
4900*4882a593Smuzhiyun 		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
4901*4882a593Smuzhiyun 			if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
4902*4882a593Smuzhiyun 				return do_huge_pmd_numa_page(&vmf, orig_pmd);
4903*4882a593Smuzhiyun 
4904*4882a593Smuzhiyun 			if (dirty && !pmd_write(orig_pmd)) {
4905*4882a593Smuzhiyun 				ret = wp_huge_pmd(&vmf, orig_pmd);
4906*4882a593Smuzhiyun 				if (!(ret & VM_FAULT_FALLBACK))
4907*4882a593Smuzhiyun 					return ret;
4908*4882a593Smuzhiyun 			} else {
4909*4882a593Smuzhiyun 				huge_pmd_set_accessed(&vmf, orig_pmd);
4910*4882a593Smuzhiyun 				return 0;
4911*4882a593Smuzhiyun 			}
4912*4882a593Smuzhiyun 		}
4913*4882a593Smuzhiyun 	}
4914*4882a593Smuzhiyun 
4915*4882a593Smuzhiyun 	return handle_pte_fault(&vmf);
4916*4882a593Smuzhiyun }
4917*4882a593Smuzhiyun 
4918*4882a593Smuzhiyun /**
4919*4882a593Smuzhiyun  * mm_account_fault - Do page fault accountings
4920*4882a593Smuzhiyun  *
4921*4882a593Smuzhiyun  * @regs: the pt_regs struct pointer.  When set to NULL, will skip accounting
4922*4882a593Smuzhiyun  *        of perf event counters, but we'll still do the per-task accounting to
4923*4882a593Smuzhiyun  *        the task who triggered this page fault.
4924*4882a593Smuzhiyun  * @address: the faulted address.
4925*4882a593Smuzhiyun  * @flags: the fault flags.
4926*4882a593Smuzhiyun  * @ret: the fault retcode.
4927*4882a593Smuzhiyun  *
4928*4882a593Smuzhiyun  * This will take care of most of the page fault accountings.  Meanwhile, it
4929*4882a593Smuzhiyun  * will also include the PERF_COUNT_SW_PAGE_FAULTS_[MAJ|MIN] perf counter
4930*4882a593Smuzhiyun  * updates.  However note that the handling of PERF_COUNT_SW_PAGE_FAULTS should
4931*4882a593Smuzhiyun  * still be in per-arch page fault handlers at the entry of page fault.
4932*4882a593Smuzhiyun  */
mm_account_fault(struct pt_regs * regs,unsigned long address,unsigned int flags,vm_fault_t ret)4933*4882a593Smuzhiyun static inline void mm_account_fault(struct pt_regs *regs,
4934*4882a593Smuzhiyun 				    unsigned long address, unsigned int flags,
4935*4882a593Smuzhiyun 				    vm_fault_t ret)
4936*4882a593Smuzhiyun {
4937*4882a593Smuzhiyun 	bool major;
4938*4882a593Smuzhiyun 
4939*4882a593Smuzhiyun 	/*
4940*4882a593Smuzhiyun 	 * We don't do accounting for some specific faults:
4941*4882a593Smuzhiyun 	 *
4942*4882a593Smuzhiyun 	 * - Unsuccessful faults (e.g. when the address wasn't valid).  That
4943*4882a593Smuzhiyun 	 *   includes arch_vma_access_permitted() failing before reaching here.
4944*4882a593Smuzhiyun 	 *   So this is not a "this many hardware page faults" counter.  We
4945*4882a593Smuzhiyun 	 *   should use the hw profiling for that.
4946*4882a593Smuzhiyun 	 *
4947*4882a593Smuzhiyun 	 * - Incomplete faults (VM_FAULT_RETRY).  They will only be counted
4948*4882a593Smuzhiyun 	 *   once they're completed.
4949*4882a593Smuzhiyun 	 */
4950*4882a593Smuzhiyun 	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
4951*4882a593Smuzhiyun 		return;
4952*4882a593Smuzhiyun 
4953*4882a593Smuzhiyun 	/*
4954*4882a593Smuzhiyun 	 * We define the fault as a major fault when the final successful fault
4955*4882a593Smuzhiyun 	 * is VM_FAULT_MAJOR, or if it retried (which implies that we couldn't
4956*4882a593Smuzhiyun 	 * handle it immediately previously).
4957*4882a593Smuzhiyun 	 */
4958*4882a593Smuzhiyun 	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);
4959*4882a593Smuzhiyun 
4960*4882a593Smuzhiyun 	if (major)
4961*4882a593Smuzhiyun 		current->maj_flt++;
4962*4882a593Smuzhiyun 	else
4963*4882a593Smuzhiyun 		current->min_flt++;
4964*4882a593Smuzhiyun 
4965*4882a593Smuzhiyun 	/*
4966*4882a593Smuzhiyun 	 * If the fault is done for GUP, regs will be NULL.  We only do the
4967*4882a593Smuzhiyun 	 * accounting for the per thread fault counters who triggered the
4968*4882a593Smuzhiyun 	 * fault, and we skip the perf event updates.
4969*4882a593Smuzhiyun 	 */
4970*4882a593Smuzhiyun 	if (!regs)
4971*4882a593Smuzhiyun 		return;
4972*4882a593Smuzhiyun 
4973*4882a593Smuzhiyun 	if (major)
4974*4882a593Smuzhiyun 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4975*4882a593Smuzhiyun 	else
4976*4882a593Smuzhiyun 		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4977*4882a593Smuzhiyun }
4978*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
4979*4882a593Smuzhiyun 
4980*4882a593Smuzhiyun #ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
4981*4882a593Smuzhiyun /* This is required by vm_normal_page() */
4982*4882a593Smuzhiyun #error "Speculative page fault handler requires CONFIG_ARCH_HAS_PTE_SPECIAL"
4983*4882a593Smuzhiyun #endif
4984*4882a593Smuzhiyun /*
4985*4882a593Smuzhiyun  * vm_normal_page() adds some processing which should be done while
4986*4882a593Smuzhiyun  * hodling the mmap_sem.
4987*4882a593Smuzhiyun  */
4988*4882a593Smuzhiyun 
4989*4882a593Smuzhiyun /*
4990*4882a593Smuzhiyun  * Tries to handle the page fault in a speculative way, without grabbing the
4991*4882a593Smuzhiyun  * mmap_sem.
4992*4882a593Smuzhiyun  * When VM_FAULT_RETRY is returned, the vma pointer is valid and this vma must
4993*4882a593Smuzhiyun  * be checked later when the mmap_sem has been grabbed by calling
4994*4882a593Smuzhiyun  * can_reuse_spf_vma().
4995*4882a593Smuzhiyun  * This is needed as the returned vma is kept in memory until the call to
4996*4882a593Smuzhiyun  * can_reuse_spf_vma() is made.
4997*4882a593Smuzhiyun  */
___handle_speculative_fault(struct mm_struct * mm,unsigned long address,unsigned int flags,struct vm_area_struct * vma)4998*4882a593Smuzhiyun static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
4999*4882a593Smuzhiyun 				unsigned long address, unsigned int flags,
5000*4882a593Smuzhiyun 				struct vm_area_struct *vma)
5001*4882a593Smuzhiyun {
5002*4882a593Smuzhiyun 	struct vm_fault vmf = {
5003*4882a593Smuzhiyun 		.address = address,
5004*4882a593Smuzhiyun 		.pgoff = linear_page_index(vma, address),
5005*4882a593Smuzhiyun 		.vma = vma,
5006*4882a593Smuzhiyun 		.gfp_mask = __get_fault_gfp_mask(vma),
5007*4882a593Smuzhiyun 		.flags = flags,
5008*4882a593Smuzhiyun 	};
5009*4882a593Smuzhiyun #ifdef CONFIG_NUMA
5010*4882a593Smuzhiyun 	struct mempolicy *pol;
5011*4882a593Smuzhiyun #endif
5012*4882a593Smuzhiyun 	pgd_t *pgd, pgdval;
5013*4882a593Smuzhiyun 	p4d_t *p4d, p4dval;
5014*4882a593Smuzhiyun 	pud_t pudval;
5015*4882a593Smuzhiyun 	int seq;
5016*4882a593Smuzhiyun 	vm_fault_t ret;
5017*4882a593Smuzhiyun 
5018*4882a593Smuzhiyun 	/* Clear flags that may lead to release the mmap_sem to retry */
5019*4882a593Smuzhiyun 	flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
5020*4882a593Smuzhiyun 	flags |= FAULT_FLAG_SPECULATIVE;
5021*4882a593Smuzhiyun 
5022*4882a593Smuzhiyun 	/* rmb <-> seqlock,vma_rb_erase() */
5023*4882a593Smuzhiyun 	seq = raw_read_seqcount(&vmf.vma->vm_sequence);
5024*4882a593Smuzhiyun 	if (seq & 1) {
5025*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
5026*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5027*4882a593Smuzhiyun 	}
5028*4882a593Smuzhiyun 
5029*4882a593Smuzhiyun 	if (!vmf_allows_speculation(&vmf))
5030*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5031*4882a593Smuzhiyun 
5032*4882a593Smuzhiyun 	vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
5033*4882a593Smuzhiyun 	vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
5034*4882a593Smuzhiyun 
5035*4882a593Smuzhiyun #ifdef CONFIG_USERFAULTFD
5036*4882a593Smuzhiyun 	/* Can't call userland page fault handler in the speculative path */
5037*4882a593Smuzhiyun 	if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) {
5038*4882a593Smuzhiyun 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
5039*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5040*4882a593Smuzhiyun 	}
5041*4882a593Smuzhiyun #endif
5042*4882a593Smuzhiyun 
5043*4882a593Smuzhiyun 	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
5044*4882a593Smuzhiyun 		/*
5045*4882a593Smuzhiyun 		 * This could be detected by the check address against VMA's
5046*4882a593Smuzhiyun 		 * boundaries but we want to trace it as not supported instead
5047*4882a593Smuzhiyun 		 * of changed.
5048*4882a593Smuzhiyun 		 */
5049*4882a593Smuzhiyun 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
5050*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5051*4882a593Smuzhiyun 	}
5052*4882a593Smuzhiyun 
5053*4882a593Smuzhiyun 	if (address < READ_ONCE(vmf.vma->vm_start)
5054*4882a593Smuzhiyun 	    || READ_ONCE(vmf.vma->vm_end) <= address) {
5055*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
5056*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5057*4882a593Smuzhiyun 	}
5058*4882a593Smuzhiyun 
5059*4882a593Smuzhiyun 	if (!arch_vma_access_permitted(vmf.vma, flags & FAULT_FLAG_WRITE,
5060*4882a593Smuzhiyun 				       flags & FAULT_FLAG_INSTRUCTION,
5061*4882a593Smuzhiyun 				       flags & FAULT_FLAG_REMOTE))
5062*4882a593Smuzhiyun 		goto out_segv;
5063*4882a593Smuzhiyun 
5064*4882a593Smuzhiyun 	/* This is one is required to check that the VMA has write access set */
5065*4882a593Smuzhiyun 	if (flags & FAULT_FLAG_WRITE) {
5066*4882a593Smuzhiyun 		if (unlikely(!(vmf.vma_flags & VM_WRITE)))
5067*4882a593Smuzhiyun 			goto out_segv;
5068*4882a593Smuzhiyun 	} else if (unlikely(!(vmf.vma_flags & (VM_READ|VM_EXEC|VM_WRITE))))
5069*4882a593Smuzhiyun 		goto out_segv;
5070*4882a593Smuzhiyun 
5071*4882a593Smuzhiyun #ifdef CONFIG_NUMA
5072*4882a593Smuzhiyun 	/*
5073*4882a593Smuzhiyun 	 * MPOL_INTERLEAVE implies additional checks in
5074*4882a593Smuzhiyun 	 * mpol_misplaced() which are not compatible with the
5075*4882a593Smuzhiyun 	 *speculative page fault processing.
5076*4882a593Smuzhiyun 	 */
5077*4882a593Smuzhiyun 	pol = __get_vma_policy(vmf.vma, address);
5078*4882a593Smuzhiyun 	if (!pol)
5079*4882a593Smuzhiyun 		pol = get_task_policy(current);
5080*4882a593Smuzhiyun 	if (pol && pol->mode == MPOL_INTERLEAVE) {
5081*4882a593Smuzhiyun 		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
5082*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5083*4882a593Smuzhiyun 	}
5084*4882a593Smuzhiyun #endif
5085*4882a593Smuzhiyun 
5086*4882a593Smuzhiyun 	/*
5087*4882a593Smuzhiyun 	 * Do a speculative lookup of the PTE entry.
5088*4882a593Smuzhiyun 	 */
5089*4882a593Smuzhiyun 	local_irq_disable();
5090*4882a593Smuzhiyun 	pgd = pgd_offset(mm, address);
5091*4882a593Smuzhiyun 	pgdval = READ_ONCE(*pgd);
5092*4882a593Smuzhiyun 	if (pgd_none(pgdval) || unlikely(pgd_bad(pgdval)))
5093*4882a593Smuzhiyun 		goto out_walk;
5094*4882a593Smuzhiyun 
5095*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, address);
5096*4882a593Smuzhiyun 	if (pgd_val(READ_ONCE(*pgd)) != pgd_val(pgdval))
5097*4882a593Smuzhiyun 		goto out_walk;
5098*4882a593Smuzhiyun 	p4dval = READ_ONCE(*p4d);
5099*4882a593Smuzhiyun 	if (p4d_none(p4dval) || unlikely(p4d_bad(p4dval)))
5100*4882a593Smuzhiyun 		goto out_walk;
5101*4882a593Smuzhiyun 
5102*4882a593Smuzhiyun 	vmf.pud = pud_offset(p4d, address);
5103*4882a593Smuzhiyun 	if (p4d_val(READ_ONCE(*p4d)) != p4d_val(p4dval))
5104*4882a593Smuzhiyun 		goto out_walk;
5105*4882a593Smuzhiyun 	pudval = READ_ONCE(*vmf.pud);
5106*4882a593Smuzhiyun 	if (pud_none(pudval) || unlikely(pud_bad(pudval)))
5107*4882a593Smuzhiyun 		goto out_walk;
5108*4882a593Smuzhiyun 
5109*4882a593Smuzhiyun 	/* Huge pages at PUD level are not supported. */
5110*4882a593Smuzhiyun 	if (unlikely(pud_trans_huge(pudval)))
5111*4882a593Smuzhiyun 		goto out_walk;
5112*4882a593Smuzhiyun 
5113*4882a593Smuzhiyun 	vmf.pmd = pmd_offset(vmf.pud, address);
5114*4882a593Smuzhiyun 	if (pud_val(READ_ONCE(*vmf.pud)) != pud_val(pudval))
5115*4882a593Smuzhiyun 		goto out_walk;
5116*4882a593Smuzhiyun 	vmf.orig_pmd = READ_ONCE(*vmf.pmd);
5117*4882a593Smuzhiyun 	/*
5118*4882a593Smuzhiyun 	 * pmd_none could mean that a hugepage collapse is in progress
5119*4882a593Smuzhiyun 	 * in our back as collapse_huge_page() mark it before
5120*4882a593Smuzhiyun 	 * invalidating the pte (which is done once the IPI is catched
5121*4882a593Smuzhiyun 	 * by all CPU and we have interrupt disabled).
5122*4882a593Smuzhiyun 	 * For this reason we cannot handle THP in a speculative way since we
5123*4882a593Smuzhiyun 	 * can't safely indentify an in progress collapse operation done in our
5124*4882a593Smuzhiyun 	 * back on that PMD.
5125*4882a593Smuzhiyun 	 * Regarding the order of the following checks, see comment in
5126*4882a593Smuzhiyun 	 * pmd_devmap_trans_unstable()
5127*4882a593Smuzhiyun 	 */
5128*4882a593Smuzhiyun 	if (unlikely(pmd_devmap(vmf.orig_pmd) ||
5129*4882a593Smuzhiyun 		     pmd_none(vmf.orig_pmd) || pmd_trans_huge(vmf.orig_pmd) ||
5130*4882a593Smuzhiyun 		     is_swap_pmd(vmf.orig_pmd)))
5131*4882a593Smuzhiyun 		goto out_walk;
5132*4882a593Smuzhiyun 
5133*4882a593Smuzhiyun 	/*
5134*4882a593Smuzhiyun 	 * The above does not allocate/instantiate page-tables because doing so
5135*4882a593Smuzhiyun 	 * would lead to the possibility of instantiating page-tables after
5136*4882a593Smuzhiyun 	 * free_pgtables() -- and consequently leaking them.
5137*4882a593Smuzhiyun 	 *
5138*4882a593Smuzhiyun 	 * The result is that we take at least one !speculative fault per PMD
5139*4882a593Smuzhiyun 	 * in order to instantiate it.
5140*4882a593Smuzhiyun 	 */
5141*4882a593Smuzhiyun 
5142*4882a593Smuzhiyun 	vmf.pte = pte_offset_map(vmf.pmd, address);
5143*4882a593Smuzhiyun 	if (pmd_val(READ_ONCE(*vmf.pmd)) != pmd_val(vmf.orig_pmd)) {
5144*4882a593Smuzhiyun 		pte_unmap(vmf.pte);
5145*4882a593Smuzhiyun 		vmf.pte = NULL;
5146*4882a593Smuzhiyun 		goto out_walk;
5147*4882a593Smuzhiyun 	}
5148*4882a593Smuzhiyun 	vmf.orig_pte = READ_ONCE(*vmf.pte);
5149*4882a593Smuzhiyun 	barrier(); /* See comment in handle_pte_fault() */
5150*4882a593Smuzhiyun 	if (pte_none(vmf.orig_pte)) {
5151*4882a593Smuzhiyun 		pte_unmap(vmf.pte);
5152*4882a593Smuzhiyun 		vmf.pte = NULL;
5153*4882a593Smuzhiyun 	}
5154*4882a593Smuzhiyun 
5155*4882a593Smuzhiyun 	vmf.sequence = seq;
5156*4882a593Smuzhiyun 	vmf.flags = flags;
5157*4882a593Smuzhiyun 
5158*4882a593Smuzhiyun 	local_irq_enable();
5159*4882a593Smuzhiyun 
5160*4882a593Smuzhiyun 	/*
5161*4882a593Smuzhiyun 	 * We need to re-validate the VMA after checking the bounds, otherwise
5162*4882a593Smuzhiyun 	 * we might have a false positive on the bounds.
5163*4882a593Smuzhiyun 	 */
5164*4882a593Smuzhiyun 	if (read_seqcount_retry(&vmf.vma->vm_sequence, seq)) {
5165*4882a593Smuzhiyun 		trace_spf_vma_changed(_RET_IP_, vmf.vma, address);
5166*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5167*4882a593Smuzhiyun 	}
5168*4882a593Smuzhiyun 
5169*4882a593Smuzhiyun 	mem_cgroup_enter_user_fault();
5170*4882a593Smuzhiyun 	ret = handle_pte_fault(&vmf);
5171*4882a593Smuzhiyun 	mem_cgroup_exit_user_fault();
5172*4882a593Smuzhiyun 
5173*4882a593Smuzhiyun 	if (ret != VM_FAULT_RETRY) {
5174*4882a593Smuzhiyun 		if (vma_is_anonymous(vmf.vma))
5175*4882a593Smuzhiyun 			count_vm_event(SPECULATIVE_PGFAULT_ANON);
5176*4882a593Smuzhiyun 		else
5177*4882a593Smuzhiyun 			count_vm_event(SPECULATIVE_PGFAULT_FILE);
5178*4882a593Smuzhiyun 	}
5179*4882a593Smuzhiyun 
5180*4882a593Smuzhiyun 	/*
5181*4882a593Smuzhiyun 	 * The task may have entered a memcg OOM situation but
5182*4882a593Smuzhiyun 	 * if the allocation error was handled gracefully (no
5183*4882a593Smuzhiyun 	 * VM_FAULT_OOM), there is no need to kill anything.
5184*4882a593Smuzhiyun 	 * Just clean up the OOM state peacefully.
5185*4882a593Smuzhiyun 	 */
5186*4882a593Smuzhiyun 	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5187*4882a593Smuzhiyun 		mem_cgroup_oom_synchronize(false);
5188*4882a593Smuzhiyun 	return ret;
5189*4882a593Smuzhiyun 
5190*4882a593Smuzhiyun out_walk:
5191*4882a593Smuzhiyun 	trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
5192*4882a593Smuzhiyun 	local_irq_enable();
5193*4882a593Smuzhiyun 	return VM_FAULT_RETRY;
5194*4882a593Smuzhiyun 
5195*4882a593Smuzhiyun out_segv:
5196*4882a593Smuzhiyun 	trace_spf_vma_access(_RET_IP_, vmf.vma, address);
5197*4882a593Smuzhiyun 	return VM_FAULT_SIGSEGV;
5198*4882a593Smuzhiyun }
5199*4882a593Smuzhiyun 
__handle_speculative_fault(struct mm_struct * mm,unsigned long address,unsigned int flags,struct vm_area_struct ** vma,struct pt_regs * regs)5200*4882a593Smuzhiyun vm_fault_t __handle_speculative_fault(struct mm_struct *mm,
5201*4882a593Smuzhiyun 				unsigned long address, unsigned int flags,
5202*4882a593Smuzhiyun 				struct vm_area_struct **vma,
5203*4882a593Smuzhiyun 				struct pt_regs *regs)
5204*4882a593Smuzhiyun {
5205*4882a593Smuzhiyun 	vm_fault_t ret;
5206*4882a593Smuzhiyun 
5207*4882a593Smuzhiyun 	check_sync_rss_stat(current);
5208*4882a593Smuzhiyun 
5209*4882a593Smuzhiyun 	*vma = get_vma(mm, address);
5210*4882a593Smuzhiyun 	if (!*vma)
5211*4882a593Smuzhiyun 		return VM_FAULT_RETRY;
5212*4882a593Smuzhiyun 
5213*4882a593Smuzhiyun 	ret = ___handle_speculative_fault(mm, address, flags, *vma);
5214*4882a593Smuzhiyun 
5215*4882a593Smuzhiyun 	/*
5216*4882a593Smuzhiyun 	 * If there is no need to retry, don't return the vma to the caller.
5217*4882a593Smuzhiyun 	 */
5218*4882a593Smuzhiyun 	if (ret != VM_FAULT_RETRY) {
5219*4882a593Smuzhiyun 		put_vma(*vma);
5220*4882a593Smuzhiyun 		*vma = NULL;
5221*4882a593Smuzhiyun 		mm_account_fault(regs, address, flags, ret);
5222*4882a593Smuzhiyun 	}
5223*4882a593Smuzhiyun 
5224*4882a593Smuzhiyun 	return ret;
5225*4882a593Smuzhiyun }
5226*4882a593Smuzhiyun 
5227*4882a593Smuzhiyun /*
5228*4882a593Smuzhiyun  * This is used to know if the vma fetch in the speculative page fault handler
5229*4882a593Smuzhiyun  * is still valid when trying the regular fault path while holding the
5230*4882a593Smuzhiyun  * mmap_sem.
5231*4882a593Smuzhiyun  * The call to put_vma(vma) must be made after checking the vma's fields, as
5232*4882a593Smuzhiyun  * the vma may be freed by put_vma(). In such a case it is expected that false
5233*4882a593Smuzhiyun  * is returned.
5234*4882a593Smuzhiyun  */
can_reuse_spf_vma(struct vm_area_struct * vma,unsigned long address)5235*4882a593Smuzhiyun bool can_reuse_spf_vma(struct vm_area_struct *vma, unsigned long address)
5236*4882a593Smuzhiyun {
5237*4882a593Smuzhiyun 	bool ret;
5238*4882a593Smuzhiyun 
5239*4882a593Smuzhiyun 	ret = !RB_EMPTY_NODE(&vma->vm_rb) &&
5240*4882a593Smuzhiyun 		vma->vm_start <= address && address < vma->vm_end;
5241*4882a593Smuzhiyun 	put_vma(vma);
5242*4882a593Smuzhiyun 	return ret;
5243*4882a593Smuzhiyun }
5244*4882a593Smuzhiyun #endif /* CONFIG_SPECULATIVE_PAGE_FAULT */
5245*4882a593Smuzhiyun 
5246*4882a593Smuzhiyun /*
5247*4882a593Smuzhiyun  * By the time we get here, we already hold the mm semaphore
5248*4882a593Smuzhiyun  *
5249*4882a593Smuzhiyun  * The mmap_lock may have been released depending on flags and our
5250*4882a593Smuzhiyun  * return value.  See filemap_fault() and __lock_page_or_retry().
5251*4882a593Smuzhiyun  */
handle_mm_fault(struct vm_area_struct * vma,unsigned long address,unsigned int flags,struct pt_regs * regs)5252*4882a593Smuzhiyun vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5253*4882a593Smuzhiyun 			   unsigned int flags, struct pt_regs *regs)
5254*4882a593Smuzhiyun {
5255*4882a593Smuzhiyun 	vm_fault_t ret;
5256*4882a593Smuzhiyun 
5257*4882a593Smuzhiyun 	__set_current_state(TASK_RUNNING);
5258*4882a593Smuzhiyun 
5259*4882a593Smuzhiyun 	count_vm_event(PGFAULT);
5260*4882a593Smuzhiyun 	count_memcg_event_mm(vma->vm_mm, PGFAULT);
5261*4882a593Smuzhiyun 
5262*4882a593Smuzhiyun 	/* do counter updates before entering really critical section. */
5263*4882a593Smuzhiyun 	check_sync_rss_stat(current);
5264*4882a593Smuzhiyun 
5265*4882a593Smuzhiyun 	if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5266*4882a593Smuzhiyun 					    flags & FAULT_FLAG_INSTRUCTION,
5267*4882a593Smuzhiyun 					    flags & FAULT_FLAG_REMOTE))
5268*4882a593Smuzhiyun 		return VM_FAULT_SIGSEGV;
5269*4882a593Smuzhiyun 
5270*4882a593Smuzhiyun 	/*
5271*4882a593Smuzhiyun 	 * Enable the memcg OOM handling for faults triggered in user
5272*4882a593Smuzhiyun 	 * space.  Kernel faults are handled more gracefully.
5273*4882a593Smuzhiyun 	 */
5274*4882a593Smuzhiyun 	if (flags & FAULT_FLAG_USER)
5275*4882a593Smuzhiyun 		mem_cgroup_enter_user_fault();
5276*4882a593Smuzhiyun 
5277*4882a593Smuzhiyun 	if (unlikely(is_vm_hugetlb_page(vma)))
5278*4882a593Smuzhiyun 		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5279*4882a593Smuzhiyun 	else
5280*4882a593Smuzhiyun 		ret = __handle_mm_fault(vma, address, flags);
5281*4882a593Smuzhiyun 
5282*4882a593Smuzhiyun 	if (flags & FAULT_FLAG_USER) {
5283*4882a593Smuzhiyun 		mem_cgroup_exit_user_fault();
5284*4882a593Smuzhiyun 		/*
5285*4882a593Smuzhiyun 		 * The task may have entered a memcg OOM situation but
5286*4882a593Smuzhiyun 		 * if the allocation error was handled gracefully (no
5287*4882a593Smuzhiyun 		 * VM_FAULT_OOM), there is no need to kill anything.
5288*4882a593Smuzhiyun 		 * Just clean up the OOM state peacefully.
5289*4882a593Smuzhiyun 		 */
5290*4882a593Smuzhiyun 		if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
5291*4882a593Smuzhiyun 			mem_cgroup_oom_synchronize(false);
5292*4882a593Smuzhiyun 	}
5293*4882a593Smuzhiyun 
5294*4882a593Smuzhiyun 	mm_account_fault(regs, address, flags, ret);
5295*4882a593Smuzhiyun 
5296*4882a593Smuzhiyun 	return ret;
5297*4882a593Smuzhiyun }
5298*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(handle_mm_fault);
5299*4882a593Smuzhiyun 
5300*4882a593Smuzhiyun #ifndef __PAGETABLE_P4D_FOLDED
5301*4882a593Smuzhiyun /*
5302*4882a593Smuzhiyun  * Allocate p4d page table.
5303*4882a593Smuzhiyun  * We've already handled the fast-path in-line.
5304*4882a593Smuzhiyun  */
__p4d_alloc(struct mm_struct * mm,pgd_t * pgd,unsigned long address)5305*4882a593Smuzhiyun int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
5306*4882a593Smuzhiyun {
5307*4882a593Smuzhiyun 	p4d_t *new = p4d_alloc_one(mm, address);
5308*4882a593Smuzhiyun 	if (!new)
5309*4882a593Smuzhiyun 		return -ENOMEM;
5310*4882a593Smuzhiyun 
5311*4882a593Smuzhiyun 	smp_wmb(); /* See comment in __pte_alloc */
5312*4882a593Smuzhiyun 
5313*4882a593Smuzhiyun 	spin_lock(&mm->page_table_lock);
5314*4882a593Smuzhiyun 	if (pgd_present(*pgd))		/* Another has populated it */
5315*4882a593Smuzhiyun 		p4d_free(mm, new);
5316*4882a593Smuzhiyun 	else
5317*4882a593Smuzhiyun 		pgd_populate(mm, pgd, new);
5318*4882a593Smuzhiyun 	spin_unlock(&mm->page_table_lock);
5319*4882a593Smuzhiyun 	return 0;
5320*4882a593Smuzhiyun }
5321*4882a593Smuzhiyun #endif /* __PAGETABLE_P4D_FOLDED */
5322*4882a593Smuzhiyun 
5323*4882a593Smuzhiyun #ifndef __PAGETABLE_PUD_FOLDED
5324*4882a593Smuzhiyun /*
5325*4882a593Smuzhiyun  * Allocate page upper directory.
5326*4882a593Smuzhiyun  * We've already handled the fast-path in-line.
5327*4882a593Smuzhiyun  */
__pud_alloc(struct mm_struct * mm,p4d_t * p4d,unsigned long address)5328*4882a593Smuzhiyun int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
5329*4882a593Smuzhiyun {
5330*4882a593Smuzhiyun 	pud_t *new = pud_alloc_one(mm, address);
5331*4882a593Smuzhiyun 	if (!new)
5332*4882a593Smuzhiyun 		return -ENOMEM;
5333*4882a593Smuzhiyun 
5334*4882a593Smuzhiyun 	smp_wmb(); /* See comment in __pte_alloc */
5335*4882a593Smuzhiyun 
5336*4882a593Smuzhiyun 	spin_lock(&mm->page_table_lock);
5337*4882a593Smuzhiyun 	if (!p4d_present(*p4d)) {
5338*4882a593Smuzhiyun 		mm_inc_nr_puds(mm);
5339*4882a593Smuzhiyun 		p4d_populate(mm, p4d, new);
5340*4882a593Smuzhiyun 	} else	/* Another has populated it */
5341*4882a593Smuzhiyun 		pud_free(mm, new);
5342*4882a593Smuzhiyun 	spin_unlock(&mm->page_table_lock);
5343*4882a593Smuzhiyun 	return 0;
5344*4882a593Smuzhiyun }
5345*4882a593Smuzhiyun #endif /* __PAGETABLE_PUD_FOLDED */
5346*4882a593Smuzhiyun 
5347*4882a593Smuzhiyun #ifndef __PAGETABLE_PMD_FOLDED
5348*4882a593Smuzhiyun /*
5349*4882a593Smuzhiyun  * Allocate page middle directory.
5350*4882a593Smuzhiyun  * We've already handled the fast-path in-line.
5351*4882a593Smuzhiyun  */
__pmd_alloc(struct mm_struct * mm,pud_t * pud,unsigned long address)5352*4882a593Smuzhiyun int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
5353*4882a593Smuzhiyun {
5354*4882a593Smuzhiyun 	spinlock_t *ptl;
5355*4882a593Smuzhiyun 	pmd_t *new = pmd_alloc_one(mm, address);
5356*4882a593Smuzhiyun 	if (!new)
5357*4882a593Smuzhiyun 		return -ENOMEM;
5358*4882a593Smuzhiyun 
5359*4882a593Smuzhiyun 	smp_wmb(); /* See comment in __pte_alloc */
5360*4882a593Smuzhiyun 
5361*4882a593Smuzhiyun 	ptl = pud_lock(mm, pud);
5362*4882a593Smuzhiyun 	if (!pud_present(*pud)) {
5363*4882a593Smuzhiyun 		mm_inc_nr_pmds(mm);
5364*4882a593Smuzhiyun 		pud_populate(mm, pud, new);
5365*4882a593Smuzhiyun 	} else	/* Another has populated it */
5366*4882a593Smuzhiyun 		pmd_free(mm, new);
5367*4882a593Smuzhiyun 	spin_unlock(ptl);
5368*4882a593Smuzhiyun 	return 0;
5369*4882a593Smuzhiyun }
5370*4882a593Smuzhiyun #endif /* __PAGETABLE_PMD_FOLDED */
5371*4882a593Smuzhiyun 
follow_invalidate_pte(struct mm_struct * mm,unsigned long address,struct mmu_notifier_range * range,pte_t ** ptepp,pmd_t ** pmdpp,spinlock_t ** ptlp)5372*4882a593Smuzhiyun int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
5373*4882a593Smuzhiyun 			  struct mmu_notifier_range *range, pte_t **ptepp,
5374*4882a593Smuzhiyun 			  pmd_t **pmdpp, spinlock_t **ptlp)
5375*4882a593Smuzhiyun {
5376*4882a593Smuzhiyun 	pgd_t *pgd;
5377*4882a593Smuzhiyun 	p4d_t *p4d;
5378*4882a593Smuzhiyun 	pud_t *pud;
5379*4882a593Smuzhiyun 	pmd_t *pmd;
5380*4882a593Smuzhiyun 	pte_t *ptep;
5381*4882a593Smuzhiyun 
5382*4882a593Smuzhiyun 	pgd = pgd_offset(mm, address);
5383*4882a593Smuzhiyun 	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
5384*4882a593Smuzhiyun 		goto out;
5385*4882a593Smuzhiyun 
5386*4882a593Smuzhiyun 	p4d = p4d_offset(pgd, address);
5387*4882a593Smuzhiyun 	if (p4d_none(*p4d) || unlikely(p4d_bad(*p4d)))
5388*4882a593Smuzhiyun 		goto out;
5389*4882a593Smuzhiyun 
5390*4882a593Smuzhiyun 	pud = pud_offset(p4d, address);
5391*4882a593Smuzhiyun 	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
5392*4882a593Smuzhiyun 		goto out;
5393*4882a593Smuzhiyun 
5394*4882a593Smuzhiyun 	pmd = pmd_offset(pud, address);
5395*4882a593Smuzhiyun 	VM_BUG_ON(pmd_trans_huge(*pmd));
5396*4882a593Smuzhiyun 
5397*4882a593Smuzhiyun 	if (pmd_huge(*pmd)) {
5398*4882a593Smuzhiyun 		if (!pmdpp)
5399*4882a593Smuzhiyun 			goto out;
5400*4882a593Smuzhiyun 
5401*4882a593Smuzhiyun 		if (range) {
5402*4882a593Smuzhiyun 			mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0,
5403*4882a593Smuzhiyun 						NULL, mm, address & PMD_MASK,
5404*4882a593Smuzhiyun 						(address & PMD_MASK) + PMD_SIZE);
5405*4882a593Smuzhiyun 			mmu_notifier_invalidate_range_start(range);
5406*4882a593Smuzhiyun 		}
5407*4882a593Smuzhiyun 		*ptlp = pmd_lock(mm, pmd);
5408*4882a593Smuzhiyun 		if (pmd_huge(*pmd)) {
5409*4882a593Smuzhiyun 			*pmdpp = pmd;
5410*4882a593Smuzhiyun 			return 0;
5411*4882a593Smuzhiyun 		}
5412*4882a593Smuzhiyun 		spin_unlock(*ptlp);
5413*4882a593Smuzhiyun 		if (range)
5414*4882a593Smuzhiyun 			mmu_notifier_invalidate_range_end(range);
5415*4882a593Smuzhiyun 	}
5416*4882a593Smuzhiyun 
5417*4882a593Smuzhiyun 	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
5418*4882a593Smuzhiyun 		goto out;
5419*4882a593Smuzhiyun 
5420*4882a593Smuzhiyun 	if (range) {
5421*4882a593Smuzhiyun 		mmu_notifier_range_init(range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
5422*4882a593Smuzhiyun 					address & PAGE_MASK,
5423*4882a593Smuzhiyun 					(address & PAGE_MASK) + PAGE_SIZE);
5424*4882a593Smuzhiyun 		mmu_notifier_invalidate_range_start(range);
5425*4882a593Smuzhiyun 	}
5426*4882a593Smuzhiyun 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
5427*4882a593Smuzhiyun 	if (!pte_present(*ptep))
5428*4882a593Smuzhiyun 		goto unlock;
5429*4882a593Smuzhiyun 	*ptepp = ptep;
5430*4882a593Smuzhiyun 	return 0;
5431*4882a593Smuzhiyun unlock:
5432*4882a593Smuzhiyun 	pte_unmap_unlock(ptep, *ptlp);
5433*4882a593Smuzhiyun 	if (range)
5434*4882a593Smuzhiyun 		mmu_notifier_invalidate_range_end(range);
5435*4882a593Smuzhiyun out:
5436*4882a593Smuzhiyun 	return -EINVAL;
5437*4882a593Smuzhiyun }
5438*4882a593Smuzhiyun 
5439*4882a593Smuzhiyun /**
5440*4882a593Smuzhiyun  * follow_pte - look up PTE at a user virtual address
5441*4882a593Smuzhiyun  * @mm: the mm_struct of the target address space
5442*4882a593Smuzhiyun  * @address: user virtual address
5443*4882a593Smuzhiyun  * @ptepp: location to store found PTE
5444*4882a593Smuzhiyun  * @ptlp: location to store the lock for the PTE
5445*4882a593Smuzhiyun  *
5446*4882a593Smuzhiyun  * On a successful return, the pointer to the PTE is stored in @ptepp;
5447*4882a593Smuzhiyun  * the corresponding lock is taken and its location is stored in @ptlp.
5448*4882a593Smuzhiyun  * The contents of the PTE are only stable until @ptlp is released;
5449*4882a593Smuzhiyun  * any further use, if any, must be protected against invalidation
5450*4882a593Smuzhiyun  * with MMU notifiers.
5451*4882a593Smuzhiyun  *
5452*4882a593Smuzhiyun  * Only IO mappings and raw PFN mappings are allowed.  The mmap semaphore
5453*4882a593Smuzhiyun  * should be taken for read.
5454*4882a593Smuzhiyun  *
5455*4882a593Smuzhiyun  * KVM uses this function.  While it is arguably less bad than ``follow_pfn``,
5456*4882a593Smuzhiyun  * it is not a good general-purpose API.
5457*4882a593Smuzhiyun  *
5458*4882a593Smuzhiyun  * Return: zero on success, -ve otherwise.
5459*4882a593Smuzhiyun  */
follow_pte(struct mm_struct * mm,unsigned long address,pte_t ** ptepp,spinlock_t ** ptlp)5460*4882a593Smuzhiyun int follow_pte(struct mm_struct *mm, unsigned long address,
5461*4882a593Smuzhiyun 	       pte_t **ptepp, spinlock_t **ptlp)
5462*4882a593Smuzhiyun {
5463*4882a593Smuzhiyun 	return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
5464*4882a593Smuzhiyun }
5465*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(follow_pte);
5466*4882a593Smuzhiyun 
5467*4882a593Smuzhiyun /**
5468*4882a593Smuzhiyun  * follow_pfn - look up PFN at a user virtual address
5469*4882a593Smuzhiyun  * @vma: memory mapping
5470*4882a593Smuzhiyun  * @address: user virtual address
5471*4882a593Smuzhiyun  * @pfn: location to store found PFN
5472*4882a593Smuzhiyun  *
5473*4882a593Smuzhiyun  * Only IO mappings and raw PFN mappings are allowed.
5474*4882a593Smuzhiyun  *
5475*4882a593Smuzhiyun  * This function does not allow the caller to read the permissions
5476*4882a593Smuzhiyun  * of the PTE.  Do not use it.
5477*4882a593Smuzhiyun  *
5478*4882a593Smuzhiyun  * Return: zero and the pfn at @pfn on success, -ve otherwise.
5479*4882a593Smuzhiyun  */
follow_pfn(struct vm_area_struct * vma,unsigned long address,unsigned long * pfn)5480*4882a593Smuzhiyun int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5481*4882a593Smuzhiyun 	unsigned long *pfn)
5482*4882a593Smuzhiyun {
5483*4882a593Smuzhiyun 	int ret = -EINVAL;
5484*4882a593Smuzhiyun 	spinlock_t *ptl;
5485*4882a593Smuzhiyun 	pte_t *ptep;
5486*4882a593Smuzhiyun 
5487*4882a593Smuzhiyun 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5488*4882a593Smuzhiyun 		return ret;
5489*4882a593Smuzhiyun 
5490*4882a593Smuzhiyun 	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5491*4882a593Smuzhiyun 	if (ret)
5492*4882a593Smuzhiyun 		return ret;
5493*4882a593Smuzhiyun 	*pfn = pte_pfn(*ptep);
5494*4882a593Smuzhiyun 	pte_unmap_unlock(ptep, ptl);
5495*4882a593Smuzhiyun 	return 0;
5496*4882a593Smuzhiyun }
5497*4882a593Smuzhiyun EXPORT_SYMBOL(follow_pfn);
5498*4882a593Smuzhiyun 
5499*4882a593Smuzhiyun #ifdef CONFIG_HAVE_IOREMAP_PROT
follow_phys(struct vm_area_struct * vma,unsigned long address,unsigned int flags,unsigned long * prot,resource_size_t * phys)5500*4882a593Smuzhiyun int follow_phys(struct vm_area_struct *vma,
5501*4882a593Smuzhiyun 		unsigned long address, unsigned int flags,
5502*4882a593Smuzhiyun 		unsigned long *prot, resource_size_t *phys)
5503*4882a593Smuzhiyun {
5504*4882a593Smuzhiyun 	int ret = -EINVAL;
5505*4882a593Smuzhiyun 	pte_t *ptep, pte;
5506*4882a593Smuzhiyun 	spinlock_t *ptl;
5507*4882a593Smuzhiyun 
5508*4882a593Smuzhiyun 	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5509*4882a593Smuzhiyun 		goto out;
5510*4882a593Smuzhiyun 
5511*4882a593Smuzhiyun 	if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5512*4882a593Smuzhiyun 		goto out;
5513*4882a593Smuzhiyun 	pte = *ptep;
5514*4882a593Smuzhiyun 
5515*4882a593Smuzhiyun 	if ((flags & FOLL_WRITE) && !pte_write(pte))
5516*4882a593Smuzhiyun 		goto unlock;
5517*4882a593Smuzhiyun 
5518*4882a593Smuzhiyun 	*prot = pgprot_val(pte_pgprot(pte));
5519*4882a593Smuzhiyun 	*phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
5520*4882a593Smuzhiyun 
5521*4882a593Smuzhiyun 	ret = 0;
5522*4882a593Smuzhiyun unlock:
5523*4882a593Smuzhiyun 	pte_unmap_unlock(ptep, ptl);
5524*4882a593Smuzhiyun out:
5525*4882a593Smuzhiyun 	return ret;
5526*4882a593Smuzhiyun }
5527*4882a593Smuzhiyun 
generic_access_phys(struct vm_area_struct * vma,unsigned long addr,void * buf,int len,int write)5528*4882a593Smuzhiyun int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5529*4882a593Smuzhiyun 			void *buf, int len, int write)
5530*4882a593Smuzhiyun {
5531*4882a593Smuzhiyun 	resource_size_t phys_addr;
5532*4882a593Smuzhiyun 	unsigned long prot = 0;
5533*4882a593Smuzhiyun 	void __iomem *maddr;
5534*4882a593Smuzhiyun 	int offset = addr & (PAGE_SIZE-1);
5535*4882a593Smuzhiyun 
5536*4882a593Smuzhiyun 	if (follow_phys(vma, addr, write, &prot, &phys_addr))
5537*4882a593Smuzhiyun 		return -EINVAL;
5538*4882a593Smuzhiyun 
5539*4882a593Smuzhiyun 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
5540*4882a593Smuzhiyun 	if (!maddr)
5541*4882a593Smuzhiyun 		return -ENOMEM;
5542*4882a593Smuzhiyun 
5543*4882a593Smuzhiyun 	if (write)
5544*4882a593Smuzhiyun 		memcpy_toio(maddr + offset, buf, len);
5545*4882a593Smuzhiyun 	else
5546*4882a593Smuzhiyun 		memcpy_fromio(buf, maddr + offset, len);
5547*4882a593Smuzhiyun 	iounmap(maddr);
5548*4882a593Smuzhiyun 
5549*4882a593Smuzhiyun 	return len;
5550*4882a593Smuzhiyun }
5551*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(generic_access_phys);
5552*4882a593Smuzhiyun #endif
5553*4882a593Smuzhiyun 
5554*4882a593Smuzhiyun /*
5555*4882a593Smuzhiyun  * Access another process' address space as given in mm.  If non-NULL, use the
5556*4882a593Smuzhiyun  * given task for page fault accounting.
5557*4882a593Smuzhiyun  */
__access_remote_vm(struct task_struct * tsk,struct mm_struct * mm,unsigned long addr,void * buf,int len,unsigned int gup_flags)5558*4882a593Smuzhiyun int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
5559*4882a593Smuzhiyun 		unsigned long addr, void *buf, int len, unsigned int gup_flags)
5560*4882a593Smuzhiyun {
5561*4882a593Smuzhiyun 	struct vm_area_struct *vma;
5562*4882a593Smuzhiyun 	void *old_buf = buf;
5563*4882a593Smuzhiyun 	int write = gup_flags & FOLL_WRITE;
5564*4882a593Smuzhiyun 
5565*4882a593Smuzhiyun 	if (mmap_read_lock_killable(mm))
5566*4882a593Smuzhiyun 		return 0;
5567*4882a593Smuzhiyun 
5568*4882a593Smuzhiyun 	/* ignore errors, just check how much was successfully transferred */
5569*4882a593Smuzhiyun 	while (len) {
5570*4882a593Smuzhiyun 		int bytes, ret, offset;
5571*4882a593Smuzhiyun 		void *maddr;
5572*4882a593Smuzhiyun 		struct page *page = NULL;
5573*4882a593Smuzhiyun 
5574*4882a593Smuzhiyun 		ret = get_user_pages_remote(mm, addr, 1,
5575*4882a593Smuzhiyun 				gup_flags, &page, &vma, NULL);
5576*4882a593Smuzhiyun 		if (ret <= 0) {
5577*4882a593Smuzhiyun #ifndef CONFIG_HAVE_IOREMAP_PROT
5578*4882a593Smuzhiyun 			break;
5579*4882a593Smuzhiyun #else
5580*4882a593Smuzhiyun 			/*
5581*4882a593Smuzhiyun 			 * Check if this is a VM_IO | VM_PFNMAP VMA, which
5582*4882a593Smuzhiyun 			 * we can access using slightly different code.
5583*4882a593Smuzhiyun 			 */
5584*4882a593Smuzhiyun 			vma = find_vma(mm, addr);
5585*4882a593Smuzhiyun 			if (!vma || vma->vm_start > addr)
5586*4882a593Smuzhiyun 				break;
5587*4882a593Smuzhiyun 			if (vma->vm_ops && vma->vm_ops->access)
5588*4882a593Smuzhiyun 				ret = vma->vm_ops->access(vma, addr, buf,
5589*4882a593Smuzhiyun 							  len, write);
5590*4882a593Smuzhiyun 			if (ret <= 0)
5591*4882a593Smuzhiyun 				break;
5592*4882a593Smuzhiyun 			bytes = ret;
5593*4882a593Smuzhiyun #endif
5594*4882a593Smuzhiyun 		} else {
5595*4882a593Smuzhiyun 			bytes = len;
5596*4882a593Smuzhiyun 			offset = addr & (PAGE_SIZE-1);
5597*4882a593Smuzhiyun 			if (bytes > PAGE_SIZE-offset)
5598*4882a593Smuzhiyun 				bytes = PAGE_SIZE-offset;
5599*4882a593Smuzhiyun 
5600*4882a593Smuzhiyun 			maddr = kmap(page);
5601*4882a593Smuzhiyun 			if (write) {
5602*4882a593Smuzhiyun 				copy_to_user_page(vma, page, addr,
5603*4882a593Smuzhiyun 						  maddr + offset, buf, bytes);
5604*4882a593Smuzhiyun 				set_page_dirty_lock(page);
5605*4882a593Smuzhiyun 			} else {
5606*4882a593Smuzhiyun 				copy_from_user_page(vma, page, addr,
5607*4882a593Smuzhiyun 						    buf, maddr + offset, bytes);
5608*4882a593Smuzhiyun 			}
5609*4882a593Smuzhiyun 			kunmap(page);
5610*4882a593Smuzhiyun 			put_user_page(page);
5611*4882a593Smuzhiyun 		}
5612*4882a593Smuzhiyun 		len -= bytes;
5613*4882a593Smuzhiyun 		buf += bytes;
5614*4882a593Smuzhiyun 		addr += bytes;
5615*4882a593Smuzhiyun 	}
5616*4882a593Smuzhiyun 	mmap_read_unlock(mm);
5617*4882a593Smuzhiyun 
5618*4882a593Smuzhiyun 	return buf - old_buf;
5619*4882a593Smuzhiyun }
5620*4882a593Smuzhiyun 
5621*4882a593Smuzhiyun /**
5622*4882a593Smuzhiyun  * access_remote_vm - access another process' address space
5623*4882a593Smuzhiyun  * @mm:		the mm_struct of the target address space
5624*4882a593Smuzhiyun  * @addr:	start address to access
5625*4882a593Smuzhiyun  * @buf:	source or destination buffer
5626*4882a593Smuzhiyun  * @len:	number of bytes to transfer
5627*4882a593Smuzhiyun  * @gup_flags:	flags modifying lookup behaviour
5628*4882a593Smuzhiyun  *
5629*4882a593Smuzhiyun  * The caller must hold a reference on @mm.
5630*4882a593Smuzhiyun  *
5631*4882a593Smuzhiyun  * Return: number of bytes copied from source to destination.
5632*4882a593Smuzhiyun  */
access_remote_vm(struct mm_struct * mm,unsigned long addr,void * buf,int len,unsigned int gup_flags)5633*4882a593Smuzhiyun int access_remote_vm(struct mm_struct *mm, unsigned long addr,
5634*4882a593Smuzhiyun 		void *buf, int len, unsigned int gup_flags)
5635*4882a593Smuzhiyun {
5636*4882a593Smuzhiyun 	return __access_remote_vm(NULL, mm, addr, buf, len, gup_flags);
5637*4882a593Smuzhiyun }
5638*4882a593Smuzhiyun 
5639*4882a593Smuzhiyun /*
5640*4882a593Smuzhiyun  * Access another process' address space.
5641*4882a593Smuzhiyun  * Source/target buffer must be kernel space,
5642*4882a593Smuzhiyun  * Do not walk the page table directly, use get_user_pages
5643*4882a593Smuzhiyun  */
access_process_vm(struct task_struct * tsk,unsigned long addr,void * buf,int len,unsigned int gup_flags)5644*4882a593Smuzhiyun int access_process_vm(struct task_struct *tsk, unsigned long addr,
5645*4882a593Smuzhiyun 		void *buf, int len, unsigned int gup_flags)
5646*4882a593Smuzhiyun {
5647*4882a593Smuzhiyun 	struct mm_struct *mm;
5648*4882a593Smuzhiyun 	int ret;
5649*4882a593Smuzhiyun 
5650*4882a593Smuzhiyun 	mm = get_task_mm(tsk);
5651*4882a593Smuzhiyun 	if (!mm)
5652*4882a593Smuzhiyun 		return 0;
5653*4882a593Smuzhiyun 
5654*4882a593Smuzhiyun 	ret = __access_remote_vm(tsk, mm, addr, buf, len, gup_flags);
5655*4882a593Smuzhiyun 
5656*4882a593Smuzhiyun 	mmput(mm);
5657*4882a593Smuzhiyun 
5658*4882a593Smuzhiyun 	return ret;
5659*4882a593Smuzhiyun }
5660*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(access_process_vm);
5661*4882a593Smuzhiyun 
5662*4882a593Smuzhiyun /*
5663*4882a593Smuzhiyun  * Print the name of a VMA.
5664*4882a593Smuzhiyun  */
print_vma_addr(char * prefix,unsigned long ip)5665*4882a593Smuzhiyun void print_vma_addr(char *prefix, unsigned long ip)
5666*4882a593Smuzhiyun {
5667*4882a593Smuzhiyun 	struct mm_struct *mm = current->mm;
5668*4882a593Smuzhiyun 	struct vm_area_struct *vma;
5669*4882a593Smuzhiyun 
5670*4882a593Smuzhiyun 	/*
5671*4882a593Smuzhiyun 	 * we might be running from an atomic context so we cannot sleep
5672*4882a593Smuzhiyun 	 */
5673*4882a593Smuzhiyun 	if (!mmap_read_trylock(mm))
5674*4882a593Smuzhiyun 		return;
5675*4882a593Smuzhiyun 
5676*4882a593Smuzhiyun 	vma = find_vma(mm, ip);
5677*4882a593Smuzhiyun 	if (vma && vma->vm_file) {
5678*4882a593Smuzhiyun 		struct file *f = vma->vm_file;
5679*4882a593Smuzhiyun 		char *buf = (char *)__get_free_page(GFP_NOWAIT);
5680*4882a593Smuzhiyun 		if (buf) {
5681*4882a593Smuzhiyun 			char *p;
5682*4882a593Smuzhiyun 
5683*4882a593Smuzhiyun 			p = file_path(f, buf, PAGE_SIZE);
5684*4882a593Smuzhiyun 			if (IS_ERR(p))
5685*4882a593Smuzhiyun 				p = "?";
5686*4882a593Smuzhiyun 			printk("%s%s[%lx+%lx]", prefix, kbasename(p),
5687*4882a593Smuzhiyun 					vma->vm_start,
5688*4882a593Smuzhiyun 					vma->vm_end - vma->vm_start);
5689*4882a593Smuzhiyun 			free_page((unsigned long)buf);
5690*4882a593Smuzhiyun 		}
5691*4882a593Smuzhiyun 	}
5692*4882a593Smuzhiyun 	mmap_read_unlock(mm);
5693*4882a593Smuzhiyun }
5694*4882a593Smuzhiyun 
5695*4882a593Smuzhiyun #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
__might_fault(const char * file,int line)5696*4882a593Smuzhiyun void __might_fault(const char *file, int line)
5697*4882a593Smuzhiyun {
5698*4882a593Smuzhiyun 	/*
5699*4882a593Smuzhiyun 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
5700*4882a593Smuzhiyun 	 * holding the mmap_lock, this is safe because kernel memory doesn't
5701*4882a593Smuzhiyun 	 * get paged out, therefore we'll never actually fault, and the
5702*4882a593Smuzhiyun 	 * below annotations will generate false positives.
5703*4882a593Smuzhiyun 	 */
5704*4882a593Smuzhiyun 	if (uaccess_kernel())
5705*4882a593Smuzhiyun 		return;
5706*4882a593Smuzhiyun 	if (pagefault_disabled())
5707*4882a593Smuzhiyun 		return;
5708*4882a593Smuzhiyun 	__might_sleep(file, line, 0);
5709*4882a593Smuzhiyun #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
5710*4882a593Smuzhiyun 	if (current->mm)
5711*4882a593Smuzhiyun 		might_lock_read(&current->mm->mmap_lock);
5712*4882a593Smuzhiyun #endif
5713*4882a593Smuzhiyun }
5714*4882a593Smuzhiyun EXPORT_SYMBOL(__might_fault);
5715*4882a593Smuzhiyun #endif
5716*4882a593Smuzhiyun 
5717*4882a593Smuzhiyun #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
5718*4882a593Smuzhiyun /*
5719*4882a593Smuzhiyun  * Process all subpages of the specified huge page with the specified
5720*4882a593Smuzhiyun  * operation.  The target subpage will be processed last to keep its
5721*4882a593Smuzhiyun  * cache lines hot.
5722*4882a593Smuzhiyun  */
process_huge_page(unsigned long addr_hint,unsigned int pages_per_huge_page,void (* process_subpage)(unsigned long addr,int idx,void * arg),void * arg)5723*4882a593Smuzhiyun static inline void process_huge_page(
5724*4882a593Smuzhiyun 	unsigned long addr_hint, unsigned int pages_per_huge_page,
5725*4882a593Smuzhiyun 	void (*process_subpage)(unsigned long addr, int idx, void *arg),
5726*4882a593Smuzhiyun 	void *arg)
5727*4882a593Smuzhiyun {
5728*4882a593Smuzhiyun 	int i, n, base, l;
5729*4882a593Smuzhiyun 	unsigned long addr = addr_hint &
5730*4882a593Smuzhiyun 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5731*4882a593Smuzhiyun 
5732*4882a593Smuzhiyun 	/* Process target subpage last to keep its cache lines hot */
5733*4882a593Smuzhiyun 	might_sleep();
5734*4882a593Smuzhiyun 	n = (addr_hint - addr) / PAGE_SIZE;
5735*4882a593Smuzhiyun 	if (2 * n <= pages_per_huge_page) {
5736*4882a593Smuzhiyun 		/* If target subpage in first half of huge page */
5737*4882a593Smuzhiyun 		base = 0;
5738*4882a593Smuzhiyun 		l = n;
5739*4882a593Smuzhiyun 		/* Process subpages at the end of huge page */
5740*4882a593Smuzhiyun 		for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
5741*4882a593Smuzhiyun 			cond_resched();
5742*4882a593Smuzhiyun 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5743*4882a593Smuzhiyun 		}
5744*4882a593Smuzhiyun 	} else {
5745*4882a593Smuzhiyun 		/* If target subpage in second half of huge page */
5746*4882a593Smuzhiyun 		base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
5747*4882a593Smuzhiyun 		l = pages_per_huge_page - n;
5748*4882a593Smuzhiyun 		/* Process subpages at the begin of huge page */
5749*4882a593Smuzhiyun 		for (i = 0; i < base; i++) {
5750*4882a593Smuzhiyun 			cond_resched();
5751*4882a593Smuzhiyun 			process_subpage(addr + i * PAGE_SIZE, i, arg);
5752*4882a593Smuzhiyun 		}
5753*4882a593Smuzhiyun 	}
5754*4882a593Smuzhiyun 	/*
5755*4882a593Smuzhiyun 	 * Process remaining subpages in left-right-left-right pattern
5756*4882a593Smuzhiyun 	 * towards the target subpage
5757*4882a593Smuzhiyun 	 */
5758*4882a593Smuzhiyun 	for (i = 0; i < l; i++) {
5759*4882a593Smuzhiyun 		int left_idx = base + i;
5760*4882a593Smuzhiyun 		int right_idx = base + 2 * l - 1 - i;
5761*4882a593Smuzhiyun 
5762*4882a593Smuzhiyun 		cond_resched();
5763*4882a593Smuzhiyun 		process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
5764*4882a593Smuzhiyun 		cond_resched();
5765*4882a593Smuzhiyun 		process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
5766*4882a593Smuzhiyun 	}
5767*4882a593Smuzhiyun }
5768*4882a593Smuzhiyun 
clear_gigantic_page(struct page * page,unsigned long addr,unsigned int pages_per_huge_page)5769*4882a593Smuzhiyun static void clear_gigantic_page(struct page *page,
5770*4882a593Smuzhiyun 				unsigned long addr,
5771*4882a593Smuzhiyun 				unsigned int pages_per_huge_page)
5772*4882a593Smuzhiyun {
5773*4882a593Smuzhiyun 	int i;
5774*4882a593Smuzhiyun 	struct page *p = page;
5775*4882a593Smuzhiyun 
5776*4882a593Smuzhiyun 	might_sleep();
5777*4882a593Smuzhiyun 	for (i = 0; i < pages_per_huge_page;
5778*4882a593Smuzhiyun 	     i++, p = mem_map_next(p, page, i)) {
5779*4882a593Smuzhiyun 		cond_resched();
5780*4882a593Smuzhiyun 		clear_user_highpage(p, addr + i * PAGE_SIZE);
5781*4882a593Smuzhiyun 	}
5782*4882a593Smuzhiyun }
5783*4882a593Smuzhiyun 
clear_subpage(unsigned long addr,int idx,void * arg)5784*4882a593Smuzhiyun static void clear_subpage(unsigned long addr, int idx, void *arg)
5785*4882a593Smuzhiyun {
5786*4882a593Smuzhiyun 	struct page *page = arg;
5787*4882a593Smuzhiyun 
5788*4882a593Smuzhiyun 	clear_user_highpage(page + idx, addr);
5789*4882a593Smuzhiyun }
5790*4882a593Smuzhiyun 
clear_huge_page(struct page * page,unsigned long addr_hint,unsigned int pages_per_huge_page)5791*4882a593Smuzhiyun void clear_huge_page(struct page *page,
5792*4882a593Smuzhiyun 		     unsigned long addr_hint, unsigned int pages_per_huge_page)
5793*4882a593Smuzhiyun {
5794*4882a593Smuzhiyun 	unsigned long addr = addr_hint &
5795*4882a593Smuzhiyun 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5796*4882a593Smuzhiyun 
5797*4882a593Smuzhiyun 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5798*4882a593Smuzhiyun 		clear_gigantic_page(page, addr, pages_per_huge_page);
5799*4882a593Smuzhiyun 		return;
5800*4882a593Smuzhiyun 	}
5801*4882a593Smuzhiyun 
5802*4882a593Smuzhiyun 	process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
5803*4882a593Smuzhiyun }
5804*4882a593Smuzhiyun 
copy_user_gigantic_page(struct page * dst,struct page * src,unsigned long addr,struct vm_area_struct * vma,unsigned int pages_per_huge_page)5805*4882a593Smuzhiyun static void copy_user_gigantic_page(struct page *dst, struct page *src,
5806*4882a593Smuzhiyun 				    unsigned long addr,
5807*4882a593Smuzhiyun 				    struct vm_area_struct *vma,
5808*4882a593Smuzhiyun 				    unsigned int pages_per_huge_page)
5809*4882a593Smuzhiyun {
5810*4882a593Smuzhiyun 	int i;
5811*4882a593Smuzhiyun 	struct page *dst_base = dst;
5812*4882a593Smuzhiyun 	struct page *src_base = src;
5813*4882a593Smuzhiyun 
5814*4882a593Smuzhiyun 	for (i = 0; i < pages_per_huge_page; ) {
5815*4882a593Smuzhiyun 		cond_resched();
5816*4882a593Smuzhiyun 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
5817*4882a593Smuzhiyun 
5818*4882a593Smuzhiyun 		i++;
5819*4882a593Smuzhiyun 		dst = mem_map_next(dst, dst_base, i);
5820*4882a593Smuzhiyun 		src = mem_map_next(src, src_base, i);
5821*4882a593Smuzhiyun 	}
5822*4882a593Smuzhiyun }
5823*4882a593Smuzhiyun 
5824*4882a593Smuzhiyun struct copy_subpage_arg {
5825*4882a593Smuzhiyun 	struct page *dst;
5826*4882a593Smuzhiyun 	struct page *src;
5827*4882a593Smuzhiyun 	struct vm_area_struct *vma;
5828*4882a593Smuzhiyun };
5829*4882a593Smuzhiyun 
copy_subpage(unsigned long addr,int idx,void * arg)5830*4882a593Smuzhiyun static void copy_subpage(unsigned long addr, int idx, void *arg)
5831*4882a593Smuzhiyun {
5832*4882a593Smuzhiyun 	struct copy_subpage_arg *copy_arg = arg;
5833*4882a593Smuzhiyun 
5834*4882a593Smuzhiyun 	copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
5835*4882a593Smuzhiyun 			   addr, copy_arg->vma);
5836*4882a593Smuzhiyun }
5837*4882a593Smuzhiyun 
copy_user_huge_page(struct page * dst,struct page * src,unsigned long addr_hint,struct vm_area_struct * vma,unsigned int pages_per_huge_page)5838*4882a593Smuzhiyun void copy_user_huge_page(struct page *dst, struct page *src,
5839*4882a593Smuzhiyun 			 unsigned long addr_hint, struct vm_area_struct *vma,
5840*4882a593Smuzhiyun 			 unsigned int pages_per_huge_page)
5841*4882a593Smuzhiyun {
5842*4882a593Smuzhiyun 	unsigned long addr = addr_hint &
5843*4882a593Smuzhiyun 		~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
5844*4882a593Smuzhiyun 	struct copy_subpage_arg arg = {
5845*4882a593Smuzhiyun 		.dst = dst,
5846*4882a593Smuzhiyun 		.src = src,
5847*4882a593Smuzhiyun 		.vma = vma,
5848*4882a593Smuzhiyun 	};
5849*4882a593Smuzhiyun 
5850*4882a593Smuzhiyun 	if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
5851*4882a593Smuzhiyun 		copy_user_gigantic_page(dst, src, addr, vma,
5852*4882a593Smuzhiyun 					pages_per_huge_page);
5853*4882a593Smuzhiyun 		return;
5854*4882a593Smuzhiyun 	}
5855*4882a593Smuzhiyun 
5856*4882a593Smuzhiyun 	process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
5857*4882a593Smuzhiyun }
5858*4882a593Smuzhiyun 
copy_huge_page_from_user(struct page * dst_page,const void __user * usr_src,unsigned int pages_per_huge_page,bool allow_pagefault)5859*4882a593Smuzhiyun long copy_huge_page_from_user(struct page *dst_page,
5860*4882a593Smuzhiyun 				const void __user *usr_src,
5861*4882a593Smuzhiyun 				unsigned int pages_per_huge_page,
5862*4882a593Smuzhiyun 				bool allow_pagefault)
5863*4882a593Smuzhiyun {
5864*4882a593Smuzhiyun 	void *src = (void *)usr_src;
5865*4882a593Smuzhiyun 	void *page_kaddr;
5866*4882a593Smuzhiyun 	unsigned long i, rc = 0;
5867*4882a593Smuzhiyun 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
5868*4882a593Smuzhiyun 	struct page *subpage = dst_page;
5869*4882a593Smuzhiyun 
5870*4882a593Smuzhiyun 	for (i = 0; i < pages_per_huge_page;
5871*4882a593Smuzhiyun 	     i++, subpage = mem_map_next(subpage, dst_page, i)) {
5872*4882a593Smuzhiyun 		if (allow_pagefault)
5873*4882a593Smuzhiyun 			page_kaddr = kmap(subpage);
5874*4882a593Smuzhiyun 		else
5875*4882a593Smuzhiyun 			page_kaddr = kmap_atomic(subpage);
5876*4882a593Smuzhiyun 		rc = copy_from_user(page_kaddr,
5877*4882a593Smuzhiyun 				(const void __user *)(src + i * PAGE_SIZE),
5878*4882a593Smuzhiyun 				PAGE_SIZE);
5879*4882a593Smuzhiyun 		if (allow_pagefault)
5880*4882a593Smuzhiyun 			kunmap(subpage);
5881*4882a593Smuzhiyun 		else
5882*4882a593Smuzhiyun 			kunmap_atomic(page_kaddr);
5883*4882a593Smuzhiyun 
5884*4882a593Smuzhiyun 		ret_val -= (PAGE_SIZE - rc);
5885*4882a593Smuzhiyun 		if (rc)
5886*4882a593Smuzhiyun 			break;
5887*4882a593Smuzhiyun 
5888*4882a593Smuzhiyun 		flush_dcache_page(subpage);
5889*4882a593Smuzhiyun 
5890*4882a593Smuzhiyun 		cond_resched();
5891*4882a593Smuzhiyun 	}
5892*4882a593Smuzhiyun 	return ret_val;
5893*4882a593Smuzhiyun }
5894*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
5895*4882a593Smuzhiyun 
5896*4882a593Smuzhiyun #if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
5897*4882a593Smuzhiyun 
5898*4882a593Smuzhiyun static struct kmem_cache *page_ptl_cachep;
5899*4882a593Smuzhiyun 
ptlock_cache_init(void)5900*4882a593Smuzhiyun void __init ptlock_cache_init(void)
5901*4882a593Smuzhiyun {
5902*4882a593Smuzhiyun 	page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
5903*4882a593Smuzhiyun 			SLAB_PANIC, NULL);
5904*4882a593Smuzhiyun }
5905*4882a593Smuzhiyun 
ptlock_alloc(struct page * page)5906*4882a593Smuzhiyun bool ptlock_alloc(struct page *page)
5907*4882a593Smuzhiyun {
5908*4882a593Smuzhiyun 	spinlock_t *ptl;
5909*4882a593Smuzhiyun 
5910*4882a593Smuzhiyun 	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
5911*4882a593Smuzhiyun 	if (!ptl)
5912*4882a593Smuzhiyun 		return false;
5913*4882a593Smuzhiyun 	page->ptl = ptl;
5914*4882a593Smuzhiyun 	return true;
5915*4882a593Smuzhiyun }
5916*4882a593Smuzhiyun 
ptlock_free(struct page * page)5917*4882a593Smuzhiyun void ptlock_free(struct page *page)
5918*4882a593Smuzhiyun {
5919*4882a593Smuzhiyun 	kmem_cache_free(page_ptl_cachep, page->ptl);
5920*4882a593Smuzhiyun }
5921*4882a593Smuzhiyun #endif
5922