// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

/*
 * We want to know the real level where an entry is located, ignoring any
 * folding of levels which may be happening. For example, if p4d is folded then
 * a missing entry found at level 1 (p4d) is actually at level 0 (pgd).
 */
static int real_depth(int depth)
{
	if (depth == 3 && PTRS_PER_PMD == 1)
		depth = 2;
	if (depth == 2 && PTRS_PER_PUD == 1)
		depth = 1;
	if (depth == 1 && PTRS_PER_P4D == 1)
		depth = 0;
	return depth;
}

static int walk_pte_range_inner(pte_t *pte, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	for (;;) {
		err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
		if (err)
			break;
		if (addr >= end - PAGE_SIZE)
			break;
		addr += PAGE_SIZE;
		pte++;
	}
	return err;
}

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pte_t *pte;
	int err = 0;
	spinlock_t *ptl;

	if (walk->no_vma) {
		pte = pte_offset_map(pmd, addr);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap(pte);
	} else {
		pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		err = walk_pte_range_inner(pte, addr, end, walk);
		pte_unmap_unlock(pte, ptl);
	}

	return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pmd_t *pmd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(3);

	pmd = pmd_offset(pud, addr);
	do {
again:
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		/*
		 * This implies that each ->pmd_entry() handler
		 * needs to know about pmd_trans_huge() pmds
		 */
		if (ops->pmd_entry)
			err = ops->pmd_entry(pmd, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		/*
		 * Check this here so we only break down trans_huge
		 * pages when we _need_ to
		 */
		if ((!walk->vma && (pmd_leaf(*pmd) || !pmd_present(*pmd))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pte_entry))
			continue;

		if (walk->vma) {
			split_huge_pmd(walk->vma, pmd, addr);
			if (pmd_trans_unstable(pmd))
				goto again;
		}

		err = walk_pte_range(pmd, addr, next, walk);
		if (err)
			break;
	} while (pmd++, addr = next, addr != end);

	return err;
}

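/*
 * Illustrative sketch: as the comment in walk_pmd_range() above notes, a
 * ->pmd_entry() handler must cope with pmd_trans_huge() pmds itself. The
 * handler below is a hypothetical example (the name is not part of this
 * file's API) showing one common pattern: handle a huge pmd in one go and
 * tell the walker to skip the pte level for that range.
 */
static int __maybe_unused example_pmd_entry(pmd_t *pmd, unsigned long addr,
					    unsigned long next,
					    struct mm_walk *walk)
{
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, walk->vma);
	if (ptl) {
		/* A huge pmd maps the whole [addr, next) range at once. */
		spin_unlock(ptl);
		/* Don't descend to the pte level for this entry. */
		walk->action = ACTION_CONTINUE;
		return 0;
	}
	/* Normal pmd: leave ACTION_SUBTREE set so the ptes are visited. */
	return 0;
}
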
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pud_t *pud;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(2);

	pud = pud_offset(p4d, addr);
	do {
again:
		next = pud_addr_end(addr, end);
		if (pud_none(*pud)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}

		walk->action = ACTION_SUBTREE;

		if (ops->pud_entry)
			err = ops->pud_entry(pud, addr, next, walk);
		if (err)
			break;

		if (walk->action == ACTION_AGAIN)
			goto again;

		if ((!walk->vma && (pud_leaf(*pud) || !pud_present(*pud))) ||
		    walk->action == ACTION_CONTINUE ||
		    !(ops->pmd_entry || ops->pte_entry))
			continue;

		if (walk->vma)
			split_huge_pud(walk->vma, pud, addr);
		if (pud_none(*pud))
			goto again;

		err = walk_pmd_range(pud, addr, next, walk);
		if (err)
			break;
	} while (pud++, addr = next, addr != end);

	return err;
}

static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	p4d_t *p4d;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;
	int depth = real_depth(1);

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, depth, walk);
			if (err)
				break;
			continue;
		}
		if (ops->p4d_entry) {
			err = ops->p4d_entry(p4d, addr, next, walk);
			if (err)
				break;
		}
		if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)
			err = walk_pud_range(p4d, addr, next, walk);
		if (err)
			break;
	} while (p4d++, addr = next, addr != end);

	return err;
}

static int walk_pgd_range(unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
{
	pgd_t *pgd;
	unsigned long next;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	if (walk->pgd)
		pgd = walk->pgd + pgd_index(addr);
	else
		pgd = pgd_offset(walk->mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd)) {
			if (ops->pte_hole)
				err = ops->pte_hole(addr, next, 0, walk);
			if (err)
				break;
			continue;
		}
		if (ops->pgd_entry) {
			err = ops->pgd_entry(pgd, addr, next, walk);
			if (err)
				break;
		}
		if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry ||
		    ops->pte_entry)
			err = walk_p4d_range(pgd, addr, next, walk);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
				       unsigned long end)
{
	unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
	return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	unsigned long next;
	unsigned long hmask = huge_page_mask(h);
	unsigned long sz = huge_page_size(h);
	pte_t *pte;
	const struct mm_walk_ops *ops = walk->ops;
	int err = 0;

	do {
		next = hugetlb_entry_end(h, addr, end);
		pte = huge_pte_offset(walk->mm, addr & hmask, sz);

		if (pte)
			err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
		else if (ops->pte_hole)
			err = ops->pte_hole(addr, next, -1, walk);

		if (err)
			break;
	} while (addr = next, addr != end);

	return err;
}

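/*
 * Illustrative sketch: a hypothetical ->hugetlb_entry() callback (the name
 * is an example only, not part of this file). Each invocation covers one
 * huge page; @hmask is the mask of the huge page containing @addr.
 */
static int __maybe_unused example_hugetlb_entry(pte_t *pte,
						unsigned long hmask,
						unsigned long addr,
						unsigned long end,
						struct mm_walk *walk)
{
	pte_t entry = huge_ptep_get(pte);

	if (pte_present(entry))
		pr_debug("huge page mapped at %#lx\n", addr & hmask);
	return 0;
}
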
#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
			      struct mm_walk *walk)
{
	return 0;
}

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative return value
 * means an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->test_walk)
		return ops->test_walk(start, end, walk);

	/*
	 * vma(VM_PFNMAP) doesn't have any valid struct pages behind the
	 * VM_PFNMAP range, so we don't walk over it as we do for normal vmas.
	 * However, some callers are interested in handling hole ranges and
	 * don't want to just ignore any single address range. Such users
	 * certainly define their ->pte_hole() callbacks, so let's delegate
	 * them to handle vma(VM_PFNMAP).
	 */
	if (vma->vm_flags & VM_PFNMAP) {
		int err = 1;

		if (ops->pte_hole)
			err = ops->pte_hole(start, end, -1, walk);
		return err ? err : 1;
	}
	return 0;
}

static int __walk_page_range(unsigned long start, unsigned long end,
			struct mm_walk *walk)
{
	int err = 0;
	struct vm_area_struct *vma = walk->vma;
	const struct mm_walk_ops *ops = walk->ops;

	if (ops->pre_vma) {
		err = ops->pre_vma(start, end, walk);
		if (err)
			return err;
	}

	if (is_vm_hugetlb_page(vma)) {
		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);

	if (ops->post_vma)
		ops->post_vma(walk);

	return err;
}

/**
 * walk_page_range - walk page table with caller specific callbacks
 * @mm:		mm_struct representing the target process of page table walk
 * @start:	start address of the virtual address range
 * @end:	end address of the virtual address range
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * Recursively walk the page table tree of the process represented by @mm
 * within the virtual address range [@start, @end). During walking, we can do
 * some caller-specific work for each entry, by setting up pmd_entry(),
 * pte_entry(), and/or hugetlb_entry(). If you don't set up some of these
 * callbacks, the associated entries/pages are just ignored.
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : succeeded to handle the current entry, and if you don't reach the
 *         end address yet, continue to walk.
 *  - >0 : succeeded to handle the current entry, and return to the caller
 *         with caller specific value.
 *  - <0 : failed to handle the current entry, and return to the caller
 *         with error code.
 *
 * Before starting to walk the page table, some callers want to check whether
 * they really want to walk over the current vma, typically by checking
 * its vm_flags. walk_page_test() and @ops->test_walk() are used for this
 * purpose.
 *
 * If operations need to be staged before and committed after a vma is walked,
 * there are two callbacks, pre_vma() and post_vma(). Note that post_vma(),
 * since it is intended to handle commit-type operations, can't return any
 * errors.
 *
 * struct mm_walk keeps current values of some common data like vma and pmd,
 * which are useful for the access from callbacks. If you want to pass some
 * caller-specific data to callbacks, @private should be helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold @mm->mmap_lock,
 *   because these functions traverse the vma list and/or access the vma's data.
 */
int walk_page_range(struct mm_struct *mm, unsigned long start,
		unsigned long end, const struct mm_walk_ops *ops,
		void *private)
{
	int err = 0;
	unsigned long next;
	struct vm_area_struct *vma;
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.private	= private,
	};

	if (start >= end)
		return -EINVAL;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	vma = find_vma(walk.mm, start);
	do {
		if (!vma) { /* after the last vma */
			walk.vma = NULL;
			next = end;
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else if (start < vma->vm_start) { /* outside vma */
			walk.vma = NULL;
			next = min(end, vma->vm_start);
			if (ops->pte_hole)
				err = ops->pte_hole(start, next, -1, &walk);
		} else { /* inside vma */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {
				/*
				 * Positive return values are purely for
				 * controlling the pagewalk, so should never
				 * be passed to the callers.
				 */
				err = 0;
				continue;
			}
			if (err < 0)
				break;
			err = __walk_page_range(start, next, &walk);
		}
		if (err)
			break;
	} while (start = next, start < end);
	return err;
}
EXPORT_SYMBOL_GPL(walk_page_range);

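/*
 * Illustrative sketch: a minimal, hypothetical user of walk_page_range()
 * that counts present ptes in a range. The names (count_pte_entry,
 * count_ptes_ops, count_present_ptes) are examples only, not part of this
 * file's API; the locking follows the rules documented above.
 */
static int count_pte_entry(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

static const struct mm_walk_ops count_ptes_ops = {
	.pte_entry	= count_pte_entry,
};

static unsigned long __maybe_unused count_present_ptes(struct mm_struct *mm,
						       unsigned long start,
						       unsigned long end)
{
	unsigned long count = 0;

	/* walk_page_range() asserts that mmap_lock is held. */
	mmap_read_lock(mm);
	walk_page_range(mm, start, end, &count_ptes_ops, &count);
	mmap_read_unlock(mm);
	return count;
}
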
/*
 * Similar to walk_page_range() but can walk any page tables even if they are
 * not backed by VMAs. Because 'unusual' entries may be walked, this function
 * will also not lock the PTEs for the pte_entry() callback. This is useful
 * for walking the kernel page tables or page tables for firmware.
 */
int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
			  unsigned long end, const struct mm_walk_ops *ops,
			  pgd_t *pgd,
			  void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= mm,
		.pgd		= pgd,
		.private	= private,
		.no_vma		= true
	};

	if (start >= end || !walk.mm)
		return -EINVAL;

	mmap_assert_write_locked(walk.mm);

	return walk_pgd_range(start, end, &walk);
}

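/*
 * Illustrative sketch: a hypothetical walker over the kernel page tables
 * using walk_page_range_novma(), similar in spirit to ptdump-style debug
 * code. The write lock on init_mm's mmap_lock matches the assertion above;
 * the function and ops names are examples only.
 */
static int note_kernel_pte(pte_t *pte, unsigned long addr,
			   unsigned long next, struct mm_walk *walk)
{
	/* Note: in the no_vma case the pte is mapped but not locked. */
	if (pte_present(*pte))
		pr_debug("kernel pte at %#lx\n", addr);
	return 0;
}

static const struct mm_walk_ops kernel_pte_ops = {
	.pte_entry	= note_kernel_pte,
};

static int __maybe_unused walk_kernel_range(unsigned long start,
					    unsigned long end)
{
	int err;

	mmap_write_lock(&init_mm);
	err = walk_page_range_novma(&init_mm, start, end, &kernel_pte_ops,
				    NULL, NULL);
	mmap_write_unlock(&init_mm);
	return err;
}
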
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
		void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.mm		= vma->vm_mm,
		.vma		= vma,
		.private	= private,
	};
	int err;

	if (!walk.mm)
		return -EINVAL;

	mmap_assert_locked(walk.mm);

	err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
	if (err > 0)
		return 0;
	if (err < 0)
		return err;
	return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}

/**
 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
 * @mapping: Pointer to the struct address_space
 * @first_index: First page offset in the address_space
 * @nr: Number of incremental page offsets to cover
 * @ops:	operation to call during the walk
 * @private:	private data for callbacks' usage
 *
 * This function walks all memory areas mapped into a struct address_space.
 * The walk is limited to only the given page-size index range, but if
 * the index boundaries cross a huge page-table entry, that entry will be
 * included.
 *
 * Also see walk_page_range() for additional information.
 *
 * Locking:
 *   This function can't require that the struct mm_struct::mmap_lock is held,
 *   since @mapping may be mapped by multiple processes. Instead
 *   @mapping->i_mmap_rwsem must be held. This might have implications in the
 *   callbacks, and it's up to the caller to ensure that the
 *   struct mm_struct::mmap_lock is not needed.
 *
 *   Also this means that a caller can't rely on the struct
 *   vm_area_struct::vm_flags to be constant across a call,
 *   except for immutable flags. Callers requiring this shouldn't use
 *   this function.
 *
 * Return: 0 on success, negative error code on failure, positive number on
 * caller defined premature termination.
 */
int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
		      pgoff_t nr, const struct mm_walk_ops *ops,
		      void *private)
{
	struct mm_walk walk = {
		.ops		= ops,
		.private	= private,
	};
	struct vm_area_struct *vma;
	pgoff_t vba, vea, cba, cea;
	unsigned long start_addr, end_addr;
	int err = 0;

	lockdep_assert_held(&mapping->i_mmap_rwsem);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_index,
				  first_index + nr - 1) {
		/* Clip to the vma */
		vba = vma->vm_pgoff;
		vea = vba + vma_pages(vma);
		cba = first_index;
		cba = max(cba, vba);
		cea = first_index + nr;
		cea = min(cea, vea);

		start_addr = ((cba - vba) << PAGE_SHIFT) + vma->vm_start;
		end_addr = ((cea - vba) << PAGE_SHIFT) + vma->vm_start;
		if (start_addr >= end_addr)
			continue;

		walk.vma = vma;
		walk.mm = vma->vm_mm;

		err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
		if (err > 0) {
			err = 0;
			break;
		} else if (err < 0)
			break;

		err = __walk_page_range(start_addr, end_addr, &walk);
		if (err)
			break;
	}

	return err;
}
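
/*
 * Illustrative sketch: a hypothetical caller of walk_page_mapping(). Per the
 * locking rules documented above, @mapping->i_mmap_rwsem must be held across
 * the call; the function name below is an example only.
 */
static int __maybe_unused walk_mapping_range(struct address_space *mapping,
					     pgoff_t first_index, pgoff_t nr,
					     const struct mm_walk_ops *ops,
					     void *private)
{
	int err;

	i_mmap_lock_read(mapping);
	err = walk_page_mapping(mapping, first_index, nr, ops, private);
	i_mmap_unlock_read(mapping);
	return err;
}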