// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range *range;
	unsigned long last;	/* restart point used by hmm_range_fault() after -EBUSY */
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * We consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used in
	 * two ways: in the first, the HMM user coalesces multiple page faults
	 * into one request and sets flags per pfn for those faults; in the
	 * second, the HMM user wants to pre-fault a range with specific flags.
	 * For the latter it would be a waste to have the user pre-fill the
	 * pfn array with a default flags value.
	 */
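	/*
	 * A sketch of the two modes, following Documentation/vm/hmm.rst:
	 * to pre-fault the whole range writable without pre-filling the
	 * pfn array, a caller sets
	 *
	 *	range->default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
	 *	range->pfn_flags_mask = 0;
	 *
	 * while to honor per-pfn request flags instead, it sets
	 *
	 *	range->default_flags = 0;
	 *	range->pfn_flags_mask = ~0UL;
	 *
	 * and marks individual entries in range->hmm_pfns[] with
	 * HMM_PFN_REQ_FAULT (plus HMM_PFN_REQ_WRITE where needed).
	 */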
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Do we need to write-fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table entry is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
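
/*
 * For example, given a read-only pte (cpu_flags == HMM_PFN_VALID),
 * hmm_pte_need_fault() above returns HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT
 * for a request of HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE, but 0 for a plain
 * HMM_PFN_REQ_FAULT request since the pte is already valid and readable.
 */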

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Bypass a devmap pte such as a DAX page when all the requested pfn
	 * flags (pfn_req_flags) are fulfilled.
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (!vm_normal_page(walk->vma, addr, pte) &&
	    !pte_devmap(pte) &&
	    !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other thread
		 * is splitting the huge pmd we will get that event through the
		 * mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point either it is a
	 * valid pmd entry pointing to a pte directory or it is a bad pmd that
	 * will not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults). See the caller sketch following
 * this function for the expected retry pattern.
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
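
/*
 * A sketch of the expected caller pattern, adapted from
 * Documentation/vm/hmm.rst. The driver-side names (driver_lock,
 * driver_update_device_ptes()) are placeholders, not real APIs:
 *
 *	int driver_populate_range(struct mmu_interval_notifier *sub,
 *				  struct hmm_range *range)
 *	{
 *		int ret;
 *
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(sub);
 *		mmap_read_lock(sub->mm);
 *		ret = hmm_range_fault(range);
 *		mmap_read_unlock(sub->mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(sub, range->notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		// Use range->hmm_pfns[] to update the device page table,
 *		// under the same lock the invalidate callback takes.
 *		driver_update_device_ptes(range);
 *		mutex_unlock(&driver_lock);
 *		return 0;
 *	}
 */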