// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched/signal.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <linux/migrate.h>
#include <linux/mm_inline.h>
#include <linux/sched/mm.h>

#include <linux/page_pinner.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "internal.h"

struct follow_page_context {
	struct dev_pagemap *pgmap;
	unsigned int page_mask;
};

static void hpage_pincount_add(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_add(refs, compound_pincount_ptr(page));
}

static void hpage_pincount_sub(struct page *page, int refs)
{
	VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
	VM_BUG_ON_PAGE(page != compound_head(page), page);

	atomic_sub(refs, compound_pincount_ptr(page));
}

/* Equivalent to calling put_page() @refs times. */
static void put_page_refs(struct page *page, int refs)
{
#ifdef CONFIG_DEBUG_VM
	if (VM_WARN_ON_ONCE_PAGE(page_ref_count(page) < refs, page))
		return;
#endif

	/*
	 * Calling put_page() for each ref is unnecessarily slow. Only the last
	 * ref needs a put_page().
	 */
	if (refs > 1)
		page_ref_sub(page, refs - 1);
	put_page(page);
}

/*
 * Return the compound head page with ref appropriately incremented,
 * or NULL if that failed.
 */
static inline struct page *try_get_compound_head(struct page *page, int refs)
{
	struct page *head = compound_head(page);

	if (WARN_ON_ONCE(page_ref_count(head) < 0))
		return NULL;
	if (unlikely(!page_cache_add_speculative(head, refs)))
		return NULL;

	/*
	 * At this point we have a stable reference to the head page; but it
	 * could be that between the compound_head() lookup and the refcount
	 * increment, the compound page was split, in which case we'd end up
	 * holding a reference on a page that has nothing to do with the page
	 * we were given anymore.
	 * So now that the head page is stable, recheck that the pages still
	 * belong together.
	 */
	if (unlikely(compound_head(page) != head)) {
		put_page_refs(head, refs);
		return NULL;
	}

	return head;
}

/*
 * try_grab_compound_head() - attempt to elevate a page's refcount, by a
 * flags-dependent amount.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * Either FOLL_PIN or FOLL_GET (or neither) must be set, but not both at the
 * same time. (That's true throughout the get_user_pages*() and
 * pin_user_pages*() APIs.) Cases:
 *
 * FOLL_GET: page's refcount will be incremented by 1.
 * FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: head page (with refcount appropriately incremented) for success, or
 * NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
 * considered failure, and furthermore, a likely bug in the caller, so a warning
 * is also emitted.
 */
static __maybe_unused struct page *try_grab_compound_head(struct page *page,
							   int refs,
							   unsigned int flags)
{
	if (flags & FOLL_GET) {
		struct page *head = try_get_compound_head(page, refs);
		if (head)
			set_page_pinner(head, compound_order(head));
		return head;
	} else if (flags & FOLL_PIN) {
		int orig_refs = refs;

		/*
		 * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
		 * path, so fail and let the caller fall back to the slow path.
		 */
		if (unlikely(flags & FOLL_LONGTERM) &&
		    is_migrate_cma_page(page))
			return NULL;

		/*
		 * CAUTION: Don't use compound_head() on the page before this
		 * point, the result won't be stable.
		 */
		page = try_get_compound_head(page, refs);
		if (!page)
			return NULL;

		/*
		 * When pinning a compound page of order > 1 (which is what
		 * hpage_pincount_available() checks for), use an exact count to
		 * track it, via hpage_pincount_add/_sub().
		 *
		 * However, be sure to *also* increment the normal page refcount
		 * field at least once, so that the page really is pinned.
		 */
		if (hpage_pincount_available(page))
			hpage_pincount_add(page, refs);
		else
			page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED,
				    orig_refs);

		return page;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
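
/*
 * Illustrative sketch (editorial note, not part of the upstream API docs):
 * the FOLL_PIN accounting above can be read as plain refcount arithmetic.
 * For a page without hpage_pincount_available(), for example, one pin adds
 * GUP_PIN_COUNTING_BIAS to page->_refcount and the matching unpin subtracts
 * it again:
 *
 *	before:	page_ref_count(page) == N
 *	pin:	try_grab_compound_head(page, 1, FOLL_PIN)
 *	after:	page_ref_count(page) == N + GUP_PIN_COUNTING_BIAS
 *	unpin:	put_compound_head(page, 1, FOLL_PIN)
 *	after:	page_ref_count(page) == N
 *
 * Compound pages for which hpage_pincount_available() is true instead keep
 * an exact pin count via hpage_pincount_add()/_sub(), on top of the normal
 * references taken by try_get_compound_head().
 */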

static void put_compound_head(struct page *page, int refs, unsigned int flags)
{
	if (flags & FOLL_PIN) {
		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
				    refs);

		if (hpage_pincount_available(page))
			hpage_pincount_sub(page, refs);
		else
			refs *= GUP_PIN_COUNTING_BIAS;
	}

	if (flags & FOLL_GET)
		reset_page_pinner(page, compound_order(page));
	put_page_refs(page, refs);
}

/**
 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
 *
 * This might not do anything at all, depending on the flags argument.
 *
 * "grab" names in this file mean, "look at flags to decide whether to use
 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount."
 *
 * @page: pointer to page to be grabbed
 * @flags: gup flags: these are the FOLL_* flag values.
 *
 * Either FOLL_PIN or FOLL_GET (or neither) may be set, but not both at the
 * same time. Cases:
 *
 * FOLL_GET: page's refcount will be incremented by 1.
 * FOLL_PIN: page's refcount will be incremented by GUP_PIN_COUNTING_BIAS.
 *
 * Return: true for success, or if no action was required (if neither FOLL_PIN
 * nor FOLL_GET was set, nothing is done). False for failure: FOLL_GET or
 * FOLL_PIN was set, but the page could not be grabbed.
 */
bool __must_check try_grab_page(struct page *page, unsigned int flags)
{
	WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == (FOLL_GET | FOLL_PIN));

	if (flags & FOLL_GET) {
		bool ret = try_get_page(page);

		if (ret) {
			page = compound_head(page);
			set_page_pinner(page, compound_order(page));
		}
		return ret;
	} else if (flags & FOLL_PIN) {
		int refs = 1;

		page = compound_head(page);

		if (WARN_ON_ONCE(page_ref_count(page) <= 0))
			return false;

		if (hpage_pincount_available(page))
			hpage_pincount_add(page, 1);
		else
			refs = GUP_PIN_COUNTING_BIAS;

		/*
		 * Similar to try_grab_compound_head(): even if using the
		 * hpage_pincount_add/_sub() routines, be sure to
		 * *also* increment the normal page refcount field at least
		 * once, so that the page really is pinned.
		 */
		page_ref_add(page, refs);

		mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_ACQUIRED, 1);
	}

	return true;
}

/**
 * unpin_user_page() - release a dma-pinned page
 * @page: pointer to page to be released
 *
 * Pages that were pinned via pin_user_pages*() must be released via either
 * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
 * that such pages can be separately tracked and uniquely handled. In
 * particular, interactions with RDMA and filesystems need special handling.
 */
void unpin_user_page(struct page *page)
{
	put_compound_head(compound_head(page), 1, FOLL_PIN);
}
EXPORT_SYMBOL(unpin_user_page);
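
/*
 * Illustrative usage sketch (editorial note; "user_addr" and the array size
 * are made-up example values): a typical pin_user_pages*() caller releases
 * every page it pinned with unpin_user_page() or one of the batch variants:
 *
 *	struct page *pages[16];
 *	long i, got;
 *
 *	got = pin_user_pages_fast(user_addr, 16, FOLL_WRITE, pages);
 *	if (got > 0) {
 *		... DMA to/from, or direct access of, the pinned pages ...
 *		for (i = 0; i < got; i++)
 *			unpin_user_page(pages[i]);
 *	}
 */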

/*
 * put_user_page() - release a page obtained using get_user_pages() or
 * follow_page(FOLL_GET)
 * @page: pointer to page to be released
 *
 * Pages that were obtained via get_user_pages()/follow_page(FOLL_GET) must be
 * released via put_user_page().
 * Note: calling this on a page that did not come from GUP or
 * follow_page(FOLL_GET) is harmless.
 */
void put_user_page(struct page *page)
{
	struct page *head = compound_head(page);

	reset_page_pinner(head, compound_order(head));
	put_page(page);
}
EXPORT_SYMBOL(put_user_page);

/**
 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
 * @pages: array of pages to be maybe marked dirty, and definitely released.
 * @npages: number of pages in the @pages array.
 * @make_dirty: whether to mark the pages dirty
 *
 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
 * variants called on that page.
 *
 * For each page in the @pages array, make that page (or its head page, if a
 * compound page) dirty, if @make_dirty is true, and if the page was previously
 * listed as clean. In any case, releases all pages using unpin_user_page(),
 * possibly via unpin_user_pages(), for the non-dirty case.
 *
 * Please see the unpin_user_page() documentation for details.
 *
 * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
 * required, then the caller should a) verify that this is really correct,
 * because _lock() is usually required, and b) hand code it:
 * set_page_dirty(), unpin_user_page().
 *
 */
void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
				 bool make_dirty)
{
	unsigned long index;

	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */

	if (!make_dirty) {
		unpin_user_pages(pages, npages);
		return;
	}

	for (index = 0; index < npages; index++) {
		struct page *page = compound_head(pages[index]);
		/*
		 * Checking PageDirty at this point may race with
		 * clear_page_dirty_for_io(), but that's OK. Two key
		 * cases:
		 *
		 * 1) This code sees the page as already dirty, so it
		 * skips the call to set_page_dirty(). That could happen
		 * because clear_page_dirty_for_io() called
		 * page_mkclean(), followed by set_page_dirty().
		 * However, now the page is going to get written back,
		 * which meets the original intention of setting it
		 * dirty, so all is well: clear_page_dirty_for_io() goes
		 * on to call TestClearPageDirty(), and write the page
		 * back.
		 *
		 * 2) This code sees the page as clean, so it calls
		 * set_page_dirty(). The page stays dirty, despite being
		 * written back, so it gets written back again in the
		 * next writeback cycle. This is harmless.
		 */
		if (!PageDirty(page))
			set_page_dirty_lock(page);
		unpin_user_page(page);
	}
}
EXPORT_SYMBOL(unpin_user_pages_dirty_lock);
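
/*
 * Illustrative usage sketch (editorial note; "nr" and "device_wrote" are
 * made-up names): a driver whose device may have written into the pinned
 * buffer can release everything in one call and let the helper dirty only
 * the pages that need it:
 *
 *	unpin_user_pages_dirty_lock(pages, nr, device_wrote);
 *
 * which is equivalent to calling set_page_dirty_lock() on each previously
 * clean page and then unpin_user_page() on all of them.
 */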

/**
 * unpin_user_pages() - release an array of gup-pinned pages.
 * @pages: array of pages to be released.
 * @npages: number of pages in the @pages array.
 *
 * For each page in the @pages array, release the page using unpin_user_page().
 *
 * Please see the unpin_user_page() documentation for details.
 */
void unpin_user_pages(struct page **pages, unsigned long npages)
{
	unsigned long index;

	/*
	 * If this WARN_ON() fires, then the system *might* be leaking pages (by
	 * leaving them pinned), but probably not. More likely, gup/pup returned
	 * a hard -ERRNO error to the caller, who erroneously passed it here.
	 */
	if (WARN_ON(IS_ERR_VALUE(npages)))
		return;
	/*
	 * TODO: this can be optimized for huge pages: if a series of pages is
	 * physically contiguous and part of the same compound page, then a
	 * single operation to the head page should suffice.
	 */
	for (index = 0; index < npages; index++)
		unpin_user_page(pages[index]);
}
EXPORT_SYMBOL(unpin_user_pages);

#ifdef CONFIG_MMU
static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables. Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) &&
			(vma_is_anonymous(vma) || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, unsigned int flags)
{
	/* No page to get reference */
	if (flags & FOLL_GET)
		return -EFAULT;

	if (flags & FOLL_TOUCH) {
		pte_t entry = *pte;

		if (flags & FOLL_WRITE)
			entry = pte_mkdirty(entry);
		entry = pte_mkyoung(entry);

		if (!pte_same(*pte, entry)) {
			set_pte_at(vma->vm_mm, address, pte, entry);
			update_mmu_cache(vma, address, pte);
		}
	}

	/* Proper page table entry exists, but no corresponding struct page */
	return -EEXIST;
}

/*
 * FOLL_FORCE can write to even unwritable pte's, but only
 * after we've gone through a COW cycle and they are dirty.
 */
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags,
		struct dev_pagemap **pgmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int ret;

	/* FOLL_GET and FOLL_PIN are mutually exclusive. */
	if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) ==
			 (FOLL_PIN | FOLL_GET)))
		return ERR_PTR(-EINVAL);

	/*
	 * Handle PTE-level hugetlb mappings here as well, such as the
	 * contiguous-PTE hugetlb pages used on arm64.
	 */
	if (is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd_pte(vma, address, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_protnone(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
		/*
		 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
		 * case since they are only valid while holding the pgmap
		 * reference.
		 */
		*pgmap = get_dev_pagemap(pte_pfn(pte), *pgmap);
		if (*pgmap)
			page = pte_page(pte);
		else
			goto no_page;
	} else if (unlikely(!page)) {
		if (flags & FOLL_DUMP) {
			/* Avoid special (like zero) pages in core dumps */
			page = ERR_PTR(-EFAULT);
			goto out;
		}

		if (is_zero_pfn(pte_pfn(pte))) {
			page = pte_page(pte);
		} else {
			ret = follow_pfn_pte(vma, address, ptep, flags);
			page = ERR_PTR(ret);
			goto out;
		}
	}

	if (flags & FOLL_SPLIT && PageTransCompound(page)) {
		get_page(page);
		pte_unmap_unlock(ptep, ptl);
		lock_page(page);
		ret = split_huge_page(page);
		unlock_page(page);
		put_page(page);
		if (ret)
			return ERR_PTR(ret);
		goto retry;
	}

	/* try_grab_page() does nothing unless FOLL_GET or FOLL_PIN is set. */
	if (unlikely(!try_grab_page(page, flags))) {
		page = ERR_PTR(-ENOMEM);
		goto out;
	}
	/*
	 * We need to make the page accessible if and only if we are going
	 * to access its content (the FOLL_PIN case). Please see
	 * Documentation/core-api/pin_user_pages.rst for details.
	 */
	if (flags & FOLL_PIN) {
		ret = arch_make_page_accessible(page);
		if (ret) {
			unpin_user_page(page);
			page = ERR_PTR(ret);
			goto out;
		}
	}
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/* Do not mlock pte-mapped THP */
		if (PageTransCompound(page))
			goto out;

		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain(); /* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
out:
	pte_unmap_unlock(ptep, ptl);
	return page;
no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

static struct page *follow_pmd_mask(struct vm_area_struct *vma,
				    unsigned long address, pud_t *pudp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pmd_t *pmd, pmdval;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pmd = pmd_offset(pudp, address);
	/*
	 * The READ_ONCE() will stabilize the pmdval in a register or
	 * on the stack so that it will stop changing under the code.
	 */
	pmdval = READ_ONCE(*pmd);
	if (pmd_none(pmdval))
		return no_page_table(vma, flags);
	if (pmd_huge(pmdval) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pmd_pte(vma, address, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pmd_val(pmdval)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pmd_val(pmdval)), flags,
				      PMD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
retry:
	if (!pmd_present(pmdval)) {
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		VM_BUG_ON(thp_migration_supported() &&
			  !is_pmd_migration_entry(pmdval));
		if (is_pmd_migration_entry(pmdval))
			pmd_migration_entry_wait(mm, pmd);
		pmdval = READ_ONCE(*pmd);
		/*
		 * MADV_DONTNEED may convert the pmd to null because
		 * mmap_lock is held in read mode
		 */
		if (pmd_none(pmdval))
			return no_page_table(vma, flags);
		goto retry;
	}
	if (pmd_devmap(pmdval)) {
		ptl = pmd_lock(mm, pmd);
		page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (likely(!pmd_trans_huge(pmdval)))
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);

	if ((flags & FOLL_NUMA) && pmd_protnone(pmdval))
		return no_page_table(vma, flags);

retry_locked:
	ptl = pmd_lock(mm, pmd);
	if (unlikely(pmd_none(*pmd))) {
		spin_unlock(ptl);
		return no_page_table(vma, flags);
	}
	if (unlikely(!pmd_present(*pmd))) {
		spin_unlock(ptl);
		if (likely(!(flags & FOLL_MIGRATION)))
			return no_page_table(vma, flags);
		pmd_migration_entry_wait(mm, pmd);
		goto retry_locked;
	}
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	if (flags & (FOLL_SPLIT | FOLL_SPLIT_PMD)) {
		int ret;
		page = pmd_page(*pmd);
		if (is_huge_zero_page(page)) {
			spin_unlock(ptl);
			ret = 0;
			split_huge_pmd(vma, pmd, address);
			if (pmd_trans_unstable(pmd))
				ret = -EBUSY;
		} else if (flags & FOLL_SPLIT) {
			if (unlikely(!try_get_page(page))) {
				spin_unlock(ptl);
				return ERR_PTR(-ENOMEM);
			}
			spin_unlock(ptl);
			lock_page(page);
			ret = split_huge_page(page);
			unlock_page(page);
			put_page(page);
			if (pmd_none(*pmd))
				return no_page_table(vma, flags);
		} else {  /* flags & FOLL_SPLIT_PMD */
			spin_unlock(ptl);
			split_huge_pmd(vma, pmd, address);
			ret = pte_alloc(mm, pmd) ? -ENOMEM : 0;
		}

		return ret ? ERR_PTR(ret) :
			follow_page_pte(vma, address, pmd, flags, &ctx->pgmap);
	}
	page = follow_trans_huge_pmd(vma, address, pmd, flags);
	spin_unlock(ptl);
	ctx->page_mask = HPAGE_PMD_NR - 1;
	return page;
}

static struct page *follow_pud_mask(struct vm_area_struct *vma,
				    unsigned long address, p4d_t *p4dp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	pud_t *pud;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	pud = pud_offset(p4dp, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && is_vm_hugetlb_page(vma)) {
		page = follow_huge_pud(mm, address, pud, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pud_val(*pud)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pud_val(*pud)), flags,
				      PUD_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (pud_devmap(*pud)) {
		ptl = pud_lock(mm, pud);
		page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
		spin_unlock(ptl);
		if (page)
			return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	return follow_pmd_mask(vma, address, pud, flags, ctx);
}

static struct page *follow_p4d_mask(struct vm_area_struct *vma,
				    unsigned long address, pgd_t *pgdp,
				    unsigned int flags,
				    struct follow_page_context *ctx)
{
	p4d_t *p4d;
	struct page *page;

	p4d = p4d_offset(pgdp, address);
	if (p4d_none(*p4d))
		return no_page_table(vma, flags);
	BUILD_BUG_ON(p4d_huge(*p4d));
	if (unlikely(p4d_bad(*p4d)))
		return no_page_table(vma, flags);

	if (is_hugepd(__hugepd(p4d_val(*p4d)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(p4d_val(*p4d)), flags,
				      P4D_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	return follow_pud_mask(vma, address, p4d, flags, ctx);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @ctx: contains dev_pagemap for %ZONE_DEVICE memory pinning and a
 *       pointer to output page_mask
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
 * the device's dev_pagemap metadata to avoid repeating expensive lookups.
 *
 * On output, the @ctx->page_mask is set according to the size of the page.
 *
 * Return: the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
static struct page *follow_page_mask(struct vm_area_struct *vma,
				     unsigned long address, unsigned int flags,
				     struct follow_page_context *ctx)
{
	pgd_t *pgd;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	ctx->page_mask = 0;

	/* make this handle hugepd */
	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		WARN_ON_ONCE(flags & (FOLL_GET | FOLL_PIN));
		return page;
	}

	pgd = pgd_offset(mm, address);

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	if (pgd_huge(*pgd)) {
		page = follow_huge_pgd(mm, address, pgd, flags);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}
	if (is_hugepd(__hugepd(pgd_val(*pgd)))) {
		page = follow_huge_pd(vma, address,
				      __hugepd(pgd_val(*pgd)), flags,
				      PGDIR_SHIFT);
		if (page)
			return page;
		return no_page_table(vma, flags);
	}

	return follow_p4d_mask(vma, address, pgd, flags, ctx);
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	struct follow_page_context ctx = { NULL };
	struct page *page;

	page = follow_page_mask(vma, address, foll_flags, &ctx);
	if (ctx.pgmap)
		put_dev_pagemap(ctx.pgmap);
	return page;
}
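
/*
 * Illustrative usage sketch (editorial note; "mm", "vma" and "addr" are
 * assumed to be set up by the caller): a user that only needs to look at the
 * page backing a single address can do roughly the following while holding
 * the mmap_lock:
 *
 *	struct page *page;
 *
 *	mmap_read_lock(mm);
 *	page = follow_page(vma, addr, FOLL_GET);
 *	mmap_read_unlock(mm);
 *	if (page && !IS_ERR(page)) {
 *		... inspect the page ...
 *		put_user_page(page);
 *	}
 *
 * Without FOLL_GET no reference is taken, so the result may only be used
 * while the page table entry is known to remain stable.
 */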

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, address);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	if (unlikely(!try_grab_page(*page, gup_flags))) {
		ret = -ENOMEM;
		goto unmap;
	}
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_lock must be held on entry. If @locked != NULL and *@flags
 * does not include FOLL_NOWAIT, the mmap_lock may be released. If it
 * is, *@locked will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *locked)
{
	unsigned int fault_flags = 0;
	vm_fault_t ret;

	/* mlock all present pages, but do not fault in new pages */
	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (*flags & FOLL_REMOTE)
		fault_flags |= FAULT_FLAG_REMOTE;
	if (locked)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		/*
		 * Note: FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_TRIED
		 * can co-exist
		 */
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(vma, address, fault_flags, NULL);
	if (ret & VM_FAULT_ERROR) {
		int err = vm_fault_to_errno(ret, *flags);

		if (err)
			return err;
		BUG();
	}

	if (ret & VM_FAULT_RETRY) {
		if (locked && !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
			*locked = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags |= FOLL_COW;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;
	int write = (gup_flags & FOLL_WRITE);
	int foreign = (gup_flags & FOLL_REMOTE);

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
		return -EFAULT;

	if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma))
		return -EOPNOTSUPP;

	if (write) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags))
				return -EFAULT;
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	/*
	 * gups are always data accesses, not instruction
	 * fetches, so execute=false here
	 */
	if (!arch_vma_access_permitted(vma, write, false, foreign))
		return -EFAULT;
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @locked:	whether we're still with the mmap_lock held
 *
 * Returns either number of pages pinned (which may be less than the
 * number requested), or an error. Details about the return value:
 *
 * -- If nr_pages is 0, returns 0.
 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
 * -- If nr_pages is >0, and some pages were pinned, returns the number of
 *    pages pinned. Again, this may be less than nr_pages.
 * -- 0 return value is possible when the fault would need to be retried.
 *
 * The caller is responsible for releasing returned @pages, via put_page().
 *
 * @vmas are valid only as long as mmap_lock is held.
 *
 * Must be called with mmap_lock held. It may be released. See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However, it does guarantee that the page
1038*4882a593Smuzhiyun * won't be freed completely. And mostly callers simply care that the page
1039*4882a593Smuzhiyun * contains data that was valid *at some point in time*. Typically, an IO
1040*4882a593Smuzhiyun * or similar operation cannot guarantee anything stronger anyway because
1041*4882a593Smuzhiyun * locks can't be held over the syscall boundary.
1042*4882a593Smuzhiyun *
1043*4882a593Smuzhiyun * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1044*4882a593Smuzhiyun * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1045*4882a593Smuzhiyun * appropriate) must be called after the page is finished with, and
1046*4882a593Smuzhiyun * before put_page is called.
1047*4882a593Smuzhiyun *
1048*4882a593Smuzhiyun * If @locked != NULL, *@locked will be set to 0 when mmap_lock is
1049*4882a593Smuzhiyun * released by an up_read(). That can happen if @gup_flags does not
1050*4882a593Smuzhiyun * have FOLL_NOWAIT.
1051*4882a593Smuzhiyun *
1052*4882a593Smuzhiyun * A caller using such a combination of @locked and @gup_flags
1053*4882a593Smuzhiyun * must therefore hold the mmap_lock for reading only, and recognize
1054*4882a593Smuzhiyun * when it's been released. Otherwise, it must be held for either
1055*4882a593Smuzhiyun * reading or writing and will not be released.
1056*4882a593Smuzhiyun *
1057*4882a593Smuzhiyun * In most cases, get_user_pages or get_user_pages_fast should be used
1058*4882a593Smuzhiyun * instead of __get_user_pages. __get_user_pages should be used only if
1059*4882a593Smuzhiyun * you need some special @gup_flags.
1060*4882a593Smuzhiyun */
__get_user_pages(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,struct vm_area_struct ** vmas,int * locked)1061*4882a593Smuzhiyun static long __get_user_pages(struct mm_struct *mm,
1062*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
1063*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1064*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun long ret = 0, i = 0;
1067*4882a593Smuzhiyun struct vm_area_struct *vma = NULL;
1068*4882a593Smuzhiyun struct follow_page_context ctx = { NULL };
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun if (!nr_pages)
1071*4882a593Smuzhiyun return 0;
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun start = untagged_addr(start);
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun /*
1078*4882a593Smuzhiyun * If FOLL_FORCE is set then do not force a full fault as the hinting
1079*4882a593Smuzhiyun * fault information is unrelated to the reference behaviour of a task
1080*4882a593Smuzhiyun * using the address space
1081*4882a593Smuzhiyun */
1082*4882a593Smuzhiyun if (!(gup_flags & FOLL_FORCE))
1083*4882a593Smuzhiyun gup_flags |= FOLL_NUMA;
1084*4882a593Smuzhiyun
1085*4882a593Smuzhiyun do {
1086*4882a593Smuzhiyun struct page *page;
1087*4882a593Smuzhiyun unsigned int foll_flags = gup_flags;
1088*4882a593Smuzhiyun unsigned int page_increm;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun /* first iteration or cross vma bound */
1091*4882a593Smuzhiyun if (!vma || start >= vma->vm_end) {
1092*4882a593Smuzhiyun vma = find_extend_vma(mm, start);
1093*4882a593Smuzhiyun if (!vma && in_gate_area(mm, start)) {
1094*4882a593Smuzhiyun ret = get_gate_page(mm, start & PAGE_MASK,
1095*4882a593Smuzhiyun gup_flags, &vma,
1096*4882a593Smuzhiyun pages ? &pages[i] : NULL);
1097*4882a593Smuzhiyun if (ret)
1098*4882a593Smuzhiyun goto out;
1099*4882a593Smuzhiyun ctx.page_mask = 0;
1100*4882a593Smuzhiyun goto next_page;
1101*4882a593Smuzhiyun }
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun if (!vma) {
1104*4882a593Smuzhiyun ret = -EFAULT;
1105*4882a593Smuzhiyun goto out;
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun ret = check_vma_flags(vma, gup_flags);
1108*4882a593Smuzhiyun if (ret)
1109*4882a593Smuzhiyun goto out;
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun if (is_vm_hugetlb_page(vma)) {
1112*4882a593Smuzhiyun i = follow_hugetlb_page(mm, vma, pages, vmas,
1113*4882a593Smuzhiyun &start, &nr_pages, i,
1114*4882a593Smuzhiyun gup_flags, locked);
1115*4882a593Smuzhiyun if (locked && *locked == 0) {
1116*4882a593Smuzhiyun /*
1117*4882a593Smuzhiyun * We've got a VM_FAULT_RETRY
1118*4882a593Smuzhiyun * and we've lost mmap_lock.
1119*4882a593Smuzhiyun * We must stop here.
1120*4882a593Smuzhiyun */
1121*4882a593Smuzhiyun BUG_ON(gup_flags & FOLL_NOWAIT);
1122*4882a593Smuzhiyun BUG_ON(ret != 0);
1123*4882a593Smuzhiyun goto out;
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun continue;
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun }
1128*4882a593Smuzhiyun retry:
1129*4882a593Smuzhiyun /*
1130*4882a593Smuzhiyun * If we have a pending SIGKILL, don't keep faulting pages and
1131*4882a593Smuzhiyun * potentially allocating memory.
1132*4882a593Smuzhiyun */
1133*4882a593Smuzhiyun if (fatal_signal_pending(current)) {
1134*4882a593Smuzhiyun ret = -EINTR;
1135*4882a593Smuzhiyun goto out;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun cond_resched();
1138*4882a593Smuzhiyun
1139*4882a593Smuzhiyun page = follow_page_mask(vma, start, foll_flags, &ctx);
1140*4882a593Smuzhiyun if (!page) {
1141*4882a593Smuzhiyun ret = faultin_page(vma, start, &foll_flags, locked);
1142*4882a593Smuzhiyun switch (ret) {
1143*4882a593Smuzhiyun case 0:
1144*4882a593Smuzhiyun goto retry;
1145*4882a593Smuzhiyun case -EBUSY:
1146*4882a593Smuzhiyun ret = 0;
1147*4882a593Smuzhiyun fallthrough;
1148*4882a593Smuzhiyun case -EFAULT:
1149*4882a593Smuzhiyun case -ENOMEM:
1150*4882a593Smuzhiyun case -EHWPOISON:
1151*4882a593Smuzhiyun goto out;
1152*4882a593Smuzhiyun case -ENOENT:
1153*4882a593Smuzhiyun goto next_page;
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun BUG();
1156*4882a593Smuzhiyun } else if (PTR_ERR(page) == -EEXIST) {
1157*4882a593Smuzhiyun /*
1158*4882a593Smuzhiyun * Proper page table entry exists, but no corresponding
1159*4882a593Smuzhiyun * struct page.
1160*4882a593Smuzhiyun */
1161*4882a593Smuzhiyun goto next_page;
1162*4882a593Smuzhiyun } else if (IS_ERR(page)) {
1163*4882a593Smuzhiyun ret = PTR_ERR(page);
1164*4882a593Smuzhiyun goto out;
1165*4882a593Smuzhiyun }
1166*4882a593Smuzhiyun if (pages) {
1167*4882a593Smuzhiyun pages[i] = page;
1168*4882a593Smuzhiyun flush_anon_page(vma, page, start);
1169*4882a593Smuzhiyun flush_dcache_page(page);
1170*4882a593Smuzhiyun ctx.page_mask = 0;
1171*4882a593Smuzhiyun }
1172*4882a593Smuzhiyun next_page:
1173*4882a593Smuzhiyun if (vmas) {
1174*4882a593Smuzhiyun vmas[i] = vma;
1175*4882a593Smuzhiyun ctx.page_mask = 0;
1176*4882a593Smuzhiyun }
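		/*
		 * ctx.page_mask is (nr_subpages - 1) when follow_page_mask()
		 * returned a page in a huge mapping, and 0 otherwise. The
		 * expression below therefore counts how many subpages remain
		 * from @start to the end of that compound page, letting the
		 * whole huge page be consumed in a single iteration.
		 */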
1177*4882a593Smuzhiyun page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
1178*4882a593Smuzhiyun if (page_increm > nr_pages)
1179*4882a593Smuzhiyun page_increm = nr_pages;
1180*4882a593Smuzhiyun i += page_increm;
1181*4882a593Smuzhiyun start += page_increm * PAGE_SIZE;
1182*4882a593Smuzhiyun nr_pages -= page_increm;
1183*4882a593Smuzhiyun } while (nr_pages);
1184*4882a593Smuzhiyun out:
1185*4882a593Smuzhiyun if (ctx.pgmap)
1186*4882a593Smuzhiyun put_dev_pagemap(ctx.pgmap);
1187*4882a593Smuzhiyun return i ? i : ret;
1188*4882a593Smuzhiyun }
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun static bool vma_permits_fault(struct vm_area_struct *vma,
1191*4882a593Smuzhiyun unsigned int fault_flags)
1192*4882a593Smuzhiyun {
1193*4882a593Smuzhiyun bool write = !!(fault_flags & FAULT_FLAG_WRITE);
1194*4882a593Smuzhiyun bool foreign = !!(fault_flags & FAULT_FLAG_REMOTE);
1195*4882a593Smuzhiyun vm_flags_t vm_flags = write ? VM_WRITE : VM_READ;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun if (!(vm_flags & vma->vm_flags))
1198*4882a593Smuzhiyun return false;
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun /*
1201*4882a593Smuzhiyun * The architecture might have a hardware protection
1202*4882a593Smuzhiyun * mechanism other than read/write that can deny access.
1203*4882a593Smuzhiyun *
1204*4882a593Smuzhiyun * gup always represents data access, not instruction
1205*4882a593Smuzhiyun * fetches, so execute=false here:
1206*4882a593Smuzhiyun */
1207*4882a593Smuzhiyun if (!arch_vma_access_permitted(vma, write, false, foreign))
1208*4882a593Smuzhiyun return false;
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun return true;
1211*4882a593Smuzhiyun }
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun /**
1214*4882a593Smuzhiyun * fixup_user_fault() - manually resolve a user page fault
1215*4882a593Smuzhiyun * @mm: mm_struct of target mm
1216*4882a593Smuzhiyun * @address: user address
1217*4882a593Smuzhiyun  * @fault_flags: flags to pass down to handle_mm_fault()
1218*4882a593Smuzhiyun * @unlocked: did we unlock the mmap_lock while retrying, maybe NULL if caller
1219*4882a593Smuzhiyun * does not allow retry. If NULL, the caller must guarantee
1220*4882a593Smuzhiyun * that fault_flags does not contain FAULT_FLAG_ALLOW_RETRY.
1221*4882a593Smuzhiyun *
1222*4882a593Smuzhiyun * This is meant to be called in the specific scenario where for locking reasons
1223*4882a593Smuzhiyun * we try to access user memory in atomic context (within a pagefault_disable()
1224*4882a593Smuzhiyun * section), this returns -EFAULT, and we want to resolve the user fault before
1225*4882a593Smuzhiyun * trying again.
1226*4882a593Smuzhiyun *
1227*4882a593Smuzhiyun * Typically this is meant to be used by the futex code.
1228*4882a593Smuzhiyun *
1229*4882a593Smuzhiyun * The main difference with get_user_pages() is that this function will
1230*4882a593Smuzhiyun * unconditionally call handle_mm_fault() which will in turn perform all the
1231*4882a593Smuzhiyun * necessary SW fixup of the dirty and young bits in the PTE, while
1232*4882a593Smuzhiyun * get_user_pages() only guarantees to update these in the struct page.
1233*4882a593Smuzhiyun *
1234*4882a593Smuzhiyun * This is important for some architectures where those bits also gate the
1235*4882a593Smuzhiyun * access permission to the page because they are maintained in software. On
1236*4882a593Smuzhiyun * such architectures, gup() will not be enough to make a subsequent access
1237*4882a593Smuzhiyun * succeed.
1238*4882a593Smuzhiyun *
1239*4882a593Smuzhiyun  * This function will not return with an unlocked mmap_lock. So it does not
1240*4882a593Smuzhiyun  * have the same semantics w.r.t. @mm->mmap_lock as filemap_fault() does.
1241*4882a593Smuzhiyun */
1242*4882a593Smuzhiyun int fixup_user_fault(struct mm_struct *mm,
1243*4882a593Smuzhiyun unsigned long address, unsigned int fault_flags,
1244*4882a593Smuzhiyun bool *unlocked)
1245*4882a593Smuzhiyun {
1246*4882a593Smuzhiyun struct vm_area_struct *vma;
1247*4882a593Smuzhiyun vm_fault_t ret, major = 0;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun address = untagged_addr(address);
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun if (unlocked)
1252*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun retry:
1255*4882a593Smuzhiyun vma = find_extend_vma(mm, address);
1256*4882a593Smuzhiyun if (!vma || address < vma->vm_start)
1257*4882a593Smuzhiyun return -EFAULT;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun if (!vma_permits_fault(vma, fault_flags))
1260*4882a593Smuzhiyun return -EFAULT;
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun if ((fault_flags & FAULT_FLAG_KILLABLE) &&
1263*4882a593Smuzhiyun fatal_signal_pending(current))
1264*4882a593Smuzhiyun return -EINTR;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun ret = handle_mm_fault(vma, address, fault_flags, NULL);
1267*4882a593Smuzhiyun major |= ret & VM_FAULT_MAJOR;
1268*4882a593Smuzhiyun if (ret & VM_FAULT_ERROR) {
1269*4882a593Smuzhiyun int err = vm_fault_to_errno(ret, 0);
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun if (err)
1272*4882a593Smuzhiyun return err;
1273*4882a593Smuzhiyun BUG();
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun if (ret & VM_FAULT_RETRY) {
1277*4882a593Smuzhiyun mmap_read_lock(mm);
1278*4882a593Smuzhiyun *unlocked = true;
1279*4882a593Smuzhiyun fault_flags |= FAULT_FLAG_TRIED;
1280*4882a593Smuzhiyun goto retry;
1281*4882a593Smuzhiyun }
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun return 0;
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(fixup_user_fault);
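
/*
 * Illustrative sketch (not part of this file's API): the typical caller
 * pattern described above is an atomic-context access that falls back to
 * fixup_user_fault() and then retries. The names resolve_uaddr and uaddr
 * below are hypothetical placeholders.
 *
 *	static int resolve_uaddr(unsigned long uaddr, u32 *val)
 *	{
 *		bool unlocked = false;
 *		int ret;
 *
 *		pagefault_disable();
 *		ret = __get_user(*val, (u32 __user *)uaddr);
 *		pagefault_enable();
 *		if (!ret)
 *			return 0;
 *
 *		mmap_read_lock(current->mm);
 *		ret = fixup_user_fault(current->mm, uaddr, 0, &unlocked);
 *		mmap_read_unlock(current->mm);
 *		return ret;
 *	}
 *
 * On success the caller simply retries the pagefault_disable()'d access.
 */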
1286*4882a593Smuzhiyun
1287*4882a593Smuzhiyun /*
1288*4882a593Smuzhiyun  * Please note that this function, unlike __get_user_pages(), will not
1289*4882a593Smuzhiyun  * return 0 for nr_pages > 0 without FOLL_NOWAIT.
1290*4882a593Smuzhiyun */
1291*4882a593Smuzhiyun static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
1292*4882a593Smuzhiyun unsigned long start,
1293*4882a593Smuzhiyun unsigned long nr_pages,
1294*4882a593Smuzhiyun struct page **pages,
1295*4882a593Smuzhiyun struct vm_area_struct **vmas,
1296*4882a593Smuzhiyun int *locked,
1297*4882a593Smuzhiyun unsigned int flags)
1298*4882a593Smuzhiyun {
1299*4882a593Smuzhiyun long ret, pages_done;
1300*4882a593Smuzhiyun bool lock_dropped;
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun if (locked) {
1303*4882a593Smuzhiyun /* if VM_FAULT_RETRY can be returned, vmas become invalid */
1304*4882a593Smuzhiyun BUG_ON(vmas);
1305*4882a593Smuzhiyun /* check caller initialized locked */
1306*4882a593Smuzhiyun BUG_ON(*locked != 1);
1307*4882a593Smuzhiyun }
1308*4882a593Smuzhiyun
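	/*
	 * Note: mm->has_pinned is a one-way flag: it records that this mm has
	 * ever had pages pinned and is never cleared again, so readers (for
	 * example the fork/copy-on-write path) can use it purely as a hint.
	 */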
1309*4882a593Smuzhiyun if (flags & FOLL_PIN)
1310*4882a593Smuzhiyun atomic_set(&mm->has_pinned, 1);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun /*
1313*4882a593Smuzhiyun * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior
1314*4882a593Smuzhiyun * is to set FOLL_GET if the caller wants pages[] filled in (but has
1315*4882a593Smuzhiyun * carelessly failed to specify FOLL_GET), so keep doing that, but only
1316*4882a593Smuzhiyun * for FOLL_GET, not for the newer FOLL_PIN.
1317*4882a593Smuzhiyun *
1318*4882a593Smuzhiyun * FOLL_PIN always expects pages to be non-null, but no need to assert
1319*4882a593Smuzhiyun * that here, as any failures will be obvious enough.
1320*4882a593Smuzhiyun */
1321*4882a593Smuzhiyun if (pages && !(flags & FOLL_PIN))
1322*4882a593Smuzhiyun flags |= FOLL_GET;
1323*4882a593Smuzhiyun
1324*4882a593Smuzhiyun pages_done = 0;
1325*4882a593Smuzhiyun lock_dropped = false;
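	/*
	 * Strategy: first let __get_user_pages() fault in as much of the
	 * range as it can. If the mmap_lock was dropped because of
	 * VM_FAULT_RETRY, reacquire it and fall through to the retry code
	 * below, which faults in the remaining pages one at a time with
	 * FOLL_TRIED set.
	 */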
1326*4882a593Smuzhiyun for (;;) {
1327*4882a593Smuzhiyun ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1328*4882a593Smuzhiyun vmas, locked);
1329*4882a593Smuzhiyun if (!locked)
1330*4882a593Smuzhiyun /* VM_FAULT_RETRY couldn't trigger, bypass */
1331*4882a593Smuzhiyun return ret;
1332*4882a593Smuzhiyun
1333*4882a593Smuzhiyun /* VM_FAULT_RETRY cannot return errors */
1334*4882a593Smuzhiyun if (!*locked) {
1335*4882a593Smuzhiyun BUG_ON(ret < 0);
1336*4882a593Smuzhiyun BUG_ON(ret >= nr_pages);
1337*4882a593Smuzhiyun }
1338*4882a593Smuzhiyun
1339*4882a593Smuzhiyun if (ret > 0) {
1340*4882a593Smuzhiyun nr_pages -= ret;
1341*4882a593Smuzhiyun pages_done += ret;
1342*4882a593Smuzhiyun if (!nr_pages)
1343*4882a593Smuzhiyun break;
1344*4882a593Smuzhiyun }
1345*4882a593Smuzhiyun if (*locked) {
1346*4882a593Smuzhiyun /*
1347*4882a593Smuzhiyun * VM_FAULT_RETRY didn't trigger or it was a
1348*4882a593Smuzhiyun * FOLL_NOWAIT.
1349*4882a593Smuzhiyun */
1350*4882a593Smuzhiyun if (!pages_done)
1351*4882a593Smuzhiyun pages_done = ret;
1352*4882a593Smuzhiyun break;
1353*4882a593Smuzhiyun }
1354*4882a593Smuzhiyun /*
1355*4882a593Smuzhiyun * VM_FAULT_RETRY triggered, so seek to the faulting offset.
1356*4882a593Smuzhiyun * For the prefault case (!pages) we only update counts.
1357*4882a593Smuzhiyun */
1358*4882a593Smuzhiyun if (likely(pages))
1359*4882a593Smuzhiyun pages += ret;
1360*4882a593Smuzhiyun start += ret << PAGE_SHIFT;
1361*4882a593Smuzhiyun lock_dropped = true;
1362*4882a593Smuzhiyun
1363*4882a593Smuzhiyun retry:
1364*4882a593Smuzhiyun /*
1365*4882a593Smuzhiyun * Repeat on the address that fired VM_FAULT_RETRY
1366*4882a593Smuzhiyun * with both FAULT_FLAG_ALLOW_RETRY and
1367*4882a593Smuzhiyun * FAULT_FLAG_TRIED. Note that GUP can be interrupted
1368*4882a593Smuzhiyun * by fatal signals, so we need to check it before we
1369*4882a593Smuzhiyun * start trying again otherwise it can loop forever.
1370*4882a593Smuzhiyun */
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun if (fatal_signal_pending(current)) {
1373*4882a593Smuzhiyun if (!pages_done)
1374*4882a593Smuzhiyun pages_done = -EINTR;
1375*4882a593Smuzhiyun break;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun ret = mmap_read_lock_killable(mm);
1379*4882a593Smuzhiyun if (ret) {
1380*4882a593Smuzhiyun BUG_ON(ret > 0);
1381*4882a593Smuzhiyun if (!pages_done)
1382*4882a593Smuzhiyun pages_done = ret;
1383*4882a593Smuzhiyun break;
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun *locked = 1;
1387*4882a593Smuzhiyun ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED,
1388*4882a593Smuzhiyun pages, NULL, locked);
1389*4882a593Smuzhiyun if (!*locked) {
1390*4882a593Smuzhiyun 			/* Continue to retry until we succeed */
1391*4882a593Smuzhiyun BUG_ON(ret != 0);
1392*4882a593Smuzhiyun goto retry;
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun if (ret != 1) {
1395*4882a593Smuzhiyun BUG_ON(ret > 1);
1396*4882a593Smuzhiyun if (!pages_done)
1397*4882a593Smuzhiyun pages_done = ret;
1398*4882a593Smuzhiyun break;
1399*4882a593Smuzhiyun }
1400*4882a593Smuzhiyun nr_pages--;
1401*4882a593Smuzhiyun pages_done++;
1402*4882a593Smuzhiyun if (!nr_pages)
1403*4882a593Smuzhiyun break;
1404*4882a593Smuzhiyun if (likely(pages))
1405*4882a593Smuzhiyun pages++;
1406*4882a593Smuzhiyun start += PAGE_SIZE;
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun if (lock_dropped && *locked) {
1409*4882a593Smuzhiyun /*
1410*4882a593Smuzhiyun * We must let the caller know we temporarily dropped the lock
1411*4882a593Smuzhiyun * and so the critical section protected by it was lost.
1412*4882a593Smuzhiyun */
1413*4882a593Smuzhiyun mmap_read_unlock(mm);
1414*4882a593Smuzhiyun *locked = 0;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun return pages_done;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun /**
1420*4882a593Smuzhiyun * populate_vma_page_range() - populate a range of pages in the vma.
1421*4882a593Smuzhiyun * @vma: target vma
1422*4882a593Smuzhiyun * @start: start address
1423*4882a593Smuzhiyun * @end: end address
1424*4882a593Smuzhiyun * @locked: whether the mmap_lock is still held
1425*4882a593Smuzhiyun *
1426*4882a593Smuzhiyun * This takes care of mlocking the pages too if VM_LOCKED is set.
1427*4882a593Smuzhiyun *
1428*4882a593Smuzhiyun * Return either number of pages pinned in the vma, or a negative error
1429*4882a593Smuzhiyun * code on error.
1430*4882a593Smuzhiyun *
1431*4882a593Smuzhiyun * vma->vm_mm->mmap_lock must be held.
1432*4882a593Smuzhiyun *
1433*4882a593Smuzhiyun * If @locked is NULL, it may be held for read or write and will
1434*4882a593Smuzhiyun * be unperturbed.
1435*4882a593Smuzhiyun *
1436*4882a593Smuzhiyun  * If @locked is non-NULL, it must be held for read only and may be
1437*4882a593Smuzhiyun * released. If it's released, *@locked will be set to 0.
1438*4882a593Smuzhiyun */
1439*4882a593Smuzhiyun long populate_vma_page_range(struct vm_area_struct *vma,
1440*4882a593Smuzhiyun unsigned long start, unsigned long end, int *locked)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun struct mm_struct *mm = vma->vm_mm;
1443*4882a593Smuzhiyun unsigned long nr_pages = (end - start) / PAGE_SIZE;
1444*4882a593Smuzhiyun int gup_flags;
1445*4882a593Smuzhiyun
1446*4882a593Smuzhiyun VM_BUG_ON(start & ~PAGE_MASK);
1447*4882a593Smuzhiyun VM_BUG_ON(end & ~PAGE_MASK);
1448*4882a593Smuzhiyun VM_BUG_ON_VMA(start < vma->vm_start, vma);
1449*4882a593Smuzhiyun VM_BUG_ON_VMA(end > vma->vm_end, vma);
1450*4882a593Smuzhiyun mmap_assert_locked(mm);
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
1453*4882a593Smuzhiyun if (vma->vm_flags & VM_LOCKONFAULT)
1454*4882a593Smuzhiyun gup_flags &= ~FOLL_POPULATE;
1455*4882a593Smuzhiyun /*
1456*4882a593Smuzhiyun * We want to touch writable mappings with a write fault in order
1457*4882a593Smuzhiyun * to break COW, except for shared mappings because these don't COW
1458*4882a593Smuzhiyun * and we would not want to dirty them for nothing.
1459*4882a593Smuzhiyun */
1460*4882a593Smuzhiyun if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
1461*4882a593Smuzhiyun gup_flags |= FOLL_WRITE;
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun /*
1464*4882a593Smuzhiyun * We want mlock to succeed for regions that have any permissions
1465*4882a593Smuzhiyun * other than PROT_NONE.
1466*4882a593Smuzhiyun */
1467*4882a593Smuzhiyun if (vma_is_accessible(vma))
1468*4882a593Smuzhiyun gup_flags |= FOLL_FORCE;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /*
1471*4882a593Smuzhiyun * We made sure addr is within a VMA, so the following will
1472*4882a593Smuzhiyun * not result in a stack expansion that recurses back here.
1473*4882a593Smuzhiyun */
1474*4882a593Smuzhiyun return __get_user_pages(mm, start, nr_pages, gup_flags,
1475*4882a593Smuzhiyun NULL, NULL, locked);
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun /*
1479*4882a593Smuzhiyun * __mm_populate - populate and/or mlock pages within a range of address space.
1480*4882a593Smuzhiyun *
1481*4882a593Smuzhiyun * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
1482*4882a593Smuzhiyun * flags. VMAs must be already marked with the desired vm_flags, and
1483*4882a593Smuzhiyun * mmap_lock must not be held.
1484*4882a593Smuzhiyun */
1485*4882a593Smuzhiyun int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
1486*4882a593Smuzhiyun {
1487*4882a593Smuzhiyun struct mm_struct *mm = current->mm;
1488*4882a593Smuzhiyun unsigned long end, nstart, nend;
1489*4882a593Smuzhiyun struct vm_area_struct *vma = NULL;
1490*4882a593Smuzhiyun int locked = 0;
1491*4882a593Smuzhiyun long ret = 0;
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun end = start + len;
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun for (nstart = start; nstart < end; nstart = nend) {
1496*4882a593Smuzhiyun /*
1497*4882a593Smuzhiyun * We want to fault in pages for [nstart; end) address range.
1498*4882a593Smuzhiyun * Find first corresponding VMA.
1499*4882a593Smuzhiyun */
1500*4882a593Smuzhiyun if (!locked) {
1501*4882a593Smuzhiyun locked = 1;
1502*4882a593Smuzhiyun mmap_read_lock(mm);
1503*4882a593Smuzhiyun vma = find_vma(mm, nstart);
1504*4882a593Smuzhiyun } else if (nstart >= vma->vm_end)
1505*4882a593Smuzhiyun vma = vma->vm_next;
1506*4882a593Smuzhiyun if (!vma || vma->vm_start >= end)
1507*4882a593Smuzhiyun break;
1508*4882a593Smuzhiyun /*
1509*4882a593Smuzhiyun * Set [nstart; nend) to intersection of desired address
1510*4882a593Smuzhiyun * range with the first VMA. Also, skip undesirable VMA types.
1511*4882a593Smuzhiyun */
1512*4882a593Smuzhiyun nend = min(end, vma->vm_end);
1513*4882a593Smuzhiyun if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1514*4882a593Smuzhiyun continue;
1515*4882a593Smuzhiyun if (nstart < vma->vm_start)
1516*4882a593Smuzhiyun nstart = vma->vm_start;
1517*4882a593Smuzhiyun /*
1518*4882a593Smuzhiyun * Now fault in a range of pages. populate_vma_page_range()
1519*4882a593Smuzhiyun * double checks the vma flags, so that it won't mlock pages
1520*4882a593Smuzhiyun * if the vma was already munlocked.
1521*4882a593Smuzhiyun */
1522*4882a593Smuzhiyun ret = populate_vma_page_range(vma, nstart, nend, &locked);
1523*4882a593Smuzhiyun if (ret < 0) {
1524*4882a593Smuzhiyun if (ignore_errors) {
1525*4882a593Smuzhiyun ret = 0;
1526*4882a593Smuzhiyun continue; /* continue at next VMA */
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun break;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun nend = nstart + ret * PAGE_SIZE;
1531*4882a593Smuzhiyun ret = 0;
1532*4882a593Smuzhiyun }
1533*4882a593Smuzhiyun if (locked)
1534*4882a593Smuzhiyun mmap_read_unlock(mm);
1535*4882a593Smuzhiyun return ret; /* 0 or negative error code */
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun #else /* CONFIG_MMU */
1538*4882a593Smuzhiyun static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start,
1539*4882a593Smuzhiyun unsigned long nr_pages, struct page **pages,
1540*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked,
1541*4882a593Smuzhiyun unsigned int foll_flags)
1542*4882a593Smuzhiyun {
1543*4882a593Smuzhiyun struct vm_area_struct *vma;
1544*4882a593Smuzhiyun unsigned long vm_flags;
1545*4882a593Smuzhiyun int i;
1546*4882a593Smuzhiyun
1547*4882a593Smuzhiyun /* calculate required read or write permissions.
1548*4882a593Smuzhiyun * If FOLL_FORCE is set, we only require the "MAY" flags.
1549*4882a593Smuzhiyun */
1550*4882a593Smuzhiyun vm_flags = (foll_flags & FOLL_WRITE) ?
1551*4882a593Smuzhiyun (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1552*4882a593Smuzhiyun vm_flags &= (foll_flags & FOLL_FORCE) ?
1553*4882a593Smuzhiyun (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1554*4882a593Smuzhiyun
1555*4882a593Smuzhiyun for (i = 0; i < nr_pages; i++) {
1556*4882a593Smuzhiyun vma = find_vma(mm, start);
1557*4882a593Smuzhiyun if (!vma)
1558*4882a593Smuzhiyun goto finish_or_fault;
1559*4882a593Smuzhiyun
1560*4882a593Smuzhiyun /* protect what we can, including chardevs */
1561*4882a593Smuzhiyun if ((vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1562*4882a593Smuzhiyun !(vm_flags & vma->vm_flags))
1563*4882a593Smuzhiyun goto finish_or_fault;
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun if (pages) {
1566*4882a593Smuzhiyun pages[i] = virt_to_page(start);
1567*4882a593Smuzhiyun if (pages[i])
1568*4882a593Smuzhiyun get_page(pages[i]);
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun if (vmas)
1571*4882a593Smuzhiyun vmas[i] = vma;
1572*4882a593Smuzhiyun start = (start + PAGE_SIZE) & PAGE_MASK;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun return i;
1576*4882a593Smuzhiyun
1577*4882a593Smuzhiyun finish_or_fault:
1578*4882a593Smuzhiyun return i ? : -EFAULT;
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun #endif /* !CONFIG_MMU */
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun /**
1583*4882a593Smuzhiyun * get_dump_page() - pin user page in memory while writing it to core dump
1584*4882a593Smuzhiyun * @addr: user address
1585*4882a593Smuzhiyun *
1586*4882a593Smuzhiyun * Returns struct page pointer of user page pinned for dump,
1587*4882a593Smuzhiyun * to be freed afterwards by put_page().
1588*4882a593Smuzhiyun *
1589*4882a593Smuzhiyun * Returns NULL on any kind of failure - a hole must then be inserted into
1590*4882a593Smuzhiyun * the corefile, to preserve alignment with its headers; and also returns
1591*4882a593Smuzhiyun * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
1592*4882a593Smuzhiyun * allowing a hole to be left in the corefile to save diskspace.
1593*4882a593Smuzhiyun *
1594*4882a593Smuzhiyun * Called without mmap_lock (takes and releases the mmap_lock by itself).
1595*4882a593Smuzhiyun */
1596*4882a593Smuzhiyun #ifdef CONFIG_ELF_CORE
1597*4882a593Smuzhiyun struct page *get_dump_page(unsigned long addr)
1598*4882a593Smuzhiyun {
1599*4882a593Smuzhiyun struct mm_struct *mm = current->mm;
1600*4882a593Smuzhiyun struct page *page;
1601*4882a593Smuzhiyun int locked = 1;
1602*4882a593Smuzhiyun int ret;
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun if (mmap_read_lock_killable(mm))
1605*4882a593Smuzhiyun return NULL;
1606*4882a593Smuzhiyun ret = __get_user_pages_locked(mm, addr, 1, &page, NULL, &locked,
1607*4882a593Smuzhiyun FOLL_FORCE | FOLL_DUMP | FOLL_GET);
1608*4882a593Smuzhiyun if (locked)
1609*4882a593Smuzhiyun mmap_read_unlock(mm);
1610*4882a593Smuzhiyun return (ret == 1) ? page : NULL;
1611*4882a593Smuzhiyun }
1612*4882a593Smuzhiyun #endif /* CONFIG_ELF_CORE */
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun #ifdef CONFIG_CMA
1615*4882a593Smuzhiyun static long check_and_migrate_cma_pages(struct mm_struct *mm,
1616*4882a593Smuzhiyun unsigned long start,
1617*4882a593Smuzhiyun unsigned long nr_pages,
1618*4882a593Smuzhiyun struct page **pages,
1619*4882a593Smuzhiyun struct vm_area_struct **vmas,
1620*4882a593Smuzhiyun unsigned int gup_flags)
1621*4882a593Smuzhiyun {
1622*4882a593Smuzhiyun unsigned long i, isolation_error_count;
1623*4882a593Smuzhiyun bool drain_allow;
1624*4882a593Smuzhiyun LIST_HEAD(cma_page_list);
1625*4882a593Smuzhiyun long ret = nr_pages;
1626*4882a593Smuzhiyun struct page *prev_head, *head;
1627*4882a593Smuzhiyun struct migration_target_control mtc = {
1628*4882a593Smuzhiyun .nid = NUMA_NO_NODE,
1629*4882a593Smuzhiyun .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_NOWARN,
1630*4882a593Smuzhiyun };
1631*4882a593Smuzhiyun
1632*4882a593Smuzhiyun check_again:
1633*4882a593Smuzhiyun prev_head = NULL;
1634*4882a593Smuzhiyun isolation_error_count = 0;
1635*4882a593Smuzhiyun drain_allow = true;
1636*4882a593Smuzhiyun for (i = 0; i < nr_pages; i++) {
1637*4882a593Smuzhiyun head = compound_head(pages[i]);
1638*4882a593Smuzhiyun if (head == prev_head)
1639*4882a593Smuzhiyun continue;
1640*4882a593Smuzhiyun prev_head = head;
1641*4882a593Smuzhiyun /*
1642*4882a593Smuzhiyun * If we get a page from the CMA zone, since we are going to
1643*4882a593Smuzhiyun * be pinning these entries, we might as well move them out
1644*4882a593Smuzhiyun * of the CMA zone if possible.
1645*4882a593Smuzhiyun */
1646*4882a593Smuzhiyun if (is_migrate_cma_page(head)) {
1647*4882a593Smuzhiyun if (PageHuge(head)) {
1648*4882a593Smuzhiyun if (!isolate_huge_page(head, &cma_page_list))
1649*4882a593Smuzhiyun isolation_error_count++;
1650*4882a593Smuzhiyun } else {
1651*4882a593Smuzhiyun if (!PageLRU(head) && drain_allow) {
1652*4882a593Smuzhiyun lru_add_drain_all();
1653*4882a593Smuzhiyun drain_allow = false;
1654*4882a593Smuzhiyun }
1655*4882a593Smuzhiyun
1656*4882a593Smuzhiyun if (isolate_lru_page(head)) {
1657*4882a593Smuzhiyun isolation_error_count++;
1658*4882a593Smuzhiyun continue;
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun list_add_tail(&head->lru, &cma_page_list);
1661*4882a593Smuzhiyun mod_node_page_state(page_pgdat(head),
1662*4882a593Smuzhiyun NR_ISOLATED_ANON +
1663*4882a593Smuzhiyun page_is_file_lru(head),
1664*4882a593Smuzhiyun thp_nr_pages(head));
1665*4882a593Smuzhiyun }
1666*4882a593Smuzhiyun }
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun
1669*4882a593Smuzhiyun /*
1670*4882a593Smuzhiyun * If list is empty, and no isolation errors, means that all pages are
1671*4882a593Smuzhiyun * in the correct zone.
1672*4882a593Smuzhiyun */
1673*4882a593Smuzhiyun if (list_empty(&cma_page_list) && !isolation_error_count)
1674*4882a593Smuzhiyun return ret;
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun if (!list_empty(&cma_page_list)) {
1677*4882a593Smuzhiyun /*
1678*4882a593Smuzhiyun * drop the above get_user_pages reference.
1679*4882a593Smuzhiyun */
1680*4882a593Smuzhiyun if (gup_flags & FOLL_PIN)
1681*4882a593Smuzhiyun unpin_user_pages(pages, nr_pages);
1682*4882a593Smuzhiyun else
1683*4882a593Smuzhiyun for (i = 0; i < nr_pages; i++)
1684*4882a593Smuzhiyun put_page(pages[i]);
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun ret = migrate_pages(&cma_page_list, alloc_migration_target,
1687*4882a593Smuzhiyun NULL, (unsigned long)&mtc, MIGRATE_SYNC,
1688*4882a593Smuzhiyun MR_CONTIG_RANGE);
1689*4882a593Smuzhiyun if (ret) {
1690*4882a593Smuzhiyun if (!list_empty(&cma_page_list))
1691*4882a593Smuzhiyun putback_movable_pages(&cma_page_list);
1692*4882a593Smuzhiyun return ret > 0 ? -ENOMEM : ret;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun
1695*4882a593Smuzhiyun /* We unpinned pages before migration, pin them again */
1696*4882a593Smuzhiyun ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1697*4882a593Smuzhiyun NULL, gup_flags);
1698*4882a593Smuzhiyun if (ret <= 0)
1699*4882a593Smuzhiyun return ret;
1700*4882a593Smuzhiyun nr_pages = ret;
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun /*
1704*4882a593Smuzhiyun * check again because pages were unpinned, and we also might have
1705*4882a593Smuzhiyun * had isolation errors and need more pages to migrate.
1706*4882a593Smuzhiyun */
1707*4882a593Smuzhiyun goto check_again;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun #else
1710*4882a593Smuzhiyun static long check_and_migrate_cma_pages(struct mm_struct *mm,
1711*4882a593Smuzhiyun unsigned long start,
1712*4882a593Smuzhiyun unsigned long nr_pages,
1713*4882a593Smuzhiyun struct page **pages,
1714*4882a593Smuzhiyun struct vm_area_struct **vmas,
1715*4882a593Smuzhiyun unsigned int gup_flags)
1716*4882a593Smuzhiyun {
1717*4882a593Smuzhiyun return nr_pages;
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun #endif /* CONFIG_CMA */
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun /*
1722*4882a593Smuzhiyun * __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
1723*4882a593Smuzhiyun * allows us to process the FOLL_LONGTERM flag.
1724*4882a593Smuzhiyun */
1725*4882a593Smuzhiyun static long __gup_longterm_locked(struct mm_struct *mm,
1726*4882a593Smuzhiyun unsigned long start,
1727*4882a593Smuzhiyun unsigned long nr_pages,
1728*4882a593Smuzhiyun struct page **pages,
1729*4882a593Smuzhiyun struct vm_area_struct **vmas,
1730*4882a593Smuzhiyun unsigned int gup_flags)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun unsigned long flags = 0;
1733*4882a593Smuzhiyun long rc;
1734*4882a593Smuzhiyun
1735*4882a593Smuzhiyun if (gup_flags & FOLL_LONGTERM)
1736*4882a593Smuzhiyun flags = memalloc_nocma_save();
1737*4882a593Smuzhiyun
1738*4882a593Smuzhiyun rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL,
1739*4882a593Smuzhiyun gup_flags);
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun if (gup_flags & FOLL_LONGTERM) {
1742*4882a593Smuzhiyun if (rc > 0)
1743*4882a593Smuzhiyun rc = check_and_migrate_cma_pages(mm, start, rc, pages,
1744*4882a593Smuzhiyun vmas, gup_flags);
1745*4882a593Smuzhiyun memalloc_nocma_restore(flags);
1746*4882a593Smuzhiyun }
1747*4882a593Smuzhiyun return rc;
1748*4882a593Smuzhiyun }
1749*4882a593Smuzhiyun
1750*4882a593Smuzhiyun static bool is_valid_gup_flags(unsigned int gup_flags)
1751*4882a593Smuzhiyun {
1752*4882a593Smuzhiyun /*
1753*4882a593Smuzhiyun * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1754*4882a593Smuzhiyun * never directly by the caller, so enforce that with an assertion:
1755*4882a593Smuzhiyun */
1756*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1757*4882a593Smuzhiyun return false;
1758*4882a593Smuzhiyun /*
1759*4882a593Smuzhiyun * FOLL_PIN is a prerequisite to FOLL_LONGTERM. Another way of saying
1760*4882a593Smuzhiyun 	 * that is, FOLL_LONGTERM is a specific, more restrictive case of
1761*4882a593Smuzhiyun * FOLL_PIN.
1762*4882a593Smuzhiyun */
1763*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1764*4882a593Smuzhiyun return false;
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun return true;
1767*4882a593Smuzhiyun }
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun #ifdef CONFIG_MMU
1770*4882a593Smuzhiyun static long __get_user_pages_remote(struct mm_struct *mm,
1771*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
1772*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1773*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
1774*4882a593Smuzhiyun {
1775*4882a593Smuzhiyun /*
1776*4882a593Smuzhiyun * Parts of FOLL_LONGTERM behavior are incompatible with
1777*4882a593Smuzhiyun * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1778*4882a593Smuzhiyun * vmas. However, this only comes up if locked is set, and there are
1779*4882a593Smuzhiyun * callers that do request FOLL_LONGTERM, but do not set locked. So,
1780*4882a593Smuzhiyun * allow what we can.
1781*4882a593Smuzhiyun */
1782*4882a593Smuzhiyun if (gup_flags & FOLL_LONGTERM) {
1783*4882a593Smuzhiyun if (WARN_ON_ONCE(locked))
1784*4882a593Smuzhiyun return -EINVAL;
1785*4882a593Smuzhiyun /*
1786*4882a593Smuzhiyun * This will check the vmas (even if our vmas arg is NULL)
1787*4882a593Smuzhiyun * and return -ENOTSUPP if DAX isn't allowed in this case:
1788*4882a593Smuzhiyun */
1789*4882a593Smuzhiyun return __gup_longterm_locked(mm, start, nr_pages, pages,
1790*4882a593Smuzhiyun vmas, gup_flags | FOLL_TOUCH |
1791*4882a593Smuzhiyun FOLL_REMOTE);
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1795*4882a593Smuzhiyun locked,
1796*4882a593Smuzhiyun gup_flags | FOLL_TOUCH | FOLL_REMOTE);
1797*4882a593Smuzhiyun }
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun /**
1800*4882a593Smuzhiyun * get_user_pages_remote() - pin user pages in memory
1801*4882a593Smuzhiyun * @mm: mm_struct of target mm
1802*4882a593Smuzhiyun * @start: starting user address
1803*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
1804*4882a593Smuzhiyun * @gup_flags: flags modifying lookup behaviour
1805*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
1806*4882a593Smuzhiyun * Should be at least nr_pages long. Or NULL, if caller
1807*4882a593Smuzhiyun * only intends to ensure the pages are faulted in.
1808*4882a593Smuzhiyun * @vmas: array of pointers to vmas corresponding to each page.
1809*4882a593Smuzhiyun * Or NULL if the caller does not require them.
1810*4882a593Smuzhiyun * @locked: pointer to lock flag indicating whether lock is held and
1811*4882a593Smuzhiyun * subsequently whether VM_FAULT_RETRY functionality can be
1812*4882a593Smuzhiyun * utilised. Lock must initially be held.
1813*4882a593Smuzhiyun *
1814*4882a593Smuzhiyun * Returns either number of pages pinned (which may be less than the
1815*4882a593Smuzhiyun * number requested), or an error. Details about the return value:
1816*4882a593Smuzhiyun *
1817*4882a593Smuzhiyun * -- If nr_pages is 0, returns 0.
1818*4882a593Smuzhiyun * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1819*4882a593Smuzhiyun * -- If nr_pages is >0, and some pages were pinned, returns the number of
1820*4882a593Smuzhiyun * pages pinned. Again, this may be less than nr_pages.
1821*4882a593Smuzhiyun *
1822*4882a593Smuzhiyun * The caller is responsible for releasing returned @pages, via put_page().
1823*4882a593Smuzhiyun *
1824*4882a593Smuzhiyun * @vmas are valid only as long as mmap_lock is held.
1825*4882a593Smuzhiyun *
1826*4882a593Smuzhiyun * Must be called with mmap_lock held for read or write.
1827*4882a593Smuzhiyun *
1828*4882a593Smuzhiyun * get_user_pages_remote walks a process's page tables and takes a reference
1829*4882a593Smuzhiyun * to each struct page that each user address corresponds to at a given
1830*4882a593Smuzhiyun * instant. That is, it takes the page that would be accessed if a user
1831*4882a593Smuzhiyun * thread accesses the given user virtual address at that instant.
1832*4882a593Smuzhiyun *
1833*4882a593Smuzhiyun * This does not guarantee that the page exists in the user mappings when
1834*4882a593Smuzhiyun * get_user_pages_remote returns, and there may even be a completely different
1835*4882a593Smuzhiyun  * page there in some cases (e.g. if mmapped pagecache has been invalidated
1836*4882a593Smuzhiyun  * and subsequently re-faulted). However it does guarantee that the page
1837*4882a593Smuzhiyun * won't be freed completely. And mostly callers simply care that the page
1838*4882a593Smuzhiyun * contains data that was valid *at some point in time*. Typically, an IO
1839*4882a593Smuzhiyun * or similar operation cannot guarantee anything stronger anyway because
1840*4882a593Smuzhiyun * locks can't be held over the syscall boundary.
1841*4882a593Smuzhiyun *
1842*4882a593Smuzhiyun * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
1843*4882a593Smuzhiyun * is written to, set_page_dirty (or set_page_dirty_lock, as appropriate) must
1844*4882a593Smuzhiyun * be called after the page is finished with, and before put_page is called.
1845*4882a593Smuzhiyun *
1846*4882a593Smuzhiyun * get_user_pages_remote is typically used for fewer-copy IO operations,
1847*4882a593Smuzhiyun * to get a handle on the memory by some means other than accesses
1848*4882a593Smuzhiyun * via the user virtual addresses. The pages may be submitted for
1849*4882a593Smuzhiyun * DMA to devices or accessed via their kernel linear mapping (via the
1850*4882a593Smuzhiyun * kmap APIs). Care should be taken to use the correct cache flushing APIs.
1851*4882a593Smuzhiyun *
1852*4882a593Smuzhiyun * See also get_user_pages_fast, for performance critical applications.
1853*4882a593Smuzhiyun *
1854*4882a593Smuzhiyun * get_user_pages_remote should be phased out in favor of
1855*4882a593Smuzhiyun * get_user_pages_locked|unlocked or get_user_pages_fast. Nothing
1856*4882a593Smuzhiyun * should use get_user_pages_remote because it cannot pass
1857*4882a593Smuzhiyun * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
1858*4882a593Smuzhiyun */
1859*4882a593Smuzhiyun long get_user_pages_remote(struct mm_struct *mm,
1860*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
1861*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1862*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
1863*4882a593Smuzhiyun {
1864*4882a593Smuzhiyun if (!is_valid_gup_flags(gup_flags))
1865*4882a593Smuzhiyun return -EINVAL;
1866*4882a593Smuzhiyun
1867*4882a593Smuzhiyun return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
1868*4882a593Smuzhiyun pages, vmas, locked);
1869*4882a593Smuzhiyun }
1870*4882a593Smuzhiyun EXPORT_SYMBOL(get_user_pages_remote);
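
/*
 * Illustrative sketch (hypothetical caller, not part of this file): reading
 * one page of another process's address space, following the rules in the
 * kernel-doc above. remote_mm and addr are placeholders.
 *
 *	struct page *page;
 *	long got;
 *
 *	mmap_read_lock(remote_mm);
 *	got = get_user_pages_remote(remote_mm, addr, 1, FOLL_FORCE, &page,
 *				    NULL, NULL);
 *	mmap_read_unlock(remote_mm);
 *	if (got == 1) {
 *		void *kaddr = kmap(page);
 *		... copy data out of kaddr ...
 *		kunmap(page);
 *		put_page(page);
 *	}
 */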
1871*4882a593Smuzhiyun
1872*4882a593Smuzhiyun #else /* CONFIG_MMU */
1873*4882a593Smuzhiyun long get_user_pages_remote(struct mm_struct *mm,
1874*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
1875*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1876*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
1877*4882a593Smuzhiyun {
1878*4882a593Smuzhiyun return 0;
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun
1881*4882a593Smuzhiyun static long __get_user_pages_remote(struct mm_struct *mm,
1882*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
1883*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1884*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
1885*4882a593Smuzhiyun {
1886*4882a593Smuzhiyun return 0;
1887*4882a593Smuzhiyun }
1888*4882a593Smuzhiyun #endif /* !CONFIG_MMU */
1889*4882a593Smuzhiyun
1890*4882a593Smuzhiyun /**
1891*4882a593Smuzhiyun * get_user_pages() - pin user pages in memory
1892*4882a593Smuzhiyun * @start: starting user address
1893*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
1894*4882a593Smuzhiyun * @gup_flags: flags modifying lookup behaviour
1895*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
1896*4882a593Smuzhiyun * Should be at least nr_pages long. Or NULL, if caller
1897*4882a593Smuzhiyun * only intends to ensure the pages are faulted in.
1898*4882a593Smuzhiyun * @vmas: array of pointers to vmas corresponding to each page.
1899*4882a593Smuzhiyun * Or NULL if the caller does not require them.
1900*4882a593Smuzhiyun *
1901*4882a593Smuzhiyun * This is the same as get_user_pages_remote(), just with a less-flexible
1902*4882a593Smuzhiyun * calling convention where we assume that the mm being operated on belongs to
1903*4882a593Smuzhiyun * the current task, and doesn't allow passing of a locked parameter. We also
1904*4882a593Smuzhiyun * obviously don't pass FOLL_REMOTE in here.
1905*4882a593Smuzhiyun */
1906*4882a593Smuzhiyun long get_user_pages(unsigned long start, unsigned long nr_pages,
1907*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1908*4882a593Smuzhiyun struct vm_area_struct **vmas)
1909*4882a593Smuzhiyun {
1910*4882a593Smuzhiyun if (!is_valid_gup_flags(gup_flags))
1911*4882a593Smuzhiyun return -EINVAL;
1912*4882a593Smuzhiyun
1913*4882a593Smuzhiyun return __gup_longterm_locked(current->mm, start, nr_pages,
1914*4882a593Smuzhiyun pages, vmas, gup_flags | FOLL_TOUCH);
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun EXPORT_SYMBOL(get_user_pages);
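
/*
 * Illustrative sketch (hypothetical caller): pinning a user buffer of the
 * current task for I/O and releasing it afterwards, per the kernel-doc
 * above. uaddr, npages and pages[] are placeholders.
 *
 *	mmap_read_lock(current->mm);
 *	pinned = get_user_pages(uaddr, npages, FOLL_WRITE, pages, NULL);
 *	mmap_read_unlock(current->mm);
 *	if (pinned < 0)
 *		return pinned;
 *	... perform the I/O into the pages ...
 *	for (i = 0; i < pinned; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */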
1917*4882a593Smuzhiyun
1918*4882a593Smuzhiyun /**
1919*4882a593Smuzhiyun * get_user_pages_locked() is suitable to replace the form:
1920*4882a593Smuzhiyun *
1921*4882a593Smuzhiyun * mmap_read_lock(mm);
1922*4882a593Smuzhiyun * do_something()
1923*4882a593Smuzhiyun  *      get_user_pages(..., pages, NULL);
1924*4882a593Smuzhiyun * mmap_read_unlock(mm);
1925*4882a593Smuzhiyun *
1926*4882a593Smuzhiyun * to:
1927*4882a593Smuzhiyun *
1928*4882a593Smuzhiyun * int locked = 1;
1929*4882a593Smuzhiyun * mmap_read_lock(mm);
1930*4882a593Smuzhiyun * do_something()
1931*4882a593Smuzhiyun  *      get_user_pages_locked(..., pages, &locked);
1932*4882a593Smuzhiyun * if (locked)
1933*4882a593Smuzhiyun * mmap_read_unlock(mm);
1934*4882a593Smuzhiyun *
1935*4882a593Smuzhiyun * @start: starting user address
1936*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
1937*4882a593Smuzhiyun * @gup_flags: flags modifying lookup behaviour
1938*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
1939*4882a593Smuzhiyun * Should be at least nr_pages long. Or NULL, if caller
1940*4882a593Smuzhiyun * only intends to ensure the pages are faulted in.
1941*4882a593Smuzhiyun * @locked: pointer to lock flag indicating whether lock is held and
1942*4882a593Smuzhiyun * subsequently whether VM_FAULT_RETRY functionality can be
1943*4882a593Smuzhiyun * utilised. Lock must initially be held.
1944*4882a593Smuzhiyun *
1945*4882a593Smuzhiyun * We can leverage the VM_FAULT_RETRY functionality in the page fault
1946*4882a593Smuzhiyun * paths better by using either get_user_pages_locked() or
1947*4882a593Smuzhiyun * get_user_pages_unlocked().
1948*4882a593Smuzhiyun *
1949*4882a593Smuzhiyun */
1950*4882a593Smuzhiyun long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1951*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
1952*4882a593Smuzhiyun int *locked)
1953*4882a593Smuzhiyun {
1954*4882a593Smuzhiyun /*
1955*4882a593Smuzhiyun * FIXME: Current FOLL_LONGTERM behavior is incompatible with
1956*4882a593Smuzhiyun * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
1957*4882a593Smuzhiyun * vmas. As there are no users of this flag in this call we simply
1958*4882a593Smuzhiyun * disallow this option for now.
1959*4882a593Smuzhiyun */
1960*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
1961*4882a593Smuzhiyun return -EINVAL;
1962*4882a593Smuzhiyun /*
1963*4882a593Smuzhiyun * FOLL_PIN must only be set internally by the pin_user_pages*() APIs,
1964*4882a593Smuzhiyun * never directly by the caller, so enforce that:
1965*4882a593Smuzhiyun */
1966*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_PIN))
1967*4882a593Smuzhiyun return -EINVAL;
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun return __get_user_pages_locked(current->mm, start, nr_pages,
1970*4882a593Smuzhiyun pages, NULL, locked,
1971*4882a593Smuzhiyun gup_flags | FOLL_TOUCH);
1972*4882a593Smuzhiyun }
1973*4882a593Smuzhiyun EXPORT_SYMBOL(get_user_pages_locked);
1974*4882a593Smuzhiyun
1975*4882a593Smuzhiyun /*
1976*4882a593Smuzhiyun * get_user_pages_unlocked() is suitable to replace the form:
1977*4882a593Smuzhiyun *
1978*4882a593Smuzhiyun * mmap_read_lock(mm);
1979*4882a593Smuzhiyun  *      get_user_pages(..., pages, NULL);
1980*4882a593Smuzhiyun * mmap_read_unlock(mm);
1981*4882a593Smuzhiyun *
1982*4882a593Smuzhiyun * with:
1983*4882a593Smuzhiyun *
1984*4882a593Smuzhiyun  *      get_user_pages_unlocked(..., pages);
1985*4882a593Smuzhiyun *
1986*4882a593Smuzhiyun * It is functionally equivalent to get_user_pages_fast so
1987*4882a593Smuzhiyun * get_user_pages_fast should be used instead if specific gup_flags
1988*4882a593Smuzhiyun * (e.g. FOLL_FORCE) are not required.
1989*4882a593Smuzhiyun */
1990*4882a593Smuzhiyun long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1991*4882a593Smuzhiyun struct page **pages, unsigned int gup_flags)
1992*4882a593Smuzhiyun {
1993*4882a593Smuzhiyun struct mm_struct *mm = current->mm;
1994*4882a593Smuzhiyun int locked = 1;
1995*4882a593Smuzhiyun long ret;
1996*4882a593Smuzhiyun
1997*4882a593Smuzhiyun /*
1998*4882a593Smuzhiyun * FIXME: Current FOLL_LONGTERM behavior is incompatible with
1999*4882a593Smuzhiyun * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
2000*4882a593Smuzhiyun * vmas. As there are no users of this flag in this call we simply
2001*4882a593Smuzhiyun * disallow this option for now.
2002*4882a593Smuzhiyun */
2003*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
2004*4882a593Smuzhiyun return -EINVAL;
2005*4882a593Smuzhiyun
2006*4882a593Smuzhiyun mmap_read_lock(mm);
2007*4882a593Smuzhiyun ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
2008*4882a593Smuzhiyun &locked, gup_flags | FOLL_TOUCH);
2009*4882a593Smuzhiyun if (locked)
2010*4882a593Smuzhiyun mmap_read_unlock(mm);
2011*4882a593Smuzhiyun return ret;
2012*4882a593Smuzhiyun }
2013*4882a593Smuzhiyun EXPORT_SYMBOL(get_user_pages_unlocked);
2014*4882a593Smuzhiyun
2015*4882a593Smuzhiyun /*
2016*4882a593Smuzhiyun * Fast GUP
2017*4882a593Smuzhiyun *
2018*4882a593Smuzhiyun * get_user_pages_fast attempts to pin user pages by walking the page
2019*4882a593Smuzhiyun * tables directly and avoids taking locks. Thus the walker needs to be
2020*4882a593Smuzhiyun * protected from page table pages being freed from under it, and should
2021*4882a593Smuzhiyun * block any THP splits.
2022*4882a593Smuzhiyun *
2023*4882a593Smuzhiyun * One way to achieve this is to have the walker disable interrupts, and
2024*4882a593Smuzhiyun * rely on IPIs from the TLB flushing code blocking before the page table
2025*4882a593Smuzhiyun * pages are freed. This is unsuitable for architectures that do not need
2026*4882a593Smuzhiyun * to broadcast an IPI when invalidating TLBs.
2027*4882a593Smuzhiyun *
2028*4882a593Smuzhiyun * Another way to achieve this is to batch up page table containing pages
2029*4882a593Smuzhiyun * belonging to more than one mm_user, then rcu_sched a callback to free those
2030*4882a593Smuzhiyun * pages. Disabling interrupts will allow the fast_gup walker to both block
2031*4882a593Smuzhiyun * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
2032*4882a593Smuzhiyun * (which is a relatively rare event). The code below adopts this strategy.
2033*4882a593Smuzhiyun *
2034*4882a593Smuzhiyun * Before activating this code, please be aware that the following assumptions
2035*4882a593Smuzhiyun * are currently made:
2036*4882a593Smuzhiyun *
2037*4882a593Smuzhiyun * *) Either MMU_GATHER_RCU_TABLE_FREE is enabled, and tlb_remove_table() is used to
2038*4882a593Smuzhiyun * free pages containing page tables or TLB flushing requires IPI broadcast.
2039*4882a593Smuzhiyun *
2040*4882a593Smuzhiyun * *) ptes can be read atomically by the architecture.
2041*4882a593Smuzhiyun *
2042*4882a593Smuzhiyun * *) access_ok is sufficient to validate userspace address ranges.
2043*4882a593Smuzhiyun *
2044*4882a593Smuzhiyun * The last two assumptions can be relaxed by the addition of helper functions.
2045*4882a593Smuzhiyun *
2046*4882a593Smuzhiyun * This code is based heavily on the PowerPC implementation by Nick Piggin.
2047*4882a593Smuzhiyun */
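
/*
 * Illustrative sketch (hypothetical caller): the fast path defined later in
 * this file is typically used as a lockless drop-in for short pinned
 * accesses. uaddr, npages and pages[] are placeholders.
 *
 *	pinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
 *	if (pinned > 0) {
 *		... use the pages ...
 *		for (i = 0; i < pinned; i++)
 *			put_page(pages[i]);
 *	}
 */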
2048*4882a593Smuzhiyun #ifdef CONFIG_HAVE_FAST_GUP
2049*4882a593Smuzhiyun #ifdef CONFIG_GUP_GET_PTE_LOW_HIGH
2050*4882a593Smuzhiyun
2051*4882a593Smuzhiyun /*
2052*4882a593Smuzhiyun * WARNING: only to be used in the get_user_pages_fast() implementation.
2053*4882a593Smuzhiyun *
2054*4882a593Smuzhiyun * With get_user_pages_fast(), we walk down the pagetables without taking any
2055*4882a593Smuzhiyun * locks. For this we would like to load the pointers atomically, but sometimes
2056*4882a593Smuzhiyun * that is not possible (e.g. without expensive cmpxchg8b on x86_32 PAE). What
2057*4882a593Smuzhiyun * we do have is the guarantee that a PTE will only either go from not present
2058*4882a593Smuzhiyun * to present, or present to not present or both -- it will not switch to a
2059*4882a593Smuzhiyun * completely different present page without a TLB flush in between; something
2060*4882a593Smuzhiyun * that we are blocking by holding interrupts off.
2061*4882a593Smuzhiyun *
2062*4882a593Smuzhiyun * Setting ptes from not present to present goes:
2063*4882a593Smuzhiyun *
2064*4882a593Smuzhiyun * ptep->pte_high = h;
2065*4882a593Smuzhiyun * smp_wmb();
2066*4882a593Smuzhiyun * ptep->pte_low = l;
2067*4882a593Smuzhiyun *
2068*4882a593Smuzhiyun * And present to not present goes:
2069*4882a593Smuzhiyun *
2070*4882a593Smuzhiyun * ptep->pte_low = 0;
2071*4882a593Smuzhiyun * smp_wmb();
2072*4882a593Smuzhiyun * ptep->pte_high = 0;
2073*4882a593Smuzhiyun *
2074*4882a593Smuzhiyun * We must ensure here that the load of pte_low sees 'l' IFF pte_high sees 'h'.
2075*4882a593Smuzhiyun * We load pte_high *after* loading pte_low, which ensures we don't see an older
2076*4882a593Smuzhiyun * value of pte_high. *Then* we recheck pte_low, which ensures that we haven't
2077*4882a593Smuzhiyun * picked up a changed pte high. We might have gotten rubbish values from
2078*4882a593Smuzhiyun * pte_low and pte_high, but we are guaranteed that pte_low will not have the
2079*4882a593Smuzhiyun * present bit set *unless* it is 'l'. Because get_user_pages_fast() only
2080*4882a593Smuzhiyun * operates on present ptes we're safe.
2081*4882a593Smuzhiyun */
2082*4882a593Smuzhiyun static inline pte_t gup_get_pte(pte_t *ptep)
2083*4882a593Smuzhiyun {
2084*4882a593Smuzhiyun pte_t pte;
2085*4882a593Smuzhiyun
2086*4882a593Smuzhiyun do {
2087*4882a593Smuzhiyun pte.pte_low = ptep->pte_low;
2088*4882a593Smuzhiyun smp_rmb();
2089*4882a593Smuzhiyun pte.pte_high = ptep->pte_high;
2090*4882a593Smuzhiyun smp_rmb();
2091*4882a593Smuzhiyun } while (unlikely(pte.pte_low != ptep->pte_low));
2092*4882a593Smuzhiyun
2093*4882a593Smuzhiyun return pte;
2094*4882a593Smuzhiyun }
2095*4882a593Smuzhiyun #else /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2096*4882a593Smuzhiyun /*
2097*4882a593Smuzhiyun * We require that the PTE can be read atomically.
2098*4882a593Smuzhiyun */
2099*4882a593Smuzhiyun static inline pte_t gup_get_pte(pte_t *ptep)
2100*4882a593Smuzhiyun {
2101*4882a593Smuzhiyun return ptep_get(ptep);
2102*4882a593Smuzhiyun }
2103*4882a593Smuzhiyun #endif /* CONFIG_GUP_GET_PTE_LOW_HIGH */
2104*4882a593Smuzhiyun
2105*4882a593Smuzhiyun static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
2106*4882a593Smuzhiyun unsigned int flags,
2107*4882a593Smuzhiyun struct page **pages)
2108*4882a593Smuzhiyun {
2109*4882a593Smuzhiyun while ((*nr) - nr_start) {
2110*4882a593Smuzhiyun struct page *page = pages[--(*nr)];
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun ClearPageReferenced(page);
2113*4882a593Smuzhiyun if (flags & FOLL_PIN)
2114*4882a593Smuzhiyun unpin_user_page(page);
2115*4882a593Smuzhiyun else
2116*4882a593Smuzhiyun put_page(page);
2117*4882a593Smuzhiyun }
2118*4882a593Smuzhiyun }
2119*4882a593Smuzhiyun
2120*4882a593Smuzhiyun #ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
2121*4882a593Smuzhiyun /*
2122*4882a593Smuzhiyun * Fast-gup relies on pte change detection to avoid concurrent pgtable
2123*4882a593Smuzhiyun * operations.
2124*4882a593Smuzhiyun *
2125*4882a593Smuzhiyun * To pin the page, fast-gup needs to do below in order:
2126*4882a593Smuzhiyun * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2127*4882a593Smuzhiyun *
2128*4882a593Smuzhiyun * For the rest of pgtable operations where pgtable updates can be racy
2129*4882a593Smuzhiyun * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2130*4882a593Smuzhiyun * is pinned.
2131*4882a593Smuzhiyun *
2132*4882a593Smuzhiyun * Above will work for all pte-level operations, including THP split.
2133*4882a593Smuzhiyun *
2134*4882a593Smuzhiyun * For THP collapse, it's a bit more complicated because fast-gup may be
2135*4882a593Smuzhiyun * walking a pgtable page that is being freed (pte is still valid but pmd
2136*4882a593Smuzhiyun * can be cleared already). To avoid race in such condition, we need to
2137*4882a593Smuzhiyun * also check pmd here to make sure pmd doesn't change (corresponds to
2138*4882a593Smuzhiyun * pmdp_collapse_flush() in the THP collapse code path).
2139*4882a593Smuzhiyun */
2140*4882a593Smuzhiyun static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2141*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2142*4882a593Smuzhiyun struct page **pages, int *nr)
2143*4882a593Smuzhiyun {
2144*4882a593Smuzhiyun struct dev_pagemap *pgmap = NULL;
2145*4882a593Smuzhiyun int nr_start = *nr, ret = 0;
2146*4882a593Smuzhiyun pte_t *ptep, *ptem;
2147*4882a593Smuzhiyun
2148*4882a593Smuzhiyun ptem = ptep = pte_offset_map(&pmd, addr);
2149*4882a593Smuzhiyun do {
2150*4882a593Smuzhiyun pte_t pte = gup_get_pte(ptep);
2151*4882a593Smuzhiyun struct page *head, *page;
2152*4882a593Smuzhiyun
2153*4882a593Smuzhiyun /*
2154*4882a593Smuzhiyun * Similar to the PMD case below, NUMA hinting must take slow
2155*4882a593Smuzhiyun * path using the pte_protnone check.
2156*4882a593Smuzhiyun */
2157*4882a593Smuzhiyun if (pte_protnone(pte))
2158*4882a593Smuzhiyun goto pte_unmap;
2159*4882a593Smuzhiyun
2160*4882a593Smuzhiyun if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2161*4882a593Smuzhiyun goto pte_unmap;
2162*4882a593Smuzhiyun
2163*4882a593Smuzhiyun if (pte_devmap(pte)) {
2164*4882a593Smuzhiyun if (unlikely(flags & FOLL_LONGTERM))
2165*4882a593Smuzhiyun goto pte_unmap;
2166*4882a593Smuzhiyun
2167*4882a593Smuzhiyun pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
2168*4882a593Smuzhiyun if (unlikely(!pgmap)) {
2169*4882a593Smuzhiyun undo_dev_pagemap(nr, nr_start, flags, pages);
2170*4882a593Smuzhiyun goto pte_unmap;
2171*4882a593Smuzhiyun }
2172*4882a593Smuzhiyun } else if (pte_special(pte))
2173*4882a593Smuzhiyun goto pte_unmap;
2174*4882a593Smuzhiyun
2175*4882a593Smuzhiyun VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2176*4882a593Smuzhiyun page = pte_page(pte);
2177*4882a593Smuzhiyun
2178*4882a593Smuzhiyun head = try_grab_compound_head(page, 1, flags);
2179*4882a593Smuzhiyun if (!head)
2180*4882a593Smuzhiyun goto pte_unmap;
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) ||
2183*4882a593Smuzhiyun unlikely(pte_val(pte) != pte_val(*ptep))) {
2184*4882a593Smuzhiyun put_compound_head(head, 1, flags);
2185*4882a593Smuzhiyun goto pte_unmap;
2186*4882a593Smuzhiyun }
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun VM_BUG_ON_PAGE(compound_head(page) != head, page);
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun /*
2191*4882a593Smuzhiyun * We need to make the page accessible if and only if we are
2192*4882a593Smuzhiyun * going to access its content (the FOLL_PIN case). Please
2193*4882a593Smuzhiyun * see Documentation/core-api/pin_user_pages.rst for
2194*4882a593Smuzhiyun * details.
2195*4882a593Smuzhiyun */
2196*4882a593Smuzhiyun if (flags & FOLL_PIN) {
2197*4882a593Smuzhiyun ret = arch_make_page_accessible(page);
2198*4882a593Smuzhiyun if (ret) {
2199*4882a593Smuzhiyun unpin_user_page(page);
2200*4882a593Smuzhiyun goto pte_unmap;
2201*4882a593Smuzhiyun }
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun SetPageReferenced(page);
2204*4882a593Smuzhiyun pages[*nr] = page;
2205*4882a593Smuzhiyun (*nr)++;
2206*4882a593Smuzhiyun
2207*4882a593Smuzhiyun } while (ptep++, addr += PAGE_SIZE, addr != end);
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun ret = 1;
2210*4882a593Smuzhiyun
2211*4882a593Smuzhiyun pte_unmap:
2212*4882a593Smuzhiyun if (pgmap)
2213*4882a593Smuzhiyun put_dev_pagemap(pgmap);
2214*4882a593Smuzhiyun pte_unmap(ptem);
2215*4882a593Smuzhiyun return ret;
2216*4882a593Smuzhiyun }
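
/*
 * Illustrative sketch (comment only, not compiled code): the two orderings
 * that the block comment above gup_pte_range() relies on. The updater-side
 * helpers named below are examples of the general pattern rather than one
 * specific call site:
 *
 *	fast-gup side (this function):
 *		pte = gup_get_pte(ptep);
 *		head = try_grab_compound_head(page, 1, flags);   <- (1) pin
 *		if (pte_val(pte) != pte_val(*ptep))              <- (2) recheck
 *			put_compound_head(head, 1, flags);          back off
 *
 *	pte-level updater side (e.g. zap or THP split):
 *		old = ptep_get_and_clear(mm, addr, ptep);        <- (1) clear pte
 *		if (page_maybe_dma_pinned(page))                 <- (2) check pin
 *			handle the still-pinned page;
 *
 * Either fast-gup's recheck sees the cleared pte and backs off, or the
 * updater's check sees the already-raised pin count; a pin taken here can
 * never be missed by both sides.
 */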
2217*4882a593Smuzhiyun #else
2218*4882a593Smuzhiyun
2219*4882a593Smuzhiyun /*
2220*4882a593Smuzhiyun * If we can't determine whether or not a pte is special, then fail immediately
2221*4882a593Smuzhiyun * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
2222*4882a593Smuzhiyun * to be special.
2223*4882a593Smuzhiyun *
2224*4882a593Smuzhiyun * For a futex to be placed on a THP tail page, get_futex_key requires a
2225*4882a593Smuzhiyun * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2226*4882a593Smuzhiyun * useful to have gup_huge_pmd even if we can't operate on ptes.
2227*4882a593Smuzhiyun */
gup_pte_range(pmd_t pmd,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2228*4882a593Smuzhiyun static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
2229*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2230*4882a593Smuzhiyun struct page **pages, int *nr)
2231*4882a593Smuzhiyun {
2232*4882a593Smuzhiyun return 0;
2233*4882a593Smuzhiyun }
2234*4882a593Smuzhiyun #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
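
/*
 * When the stub above returns 0, gup_pmd_range() and its callers stop the
 * walk, so lockless_pages_from_mm() reports fewer pages than requested and
 * internal_get_user_pages_fast() falls back to the slow, mmap_lock-taking
 * path for the remainder (unless FOLL_FAST_ONLY was set).
 */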
2235*4882a593Smuzhiyun
2236*4882a593Smuzhiyun #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
__gup_device_huge(unsigned long pfn,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2237*4882a593Smuzhiyun static int __gup_device_huge(unsigned long pfn, unsigned long addr,
2238*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2239*4882a593Smuzhiyun struct page **pages, int *nr)
2240*4882a593Smuzhiyun {
2241*4882a593Smuzhiyun int nr_start = *nr;
2242*4882a593Smuzhiyun struct dev_pagemap *pgmap = NULL;
2243*4882a593Smuzhiyun
2244*4882a593Smuzhiyun do {
2245*4882a593Smuzhiyun struct page *page = pfn_to_page(pfn);
2246*4882a593Smuzhiyun
2247*4882a593Smuzhiyun pgmap = get_dev_pagemap(pfn, pgmap);
2248*4882a593Smuzhiyun if (unlikely(!pgmap)) {
2249*4882a593Smuzhiyun undo_dev_pagemap(nr, nr_start, flags, pages);
2250*4882a593Smuzhiyun return 0;
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun SetPageReferenced(page);
2253*4882a593Smuzhiyun pages[*nr] = page;
2254*4882a593Smuzhiyun if (unlikely(!try_grab_page(page, flags))) {
2255*4882a593Smuzhiyun undo_dev_pagemap(nr, nr_start, flags, pages);
2256*4882a593Smuzhiyun return 0;
2257*4882a593Smuzhiyun }
2258*4882a593Smuzhiyun (*nr)++;
2259*4882a593Smuzhiyun pfn++;
2260*4882a593Smuzhiyun } while (addr += PAGE_SIZE, addr != end);
2261*4882a593Smuzhiyun
2262*4882a593Smuzhiyun if (pgmap)
2263*4882a593Smuzhiyun put_dev_pagemap(pgmap);
2264*4882a593Smuzhiyun return 1;
2265*4882a593Smuzhiyun }
2266*4882a593Smuzhiyun
__gup_device_huge_pmd(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2267*4882a593Smuzhiyun static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2268*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2269*4882a593Smuzhiyun struct page **pages, int *nr)
2270*4882a593Smuzhiyun {
2271*4882a593Smuzhiyun unsigned long fault_pfn;
2272*4882a593Smuzhiyun int nr_start = *nr;
2273*4882a593Smuzhiyun
2274*4882a593Smuzhiyun fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2275*4882a593Smuzhiyun if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2276*4882a593Smuzhiyun return 0;
2277*4882a593Smuzhiyun
2278*4882a593Smuzhiyun if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2279*4882a593Smuzhiyun undo_dev_pagemap(nr, nr_start, flags, pages);
2280*4882a593Smuzhiyun return 0;
2281*4882a593Smuzhiyun }
2282*4882a593Smuzhiyun return 1;
2283*4882a593Smuzhiyun }
2284*4882a593Smuzhiyun
__gup_device_huge_pud(pud_t orig,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2285*4882a593Smuzhiyun static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2286*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2287*4882a593Smuzhiyun struct page **pages, int *nr)
2288*4882a593Smuzhiyun {
2289*4882a593Smuzhiyun unsigned long fault_pfn;
2290*4882a593Smuzhiyun int nr_start = *nr;
2291*4882a593Smuzhiyun
2292*4882a593Smuzhiyun fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2293*4882a593Smuzhiyun if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2294*4882a593Smuzhiyun return 0;
2295*4882a593Smuzhiyun
2296*4882a593Smuzhiyun if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2297*4882a593Smuzhiyun undo_dev_pagemap(nr, nr_start, flags, pages);
2298*4882a593Smuzhiyun return 0;
2299*4882a593Smuzhiyun }
2300*4882a593Smuzhiyun return 1;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun #else
__gup_device_huge_pmd(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2303*4882a593Smuzhiyun static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2304*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2305*4882a593Smuzhiyun struct page **pages, int *nr)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun BUILD_BUG();
2308*4882a593Smuzhiyun return 0;
2309*4882a593Smuzhiyun }
2310*4882a593Smuzhiyun
__gup_device_huge_pud(pud_t pud,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2311*4882a593Smuzhiyun static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
2312*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2313*4882a593Smuzhiyun struct page **pages, int *nr)
2314*4882a593Smuzhiyun {
2315*4882a593Smuzhiyun BUILD_BUG();
2316*4882a593Smuzhiyun return 0;
2317*4882a593Smuzhiyun }
2318*4882a593Smuzhiyun #endif
2319*4882a593Smuzhiyun
record_subpages(struct page * page,unsigned long addr,unsigned long end,struct page ** pages)2320*4882a593Smuzhiyun static int record_subpages(struct page *page, unsigned long addr,
2321*4882a593Smuzhiyun unsigned long end, struct page **pages)
2322*4882a593Smuzhiyun {
2323*4882a593Smuzhiyun int nr;
2324*4882a593Smuzhiyun
2325*4882a593Smuzhiyun for (nr = 0; addr != end; addr += PAGE_SIZE)
2326*4882a593Smuzhiyun pages[nr++] = page++;
2327*4882a593Smuzhiyun
2328*4882a593Smuzhiyun return nr;
2329*4882a593Smuzhiyun }
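
/*
 * For example (illustrative numbers, assuming 4KB base pages): for a 2MB
 * THP mapped at a pmd-aligned address, gup_huge_pmd() below passes
 * page = pmd_page(orig), addr = start and end = start + PMD_SIZE, so
 * record_subpages() fills the next 512 slots of pages[] with the 512
 * subpages and returns 512. The caller only adds that count to *nr once
 * the speculative grab and the pmd recheck have both succeeded.
 */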
2330*4882a593Smuzhiyun
2331*4882a593Smuzhiyun #ifdef CONFIG_ARCH_HAS_HUGEPD
hugepte_addr_end(unsigned long addr,unsigned long end,unsigned long sz)2332*4882a593Smuzhiyun static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2333*4882a593Smuzhiyun unsigned long sz)
2334*4882a593Smuzhiyun {
2335*4882a593Smuzhiyun unsigned long __boundary = (addr + sz) & ~(sz-1);
2336*4882a593Smuzhiyun return (__boundary - 1 < end - 1) ? __boundary : end;
2337*4882a593Smuzhiyun }
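
/*
 * A worked example with made-up addresses and sz = 16MB: for
 * addr = 0x12345000 and end = 0x14000000, __boundary is
 * (0x12345000 + 0x1000000) & ~0xffffff = 0x13000000, which lies below end,
 * so the next 16MB boundary (0x13000000) is returned. If end were
 * 0x12800000 instead, the boundary would lie past end and end itself would
 * be returned. Comparing "__boundary - 1 < end - 1" rather than the raw
 * values keeps the result correct if __boundary wraps to 0 at the top of
 * the address space.
 */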
2338*4882a593Smuzhiyun
gup_hugepte(pte_t * ptep,unsigned long sz,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2339*4882a593Smuzhiyun static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
2340*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2341*4882a593Smuzhiyun struct page **pages, int *nr)
2342*4882a593Smuzhiyun {
2343*4882a593Smuzhiyun unsigned long pte_end;
2344*4882a593Smuzhiyun struct page *head, *page;
2345*4882a593Smuzhiyun pte_t pte;
2346*4882a593Smuzhiyun int refs;
2347*4882a593Smuzhiyun
2348*4882a593Smuzhiyun pte_end = (addr + sz) & ~(sz-1);
2349*4882a593Smuzhiyun if (pte_end < end)
2350*4882a593Smuzhiyun end = pte_end;
2351*4882a593Smuzhiyun
2352*4882a593Smuzhiyun pte = huge_ptep_get(ptep);
2353*4882a593Smuzhiyun
2354*4882a593Smuzhiyun if (!pte_access_permitted(pte, flags & FOLL_WRITE))
2355*4882a593Smuzhiyun return 0;
2356*4882a593Smuzhiyun
2357*4882a593Smuzhiyun /* hugepages are never "special" */
2358*4882a593Smuzhiyun VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
2359*4882a593Smuzhiyun
2360*4882a593Smuzhiyun head = pte_page(pte);
2361*4882a593Smuzhiyun page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
2362*4882a593Smuzhiyun refs = record_subpages(page, addr, end, pages + *nr);
2363*4882a593Smuzhiyun
2364*4882a593Smuzhiyun head = try_grab_compound_head(head, refs, flags);
2365*4882a593Smuzhiyun if (!head)
2366*4882a593Smuzhiyun return 0;
2367*4882a593Smuzhiyun
2368*4882a593Smuzhiyun if (unlikely(pte_val(pte) != pte_val(*ptep))) {
2369*4882a593Smuzhiyun put_compound_head(head, refs, flags);
2370*4882a593Smuzhiyun return 0;
2371*4882a593Smuzhiyun }
2372*4882a593Smuzhiyun
2373*4882a593Smuzhiyun *nr += refs;
2374*4882a593Smuzhiyun SetPageReferenced(head);
2375*4882a593Smuzhiyun return 1;
2376*4882a593Smuzhiyun }
2377*4882a593Smuzhiyun
gup_huge_pd(hugepd_t hugepd,unsigned long addr,unsigned int pdshift,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2378*4882a593Smuzhiyun static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2379*4882a593Smuzhiyun unsigned int pdshift, unsigned long end, unsigned int flags,
2380*4882a593Smuzhiyun struct page **pages, int *nr)
2381*4882a593Smuzhiyun {
2382*4882a593Smuzhiyun pte_t *ptep;
2383*4882a593Smuzhiyun unsigned long sz = 1UL << hugepd_shift(hugepd);
2384*4882a593Smuzhiyun unsigned long next;
2385*4882a593Smuzhiyun
2386*4882a593Smuzhiyun ptep = hugepte_offset(hugepd, addr, pdshift);
2387*4882a593Smuzhiyun do {
2388*4882a593Smuzhiyun next = hugepte_addr_end(addr, end, sz);
2389*4882a593Smuzhiyun if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2390*4882a593Smuzhiyun return 0;
2391*4882a593Smuzhiyun } while (ptep++, addr = next, addr != end);
2392*4882a593Smuzhiyun
2393*4882a593Smuzhiyun return 1;
2394*4882a593Smuzhiyun }
2395*4882a593Smuzhiyun #else
gup_huge_pd(hugepd_t hugepd,unsigned long addr,unsigned int pdshift,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2396*4882a593Smuzhiyun static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
2397*4882a593Smuzhiyun unsigned int pdshift, unsigned long end, unsigned int flags,
2398*4882a593Smuzhiyun struct page **pages, int *nr)
2399*4882a593Smuzhiyun {
2400*4882a593Smuzhiyun return 0;
2401*4882a593Smuzhiyun }
2402*4882a593Smuzhiyun #endif /* CONFIG_ARCH_HAS_HUGEPD */
2403*4882a593Smuzhiyun
gup_huge_pmd(pmd_t orig,pmd_t * pmdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2404*4882a593Smuzhiyun static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
2405*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2406*4882a593Smuzhiyun struct page **pages, int *nr)
2407*4882a593Smuzhiyun {
2408*4882a593Smuzhiyun struct page *head, *page;
2409*4882a593Smuzhiyun int refs;
2410*4882a593Smuzhiyun
2411*4882a593Smuzhiyun if (!pmd_access_permitted(orig, flags & FOLL_WRITE))
2412*4882a593Smuzhiyun return 0;
2413*4882a593Smuzhiyun
2414*4882a593Smuzhiyun if (pmd_devmap(orig)) {
2415*4882a593Smuzhiyun if (unlikely(flags & FOLL_LONGTERM))
2416*4882a593Smuzhiyun return 0;
2417*4882a593Smuzhiyun return __gup_device_huge_pmd(orig, pmdp, addr, end, flags,
2418*4882a593Smuzhiyun pages, nr);
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun
2421*4882a593Smuzhiyun page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
2422*4882a593Smuzhiyun refs = record_subpages(page, addr, end, pages + *nr);
2423*4882a593Smuzhiyun
2424*4882a593Smuzhiyun head = try_grab_compound_head(pmd_page(orig), refs, flags);
2425*4882a593Smuzhiyun if (!head)
2426*4882a593Smuzhiyun return 0;
2427*4882a593Smuzhiyun
2428*4882a593Smuzhiyun if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
2429*4882a593Smuzhiyun put_compound_head(head, refs, flags);
2430*4882a593Smuzhiyun return 0;
2431*4882a593Smuzhiyun }
2432*4882a593Smuzhiyun
2433*4882a593Smuzhiyun *nr += refs;
2434*4882a593Smuzhiyun SetPageReferenced(head);
2435*4882a593Smuzhiyun return 1;
2436*4882a593Smuzhiyun }
2437*4882a593Smuzhiyun
gup_huge_pud(pud_t orig,pud_t * pudp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2438*4882a593Smuzhiyun static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
2439*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2440*4882a593Smuzhiyun struct page **pages, int *nr)
2441*4882a593Smuzhiyun {
2442*4882a593Smuzhiyun struct page *head, *page;
2443*4882a593Smuzhiyun int refs;
2444*4882a593Smuzhiyun
2445*4882a593Smuzhiyun if (!pud_access_permitted(orig, flags & FOLL_WRITE))
2446*4882a593Smuzhiyun return 0;
2447*4882a593Smuzhiyun
2448*4882a593Smuzhiyun if (pud_devmap(orig)) {
2449*4882a593Smuzhiyun if (unlikely(flags & FOLL_LONGTERM))
2450*4882a593Smuzhiyun return 0;
2451*4882a593Smuzhiyun return __gup_device_huge_pud(orig, pudp, addr, end, flags,
2452*4882a593Smuzhiyun pages, nr);
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun
2455*4882a593Smuzhiyun page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
2456*4882a593Smuzhiyun refs = record_subpages(page, addr, end, pages + *nr);
2457*4882a593Smuzhiyun
2458*4882a593Smuzhiyun head = try_grab_compound_head(pud_page(orig), refs, flags);
2459*4882a593Smuzhiyun if (!head)
2460*4882a593Smuzhiyun return 0;
2461*4882a593Smuzhiyun
2462*4882a593Smuzhiyun if (unlikely(pud_val(orig) != pud_val(*pudp))) {
2463*4882a593Smuzhiyun put_compound_head(head, refs, flags);
2464*4882a593Smuzhiyun return 0;
2465*4882a593Smuzhiyun }
2466*4882a593Smuzhiyun
2467*4882a593Smuzhiyun *nr += refs;
2468*4882a593Smuzhiyun SetPageReferenced(head);
2469*4882a593Smuzhiyun return 1;
2470*4882a593Smuzhiyun }
2471*4882a593Smuzhiyun
gup_huge_pgd(pgd_t orig,pgd_t * pgdp,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2472*4882a593Smuzhiyun static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
2473*4882a593Smuzhiyun unsigned long end, unsigned int flags,
2474*4882a593Smuzhiyun struct page **pages, int *nr)
2475*4882a593Smuzhiyun {
2476*4882a593Smuzhiyun int refs;
2477*4882a593Smuzhiyun struct page *head, *page;
2478*4882a593Smuzhiyun
2479*4882a593Smuzhiyun if (!pgd_access_permitted(orig, flags & FOLL_WRITE))
2480*4882a593Smuzhiyun return 0;
2481*4882a593Smuzhiyun
2482*4882a593Smuzhiyun BUILD_BUG_ON(pgd_devmap(orig));
2483*4882a593Smuzhiyun
2484*4882a593Smuzhiyun page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2485*4882a593Smuzhiyun refs = record_subpages(page, addr, end, pages + *nr);
2486*4882a593Smuzhiyun
2487*4882a593Smuzhiyun head = try_grab_compound_head(pgd_page(orig), refs, flags);
2488*4882a593Smuzhiyun if (!head)
2489*4882a593Smuzhiyun return 0;
2490*4882a593Smuzhiyun
2491*4882a593Smuzhiyun if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
2492*4882a593Smuzhiyun put_compound_head(head, refs, flags);
2493*4882a593Smuzhiyun return 0;
2494*4882a593Smuzhiyun }
2495*4882a593Smuzhiyun
2496*4882a593Smuzhiyun *nr += refs;
2497*4882a593Smuzhiyun SetPageReferenced(head);
2498*4882a593Smuzhiyun return 1;
2499*4882a593Smuzhiyun }
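
/*
 * In gup_huge_pmd(), gup_huge_pud() and gup_huge_pgd() above, the
 * individual subpages are what get recorded in pages[], while all of the
 * references are taken on the compound head in a single
 * try_grab_compound_head() call. Releasing them later still works
 * per-entry, because put_page() and unpin_user_page() resolve a tail page
 * back to its head; e.g. a hypothetical FOLL_PIN caller would do:
 *
 *	for (i = 0; i < nr; i++)
 *		unpin_user_page(pages[i]);
 */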
2500*4882a593Smuzhiyun
gup_pmd_range(pud_t * pudp,pud_t pud,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2501*4882a593Smuzhiyun static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end,
2502*4882a593Smuzhiyun unsigned int flags, struct page **pages, int *nr)
2503*4882a593Smuzhiyun {
2504*4882a593Smuzhiyun unsigned long next;
2505*4882a593Smuzhiyun pmd_t *pmdp;
2506*4882a593Smuzhiyun
2507*4882a593Smuzhiyun pmdp = pmd_offset_lockless(pudp, pud, addr);
2508*4882a593Smuzhiyun do {
2509*4882a593Smuzhiyun pmd_t pmd = READ_ONCE(*pmdp);
2510*4882a593Smuzhiyun
2511*4882a593Smuzhiyun next = pmd_addr_end(addr, end);
2512*4882a593Smuzhiyun if (!pmd_present(pmd))
2513*4882a593Smuzhiyun return 0;
2514*4882a593Smuzhiyun
2515*4882a593Smuzhiyun if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) ||
2516*4882a593Smuzhiyun pmd_devmap(pmd))) {
2517*4882a593Smuzhiyun /*
2518*4882a593Smuzhiyun * NUMA hinting faults need to be handled in the GUP
2519*4882a593Smuzhiyun * slowpath for accounting purposes and so that they
2520*4882a593Smuzhiyun * can be serialised against THP migration.
2521*4882a593Smuzhiyun */
2522*4882a593Smuzhiyun if (pmd_protnone(pmd))
2523*4882a593Smuzhiyun return 0;
2524*4882a593Smuzhiyun
2525*4882a593Smuzhiyun if (!gup_huge_pmd(pmd, pmdp, addr, next, flags,
2526*4882a593Smuzhiyun pages, nr))
2527*4882a593Smuzhiyun return 0;
2528*4882a593Smuzhiyun
2529*4882a593Smuzhiyun } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
2530*4882a593Smuzhiyun /*
2531*4882a593Smuzhiyun 			 * Some architectures use a different page table
2532*4882a593Smuzhiyun 			 * format for hugetlbfs pmds than for THP pmds.
2533*4882a593Smuzhiyun */
2534*4882a593Smuzhiyun if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
2535*4882a593Smuzhiyun PMD_SHIFT, next, flags, pages, nr))
2536*4882a593Smuzhiyun return 0;
2537*4882a593Smuzhiyun } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2538*4882a593Smuzhiyun return 0;
2539*4882a593Smuzhiyun } while (pmdp++, addr = next, addr != end);
2540*4882a593Smuzhiyun
2541*4882a593Smuzhiyun return 1;
2542*4882a593Smuzhiyun }
2543*4882a593Smuzhiyun
gup_pud_range(p4d_t * p4dp,p4d_t p4d,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2544*4882a593Smuzhiyun static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end,
2545*4882a593Smuzhiyun unsigned int flags, struct page **pages, int *nr)
2546*4882a593Smuzhiyun {
2547*4882a593Smuzhiyun unsigned long next;
2548*4882a593Smuzhiyun pud_t *pudp;
2549*4882a593Smuzhiyun
2550*4882a593Smuzhiyun pudp = pud_offset_lockless(p4dp, p4d, addr);
2551*4882a593Smuzhiyun do {
2552*4882a593Smuzhiyun pud_t pud = READ_ONCE(*pudp);
2553*4882a593Smuzhiyun
2554*4882a593Smuzhiyun next = pud_addr_end(addr, end);
2555*4882a593Smuzhiyun if (unlikely(!pud_present(pud)))
2556*4882a593Smuzhiyun return 0;
2557*4882a593Smuzhiyun if (unlikely(pud_huge(pud) || pud_devmap(pud))) {
2558*4882a593Smuzhiyun if (!gup_huge_pud(pud, pudp, addr, next, flags,
2559*4882a593Smuzhiyun pages, nr))
2560*4882a593Smuzhiyun return 0;
2561*4882a593Smuzhiyun } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
2562*4882a593Smuzhiyun if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
2563*4882a593Smuzhiyun PUD_SHIFT, next, flags, pages, nr))
2564*4882a593Smuzhiyun return 0;
2565*4882a593Smuzhiyun } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2566*4882a593Smuzhiyun return 0;
2567*4882a593Smuzhiyun } while (pudp++, addr = next, addr != end);
2568*4882a593Smuzhiyun
2569*4882a593Smuzhiyun return 1;
2570*4882a593Smuzhiyun }
2571*4882a593Smuzhiyun
gup_p4d_range(pgd_t * pgdp,pgd_t pgd,unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2572*4882a593Smuzhiyun static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end,
2573*4882a593Smuzhiyun unsigned int flags, struct page **pages, int *nr)
2574*4882a593Smuzhiyun {
2575*4882a593Smuzhiyun unsigned long next;
2576*4882a593Smuzhiyun p4d_t *p4dp;
2577*4882a593Smuzhiyun
2578*4882a593Smuzhiyun p4dp = p4d_offset_lockless(pgdp, pgd, addr);
2579*4882a593Smuzhiyun do {
2580*4882a593Smuzhiyun p4d_t p4d = READ_ONCE(*p4dp);
2581*4882a593Smuzhiyun
2582*4882a593Smuzhiyun next = p4d_addr_end(addr, end);
2583*4882a593Smuzhiyun if (p4d_none(p4d))
2584*4882a593Smuzhiyun return 0;
2585*4882a593Smuzhiyun BUILD_BUG_ON(p4d_huge(p4d));
2586*4882a593Smuzhiyun if (unlikely(is_hugepd(__hugepd(p4d_val(p4d))))) {
2587*4882a593Smuzhiyun if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr,
2588*4882a593Smuzhiyun P4D_SHIFT, next, flags, pages, nr))
2589*4882a593Smuzhiyun return 0;
2590*4882a593Smuzhiyun } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2591*4882a593Smuzhiyun return 0;
2592*4882a593Smuzhiyun } while (p4dp++, addr = next, addr != end);
2593*4882a593Smuzhiyun
2594*4882a593Smuzhiyun return 1;
2595*4882a593Smuzhiyun }
2596*4882a593Smuzhiyun
gup_pgd_range(unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2597*4882a593Smuzhiyun static void gup_pgd_range(unsigned long addr, unsigned long end,
2598*4882a593Smuzhiyun unsigned int flags, struct page **pages, int *nr)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun unsigned long next;
2601*4882a593Smuzhiyun pgd_t *pgdp;
2602*4882a593Smuzhiyun
2603*4882a593Smuzhiyun pgdp = pgd_offset(current->mm, addr);
2604*4882a593Smuzhiyun do {
2605*4882a593Smuzhiyun pgd_t pgd = READ_ONCE(*pgdp);
2606*4882a593Smuzhiyun
2607*4882a593Smuzhiyun next = pgd_addr_end(addr, end);
2608*4882a593Smuzhiyun if (pgd_none(pgd))
2609*4882a593Smuzhiyun return;
2610*4882a593Smuzhiyun if (unlikely(pgd_huge(pgd))) {
2611*4882a593Smuzhiyun if (!gup_huge_pgd(pgd, pgdp, addr, next, flags,
2612*4882a593Smuzhiyun pages, nr))
2613*4882a593Smuzhiyun return;
2614*4882a593Smuzhiyun } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
2615*4882a593Smuzhiyun if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
2616*4882a593Smuzhiyun PGDIR_SHIFT, next, flags, pages, nr))
2617*4882a593Smuzhiyun return;
2618*4882a593Smuzhiyun } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2619*4882a593Smuzhiyun return;
2620*4882a593Smuzhiyun } while (pgdp++, addr = next, addr != end);
2621*4882a593Smuzhiyun }
2622*4882a593Smuzhiyun #else
gup_pgd_range(unsigned long addr,unsigned long end,unsigned int flags,struct page ** pages,int * nr)2623*4882a593Smuzhiyun static inline void gup_pgd_range(unsigned long addr, unsigned long end,
2624*4882a593Smuzhiyun unsigned int flags, struct page **pages, int *nr)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun }
2627*4882a593Smuzhiyun #endif /* CONFIG_HAVE_FAST_GUP */
2628*4882a593Smuzhiyun
2629*4882a593Smuzhiyun #ifndef gup_fast_permitted
2630*4882a593Smuzhiyun /*
2631*4882a593Smuzhiyun * Check if it's allowed to use get_user_pages_fast_only() for the range, or
2632*4882a593Smuzhiyun * we need to fall back to the slow version:
2633*4882a593Smuzhiyun */
gup_fast_permitted(unsigned long start,unsigned long end)2634*4882a593Smuzhiyun static bool gup_fast_permitted(unsigned long start, unsigned long end)
2635*4882a593Smuzhiyun {
2636*4882a593Smuzhiyun return true;
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun #endif
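
/*
 * An architecture overrides gup_fast_permitted() when part of the address
 * range must never be walked locklessly. A purely hypothetical override
 * could look like this (TASK_SIZE_MAX is shown only as an example of a
 * per-arch limit):
 *
 *	static bool gup_fast_permitted(unsigned long start, unsigned long end)
 *	{
 *		return end <= TASK_SIZE_MAX;
 *	}
 */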
2639*4882a593Smuzhiyun
__gup_longterm_unlocked(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)2640*4882a593Smuzhiyun static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
2641*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages)
2642*4882a593Smuzhiyun {
2643*4882a593Smuzhiyun int ret;
2644*4882a593Smuzhiyun
2645*4882a593Smuzhiyun /*
2646*4882a593Smuzhiyun * FIXME: FOLL_LONGTERM does not work with
2647*4882a593Smuzhiyun * get_user_pages_unlocked() (see comments in that function)
2648*4882a593Smuzhiyun */
2649*4882a593Smuzhiyun if (gup_flags & FOLL_LONGTERM) {
2650*4882a593Smuzhiyun mmap_read_lock(current->mm);
2651*4882a593Smuzhiyun ret = __gup_longterm_locked(current->mm,
2652*4882a593Smuzhiyun start, nr_pages,
2653*4882a593Smuzhiyun pages, NULL, gup_flags);
2654*4882a593Smuzhiyun mmap_read_unlock(current->mm);
2655*4882a593Smuzhiyun } else {
2656*4882a593Smuzhiyun ret = get_user_pages_unlocked(start, nr_pages,
2657*4882a593Smuzhiyun pages, gup_flags);
2658*4882a593Smuzhiyun }
2659*4882a593Smuzhiyun
2660*4882a593Smuzhiyun return ret;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun
lockless_pages_from_mm(unsigned long start,unsigned long end,unsigned int gup_flags,struct page ** pages)2663*4882a593Smuzhiyun static unsigned long lockless_pages_from_mm(unsigned long start,
2664*4882a593Smuzhiyun unsigned long end,
2665*4882a593Smuzhiyun unsigned int gup_flags,
2666*4882a593Smuzhiyun struct page **pages)
2667*4882a593Smuzhiyun {
2668*4882a593Smuzhiyun unsigned long flags;
2669*4882a593Smuzhiyun int nr_pinned = 0;
2670*4882a593Smuzhiyun unsigned seq;
2671*4882a593Smuzhiyun
2672*4882a593Smuzhiyun if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) ||
2673*4882a593Smuzhiyun !gup_fast_permitted(start, end))
2674*4882a593Smuzhiyun return 0;
2675*4882a593Smuzhiyun
2676*4882a593Smuzhiyun if (gup_flags & FOLL_PIN) {
2677*4882a593Smuzhiyun 		seq = raw_read_seqcount(&current->mm->write_protect_seq);
2678*4882a593Smuzhiyun if (seq & 1)
2679*4882a593Smuzhiyun return 0;
2680*4882a593Smuzhiyun }
2681*4882a593Smuzhiyun
2682*4882a593Smuzhiyun /*
2683*4882a593Smuzhiyun * Disable interrupts. The nested form is used, in order to allow full,
2684*4882a593Smuzhiyun * general purpose use of this routine.
2685*4882a593Smuzhiyun *
2686*4882a593Smuzhiyun * With interrupts disabled, we block page table pages from being freed
2687*4882a593Smuzhiyun * from under us. See struct mmu_table_batch comments in
2688*4882a593Smuzhiyun * include/asm-generic/tlb.h for more details.
2689*4882a593Smuzhiyun *
2690*4882a593Smuzhiyun * We do not adopt an rcu_read_lock() here as we also want to block IPIs
2691*4882a593Smuzhiyun * that come from THPs splitting.
2692*4882a593Smuzhiyun */
2693*4882a593Smuzhiyun local_irq_save(flags);
2694*4882a593Smuzhiyun gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2695*4882a593Smuzhiyun local_irq_restore(flags);
2696*4882a593Smuzhiyun
2697*4882a593Smuzhiyun /*
2698*4882a593Smuzhiyun 	 * When pinning pages for DMA there could be a concurrent write protect
2699*4882a593Smuzhiyun 	 * from fork() via copy_page_range(); in this case, always fail fast GUP.
2700*4882a593Smuzhiyun */
2701*4882a593Smuzhiyun if (gup_flags & FOLL_PIN) {
2702*4882a593Smuzhiyun 		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
2703*4882a593Smuzhiyun unpin_user_pages(pages, nr_pinned);
2704*4882a593Smuzhiyun return 0;
2705*4882a593Smuzhiyun }
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun return nr_pinned;
2708*4882a593Smuzhiyun }
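
/*
 * The write_protect_seq checks above pair with the writer side in the
 * fork() path: copy_page_range(), running under the mmap write lock, bumps
 * the sequence count around write-protecting the parent's COW ptes,
 * roughly (simplified sketch, not the exact code):
 *
 *	mmap_write_lock(src_mm);
 *	write_seqcount_begin(&src_mm->write_protect_seq);
 *	... write-protect the parent's ptes ...
 *	write_seqcount_end(&src_mm->write_protect_seq);
 *	mmap_write_unlock(src_mm);
 *
 * A FOLL_PIN fast walk racing with this either sees an odd count before
 * walking (and bails out above), or sees the count change afterwards (and
 * unpins everything), so the pin is retried via the slow path.
 */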
2709*4882a593Smuzhiyun
internal_get_user_pages_fast(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages)2710*4882a593Smuzhiyun static int internal_get_user_pages_fast(unsigned long start,
2711*4882a593Smuzhiyun unsigned long nr_pages,
2712*4882a593Smuzhiyun unsigned int gup_flags,
2713*4882a593Smuzhiyun struct page **pages)
2714*4882a593Smuzhiyun {
2715*4882a593Smuzhiyun unsigned long len, end;
2716*4882a593Smuzhiyun unsigned long nr_pinned;
2717*4882a593Smuzhiyun int ret;
2718*4882a593Smuzhiyun
2719*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
2720*4882a593Smuzhiyun FOLL_FORCE | FOLL_PIN | FOLL_GET |
2721*4882a593Smuzhiyun FOLL_FAST_ONLY)))
2722*4882a593Smuzhiyun return -EINVAL;
2723*4882a593Smuzhiyun
2724*4882a593Smuzhiyun if (gup_flags & FOLL_PIN)
2725*4882a593Smuzhiyun 		atomic_set(&current->mm->has_pinned, 1);
2726*4882a593Smuzhiyun
2727*4882a593Smuzhiyun if (!(gup_flags & FOLL_FAST_ONLY))
2728*4882a593Smuzhiyun 		might_lock_read(&current->mm->mmap_lock);
2729*4882a593Smuzhiyun
2730*4882a593Smuzhiyun start = untagged_addr(start) & PAGE_MASK;
2731*4882a593Smuzhiyun len = nr_pages << PAGE_SHIFT;
2732*4882a593Smuzhiyun if (check_add_overflow(start, len, &end))
2733*4882a593Smuzhiyun return 0;
2734*4882a593Smuzhiyun if (unlikely(!access_ok((void __user *)start, len)))
2735*4882a593Smuzhiyun return -EFAULT;
2736*4882a593Smuzhiyun
2737*4882a593Smuzhiyun nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2738*4882a593Smuzhiyun if (nr_pinned == nr_pages || gup_flags & FOLL_FAST_ONLY)
2739*4882a593Smuzhiyun return nr_pinned;
2740*4882a593Smuzhiyun
2741*4882a593Smuzhiyun /* Slow path: try to get the remaining pages with get_user_pages */
2742*4882a593Smuzhiyun start += nr_pinned << PAGE_SHIFT;
2743*4882a593Smuzhiyun pages += nr_pinned;
2744*4882a593Smuzhiyun ret = __gup_longterm_unlocked(start, nr_pages - nr_pinned, gup_flags,
2745*4882a593Smuzhiyun pages);
2746*4882a593Smuzhiyun if (ret < 0) {
2747*4882a593Smuzhiyun /*
2748*4882a593Smuzhiyun * The caller has to unpin the pages we already pinned so
2749*4882a593Smuzhiyun * returning -errno is not an option
2750*4882a593Smuzhiyun */
2751*4882a593Smuzhiyun if (nr_pinned)
2752*4882a593Smuzhiyun return nr_pinned;
2753*4882a593Smuzhiyun return ret;
2754*4882a593Smuzhiyun }
2755*4882a593Smuzhiyun return ret + nr_pinned;
2756*4882a593Smuzhiyun }
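
/*
 * Example of the partial-pin arithmetic above, with illustrative numbers:
 * for nr_pages = 8, if the lockless walk pins 3 pages, the slow path is
 * asked for the remaining 5 starting at start + 3 * PAGE_SIZE. On success
 * 3 + 5 = 8 is returned; if the slow path fails with -errno, the 3
 * already-pinned pages cannot be silently dropped, so 3 is returned and
 * the caller remains responsible for releasing them.
 */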
2757*4882a593Smuzhiyun
2758*4882a593Smuzhiyun /**
2759*4882a593Smuzhiyun * get_user_pages_fast_only() - pin user pages in memory
2760*4882a593Smuzhiyun * @start: starting user address
2761*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
2762*4882a593Smuzhiyun * @gup_flags: flags modifying pin behaviour
2763*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
2764*4882a593Smuzhiyun * Should be at least nr_pages long.
2765*4882a593Smuzhiyun *
2766*4882a593Smuzhiyun * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
2767*4882a593Smuzhiyun * the regular GUP.
2768*4882a593Smuzhiyun  * Note a difference from get_user_pages_fast(): this always returns the
2769*4882a593Smuzhiyun  * number of pages pinned, or 0 if no pages were pinned.
2770*4882a593Smuzhiyun *
2771*4882a593Smuzhiyun * If the architecture does not support this function, simply return with no
2772*4882a593Smuzhiyun * pages pinned.
2773*4882a593Smuzhiyun *
2774*4882a593Smuzhiyun * Careful, careful! COW breaking can go either way, so a non-write
2775*4882a593Smuzhiyun * access can get ambiguous page results. If you call this function without
2776*4882a593Smuzhiyun * 'write' set, you'd better be sure that you're ok with that ambiguity.
2777*4882a593Smuzhiyun */
get_user_pages_fast_only(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)2778*4882a593Smuzhiyun int get_user_pages_fast_only(unsigned long start, int nr_pages,
2779*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages)
2780*4882a593Smuzhiyun {
2781*4882a593Smuzhiyun int nr_pinned;
2782*4882a593Smuzhiyun /*
2783*4882a593Smuzhiyun * Internally (within mm/gup.c), gup fast variants must set FOLL_GET,
2784*4882a593Smuzhiyun * because gup fast is always a "pin with a +1 page refcount" request.
2785*4882a593Smuzhiyun *
2786*4882a593Smuzhiyun * FOLL_FAST_ONLY is required in order to match the API description of
2787*4882a593Smuzhiyun * this routine: no fall back to regular ("slow") GUP.
2788*4882a593Smuzhiyun */
2789*4882a593Smuzhiyun gup_flags |= FOLL_GET | FOLL_FAST_ONLY;
2790*4882a593Smuzhiyun
2791*4882a593Smuzhiyun nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2792*4882a593Smuzhiyun pages);
2793*4882a593Smuzhiyun
2794*4882a593Smuzhiyun /*
2795*4882a593Smuzhiyun * As specified in the API description above, this routine is not
2796*4882a593Smuzhiyun * allowed to return negative values. However, the common core
2797*4882a593Smuzhiyun * routine internal_get_user_pages_fast() *can* return -errno.
2798*4882a593Smuzhiyun * Therefore, correct for that here:
2799*4882a593Smuzhiyun */
2800*4882a593Smuzhiyun if (nr_pinned < 0)
2801*4882a593Smuzhiyun nr_pinned = 0;
2802*4882a593Smuzhiyun
2803*4882a593Smuzhiyun return nr_pinned;
2804*4882a593Smuzhiyun }
2805*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(get_user_pages_fast_only);
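
/*
 * Hypothetical usage sketch for get_user_pages_fast_only(); the names and
 * error handling are invented for illustration. Because it never falls
 * back to the slow path, a zero return just means "retry from a context
 * that may take mmap_lock":
 *
 *	struct page *page;
 *
 *	if (get_user_pages_fast_only(uaddr, 1, FOLL_WRITE, &page) != 1)
 *		return -EAGAIN;		caller retries via a sleepable path
 *	... access the page ...
 *	put_page(page);			releases the FOLL_GET reference
 */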
2806*4882a593Smuzhiyun
2807*4882a593Smuzhiyun /**
2808*4882a593Smuzhiyun * get_user_pages_fast() - pin user pages in memory
2809*4882a593Smuzhiyun * @start: starting user address
2810*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
2811*4882a593Smuzhiyun * @gup_flags: flags modifying pin behaviour
2812*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
2813*4882a593Smuzhiyun * Should be at least nr_pages long.
2814*4882a593Smuzhiyun *
2815*4882a593Smuzhiyun * Attempt to pin user pages in memory without taking mm->mmap_lock.
2816*4882a593Smuzhiyun * If not successful, it will fall back to taking the lock and
2817*4882a593Smuzhiyun * calling get_user_pages().
2818*4882a593Smuzhiyun *
2819*4882a593Smuzhiyun * Returns number of pages pinned. This may be fewer than the number requested.
2820*4882a593Smuzhiyun * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2821*4882a593Smuzhiyun * -errno.
2822*4882a593Smuzhiyun */
get_user_pages_fast(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)2823*4882a593Smuzhiyun int get_user_pages_fast(unsigned long start, int nr_pages,
2824*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages)
2825*4882a593Smuzhiyun {
2826*4882a593Smuzhiyun if (!is_valid_gup_flags(gup_flags))
2827*4882a593Smuzhiyun return -EINVAL;
2828*4882a593Smuzhiyun
2829*4882a593Smuzhiyun /*
2830*4882a593Smuzhiyun * The caller may or may not have explicitly set FOLL_GET; either way is
2831*4882a593Smuzhiyun * OK. However, internally (within mm/gup.c), gup fast variants must set
2832*4882a593Smuzhiyun * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
2833*4882a593Smuzhiyun * request.
2834*4882a593Smuzhiyun */
2835*4882a593Smuzhiyun gup_flags |= FOLL_GET;
2836*4882a593Smuzhiyun return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2837*4882a593Smuzhiyun }
2838*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(get_user_pages_fast);
2839*4882a593Smuzhiyun
2840*4882a593Smuzhiyun /**
2841*4882a593Smuzhiyun * pin_user_pages_fast() - pin user pages in memory without taking locks
2842*4882a593Smuzhiyun *
2843*4882a593Smuzhiyun * @start: starting user address
2844*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
2845*4882a593Smuzhiyun * @gup_flags: flags modifying pin behaviour
2846*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
2847*4882a593Smuzhiyun * Should be at least nr_pages long.
2848*4882a593Smuzhiyun *
2849*4882a593Smuzhiyun * Nearly the same as get_user_pages_fast(), except that FOLL_PIN is set. See
2850*4882a593Smuzhiyun * get_user_pages_fast() for documentation on the function arguments, because
2851*4882a593Smuzhiyun * the arguments here are identical.
2852*4882a593Smuzhiyun *
2853*4882a593Smuzhiyun * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2854*4882a593Smuzhiyun * see Documentation/core-api/pin_user_pages.rst for further details.
2855*4882a593Smuzhiyun */
pin_user_pages_fast(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)2856*4882a593Smuzhiyun int pin_user_pages_fast(unsigned long start, int nr_pages,
2857*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages)
2858*4882a593Smuzhiyun {
2859*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2860*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2861*4882a593Smuzhiyun return -EINVAL;
2862*4882a593Smuzhiyun
2863*4882a593Smuzhiyun gup_flags |= FOLL_PIN;
2864*4882a593Smuzhiyun return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
2865*4882a593Smuzhiyun }
2866*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pin_user_pages_fast);
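
/*
 * Hypothetical driver-style usage sketch for pin_user_pages_fast(); buffer
 * management and error handling are invented for illustration. FOLL_PIN
 * pages must be released with unpin_user_page()/unpin_user_pages(), never
 * with put_page():
 *
 *	pages = kmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL);
 *	if (!pages)
 *		return -ENOMEM;
 *	nr = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
 *	if (nr > 0) {
 *		... set up and run DMA on the pinned pages ...
 *		unpin_user_pages(pages, nr);
 *	}
 *	kfree(pages);
 */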
2867*4882a593Smuzhiyun
2868*4882a593Smuzhiyun /*
2869*4882a593Smuzhiyun * This is the FOLL_PIN equivalent of get_user_pages_fast_only(). Behavior
2870*4882a593Smuzhiyun * is the same, except that this one sets FOLL_PIN instead of FOLL_GET.
2871*4882a593Smuzhiyun *
2872*4882a593Smuzhiyun * The API rules are the same, too: no negative values may be returned.
2873*4882a593Smuzhiyun */
pin_user_pages_fast_only(unsigned long start,int nr_pages,unsigned int gup_flags,struct page ** pages)2874*4882a593Smuzhiyun int pin_user_pages_fast_only(unsigned long start, int nr_pages,
2875*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages)
2876*4882a593Smuzhiyun {
2877*4882a593Smuzhiyun int nr_pinned;
2878*4882a593Smuzhiyun
2879*4882a593Smuzhiyun /*
2880*4882a593Smuzhiyun * FOLL_GET and FOLL_PIN are mutually exclusive. Note that the API
2881*4882a593Smuzhiyun * rules require returning 0, rather than -errno:
2882*4882a593Smuzhiyun */
2883*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2884*4882a593Smuzhiyun return 0;
2885*4882a593Smuzhiyun /*
2886*4882a593Smuzhiyun * FOLL_FAST_ONLY is required in order to match the API description of
2887*4882a593Smuzhiyun * this routine: no fall back to regular ("slow") GUP.
2888*4882a593Smuzhiyun */
2889*4882a593Smuzhiyun gup_flags |= (FOLL_PIN | FOLL_FAST_ONLY);
2890*4882a593Smuzhiyun nr_pinned = internal_get_user_pages_fast(start, nr_pages, gup_flags,
2891*4882a593Smuzhiyun pages);
2892*4882a593Smuzhiyun /*
2893*4882a593Smuzhiyun * This routine is not allowed to return negative values. However,
2894*4882a593Smuzhiyun * internal_get_user_pages_fast() *can* return -errno. Therefore,
2895*4882a593Smuzhiyun * correct for that here:
2896*4882a593Smuzhiyun */
2897*4882a593Smuzhiyun if (nr_pinned < 0)
2898*4882a593Smuzhiyun nr_pinned = 0;
2899*4882a593Smuzhiyun
2900*4882a593Smuzhiyun return nr_pinned;
2901*4882a593Smuzhiyun }
2902*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pin_user_pages_fast_only);
2903*4882a593Smuzhiyun
2904*4882a593Smuzhiyun /**
2905*4882a593Smuzhiyun * pin_user_pages_remote() - pin pages of a remote process
2906*4882a593Smuzhiyun *
2907*4882a593Smuzhiyun * @mm: mm_struct of target mm
2908*4882a593Smuzhiyun * @start: starting user address
2909*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
2910*4882a593Smuzhiyun * @gup_flags: flags modifying lookup behaviour
2911*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
2912*4882a593Smuzhiyun * Should be at least nr_pages long. Or NULL, if caller
2913*4882a593Smuzhiyun * only intends to ensure the pages are faulted in.
2914*4882a593Smuzhiyun * @vmas: array of pointers to vmas corresponding to each page.
2915*4882a593Smuzhiyun * Or NULL if the caller does not require them.
2916*4882a593Smuzhiyun * @locked: pointer to lock flag indicating whether lock is held and
2917*4882a593Smuzhiyun * subsequently whether VM_FAULT_RETRY functionality can be
2918*4882a593Smuzhiyun * utilised. Lock must initially be held.
2919*4882a593Smuzhiyun *
2920*4882a593Smuzhiyun * Nearly the same as get_user_pages_remote(), except that FOLL_PIN is set. See
2921*4882a593Smuzhiyun * get_user_pages_remote() for documentation on the function arguments, because
2922*4882a593Smuzhiyun * the arguments here are identical.
2923*4882a593Smuzhiyun *
2924*4882a593Smuzhiyun * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2925*4882a593Smuzhiyun * see Documentation/core-api/pin_user_pages.rst for details.
2926*4882a593Smuzhiyun */
pin_user_pages_remote(struct mm_struct * mm,unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,struct vm_area_struct ** vmas,int * locked)2927*4882a593Smuzhiyun long pin_user_pages_remote(struct mm_struct *mm,
2928*4882a593Smuzhiyun unsigned long start, unsigned long nr_pages,
2929*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
2930*4882a593Smuzhiyun struct vm_area_struct **vmas, int *locked)
2931*4882a593Smuzhiyun {
2932*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2933*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2934*4882a593Smuzhiyun return -EINVAL;
2935*4882a593Smuzhiyun
2936*4882a593Smuzhiyun gup_flags |= FOLL_PIN;
2937*4882a593Smuzhiyun return __get_user_pages_remote(mm, start, nr_pages, gup_flags,
2938*4882a593Smuzhiyun pages, vmas, locked);
2939*4882a593Smuzhiyun }
2940*4882a593Smuzhiyun EXPORT_SYMBOL(pin_user_pages_remote);
2941*4882a593Smuzhiyun
2942*4882a593Smuzhiyun /**
2943*4882a593Smuzhiyun * pin_user_pages() - pin user pages in memory for use by other devices
2944*4882a593Smuzhiyun *
2945*4882a593Smuzhiyun * @start: starting user address
2946*4882a593Smuzhiyun * @nr_pages: number of pages from start to pin
2947*4882a593Smuzhiyun * @gup_flags: flags modifying lookup behaviour
2948*4882a593Smuzhiyun * @pages: array that receives pointers to the pages pinned.
2949*4882a593Smuzhiyun * Should be at least nr_pages long. Or NULL, if caller
2950*4882a593Smuzhiyun * only intends to ensure the pages are faulted in.
2951*4882a593Smuzhiyun * @vmas: array of pointers to vmas corresponding to each page.
2952*4882a593Smuzhiyun * Or NULL if the caller does not require them.
2953*4882a593Smuzhiyun *
2954*4882a593Smuzhiyun * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and
2955*4882a593Smuzhiyun * FOLL_PIN is set.
2956*4882a593Smuzhiyun *
2957*4882a593Smuzhiyun * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2958*4882a593Smuzhiyun * see Documentation/core-api/pin_user_pages.rst for details.
2959*4882a593Smuzhiyun */
pin_user_pages(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,struct vm_area_struct ** vmas)2960*4882a593Smuzhiyun long pin_user_pages(unsigned long start, unsigned long nr_pages,
2961*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
2962*4882a593Smuzhiyun struct vm_area_struct **vmas)
2963*4882a593Smuzhiyun {
2964*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2965*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2966*4882a593Smuzhiyun return -EINVAL;
2967*4882a593Smuzhiyun
2968*4882a593Smuzhiyun gup_flags |= FOLL_PIN;
2969*4882a593Smuzhiyun return __gup_longterm_locked(current->mm, start, nr_pages,
2970*4882a593Smuzhiyun pages, vmas, gup_flags);
2971*4882a593Smuzhiyun }
2972*4882a593Smuzhiyun EXPORT_SYMBOL(pin_user_pages);
2973*4882a593Smuzhiyun
2974*4882a593Smuzhiyun /*
2975*4882a593Smuzhiyun * pin_user_pages_unlocked() is the FOLL_PIN variant of
2976*4882a593Smuzhiyun * get_user_pages_unlocked(). Behavior is the same, except that this one sets
2977*4882a593Smuzhiyun * FOLL_PIN and rejects FOLL_GET.
2978*4882a593Smuzhiyun */
pin_user_pages_unlocked(unsigned long start,unsigned long nr_pages,struct page ** pages,unsigned int gup_flags)2979*4882a593Smuzhiyun long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2980*4882a593Smuzhiyun struct page **pages, unsigned int gup_flags)
2981*4882a593Smuzhiyun {
2982*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
2983*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
2984*4882a593Smuzhiyun return -EINVAL;
2985*4882a593Smuzhiyun
2986*4882a593Smuzhiyun gup_flags |= FOLL_PIN;
2987*4882a593Smuzhiyun return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
2988*4882a593Smuzhiyun }
2989*4882a593Smuzhiyun EXPORT_SYMBOL(pin_user_pages_unlocked);
2990*4882a593Smuzhiyun
2991*4882a593Smuzhiyun /*
2992*4882a593Smuzhiyun * pin_user_pages_locked() is the FOLL_PIN variant of get_user_pages_locked().
2993*4882a593Smuzhiyun * Behavior is the same, except that this one sets FOLL_PIN and rejects
2994*4882a593Smuzhiyun * FOLL_GET.
2995*4882a593Smuzhiyun */
pin_user_pages_locked(unsigned long start,unsigned long nr_pages,unsigned int gup_flags,struct page ** pages,int * locked)2996*4882a593Smuzhiyun long pin_user_pages_locked(unsigned long start, unsigned long nr_pages,
2997*4882a593Smuzhiyun unsigned int gup_flags, struct page **pages,
2998*4882a593Smuzhiyun int *locked)
2999*4882a593Smuzhiyun {
3000*4882a593Smuzhiyun /*
3001*4882a593Smuzhiyun * FIXME: Current FOLL_LONGTERM behavior is incompatible with
3002*4882a593Smuzhiyun * FAULT_FLAG_ALLOW_RETRY because of the FS DAX check requirement on
3003*4882a593Smuzhiyun * vmas. As there are no users of this flag in this call we simply
3004*4882a593Smuzhiyun * disallow this option for now.
3005*4882a593Smuzhiyun */
3006*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_LONGTERM))
3007*4882a593Smuzhiyun return -EINVAL;
3008*4882a593Smuzhiyun
3009*4882a593Smuzhiyun /* FOLL_GET and FOLL_PIN are mutually exclusive. */
3010*4882a593Smuzhiyun if (WARN_ON_ONCE(gup_flags & FOLL_GET))
3011*4882a593Smuzhiyun return -EINVAL;
3012*4882a593Smuzhiyun
3013*4882a593Smuzhiyun gup_flags |= FOLL_PIN;
3014*4882a593Smuzhiyun return __get_user_pages_locked(current->mm, start, nr_pages,
3015*4882a593Smuzhiyun pages, NULL, locked,
3016*4882a593Smuzhiyun gup_flags | FOLL_TOUCH);
3017*4882a593Smuzhiyun }
3018*4882a593Smuzhiyun EXPORT_SYMBOL(pin_user_pages_locked);
3019