Lines matching refs:pages (mm/gup.c)

298 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,  in unpin_user_pages_dirty_lock()  argument
310 unpin_user_pages(pages, npages); in unpin_user_pages_dirty_lock()
315 struct page *page = compound_head(pages[index]); in unpin_user_pages_dirty_lock()
352 void unpin_user_pages(struct page **pages, unsigned long npages) in unpin_user_pages() argument
369 unpin_user_page(pages[index]); in unpin_user_pages()
1063 unsigned int gup_flags, struct page **pages, in __get_user_pages() argument
1075 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); in __get_user_pages()
1096 pages ? &pages[i] : NULL); in __get_user_pages()
1112 i = follow_hugetlb_page(mm, vma, pages, vmas, in __get_user_pages()
1166 if (pages) { in __get_user_pages()
1167 pages[i] = page; in __get_user_pages()
1294 struct page **pages, in __get_user_pages_locked() argument
1321 if (pages && !(flags & FOLL_PIN)) in __get_user_pages_locked()
1327 ret = __get_user_pages(mm, start, nr_pages, flags, pages, in __get_user_pages_locked()
1358 if (likely(pages)) in __get_user_pages_locked()
1359 pages += ret; in __get_user_pages_locked()
1388 pages, NULL, locked); in __get_user_pages_locked()
1404 if (likely(pages)) in __get_user_pages_locked()
1405 pages++; in __get_user_pages_locked()
1539 unsigned long nr_pages, struct page **pages, in __get_user_pages_locked() argument
1565 if (pages) { in __get_user_pages_locked()
1566 pages[i] = virt_to_page(start); in __get_user_pages_locked()
1567 if (pages[i]) in __get_user_pages_locked()
1568 get_page(pages[i]); in __get_user_pages_locked()
1618 struct page **pages, in check_and_migrate_cma_pages() argument
1637 head = compound_head(pages[i]); in check_and_migrate_cma_pages()
1681 unpin_user_pages(pages, nr_pages); in check_and_migrate_cma_pages()
1684 put_page(pages[i]); in check_and_migrate_cma_pages()
1696 ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in check_and_migrate_cma_pages()
1713 struct page **pages, in check_and_migrate_cma_pages() argument
1728 struct page **pages, in __gup_longterm_locked() argument
1738 rc = __get_user_pages_locked(mm, start, nr_pages, pages, vmas, NULL, in __gup_longterm_locked()
1743 rc = check_and_migrate_cma_pages(mm, start, rc, pages, in __gup_longterm_locked()
1772 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1789 return __gup_longterm_locked(mm, start, nr_pages, pages, in __get_user_pages_remote()
1794 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, in __get_user_pages_remote()
1861 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1868 pages, vmas, locked); in get_user_pages_remote()
1875 unsigned int gup_flags, struct page **pages, in get_user_pages_remote() argument
1883 unsigned int gup_flags, struct page **pages, in __get_user_pages_remote() argument
1907 unsigned int gup_flags, struct page **pages, in get_user_pages() argument
1914 pages, vmas, gup_flags | FOLL_TOUCH); in get_user_pages()
1951 unsigned int gup_flags, struct page **pages, in get_user_pages_locked() argument
1970 pages, NULL, locked, in get_user_pages_locked()
1991 struct page **pages, unsigned int gup_flags) in get_user_pages_unlocked() argument
2007 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL, in get_user_pages_unlocked()
2107 struct page **pages) in undo_dev_pagemap() argument
2110 struct page *page = pages[--(*nr)]; in undo_dev_pagemap()
2142 struct page **pages, int *nr) in gup_pte_range() argument
2169 undo_dev_pagemap(nr, nr_start, flags, pages); in gup_pte_range()
2204 pages[*nr] = page; in gup_pte_range()
2230 struct page **pages, int *nr) in gup_pte_range() argument
2239 struct page **pages, int *nr) in __gup_device_huge() argument
2249 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2253 pages[*nr] = page; in __gup_device_huge()
2255 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge()
2269 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2275 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pmd()
2279 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pmd()
2287 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2293 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr)) in __gup_device_huge_pud()
2297 undo_dev_pagemap(nr, nr_start, flags, pages); in __gup_device_huge_pud()
2305 struct page **pages, int *nr) in __gup_device_huge_pmd() argument
2313 struct page **pages, int *nr) in __gup_device_huge_pud() argument
2321 unsigned long end, struct page **pages) in record_subpages() argument
2326 pages[nr++] = page++; in record_subpages()
2341 struct page **pages, int *nr) in gup_hugepte() argument
2362 refs = record_subpages(page, addr, end, pages + *nr); in gup_hugepte()
2380 struct page **pages, int *nr) in gup_huge_pd() argument
2389 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr)) in gup_huge_pd()
2398 struct page **pages, int *nr) in gup_huge_pd() argument
2406 struct page **pages, int *nr) in gup_huge_pmd() argument
2418 pages, nr); in gup_huge_pmd()
2422 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pmd()
2440 struct page **pages, int *nr) in gup_huge_pud() argument
2452 pages, nr); in gup_huge_pud()
2456 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pud()
2474 struct page **pages, int *nr) in gup_huge_pgd() argument
2485 refs = record_subpages(page, addr, end, pages + *nr); in gup_huge_pgd()
2502 unsigned int flags, struct page **pages, int *nr) in gup_pmd_range() argument
2526 pages, nr)) in gup_pmd_range()
2535 PMD_SHIFT, next, flags, pages, nr)) in gup_pmd_range()
2537 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr)) in gup_pmd_range()
2545 unsigned int flags, struct page **pages, int *nr) in gup_pud_range() argument
2559 pages, nr)) in gup_pud_range()
2563 PUD_SHIFT, next, flags, pages, nr)) in gup_pud_range()
2565 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) in gup_pud_range()
2573 unsigned int flags, struct page **pages, int *nr) in gup_p4d_range() argument
2588 P4D_SHIFT, next, flags, pages, nr)) in gup_p4d_range()
2590 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) in gup_p4d_range()
2598 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2612 pages, nr)) in gup_pgd_range()
2616 PGDIR_SHIFT, next, flags, pages, nr)) in gup_pgd_range()
2618 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) in gup_pgd_range()
2624 unsigned int flags, struct page **pages, int *nr) in gup_pgd_range() argument
2641 unsigned int gup_flags, struct page **pages) in __gup_longterm_unlocked() argument
2653 pages, NULL, gup_flags); in __gup_longterm_unlocked()
2657 pages, gup_flags); in __gup_longterm_unlocked()
2666 struct page **pages) in lockless_pages_from_mm() argument
2694 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned); in lockless_pages_from_mm()
2703 unpin_user_pages(pages, nr_pinned); in lockless_pages_from_mm()
2713 struct page **pages) in internal_get_user_pages_fast() argument
2737 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages); in internal_get_user_pages_fast()
2743 pages += nr_pinned; in internal_get_user_pages_fast()
2745 pages); in internal_get_user_pages_fast()
2779 unsigned int gup_flags, struct page **pages) in get_user_pages_fast_only() argument
2792 pages); in get_user_pages_fast_only()
2824 unsigned int gup_flags, struct page **pages) in get_user_pages_fast() argument
2836 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in get_user_pages_fast()
2857 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast() argument
2864 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); in pin_user_pages_fast()
2875 unsigned int gup_flags, struct page **pages) in pin_user_pages_fast_only() argument
2891 pages); in pin_user_pages_fast_only()
2929 unsigned int gup_flags, struct page **pages, in pin_user_pages_remote() argument
2938 pages, vmas, locked); in pin_user_pages_remote()
2961 unsigned int gup_flags, struct page **pages, in pin_user_pages() argument
2970 pages, vmas, gup_flags); in pin_user_pages()
2980 struct page **pages, unsigned int gup_flags) in pin_user_pages_unlocked() argument
2987 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags); in pin_user_pages_unlocked()
2997 unsigned int gup_flags, struct page **pages, in pin_user_pages_locked() argument
3015 pages, NULL, locked, in pin_user_pages_locked()
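
The matches above index the pin/unpin side of the GUP API: pin_user_pages_fast(), pin_user_pages(), unpin_user_pages(), unpin_user_pages_dirty_lock(), and the FOLL_LONGTERM handling in __gup_longterm_locked()/check_and_migrate_cma_pages(). As a minimal sketch of how that pair is normally used, assuming a driver that pins a user buffer for DMA, the helpers below pin with pin_user_pages_fast() and release with unpin_user_pages_dirty_lock(). The helper names example_pin_user_buffer()/example_unpin_user_buffer() are hypothetical and not functions from gup.c.

/*
 * Hypothetical driver helpers, not part of gup.c: pin a user buffer for
 * device I/O and later release it, marking the pages dirty.
 */
#include <linux/mm.h>
#include <linux/slab.h>

static int example_pin_user_buffer(unsigned long uaddr, size_t len,
				   struct page ***pagesp, long *npagesp)
{
	unsigned long first = uaddr >> PAGE_SHIFT;
	unsigned long last = (uaddr + len - 1) >> PAGE_SHIFT;
	long npages = last - first + 1;
	struct page **pages;
	long pinned;

	pages = kvcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/*
	 * FOLL_WRITE: the device will write into the buffer.
	 * FOLL_LONGTERM: the pin may outlive the syscall, so GUP first
	 * migrates CMA pages out of the way (check_and_migrate_cma_pages()).
	 */
	pinned = pin_user_pages_fast(uaddr, npages,
				     FOLL_WRITE | FOLL_LONGTERM, pages);
	if (pinned != npages) {
		/* Partial pin: drop whatever was pinned and report failure. */
		if (pinned > 0)
			unpin_user_pages(pages, pinned);
		kvfree(pages);
		return pinned < 0 ? pinned : -EFAULT;
	}

	*pagesp = pages;
	*npagesp = npages;
	return 0;
}

static void example_unpin_user_buffer(struct page **pages, long npages)
{
	/* make_dirty = true: the pages may have been written via DMA. */
	unpin_user_pages_dirty_lock(pages, npages, true);
	kvfree(pages);
}

Because pin_user_pages_fast() takes the FOLL_PIN reference internally, the pages must be released through the unpin_user_pages*() calls rather than put_page().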