// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/mm/mincore.c
 *
 * Copyright (C) 1994-2006  Linus Torvalds
 */

/*
 * The mincore() system call.
 */
#include <linux/pagemap.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/mman.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
			unsigned long end, struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned char present;
	unsigned char *vec = walk->private;

	/*
	 * Hugepages mapped into a user process are always resident in RAM
	 * and never swapped out; in theory, though, the pte still needs to
	 * be checked.
	 */
	present = pte && !huge_pte_none(huge_ptep_get(pte));
	for (; addr != end; vec++, addr += PAGE_SIZE)
		*vec = present;
	walk->private = vec;
#else
	BUG();
#endif
	return 0;
}

/*
 * Later we can get more picky about what "in core" means precisely.
 * For now, simply check to see if the page is in the page cache,
 * and is up to date; i.e. that no page-in operation would be required
 * at this time if an application were to map and access this page.
 */
static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
{
	unsigned char present = 0;
	struct page *page;

	/*
	 * When tmpfs swaps out a page from a file, any process mapping that
	 * file will not get a swp_entry_t in its pte, but rather it is like
	 * any other file mapping (ie. marked !present and faulted in with
	 * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
	 */
	page = find_get_incore_page(mapping, index);
	if (page) {
		present = PageUptodate(page);
		put_page(page);
	}

	return present;
}
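
/*
 * Example of these semantics: a tmpfs page that has been written to swap
 * is still reported as "in core" if an uptodate copy remains in the swap
 * cache, since no page-in would be needed to access it; only a page that
 * has been fully evicted to swap reports 0.
 */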

static int __mincore_unmapped_range(unsigned long addr, unsigned long end,
				struct vm_area_struct *vma, unsigned char *vec)
{
	unsigned long nr = (end - addr) >> PAGE_SHIFT;
	int i;

	if (vma->vm_file) {
		pgoff_t pgoff;

		pgoff = linear_page_index(vma, addr);
		for (i = 0; i < nr; i++, pgoff++)
			vec[i] = mincore_page(vma->vm_file->f_mapping, pgoff);
	} else {
		for (i = 0; i < nr; i++)
			vec[i] = 0;
	}
	return nr;
}

static int mincore_unmapped_range(unsigned long addr, unsigned long end,
				   __always_unused int depth,
				   struct mm_walk *walk)
{
	walk->private += __mincore_unmapped_range(addr, end,
						  walk->vma, walk->private);
	return 0;
}

static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			struct mm_walk *walk)
{
	spinlock_t *ptl;
	struct vm_area_struct *vma = walk->vma;
	pte_t *ptep;
	unsigned char *vec = walk->private;
	int nr = (end - addr) >> PAGE_SHIFT;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		memset(vec, 1, nr);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd)) {
		__mincore_unmapped_range(addr, end, vma, vec);
		goto out;
	}

	ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; ptep++, addr += PAGE_SIZE) {
		pte_t pte = *ptep;

		if (pte_none(pte))
			__mincore_unmapped_range(addr, addr + PAGE_SIZE,
						 vma, vec);
		else if (pte_present(pte))
			*vec = 1;
		else { /* pte is a swap entry */
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (non_swap_entry(entry)) {
				/*
				 * migration or hwpoison entries are always
				 * uptodate
				 */
				*vec = 1;
			} else {
#ifdef CONFIG_SWAP
				*vec = mincore_page(swap_address_space(entry),
						    swp_offset(entry));
#else
				WARN_ON(1);
				*vec = 1;
#endif
			}
		}
		vec++;
	}
	pte_unmap_unlock(ptep - 1, ptl);
out:
	walk->private += nr;
	cond_resched();
	return 0;
}

static inline bool can_do_mincore(struct vm_area_struct *vma)
{
	if (vma_is_anonymous(vma))
		return true;
	if (!vma->vm_file)
		return false;
	/*
	 * Reveal pagecache information only for non-anonymous mappings that
	 * correspond to the files the calling process could (if tried) open
	 * for writing; otherwise we'd be including shared non-exclusive
	 * mappings, which opens a side channel.
	 */
	return inode_owner_or_capable(file_inode(vma->vm_file)) ||
		inode_permission(file_inode(vma->vm_file), MAY_WRITE) == 0;
}
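
/*
 * Illustrative consequence of the check above: an unprivileged process
 * that maps a file it neither owns nor could open for writing gets no
 * real residency data; do_mincore() below reports every page in such a
 * mapping as resident, so pagecache state cannot be probed through it.
 */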

static const struct mm_walk_ops mincore_walk_ops = {
	.pmd_entry		= mincore_pte_range,
	.pte_hole		= mincore_unmapped_range,
	.hugetlb_entry		= mincore_hugetlb,
};

/*
 * Do a chunk of "sys_mincore()". We've already checked
 * all the arguments, we hold the mmap lock: we should
 * just return the amount of info we're asked for.
 */
static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *vec)
{
	struct vm_area_struct *vma;
	unsigned long end;
	int err;

	vma = find_vma(current->mm, addr);
	if (!vma || addr < vma->vm_start)
		return -ENOMEM;
	end = min(vma->vm_end, addr + (pages << PAGE_SHIFT));
	if (!can_do_mincore(vma)) {
		unsigned long pages = DIV_ROUND_UP(end - addr, PAGE_SIZE);
		memset(vec, 1, pages);
		return pages;
	}
	err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec);
	if (err < 0)
		return err;
	return (end - addr) >> PAGE_SHIFT;
}

/*
 * The mincore(2) system call.
 *
 * mincore() returns the memory residency status of the pages in the
 * current process's address space specified by [addr, addr + len).
 * The status is returned in a vector of bytes.  The least significant
 * bit of each byte is 1 if the referenced page is in memory, otherwise
 * it is zero.
 *
 * Because the status of a page can change after mincore() checks it
 * but before it returns to the application, the returned vector may
 * contain stale information.  Only locked pages are guaranteed to
 * remain in memory.
 *
 * return values:
 *  zero    - success
 *  -EFAULT - vec points to an illegal address
 *  -EINVAL - addr is not a multiple of PAGE_SIZE
 *  -ENOMEM - Addresses in the range [addr, addr + len) are
 *		invalid for the address space of this process, or
 *		specify one or more pages which are not currently
 *		mapped
 *  -EAGAIN - A kernel resource was temporarily unavailable.
 */
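
/*
 * A minimal userspace usage sketch of the call documented above (an
 * illustrative addition, not part of the original file; the mapped path
 * is arbitrary and error handling is abbreviated):
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *
 *	int main(void)
 *	{
 *		long psize = sysconf(_SC_PAGESIZE);
 *		int fd = open("/etc/hosts", O_RDONLY);	// arbitrary file
 *		struct stat st;
 *
 *		if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
 *			return 1;
 *
 *		size_t npages = (st.st_size + psize - 1) / psize;
 *		void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED,
 *				 fd, 0);
 *		unsigned char *vec = malloc(npages);
 *
 *		if (map == MAP_FAILED || !vec)
 *			return 1;
 *		if (mincore(map, st.st_size, vec) == 0)
 *			for (size_t i = 0; i < npages; i++)
 *				printf("page %zu: %s\n", i,
 *				       (vec[i] & 1) ? "in core" : "not in core");
 *		return 0;
 *	}
 */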
SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
		unsigned char __user *, vec)
{
	long retval;
	unsigned long pages;
	unsigned char *tmp;

	start = untagged_addr(start);

	/* Check the start address: needs to be page-aligned.. */
	if (start & ~PAGE_MASK)
		return -EINVAL;

	/* ..and we need to be passed a valid user-space range */
	if (!access_ok((void __user *) start, len))
		return -ENOMEM;

	/* This also avoids any overflows on PAGE_ALIGN */
	pages = len >> PAGE_SHIFT;
	pages += (offset_in_page(len)) != 0;

	if (!access_ok(vec, pages))
		return -EFAULT;

	tmp = (void *) __get_free_page(GFP_USER);
	if (!tmp)
		return -EAGAIN;

	retval = 0;
	while (pages) {
		/*
		 * Do at most PAGE_SIZE entries per iteration, due to
		 * the temporary buffer size.
		 */
		mmap_read_lock(current->mm);
		retval = do_mincore(start, min(pages, PAGE_SIZE), tmp);
		mmap_read_unlock(current->mm);

		if (retval <= 0)
			break;
		if (copy_to_user(vec, tmp, retval)) {
			retval = -EFAULT;
			break;
		}
		pages -= retval;
		vec += retval;
		start += retval << PAGE_SHIFT;
		retval = 0;
	}
	free_page((unsigned long) tmp);
	return retval;
}