/* fs/proc/task_mmu.c */
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/vmacache.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>

#include <asm/elf.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) << (PAGE_SHIFT-10), 8)
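
/*
 * For illustration: the counters below are in pages, and with 4K pages
 * PAGE_SHIFT-10 == 2, so SEQ_PUT_DEC("VmPeak:\t", 25) would emit
 * "VmPeak:\t     100" (100 kB, right-aligned in an 8-character field).
 */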
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long text, lib, swap, anon, file, shmem;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	anon = get_mm_counter(mm, MM_ANONPAGES);
	file = get_mm_counter(mm, MM_FILEPAGES);
	shmem = get_mm_counter(mm, MM_SHMEMPAGES);

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = anon + file + shmem;
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	/* split executable areas between text and lib */
	text = PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK);
	text = min(text, mm->exec_vm << PAGE_SHIFT);
	lib = (mm->exec_vm << PAGE_SHIFT) - text;

	swap = get_mm_counter(mm, MM_SWAPENTS);
	SEQ_PUT_DEC("VmPeak:\t", hiwater_vm);
	SEQ_PUT_DEC(" kB\nVmSize:\t", total_vm);
	SEQ_PUT_DEC(" kB\nVmLck:\t", mm->locked_vm);
	SEQ_PUT_DEC(" kB\nVmPin:\t", atomic64_read(&mm->pinned_vm));
	SEQ_PUT_DEC(" kB\nVmHWM:\t", hiwater_rss);
	SEQ_PUT_DEC(" kB\nVmRSS:\t", total_rss);
	SEQ_PUT_DEC(" kB\nRssAnon:\t", anon);
	SEQ_PUT_DEC(" kB\nRssFile:\t", file);
	SEQ_PUT_DEC(" kB\nRssShmem:\t", shmem);
	SEQ_PUT_DEC(" kB\nVmData:\t", mm->data_vm);
	SEQ_PUT_DEC(" kB\nVmStk:\t", mm->stack_vm);
	seq_put_decimal_ull_width(m,
		    " kB\nVmExe:\t", text >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmLib:\t", lib >> 10, 8);
	seq_put_decimal_ull_width(m,
		    " kB\nVmPTE:\t", mm_pgtables_bytes(mm) >> 10, 8);
	SEQ_PUT_DEC(" kB\nVmSwap:\t", swap);
	seq_puts(m, " kB\n");
	hugetlb_report_usage(m, mm);
}
#undef SEQ_PUT_DEC

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES) +
			get_mm_counter(mm, MM_SHMEMPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->data_vm + mm->stack_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}
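
/*
 * These values back /proc/PID/statm, whose seven space-separated fields
 * (all in pages) are: size resident shared text lib data dt, with lib
 * and dt printed as 0 on current kernels.  For example,
 * "cat /proc/self/statm" might print "2134 412 350 8 0 180 0".
 */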

#ifdef CONFIG_NUMA
/*
 * Save get_task_policy() for show_numa_map().
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = get_task_policy(task);
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages_remote(mm, page_start_vaddr, 1, 0,
						     &page, NULL, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_user_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}
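
/*
 * Context, not part of the upstream comments: on kernels carrying this
 * Android patch, userspace assigns such names with
 * prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, start, len, name_ptr); the
 * mapping then shows up in /proc/PID/maps as e.g.
 * "[anon:dalvik-main space]".
 */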

static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = *ppos;
	struct mm_struct *mm;
	struct vm_area_struct *vma;

	/* See m_next(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		put_task_struct(priv->task);
		priv->task = NULL;
		return NULL;
	}

	if (mmap_read_lock_killable(mm)) {
		mmput(mm);
		put_task_struct(priv->task);
		priv->task = NULL;
		return ERR_PTR(-EINTR);
	}

	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	vma = find_vma(mm, last_addr);
	if (vma)
		return vma;

	return priv->tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *next, *vma = v;

	if (vma == priv->tail_vma)
		next = NULL;
	else if (vma->vm_next)
		next = vma->vm_next;
	else
		next = priv->tail_vma;

	*ppos = next ? next->vm_start : -1UL;

	return next;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm = priv->mm;

	if (!priv->task)
		return;

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);
	mmput(mm);
	put_task_struct(priv->task);
	priv->task = NULL;
}

static int proc_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops, int psize)
{
	struct proc_maps_private *priv = __seq_open_private(file, ops, psize);

	if (!priv)
		return -ENOMEM;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		int err = PTR_ERR(priv->mm);

		seq_release_private(inode, file);
		return err;
	}

	return 0;
}

static int proc_map_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	return seq_release_private(inode, file);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	return proc_maps_open(inode, file, ops,
				sizeof(struct proc_maps_private));
}

/*
 * Indicate if the VMA is a stack for the given task; for
 * /proc/PID/maps that is the stack of the main task.
 */
static int is_stack(struct vm_area_struct *vma)
{
	/*
	 * We make no effort to guess what a given thread considers to be
	 * its "stack".  It's not even well-defined for programs written
	 * in languages like Go.
	 */
	return vma->vm_start <= vma->vm_mm->start_stack &&
		vma->vm_end >= vma->vm_mm->start_stack;
}

static void show_vma_header_prefix(struct seq_file *m,
				   unsigned long start, unsigned long end,
				   vm_flags_t flags, unsigned long long pgoff,
				   dev_t dev, unsigned long ino)
{
	seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
	seq_put_hex_ll(m, NULL, start, 8);
	seq_put_hex_ll(m, "-", end, 8);
	seq_putc(m, ' ');
	seq_putc(m, flags & VM_READ ? 'r' : '-');
	seq_putc(m, flags & VM_WRITE ? 'w' : '-');
	seq_putc(m, flags & VM_EXEC ? 'x' : '-');
	seq_putc(m, flags & VM_MAYSHARE ? 's' : 'p');
	seq_put_hex_ll(m, " ", pgoff, 8);
	seq_put_hex_ll(m, " ", MAJOR(dev), 2);
	seq_put_hex_ll(m, ":", MINOR(dev), 2);
	seq_put_decimal_ull(m, " ", ino);
	seq_putc(m, ' ');
}
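
/*
 * For illustration, the prefix above yields the familiar /proc/PID/maps
 * layout (address range, permissions, file offset, device, inode), e.g.:
 *
 *   00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/dbus-daemon
 */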

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	start = vma->vm_start;
	end = vma->vm_end;
	show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		seq_pad(m, ' ');
		seq_file_path(m, file, "\n");
		goto done;
	}

	if (vma->vm_ops && vma->vm_ops->name) {
		name = vma->vm_ops->name(vma);
		if (name)
			goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		if (is_stack(vma)) {
			name = "[stack]";
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			seq_pad(m, ' ');
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		seq_pad(m, ' ');
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}

static int show_map(struct seq_file *m, void *v)
{
	show_map_vma(m, v);
	return 0;
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * The PSS of a process is the count of pages it has in memory, where
 * each page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep accumulated division errors low, we use a 64-bit fixed-point
 * pss counter, so (pss >> PSS_SHIFT) is the real byte count.
 *
 * A shift of 12 before division means (assuming 4K page size):
 * 	- 1M 3-user-pages add up to 8KB errors;
 * 	- supports mapcount up to 2^24, or 16M;
 * 	- supports PSS up to 2^52 bytes, or 4PB.
 */
#define PSS_SHIFT 12
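
/*
 * Worked example of the fixed-point scheme, assuming 4K pages: a page
 * shared by three processes contributes (4096 << 12) / 3 = 5592405 to
 * the pss accumulator; after the final >> PSS_SHIFT that is 1365 bytes,
 * versus the exact 4096/3 = 1365.33 bytes, so each page loses well
 * under a byte to rounding.
 */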

#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long lazyfree;
	unsigned long anonymous_thp;
	unsigned long shmem_thp;
	unsigned long file_thp;
	unsigned long swap;
	unsigned long shared_hugetlb;
	unsigned long private_hugetlb;
	u64 pss;
	u64 pss_anon;
	u64 pss_file;
	u64 pss_shmem;
	u64 pss_locked;
	u64 swap_pss;
	bool check_shmem_swap;
};

static void smaps_page_accumulate(struct mem_size_stats *mss,
		struct page *page, unsigned long size, unsigned long pss,
		bool dirty, bool locked, bool private)
{
	mss->pss += pss;

	if (PageAnon(page))
		mss->pss_anon += pss;
	else if (PageSwapBacked(page))
		mss->pss_shmem += pss;
	else
		mss->pss_file += pss;

	if (locked)
		mss->pss_locked += pss;

	if (dirty || PageDirty(page)) {
		if (private)
			mss->private_dirty += size;
		else
			mss->shared_dirty += size;
	} else {
		if (private)
			mss->private_clean += size;
		else
			mss->shared_clean += size;
	}
}

static void smaps_account(struct mem_size_stats *mss, struct page *page,
		bool compound, bool young, bool dirty, bool locked,
		bool migration)
{
	int i, nr = compound ? compound_nr(page) : 1;
	unsigned long size = nr * PAGE_SIZE;

	/*
	 * First accumulate quantities that depend only on |size| and the type
	 * of the compound page.
	 */
	if (PageAnon(page)) {
		mss->anonymous += size;
		if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
			mss->lazyfree += size;
	}

	mss->resident += size;
	/* Accumulate the size in pages that have been accessed. */
	if (young || page_is_young(page) || PageReferenced(page))
		mss->referenced += size;

	/*
	 * Then accumulate quantities that may depend on sharing, or that may
	 * differ page-by-page.
	 *
	 * page_count(page) == 1 guarantees the page is mapped exactly once.
	 * If any subpage of the compound page is mapped with a PTE, it would
	 * elevate page_count().
	 *
	 * page_mapcount() is called to get a snapshot of the mapcount.
	 * Without holding the page lock this snapshot can be slightly wrong as
	 * we cannot always read the mapcount atomically.  It is not safe to
	 * call page_mapcount() even with PTL held if the page is not mapped,
	 * especially for migration entries.  Treat regular migration entries
	 * as mapcount == 1.
	 */
	if ((page_count(page) == 1) || migration) {
		smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
			locked, true);
		return;
	}
	for (i = 0; i < nr; i++, page++) {
		int mapcount = page_mapcount(page);
		unsigned long pss = PAGE_SIZE << PSS_SHIFT;
		if (mapcount >= 2)
			pss /= mapcount;
		smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
				      mapcount < 2);
	}
}
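
/*
 * Illustrative numbers (assuming 4K pages): for a 2M THP whose 512
 * subpages are all mapped once except one subpage shared with a second
 * process, the loop above adds PAGE_SIZE << PSS_SHIFT for each of the
 * 511 private subpages and (PAGE_SIZE << PSS_SHIFT) / 2 for the shared
 * one, so Pss comes out at 511 * 4K + 2K = 2046K rather than 2048K.
 */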

#ifdef CONFIG_SHMEM
static int smaps_pte_hole(unsigned long addr, unsigned long end,
			  __always_unused int depth, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;

	mss->swap += shmem_partial_swap_usage(
			walk->vma->vm_file->f_mapping, addr, end);

	return 0;
}
#else
#define smaps_pte_hole		NULL
#endif /* CONFIG_SHMEM */

static void smaps_pte_entry(pte_t *pte, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false, young = false, dirty = false;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
		young = pte_young(*pte);
		dirty = pte_dirty(*pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (!non_swap_entry(swpent)) {
			int mapcount;

			mss->swap += PAGE_SIZE;
			mapcount = swp_swapcount(swpent);
			if (mapcount >= 2) {
				u64 pss_delta = (u64)PAGE_SIZE << PSS_SHIFT;

				do_div(pss_delta, mapcount);
				mss->swap_pss += pss_delta;
			} else {
				mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
			}
		} else if (is_migration_entry(swpent)) {
			migration = true;
			page = migration_entry_to_page(swpent);
		} else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	} else if (unlikely(IS_ENABLED(CONFIG_SHMEM) && mss->check_shmem_swap
							&& pte_none(*pte))) {
		page = xa_load(&vma->vm_file->f_mapping->i_pages,
						linear_page_index(vma, addr));
		if (xa_is_value(page))
			mss->swap += PAGE_SIZE;
		return;
	}

	if (!page)
		return;

	smaps_account(mss, page, false, young, dirty, locked, migration);
}
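
/*
 * SwapPss mirrors the Pss scheme for swapped-out anonymous pages: a swap
 * entry referenced from two address spaces (swp_swapcount() == 2)
 * contributes PAGE_SIZE/2 to each process's SwapPss, while Swap still
 * charges the full PAGE_SIZE to both.
 */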

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	bool locked = !!(vma->vm_flags & VM_LOCKED);
	struct page *page = NULL;
	bool migration = false;

	if (pmd_present(*pmd)) {
		/* FOLL_DUMP will return -EFAULT on huge zero page */
		page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
	} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
		swp_entry_t entry = pmd_to_swp_entry(*pmd);

		if (is_migration_entry(entry)) {
			migration = true;
			page = migration_entry_to_page(entry);
		}
	}
	if (IS_ERR_OR_NULL(page))
		return;
	if (PageAnon(page))
		mss->anonymous_thp += HPAGE_PMD_SIZE;
	else if (PageSwapBacked(page))
		mss->shmem_thp += HPAGE_PMD_SIZE;
	else if (is_zone_device_page(page))
		/* pass */;
	else
		mss->file_thp += HPAGE_PMD_SIZE;

	smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
		      locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
		struct mm_walk *walk)
{
}
#endif

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	pte_t *pte;
	spinlock_t *ptl;

	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		smaps_pmd_entry(pmd, addr, walk);
		spin_unlock(ptl);
		goto out;
	}

	if (pmd_trans_unstable(pmd))
		goto out;
	/*
	 * The mmap_lock held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(pte, addr, walk);
	pte_unmap_unlock(pte - 1, ptl);
out:
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_SYNC)]	= "sf",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_WIPEONFORK)]	= "wf",
		[ilog2(VM_DONTDUMP)]	= "dd",
#ifdef CONFIG_ARM64_BTI
		[ilog2(VM_ARM64_BTI)]	= "bt",
#endif
#ifdef CONFIG_MEM_SOFT_DIRTY
		[ilog2(VM_SOFTDIRTY)]	= "sd",
#endif
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
		[ilog2(VM_UFFD_MISSING)]= "um",
		[ilog2(VM_UFFD_WP)]	= "uw",
#ifdef CONFIG_ARM64_MTE
		[ilog2(VM_MTE)]		= "mt",
		[ilog2(VM_MTE_ALLOWED)]	= "",
#endif
#ifdef CONFIG_ARCH_HAS_PKEYS
		/* These come out via ProtectionKey: */
		[ilog2(VM_PKEY_BIT0)]	= "",
		[ilog2(VM_PKEY_BIT1)]	= "",
		[ilog2(VM_PKEY_BIT2)]	= "",
		[ilog2(VM_PKEY_BIT3)]	= "",
#if VM_PKEY_BIT4
		[ilog2(VM_PKEY_BIT4)]	= "",
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
		[ilog2(VM_UFFD_MINOR)]	= "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (!mnemonics[i][0])
			continue;
		if (vma->vm_flags & (1UL << i)) {
			seq_putc(m, mnemonics[i][0]);
			seq_putc(m, mnemonics[i][1]);
			seq_putc(m, ' ');
		}
	}
	seq_putc(m, '\n');
}
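
/*
 * Example output for a typical private, readable/writable anonymous
 * mapping (see Documentation/filesystems/proc.rst for the full legend):
 *
 *   VmFlags: rd wr mr mw me ac
 */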

#ifdef CONFIG_HUGETLB_PAGE
static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = walk->vma;
	struct page *page = NULL;

	if (pte_present(*pte)) {
		page = vm_normal_page(vma, addr, *pte);
	} else if (is_swap_pte(*pte)) {
		swp_entry_t swpent = pte_to_swp_entry(*pte);

		if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
		else if (is_device_private_entry(swpent))
			page = device_private_entry_to_page(swpent);
	}
	if (page) {
		int mapcount = page_mapcount(page);

		if (mapcount >= 2)
			mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
		else
			mss->private_hugetlb += huge_page_size(hstate_vma(vma));
	}
	return 0;
}
#else
#define smaps_hugetlb_range	NULL
#endif /* HUGETLB_PAGE */

static const struct mm_walk_ops smaps_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
};

static const struct mm_walk_ops smaps_shmem_walk_ops = {
	.pmd_entry		= smaps_pte_range,
	.hugetlb_entry		= smaps_hugetlb_range,
	.pte_hole		= smaps_pte_hole,
};

/*
 * Gather mem stats from @vma with the indicated beginning
 * address @start, and keep them in @mss.
 *
 * Use vm_start of @vma as the beginning address if @start is 0.
 */
static void smap_gather_stats(struct vm_area_struct *vma,
		struct mem_size_stats *mss, unsigned long start)
{
	const struct mm_walk_ops *ops = &smaps_walk_ops;

	/* Invalid start */
	if (start >= vma->vm_end)
		return;

#ifdef CONFIG_SHMEM
	/* In case of smaps_rollup, reset the value from previous vma */
	mss->check_shmem_swap = false;
	if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) {
		/*
		 * For shared or readonly shmem mappings we know that all
		 * swapped out pages belong to the shmem object, and we can
		 * obtain the swap value much more efficiently. For private
		 * writable mappings, we might have COW pages that are
		 * not affected by the parent swapped out pages of the shmem
		 * object, so we have to distinguish them during the page walk.
		 * Unless we know that the shmem object (or the part mapped by
		 * our VMA) has no swapped out pages at all.
		 */
		unsigned long shmem_swapped = shmem_swap_usage(vma);

		if (!start && (!shmem_swapped || (vma->vm_flags & VM_SHARED) ||
					!(vma->vm_flags & VM_WRITE))) {
			mss->swap += shmem_swapped;
		} else {
			mss->check_shmem_swap = true;
			ops = &smaps_shmem_walk_ops;
		}
	}
#endif
	/* mmap_lock is held in m_start */
	if (!start)
		walk_page_vma(vma, ops, mss);
	else
		walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss);
}

#define SEQ_PUT_DEC(str, val) \
		seq_put_decimal_ull_width(m, str, (val) >> 10, 8)
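
/*
 * Note the unit change: unlike the task_mem() variant of this macro
 * above, which shifts page counts up into kB, this SEQ_PUT_DEC takes
 * byte counts and shifts them down by 10 to print kB.
 */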

/* Show the contents common for smaps and smaps_rollup */
static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
	bool rollup_mode)
{
	SEQ_PUT_DEC("Rss:            ", mss->resident);
	SEQ_PUT_DEC(" kB\nPss:            ", mss->pss >> PSS_SHIFT);
	if (rollup_mode) {
		/*
		 * These are meaningful only for smaps_rollup, otherwise two of
		 * them are zero, and the other one is the same as Pss.
		 */
		SEQ_PUT_DEC(" kB\nPss_Anon:       ",
			mss->pss_anon >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_File:       ",
			mss->pss_file >> PSS_SHIFT);
		SEQ_PUT_DEC(" kB\nPss_Shmem:      ",
			mss->pss_shmem >> PSS_SHIFT);
	}
	SEQ_PUT_DEC(" kB\nShared_Clean:   ", mss->shared_clean);
	SEQ_PUT_DEC(" kB\nShared_Dirty:   ", mss->shared_dirty);
	SEQ_PUT_DEC(" kB\nPrivate_Clean:  ", mss->private_clean);
	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
	SEQ_PUT_DEC(" kB\nFilePmdMapped:  ", mss->file_thp);
	SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb);
	seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ",
				  mss->private_hugetlb >> 10, 7);
	SEQ_PUT_DEC(" kB\nSwap:           ", mss->swap);
	SEQ_PUT_DEC(" kB\nSwapPss:        ",
					mss->swap_pss >> PSS_SHIFT);
	SEQ_PUT_DEC(" kB\nLocked:         ",
					mss->pss_locked >> PSS_SHIFT);
	seq_puts(m, " kB\n");
}

static int show_smap(struct seq_file *m, void *v)
{
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;

	memset(&mss, 0, sizeof(mss));

	smap_gather_stats(vma, &mss, 0);

	show_map_vma(m, vma);
	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name:           ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
	SEQ_PUT_DEC(" kB\nMMUPageSize:    ", vma_mmu_pagesize(vma));
	seq_puts(m, " kB\n");

	__show_smap(m, &mss, false);

	seq_printf(m, "THPeligible:    %d\n",
		   transparent_hugepage_active(vma));

	if (arch_pkeys_enabled())
		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
	show_smap_vma_flags(m, vma);

	return 0;
}

static int show_smaps_rollup(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct mem_size_stats mss;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long last_vma_end = 0;
	int ret = 0;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return -ESRCH;

	mm = priv->mm;
	if (!mm || !mmget_not_zero(mm)) {
		ret = -ESRCH;
		goto out_put_task;
	}

	memset(&mss, 0, sizeof(mss));

	ret = mmap_read_lock_killable(mm);
	if (ret)
		goto out_put_mm;

	hold_task_mempolicy(priv);

	for (vma = priv->mm->mmap; vma;) {
		smap_gather_stats(vma, &mss, 0);
		last_vma_end = vma->vm_end;

		/*
		 * Temporarily drop mmap_lock if another thread is
		 * waiting to take it for writing.
		 */
		if (mmap_lock_is_contended(mm)) {
			mmap_read_unlock(mm);
			ret = mmap_read_lock_killable(mm);
			if (ret) {
				release_task_mempolicy(priv);
				goto out_put_mm;
			}

			/*
			 * After dropping the lock, there are four cases to
			 * consider. See the following example for explanation.
			 *
			 *   +------+------+-----------+
			 *   | VMA1 | VMA2 | VMA3      |
			 *   +------+------+-----------+
			 *   |      |      |           |
			 *  4k     8k     16k         400k
			 *
			 * Suppose we drop the lock after reading VMA2 due to
			 * contention, then we get:
			 *
			 *	last_vma_end = 16k
			 *
			 * 1) VMA2 is freed, but VMA3 exists:
			 *
			 *    find_vma(mm, 16k - 1) will return VMA3.
			 *    In this case, just continue from VMA3.
			 *
			 * 2) VMA2 still exists:
			 *
			 *    find_vma(mm, 16k - 1) will return VMA2.
			 *    Iterate the loop like the original one.
			 *
			 * 3) No more VMAs can be found:
			 *
			 *    find_vma(mm, 16k - 1) will return NULL.
			 *    No more things to do, just break.
			 *
			 * 4) (last_vma_end - 1) is the middle of a vma (VMA'):
			 *
			 *    find_vma(mm, 16k - 1) will return VMA' whose range
			 *    contains last_vma_end.
			 *    Iterate VMA' from last_vma_end.
			 */
			vma = find_vma(mm, last_vma_end - 1);
			/* Case 3 above */
			if (!vma)
				break;

			/* Case 1 above */
			if (vma->vm_start >= last_vma_end)
				continue;

			/* Case 4 above */
			if (vma->vm_end > last_vma_end)
				smap_gather_stats(vma, &mss, last_vma_end);
		}
		/* Case 2 above */
		vma = vma->vm_next;
	}

	show_vma_header_prefix(m, priv->mm->mmap ? priv->mm->mmap->vm_start : 0,
			       last_vma_end, 0, 0, 0, 0);
	seq_pad(m, ' ');
	seq_puts(m, "[rollup]\n");

	__show_smap(m, &mss, true);

	release_task_mempolicy(priv);
	mmap_read_unlock(mm);

out_put_mm:
	mmput(mm);
out_put_task:
	put_task_struct(priv->task);
	priv->task = NULL;

	return ret;
}
#undef SEQ_PUT_DEC

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int smaps_rollup_open(struct inode *inode, struct file *file)
{
	int ret;
	struct proc_maps_private *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL_ACCOUNT);
	if (!priv)
		return -ENOMEM;

	ret = single_open(file, show_smaps_rollup, priv);
	if (ret)
		goto out_free;

	priv->inode = inode;
	priv->mm = proc_mem_open(inode, PTRACE_MODE_READ);
	if (IS_ERR(priv->mm)) {
		ret = PTR_ERR(priv->mm);

		single_release(inode, file);
		goto out_free;
	}

	return 0;

out_free:
	kfree(priv);
	return ret;
}

static int smaps_rollup_release(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;
	struct proc_maps_private *priv = seq->private;

	if (priv->mm)
		mmdrop(priv->mm);

	kfree(priv);
	return single_release(inode, file);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= proc_map_release,
};

const struct file_operations proc_pid_smaps_rollup_operations = {
	.open		= smaps_rollup_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= smaps_rollup_release,
};

enum clear_refs_types {
	CLEAR_REFS_ALL = 1,
	CLEAR_REFS_ANON,
	CLEAR_REFS_MAPPED,
	CLEAR_REFS_SOFT_DIRTY,
	CLEAR_REFS_MM_HIWATER_RSS,
	CLEAR_REFS_LAST,
};
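
/*
 * These correspond to the values userspace writes to /proc/PID/clear_refs:
 * "1" clears the referenced bits on all pages, "2" on anonymous pages
 * only, "3" on file-backed pages only, "4" clears soft-dirty bits (see
 * Documentation/admin-guide/mm/soft-dirty.rst), and "5" resets the
 * peak-RSS ("VmHWM") watermark.
 */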

struct clear_refs_private {
	enum clear_refs_types type;
};

#ifdef CONFIG_MEM_SOFT_DIRTY

#define is_cow_mapping(flags) (((flags) & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE)
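
/*
 * i.e. a mapping is COW when it is not shared but could be made
 * writable: MAP_PRIVATE gives VM_MAYWRITE without VM_SHARED, whereas
 * MAP_SHARED sets both.
 */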

static inline bool pte_is_pinned(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	struct page *page;

	if (!pte_write(pte))
		return false;
	if (!is_cow_mapping(vma->vm_flags))
		return false;
	if (likely(!atomic_read(&vma->vm_mm->has_pinned)))
		return false;
	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return false;
	return page_maybe_dma_pinned(page);
}
clear_soft_dirty(struct vm_area_struct * vma,unsigned long addr,pte_t * pte)1139*4882a593Smuzhiyun static inline void clear_soft_dirty(struct vm_area_struct *vma,
1140*4882a593Smuzhiyun 		unsigned long addr, pte_t *pte)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	/*
1143*4882a593Smuzhiyun 	 * The soft-dirty tracker uses page faults (#PF) to catch writes
1144*4882a593Smuzhiyun 	 * to pages, so write-protect the pte as well. See
1145*4882a593Smuzhiyun 	 * Documentation/admin-guide/mm/soft-dirty.rst for a full
1146*4882a593Smuzhiyun 	 * description of how soft-dirty works.
1147*4882a593Smuzhiyun 	 */
1148*4882a593Smuzhiyun 	pte_t ptent = *pte;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 	if (pte_present(ptent)) {
1151*4882a593Smuzhiyun 		pte_t old_pte;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 		if (pte_is_pinned(vma, addr, ptent))
1154*4882a593Smuzhiyun 			return;
1155*4882a593Smuzhiyun 		old_pte = ptep_modify_prot_start(vma, addr, pte);
1156*4882a593Smuzhiyun 		ptent = pte_wrprotect(old_pte);
1157*4882a593Smuzhiyun 		ptent = pte_clear_soft_dirty(ptent);
1158*4882a593Smuzhiyun 		ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
1159*4882a593Smuzhiyun 	} else if (is_swap_pte(ptent)) {
1160*4882a593Smuzhiyun 		ptent = pte_swp_clear_soft_dirty(ptent);
1161*4882a593Smuzhiyun 		set_pte_at(vma->vm_mm, addr, pte, ptent);
1162*4882a593Smuzhiyun 	}
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun #else
1165*4882a593Smuzhiyun static inline void clear_soft_dirty(struct vm_area_struct *vma,
1166*4882a593Smuzhiyun 		unsigned long addr, pte_t *pte)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun #endif
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun #if defined(CONFIG_MEM_SOFT_DIRTY) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
1172*4882a593Smuzhiyun static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1173*4882a593Smuzhiyun 		unsigned long addr, pmd_t *pmdp)
1174*4882a593Smuzhiyun {
1175*4882a593Smuzhiyun 	pmd_t old, pmd = *pmdp;
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	if (pmd_present(pmd)) {
1178*4882a593Smuzhiyun 		/* See comment in change_huge_pmd() */
1179*4882a593Smuzhiyun 		old = pmdp_invalidate(vma, addr, pmdp);
1180*4882a593Smuzhiyun 		if (pmd_dirty(old))
1181*4882a593Smuzhiyun 			pmd = pmd_mkdirty(pmd);
1182*4882a593Smuzhiyun 		if (pmd_young(old))
1183*4882a593Smuzhiyun 			pmd = pmd_mkyoung(pmd);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 		pmd = pmd_wrprotect(pmd);
1186*4882a593Smuzhiyun 		pmd = pmd_clear_soft_dirty(pmd);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1189*4882a593Smuzhiyun 	} else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
1190*4882a593Smuzhiyun 		pmd = pmd_swp_clear_soft_dirty(pmd);
1191*4882a593Smuzhiyun 		set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
1192*4882a593Smuzhiyun 	}
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun #else
1195*4882a593Smuzhiyun static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
1196*4882a593Smuzhiyun 		unsigned long addr, pmd_t *pmdp)
1197*4882a593Smuzhiyun {
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun #endif
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
1202*4882a593Smuzhiyun 				unsigned long end, struct mm_walk *walk)
1203*4882a593Smuzhiyun {
1204*4882a593Smuzhiyun 	struct clear_refs_private *cp = walk->private;
1205*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
1206*4882a593Smuzhiyun 	pte_t *pte, ptent;
1207*4882a593Smuzhiyun 	spinlock_t *ptl;
1208*4882a593Smuzhiyun 	struct page *page;
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	ptl = pmd_trans_huge_lock(pmd, vma);
1211*4882a593Smuzhiyun 	if (ptl) {
1212*4882a593Smuzhiyun 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1213*4882a593Smuzhiyun 			clear_soft_dirty_pmd(vma, addr, pmd);
1214*4882a593Smuzhiyun 			goto out;
1215*4882a593Smuzhiyun 		}
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 		if (!pmd_present(*pmd))
1218*4882a593Smuzhiyun 			goto out;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 		page = pmd_page(*pmd);
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 		/* Clear accessed and referenced bits. */
1223*4882a593Smuzhiyun 		pmdp_test_and_clear_young(vma, addr, pmd);
1224*4882a593Smuzhiyun 		test_and_clear_page_young(page);
1225*4882a593Smuzhiyun 		ClearPageReferenced(page);
1226*4882a593Smuzhiyun out:
1227*4882a593Smuzhiyun 		spin_unlock(ptl);
1228*4882a593Smuzhiyun 		return 0;
1229*4882a593Smuzhiyun 	}
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun 	if (pmd_trans_unstable(pmd))
1232*4882a593Smuzhiyun 		return 0;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1235*4882a593Smuzhiyun 	for (; addr != end; pte++, addr += PAGE_SIZE) {
1236*4882a593Smuzhiyun 		ptent = *pte;
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
1239*4882a593Smuzhiyun 			clear_soft_dirty(vma, addr, pte);
1240*4882a593Smuzhiyun 			continue;
1241*4882a593Smuzhiyun 		}
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 		if (!pte_present(ptent))
1244*4882a593Smuzhiyun 			continue;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 		page = vm_normal_page(vma, addr, ptent);
1247*4882a593Smuzhiyun 		if (!page)
1248*4882a593Smuzhiyun 			continue;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 		/* Clear accessed and referenced bits. */
1251*4882a593Smuzhiyun 		ptep_test_and_clear_young(vma, addr, pte);
1252*4882a593Smuzhiyun 		test_and_clear_page_young(page);
1253*4882a593Smuzhiyun 		ClearPageReferenced(page);
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 	pte_unmap_unlock(pte - 1, ptl);
1256*4882a593Smuzhiyun 	cond_resched();
1257*4882a593Smuzhiyun 	return 0;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun static int clear_refs_test_walk(unsigned long start, unsigned long end,
1261*4882a593Smuzhiyun 				struct mm_walk *walk)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun 	struct clear_refs_private *cp = walk->private;
1264*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	if (vma->vm_flags & VM_PFNMAP)
1267*4882a593Smuzhiyun 		return 1;
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun 	/*
1270*4882a593Smuzhiyun 	 * Writing 1 to /proc/pid/clear_refs affects all pages.
1271*4882a593Smuzhiyun 	 * Writing 2 to /proc/pid/clear_refs only affects anonymous pages.
1272*4882a593Smuzhiyun 	 * Writing 3 to /proc/pid/clear_refs only affects file mapped pages.
1273*4882a593Smuzhiyun 	 * Writing 4 to /proc/pid/clear_refs clears the soft-dirty bit on all pages.
1274*4882a593Smuzhiyun 	 */
1275*4882a593Smuzhiyun 	if (cp->type == CLEAR_REFS_ANON && vma->vm_file)
1276*4882a593Smuzhiyun 		return 1;
1277*4882a593Smuzhiyun 	if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file)
1278*4882a593Smuzhiyun 		return 1;
1279*4882a593Smuzhiyun 	return 0;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun static const struct mm_walk_ops clear_refs_walk_ops = {
1283*4882a593Smuzhiyun 	.pmd_entry		= clear_refs_pte_range,
1284*4882a593Smuzhiyun 	.test_walk		= clear_refs_test_walk,
1285*4882a593Smuzhiyun };
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun static ssize_t clear_refs_write(struct file *file, const char __user *buf,
1288*4882a593Smuzhiyun 				size_t count, loff_t *ppos)
1289*4882a593Smuzhiyun {
1290*4882a593Smuzhiyun 	struct task_struct *task;
1291*4882a593Smuzhiyun 	char buffer[PROC_NUMBUF];
1292*4882a593Smuzhiyun 	struct mm_struct *mm;
1293*4882a593Smuzhiyun 	struct vm_area_struct *vma;
1294*4882a593Smuzhiyun 	enum clear_refs_types type;
1295*4882a593Smuzhiyun 	int itype;
1296*4882a593Smuzhiyun 	int rv;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	memset(buffer, 0, sizeof(buffer));
1299*4882a593Smuzhiyun 	if (count > sizeof(buffer) - 1)
1300*4882a593Smuzhiyun 		count = sizeof(buffer) - 1;
1301*4882a593Smuzhiyun 	if (copy_from_user(buffer, buf, count))
1302*4882a593Smuzhiyun 		return -EFAULT;
1303*4882a593Smuzhiyun 	rv = kstrtoint(strstrip(buffer), 10, &itype);
1304*4882a593Smuzhiyun 	if (rv < 0)
1305*4882a593Smuzhiyun 		return rv;
1306*4882a593Smuzhiyun 	type = (enum clear_refs_types)itype;
1307*4882a593Smuzhiyun 	if (type < CLEAR_REFS_ALL || type >= CLEAR_REFS_LAST)
1308*4882a593Smuzhiyun 		return -EINVAL;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	task = get_proc_task(file_inode(file));
1311*4882a593Smuzhiyun 	if (!task)
1312*4882a593Smuzhiyun 		return -ESRCH;
1313*4882a593Smuzhiyun 	mm = get_task_mm(task);
1314*4882a593Smuzhiyun 	if (mm) {
1315*4882a593Smuzhiyun 		struct mmu_notifier_range range;
1316*4882a593Smuzhiyun 		struct clear_refs_private cp = {
1317*4882a593Smuzhiyun 			.type = type,
1318*4882a593Smuzhiyun 		};
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 		if (mmap_write_lock_killable(mm)) {
1321*4882a593Smuzhiyun 			count = -EINTR;
1322*4882a593Smuzhiyun 			goto out_mm;
1323*4882a593Smuzhiyun 		}
1324*4882a593Smuzhiyun 		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
1325*4882a593Smuzhiyun 			/*
1326*4882a593Smuzhiyun 			 * Writing 5 to /proc/pid/clear_refs resets the peak
1327*4882a593Smuzhiyun 			 * resident set size to this mm's current rss value.
1328*4882a593Smuzhiyun 			 */
1329*4882a593Smuzhiyun 			reset_mm_hiwater_rss(mm);
1330*4882a593Smuzhiyun 			goto out_unlock;
1331*4882a593Smuzhiyun 		}
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1334*4882a593Smuzhiyun 			for (vma = mm->mmap; vma; vma = vma->vm_next) {
1335*4882a593Smuzhiyun 				if (!(vma->vm_flags & VM_SOFTDIRTY))
1336*4882a593Smuzhiyun 					continue;
1337*4882a593Smuzhiyun 				vm_write_begin(vma);
1338*4882a593Smuzhiyun 				WRITE_ONCE(vma->vm_flags,
1339*4882a593Smuzhiyun 					vma->vm_flags & ~VM_SOFTDIRTY);
1340*4882a593Smuzhiyun 				vma_set_page_prot(vma);
1341*4882a593Smuzhiyun 				vm_write_end(vma);
1342*4882a593Smuzhiyun 			}
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 			inc_tlb_flush_pending(mm);
1345*4882a593Smuzhiyun 			mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
1346*4882a593Smuzhiyun 						0, NULL, mm, 0, -1UL);
1347*4882a593Smuzhiyun 			mmu_notifier_invalidate_range_start(&range);
1348*4882a593Smuzhiyun 		}
1349*4882a593Smuzhiyun 		walk_page_range(mm, 0, mm->highest_vm_end, &clear_refs_walk_ops,
1350*4882a593Smuzhiyun 				&cp);
1351*4882a593Smuzhiyun 		if (type == CLEAR_REFS_SOFT_DIRTY) {
1352*4882a593Smuzhiyun 			mmu_notifier_invalidate_range_end(&range);
1353*4882a593Smuzhiyun 			flush_tlb_mm(mm);
1354*4882a593Smuzhiyun 			dec_tlb_flush_pending(mm);
1355*4882a593Smuzhiyun 		}
1356*4882a593Smuzhiyun out_unlock:
1357*4882a593Smuzhiyun 		mmap_write_unlock(mm);
1358*4882a593Smuzhiyun out_mm:
1359*4882a593Smuzhiyun 		mmput(mm);
1360*4882a593Smuzhiyun 	}
1361*4882a593Smuzhiyun 	put_task_struct(task);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	return count;
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun const struct file_operations proc_clear_refs_operations = {
1367*4882a593Smuzhiyun 	.write		= clear_refs_write,
1368*4882a593Smuzhiyun 	.llseek		= noop_llseek,
1369*4882a593Smuzhiyun };
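
/*
 * User-space sketch (illustrative only; the pid and error handling are
 * assumed): restart soft-dirty tracking for a task, then let it run and
 * read the soft-dirty state back through /proc/<pid>/pagemap (bit 55).
 *
 *	int fd = open("/proc/1234/clear_refs", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "4", 1);	// 4 == CLEAR_REFS_SOFT_DIRTY
 *		close(fd);
 *	}
 */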
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun typedef struct {
1372*4882a593Smuzhiyun 	u64 pme;
1373*4882a593Smuzhiyun } pagemap_entry_t;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun struct pagemapread {
1376*4882a593Smuzhiyun 	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
1377*4882a593Smuzhiyun 	pagemap_entry_t *buffer;
1378*4882a593Smuzhiyun 	bool show_pfn;
1379*4882a593Smuzhiyun };
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun #define PAGEMAP_WALK_SIZE	(PMD_SIZE)
1382*4882a593Smuzhiyun #define PAGEMAP_WALK_MASK	(PMD_MASK)
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun #define PM_ENTRY_BYTES		sizeof(pagemap_entry_t)
1385*4882a593Smuzhiyun #define PM_PFRAME_BITS		55
1386*4882a593Smuzhiyun #define PM_PFRAME_MASK		GENMASK_ULL(PM_PFRAME_BITS - 1, 0)
1387*4882a593Smuzhiyun #define PM_SOFT_DIRTY		BIT_ULL(55)
1388*4882a593Smuzhiyun #define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
1389*4882a593Smuzhiyun #define PM_FILE			BIT_ULL(61)
1390*4882a593Smuzhiyun #define PM_SWAP			BIT_ULL(62)
1391*4882a593Smuzhiyun #define PM_PRESENT		BIT_ULL(63)
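
/*
 * Layout note: PM_PFRAME_BITS is the width of the frame field, so the
 * mask covers bits 0..54; the flag bits begin directly above it, with
 * PM_SOFT_DIRTY at bit 55 and PM_PRESENT at the top bit.
 */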
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun #define PM_END_OF_BUFFER    1
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun static inline pagemap_entry_t make_pme(u64 frame, u64 flags)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun 	return (pagemap_entry_t) { .pme = (frame & PM_PFRAME_MASK) | flags };
1398*4882a593Smuzhiyun }
1399*4882a593Smuzhiyun 
1400*4882a593Smuzhiyun static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
1401*4882a593Smuzhiyun 			  struct pagemapread *pm)
1402*4882a593Smuzhiyun {
1403*4882a593Smuzhiyun 	pm->buffer[pm->pos++] = *pme;
1404*4882a593Smuzhiyun 	if (pm->pos >= pm->len)
1405*4882a593Smuzhiyun 		return PM_END_OF_BUFFER;
1406*4882a593Smuzhiyun 	return 0;
1407*4882a593Smuzhiyun }
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun static int pagemap_pte_hole(unsigned long start, unsigned long end,
1410*4882a593Smuzhiyun 			    __always_unused int depth, struct mm_walk *walk)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun 	struct pagemapread *pm = walk->private;
1413*4882a593Smuzhiyun 	unsigned long addr = start;
1414*4882a593Smuzhiyun 	int err = 0;
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	while (addr < end) {
1417*4882a593Smuzhiyun 		struct vm_area_struct *vma = find_vma(walk->mm, addr);
1418*4882a593Smuzhiyun 		pagemap_entry_t pme = make_pme(0, 0);
1419*4882a593Smuzhiyun 		/* End of address space hole, which we mark as non-present. */
1420*4882a593Smuzhiyun 		unsigned long hole_end;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 		if (vma)
1423*4882a593Smuzhiyun 			hole_end = min(end, vma->vm_start);
1424*4882a593Smuzhiyun 		else
1425*4882a593Smuzhiyun 			hole_end = end;
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 		for (; addr < hole_end; addr += PAGE_SIZE) {
1428*4882a593Smuzhiyun 			err = add_to_pagemap(addr, &pme, pm);
1429*4882a593Smuzhiyun 			if (err)
1430*4882a593Smuzhiyun 				goto out;
1431*4882a593Smuzhiyun 		}
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 		if (!vma)
1434*4882a593Smuzhiyun 			break;
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 		/* Addresses in the VMA. */
1437*4882a593Smuzhiyun 		if (vma->vm_flags & VM_SOFTDIRTY)
1438*4882a593Smuzhiyun 			pme = make_pme(0, PM_SOFT_DIRTY);
1439*4882a593Smuzhiyun 		for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) {
1440*4882a593Smuzhiyun 			err = add_to_pagemap(addr, &pme, pm);
1441*4882a593Smuzhiyun 			if (err)
1442*4882a593Smuzhiyun 				goto out;
1443*4882a593Smuzhiyun 		}
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun out:
1446*4882a593Smuzhiyun 	return err;
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
1450*4882a593Smuzhiyun 		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
1451*4882a593Smuzhiyun {
1452*4882a593Smuzhiyun 	u64 frame = 0, flags = 0;
1453*4882a593Smuzhiyun 	struct page *page = NULL;
1454*4882a593Smuzhiyun 	bool migration = false;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun 	if (pte_present(pte)) {
1457*4882a593Smuzhiyun 		if (pm->show_pfn)
1458*4882a593Smuzhiyun 			frame = pte_pfn(pte);
1459*4882a593Smuzhiyun 		flags |= PM_PRESENT;
1460*4882a593Smuzhiyun 		page = vm_normal_page(vma, addr, pte);
1461*4882a593Smuzhiyun 		if (pte_soft_dirty(pte))
1462*4882a593Smuzhiyun 			flags |= PM_SOFT_DIRTY;
1463*4882a593Smuzhiyun 	} else if (is_swap_pte(pte)) {
1464*4882a593Smuzhiyun 		swp_entry_t entry;
1465*4882a593Smuzhiyun 		if (pte_swp_soft_dirty(pte))
1466*4882a593Smuzhiyun 			flags |= PM_SOFT_DIRTY;
1467*4882a593Smuzhiyun 		entry = pte_to_swp_entry(pte);
1468*4882a593Smuzhiyun 		if (pm->show_pfn)
1469*4882a593Smuzhiyun 			frame = swp_type(entry) |
1470*4882a593Smuzhiyun 				(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1471*4882a593Smuzhiyun 		flags |= PM_SWAP;
1472*4882a593Smuzhiyun 		if (is_migration_entry(entry)) {
1473*4882a593Smuzhiyun 			migration = true;
1474*4882a593Smuzhiyun 			page = migration_entry_to_page(entry);
1475*4882a593Smuzhiyun 		}
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun 		if (is_device_private_entry(entry))
1478*4882a593Smuzhiyun 			page = device_private_entry_to_page(entry);
1479*4882a593Smuzhiyun 	}
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	if (page && !PageAnon(page))
1482*4882a593Smuzhiyun 		flags |= PM_FILE;
1483*4882a593Smuzhiyun 	if (page && !migration && page_mapcount(page) == 1)
1484*4882a593Smuzhiyun 		flags |= PM_MMAP_EXCLUSIVE;
1485*4882a593Smuzhiyun 	if (vma->vm_flags & VM_SOFTDIRTY)
1486*4882a593Smuzhiyun 		flags |= PM_SOFT_DIRTY;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	return make_pme(frame, flags);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
1492*4882a593Smuzhiyun 			     struct mm_walk *walk)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
1495*4882a593Smuzhiyun 	struct pagemapread *pm = walk->private;
1496*4882a593Smuzhiyun 	spinlock_t *ptl;
1497*4882a593Smuzhiyun 	pte_t *pte, *orig_pte;
1498*4882a593Smuzhiyun 	int err = 0;
1499*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1500*4882a593Smuzhiyun 	bool migration = false;
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	ptl = pmd_trans_huge_lock(pmdp, vma);
1503*4882a593Smuzhiyun 	if (ptl) {
1504*4882a593Smuzhiyun 		u64 flags = 0, frame = 0;
1505*4882a593Smuzhiyun 		pmd_t pmd = *pmdp;
1506*4882a593Smuzhiyun 		struct page *page = NULL;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 		if (vma->vm_flags & VM_SOFTDIRTY)
1509*4882a593Smuzhiyun 			flags |= PM_SOFT_DIRTY;
1510*4882a593Smuzhiyun 
1511*4882a593Smuzhiyun 		if (pmd_present(pmd)) {
1512*4882a593Smuzhiyun 			page = pmd_page(pmd);
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 			flags |= PM_PRESENT;
1515*4882a593Smuzhiyun 			if (pmd_soft_dirty(pmd))
1516*4882a593Smuzhiyun 				flags |= PM_SOFT_DIRTY;
1517*4882a593Smuzhiyun 			if (pm->show_pfn)
1518*4882a593Smuzhiyun 				frame = pmd_pfn(pmd) +
1519*4882a593Smuzhiyun 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1520*4882a593Smuzhiyun 		}
1521*4882a593Smuzhiyun #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1522*4882a593Smuzhiyun 		else if (is_swap_pmd(pmd)) {
1523*4882a593Smuzhiyun 			swp_entry_t entry = pmd_to_swp_entry(pmd);
1524*4882a593Smuzhiyun 			unsigned long offset;
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 			if (pm->show_pfn) {
1527*4882a593Smuzhiyun 				offset = swp_offset(entry) +
1528*4882a593Smuzhiyun 					((addr & ~PMD_MASK) >> PAGE_SHIFT);
1529*4882a593Smuzhiyun 				frame = swp_type(entry) |
1530*4882a593Smuzhiyun 					(offset << MAX_SWAPFILES_SHIFT);
1531*4882a593Smuzhiyun 			}
1532*4882a593Smuzhiyun 			flags |= PM_SWAP;
1533*4882a593Smuzhiyun 			if (pmd_swp_soft_dirty(pmd))
1534*4882a593Smuzhiyun 				flags |= PM_SOFT_DIRTY;
1535*4882a593Smuzhiyun 			VM_BUG_ON(!is_pmd_migration_entry(pmd));
1536*4882a593Smuzhiyun 			migration = is_migration_entry(entry);
1537*4882a593Smuzhiyun 			page = migration_entry_to_page(entry);
1538*4882a593Smuzhiyun 		}
1539*4882a593Smuzhiyun #endif
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 		if (page && !migration && page_mapcount(page) == 1)
1542*4882a593Smuzhiyun 			flags |= PM_MMAP_EXCLUSIVE;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 		for (; addr != end; addr += PAGE_SIZE) {
1545*4882a593Smuzhiyun 			pagemap_entry_t pme = make_pme(frame, flags);
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 			err = add_to_pagemap(addr, &pme, pm);
1548*4882a593Smuzhiyun 			if (err)
1549*4882a593Smuzhiyun 				break;
1550*4882a593Smuzhiyun 			if (pm->show_pfn) {
1551*4882a593Smuzhiyun 				if (flags & PM_PRESENT)
1552*4882a593Smuzhiyun 					frame++;
1553*4882a593Smuzhiyun 				else if (flags & PM_SWAP)
1554*4882a593Smuzhiyun 					frame += (1 << MAX_SWAPFILES_SHIFT);
1555*4882a593Smuzhiyun 			}
1556*4882a593Smuzhiyun 		}
1557*4882a593Smuzhiyun 		spin_unlock(ptl);
1558*4882a593Smuzhiyun 		return err;
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	if (pmd_trans_unstable(pmdp))
1562*4882a593Smuzhiyun 		return 0;
1563*4882a593Smuzhiyun #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	/*
1566*4882a593Smuzhiyun 	 * We can assume that @vma always points to a valid VMA and @end never
1567*4882a593Smuzhiyun 	 * goes beyond vma->vm_end.
1568*4882a593Smuzhiyun 	 */
1569*4882a593Smuzhiyun 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
1570*4882a593Smuzhiyun 	for (; addr < end; pte++, addr += PAGE_SIZE) {
1571*4882a593Smuzhiyun 		pagemap_entry_t pme;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 		pme = pte_to_pagemap_entry(pm, vma, addr, *pte);
1574*4882a593Smuzhiyun 		err = add_to_pagemap(addr, &pme, pm);
1575*4882a593Smuzhiyun 		if (err)
1576*4882a593Smuzhiyun 			break;
1577*4882a593Smuzhiyun 	}
1578*4882a593Smuzhiyun 	pte_unmap_unlock(orig_pte, ptl);
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	cond_resched();
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	return err;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun #ifdef CONFIG_HUGETLB_PAGE
1586*4882a593Smuzhiyun /* This function walks within one hugetlb entry in a single call */
1587*4882a593Smuzhiyun static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
1588*4882a593Smuzhiyun 				 unsigned long addr, unsigned long end,
1589*4882a593Smuzhiyun 				 struct mm_walk *walk)
1590*4882a593Smuzhiyun {
1591*4882a593Smuzhiyun 	struct pagemapread *pm = walk->private;
1592*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
1593*4882a593Smuzhiyun 	u64 flags = 0, frame = 0;
1594*4882a593Smuzhiyun 	int err = 0;
1595*4882a593Smuzhiyun 	pte_t pte;
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	if (vma->vm_flags & VM_SOFTDIRTY)
1598*4882a593Smuzhiyun 		flags |= PM_SOFT_DIRTY;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	pte = huge_ptep_get(ptep);
1601*4882a593Smuzhiyun 	if (pte_present(pte)) {
1602*4882a593Smuzhiyun 		struct page *page = pte_page(pte);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 		if (!PageAnon(page))
1605*4882a593Smuzhiyun 			flags |= PM_FILE;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		if (page_mapcount(page) == 1)
1608*4882a593Smuzhiyun 			flags |= PM_MMAP_EXCLUSIVE;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 		flags |= PM_PRESENT;
1611*4882a593Smuzhiyun 		if (pm->show_pfn)
1612*4882a593Smuzhiyun 			frame = pte_pfn(pte) +
1613*4882a593Smuzhiyun 				((addr & ~hmask) >> PAGE_SHIFT);
1614*4882a593Smuzhiyun 	}
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	for (; addr != end; addr += PAGE_SIZE) {
1617*4882a593Smuzhiyun 		pagemap_entry_t pme = make_pme(frame, flags);
1618*4882a593Smuzhiyun 
1619*4882a593Smuzhiyun 		err = add_to_pagemap(addr, &pme, pm);
1620*4882a593Smuzhiyun 		if (err)
1621*4882a593Smuzhiyun 			return err;
1622*4882a593Smuzhiyun 		if (pm->show_pfn && (flags & PM_PRESENT))
1623*4882a593Smuzhiyun 			frame++;
1624*4882a593Smuzhiyun 	}
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun 	cond_resched();
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	return err;
1629*4882a593Smuzhiyun }
1630*4882a593Smuzhiyun #else
1631*4882a593Smuzhiyun #define pagemap_hugetlb_range	NULL
1632*4882a593Smuzhiyun #endif /* CONFIG_HUGETLB_PAGE */
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun static const struct mm_walk_ops pagemap_ops = {
1635*4882a593Smuzhiyun 	.pmd_entry	= pagemap_pmd_range,
1636*4882a593Smuzhiyun 	.pte_hole	= pagemap_pte_hole,
1637*4882a593Smuzhiyun 	.hugetlb_entry	= pagemap_hugetlb_range,
1638*4882a593Smuzhiyun };
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun /*
1641*4882a593Smuzhiyun  * /proc/pid/pagemap - an array mapping virtual pages to pfns
1642*4882a593Smuzhiyun  *
1643*4882a593Smuzhiyun  * For each page in the address space, this file contains one 64-bit entry
1644*4882a593Smuzhiyun  * consisting of the following:
1645*4882a593Smuzhiyun  *
1646*4882a593Smuzhiyun  * Bits 0-54  page frame number (PFN) if present
1647*4882a593Smuzhiyun  * Bits 0-4   swap type if swapped
1648*4882a593Smuzhiyun  * Bits 5-54  swap offset if swapped
1649*4882a593Smuzhiyun  * Bit  55    pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
1650*4882a593Smuzhiyun  * Bit  56    page exclusively mapped
1651*4882a593Smuzhiyun  * Bits 57-60 zero
1652*4882a593Smuzhiyun  * Bit  61    page is file-page or shared-anon
1653*4882a593Smuzhiyun  * Bit  62    page swapped
1654*4882a593Smuzhiyun  * Bit  63    page present
1655*4882a593Smuzhiyun  *
1656*4882a593Smuzhiyun  * If the page is not present but in swap, then the PFN contains an
1657*4882a593Smuzhiyun  * encoding of the swap file number and the page's offset into the
1658*4882a593Smuzhiyun  * swap. Unmapped pages return a null PFN. This allows determining
1659*4882a593Smuzhiyun  * precisely which pages are mapped (or in swap) and comparing mapped
1660*4882a593Smuzhiyun  * pages between processes.
1661*4882a593Smuzhiyun  *
1662*4882a593Smuzhiyun  * Efficient users of this interface will use /proc/pid/maps to
1663*4882a593Smuzhiyun  * determine which areas of memory are actually mapped and llseek to
1664*4882a593Smuzhiyun  * skip over unmapped regions.
1665*4882a593Smuzhiyun  */
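/*
 * User-space sketch (illustrative only; the pid, vaddr and surrounding
 * declarations are assumed): fetch the 64-bit entry for one virtual
 * address and decode it with the bit layout documented above.
 *
 *	uint64_t ent;
 *	int fd = open("/proc/1234/pagemap", O_RDONLY);
 *	pread(fd, &ent, sizeof(ent), (vaddr / page_size) * 8);
 *	if (ent & (1ULL << 63))			// present
 *		pfn = ent & ((1ULL << 55) - 1);	// needs CAP_SYS_ADMIN, else 0
 *	else if (ent & (1ULL << 62))		// swapped
 *		swap_type = ent & 0x1f;		// bits 0-4
 */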
1666*4882a593Smuzhiyun static ssize_t pagemap_read(struct file *file, char __user *buf,
1667*4882a593Smuzhiyun 			    size_t count, loff_t *ppos)
1668*4882a593Smuzhiyun {
1669*4882a593Smuzhiyun 	struct mm_struct *mm = file->private_data;
1670*4882a593Smuzhiyun 	struct pagemapread pm;
1671*4882a593Smuzhiyun 	unsigned long src;
1672*4882a593Smuzhiyun 	unsigned long svpfn;
1673*4882a593Smuzhiyun 	unsigned long start_vaddr;
1674*4882a593Smuzhiyun 	unsigned long end_vaddr;
1675*4882a593Smuzhiyun 	int ret = 0, copied = 0;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	if (!mm || !mmget_not_zero(mm))
1678*4882a593Smuzhiyun 		goto out;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	ret = -EINVAL;
1681*4882a593Smuzhiyun 	/* file position must be aligned */
1682*4882a593Smuzhiyun 	if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
1683*4882a593Smuzhiyun 		goto out_mm;
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	ret = 0;
1686*4882a593Smuzhiyun 	if (!count)
1687*4882a593Smuzhiyun 		goto out_mm;
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	/* do not disclose physical addresses: attack vector */
1690*4882a593Smuzhiyun 	pm.show_pfn = file_ns_capable(file, &init_user_ns, CAP_SYS_ADMIN);
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1693*4882a593Smuzhiyun 	pm.buffer = kmalloc_array(pm.len, PM_ENTRY_BYTES, GFP_KERNEL);
1694*4882a593Smuzhiyun 	ret = -ENOMEM;
1695*4882a593Smuzhiyun 	if (!pm.buffer)
1696*4882a593Smuzhiyun 		goto out_mm;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	src = *ppos;
1699*4882a593Smuzhiyun 	svpfn = src / PM_ENTRY_BYTES;
1700*4882a593Smuzhiyun 	end_vaddr = mm->task_size;
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	/* watch out for wraparound */
1703*4882a593Smuzhiyun 	start_vaddr = end_vaddr;
1704*4882a593Smuzhiyun 	if (svpfn <= (ULONG_MAX >> PAGE_SHIFT))
1705*4882a593Smuzhiyun 		start_vaddr = untagged_addr(svpfn << PAGE_SHIFT);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	/* Ensure the address is inside the task's address space */
1708*4882a593Smuzhiyun 	if (start_vaddr > mm->task_size)
1709*4882a593Smuzhiyun 		start_vaddr = end_vaddr;
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun 	/*
1712*4882a593Smuzhiyun 	 * The odds are that this will stop walking way
1713*4882a593Smuzhiyun 	 * before end_vaddr, because the length of the
1714*4882a593Smuzhiyun 	 * user buffer is tracked in "pm", and the walk
1715*4882a593Smuzhiyun 	 * will stop when we hit the end of the buffer.
1716*4882a593Smuzhiyun 	 */
1717*4882a593Smuzhiyun 	ret = 0;
1718*4882a593Smuzhiyun 	while (count && (start_vaddr < end_vaddr)) {
1719*4882a593Smuzhiyun 		int len;
1720*4882a593Smuzhiyun 		unsigned long end;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 		pm.pos = 0;
1723*4882a593Smuzhiyun 		end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
1724*4882a593Smuzhiyun 		/* overflow? */
1725*4882a593Smuzhiyun 		if (end < start_vaddr || end > end_vaddr)
1726*4882a593Smuzhiyun 			end = end_vaddr;
1727*4882a593Smuzhiyun 		ret = mmap_read_lock_killable(mm);
1728*4882a593Smuzhiyun 		if (ret)
1729*4882a593Smuzhiyun 			goto out_free;
1730*4882a593Smuzhiyun 		ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm);
1731*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1732*4882a593Smuzhiyun 		start_vaddr = end;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 		len = min(count, PM_ENTRY_BYTES * pm.pos);
1735*4882a593Smuzhiyun 		if (copy_to_user(buf, pm.buffer, len)) {
1736*4882a593Smuzhiyun 			ret = -EFAULT;
1737*4882a593Smuzhiyun 			goto out_free;
1738*4882a593Smuzhiyun 		}
1739*4882a593Smuzhiyun 		copied += len;
1740*4882a593Smuzhiyun 		buf += len;
1741*4882a593Smuzhiyun 		count -= len;
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 	*ppos += copied;
1744*4882a593Smuzhiyun 	if (!ret || ret == PM_END_OF_BUFFER)
1745*4882a593Smuzhiyun 		ret = copied;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun out_free:
1748*4882a593Smuzhiyun 	kfree(pm.buffer);
1749*4882a593Smuzhiyun out_mm:
1750*4882a593Smuzhiyun 	mmput(mm);
1751*4882a593Smuzhiyun out:
1752*4882a593Smuzhiyun 	return ret;
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun 
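/*
 * Reference-count pairing: proc_mem_open() grabs only an mm_count
 * reference (mmgrab) without pinning the address space, so release
 * pairs with mmdrop(), while pagemap_read() pins the mm for the walk
 * with mmget_not_zero()/mmput().
 */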
1755*4882a593Smuzhiyun static int pagemap_open(struct inode *inode, struct file *file)
1756*4882a593Smuzhiyun {
1757*4882a593Smuzhiyun 	struct mm_struct *mm;
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	mm = proc_mem_open(inode, PTRACE_MODE_READ);
1760*4882a593Smuzhiyun 	if (IS_ERR(mm))
1761*4882a593Smuzhiyun 		return PTR_ERR(mm);
1762*4882a593Smuzhiyun 	file->private_data = mm;
1763*4882a593Smuzhiyun 	return 0;
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun static int pagemap_release(struct inode *inode, struct file *file)
1767*4882a593Smuzhiyun {
1768*4882a593Smuzhiyun 	struct mm_struct *mm = file->private_data;
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	if (mm)
1771*4882a593Smuzhiyun 		mmdrop(mm);
1772*4882a593Smuzhiyun 	return 0;
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun const struct file_operations proc_pagemap_operations = {
1776*4882a593Smuzhiyun 	.llseek		= mem_lseek, /* borrow this */
1777*4882a593Smuzhiyun 	.read		= pagemap_read,
1778*4882a593Smuzhiyun 	.open		= pagemap_open,
1779*4882a593Smuzhiyun 	.release	= pagemap_release,
1780*4882a593Smuzhiyun };
1781*4882a593Smuzhiyun #endif /* CONFIG_PROC_PAGE_MONITOR */
1782*4882a593Smuzhiyun 
1783*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun struct numa_maps {
1786*4882a593Smuzhiyun 	unsigned long pages;
1787*4882a593Smuzhiyun 	unsigned long anon;
1788*4882a593Smuzhiyun 	unsigned long active;
1789*4882a593Smuzhiyun 	unsigned long writeback;
1790*4882a593Smuzhiyun 	unsigned long mapcount_max;
1791*4882a593Smuzhiyun 	unsigned long dirty;
1792*4882a593Smuzhiyun 	unsigned long swapcache;
1793*4882a593Smuzhiyun 	unsigned long node[MAX_NUMNODES];
1794*4882a593Smuzhiyun };
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun struct numa_maps_private {
1797*4882a593Smuzhiyun 	struct proc_maps_private proc_maps;
1798*4882a593Smuzhiyun 	struct numa_maps md;
1799*4882a593Smuzhiyun };
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
1802*4882a593Smuzhiyun 			unsigned long nr_pages)
1803*4882a593Smuzhiyun {
1804*4882a593Smuzhiyun 	int count = page_mapcount(page);
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	md->pages += nr_pages;
1807*4882a593Smuzhiyun 	if (pte_dirty || PageDirty(page))
1808*4882a593Smuzhiyun 		md->dirty += nr_pages;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	if (PageSwapCache(page))
1811*4882a593Smuzhiyun 		md->swapcache += nr_pages;
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun 	if (PageActive(page) || PageUnevictable(page))
1814*4882a593Smuzhiyun 		md->active += nr_pages;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	if (PageWriteback(page))
1817*4882a593Smuzhiyun 		md->writeback += nr_pages;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 	if (PageAnon(page))
1820*4882a593Smuzhiyun 		md->anon += nr_pages;
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	if (count > md->mapcount_max)
1823*4882a593Smuzhiyun 		md->mapcount_max = count;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 	md->node[page_to_nid(page)] += nr_pages;
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
1829*4882a593Smuzhiyun 		unsigned long addr)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun 	struct page *page;
1832*4882a593Smuzhiyun 	int nid;
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 	if (!pte_present(pte))
1835*4882a593Smuzhiyun 		return NULL;
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	page = vm_normal_page(vma, addr, pte);
1838*4882a593Smuzhiyun 	if (!page)
1839*4882a593Smuzhiyun 		return NULL;
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	if (PageReserved(page))
1842*4882a593Smuzhiyun 		return NULL;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	nid = page_to_nid(page);
1845*4882a593Smuzhiyun 	if (!node_isset(nid, node_states[N_MEMORY]))
1846*4882a593Smuzhiyun 		return NULL;
1847*4882a593Smuzhiyun 
1848*4882a593Smuzhiyun 	return page;
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1852*4882a593Smuzhiyun static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
1853*4882a593Smuzhiyun 					      struct vm_area_struct *vma,
1854*4882a593Smuzhiyun 					      unsigned long addr)
1855*4882a593Smuzhiyun {
1856*4882a593Smuzhiyun 	struct page *page;
1857*4882a593Smuzhiyun 	int nid;
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	if (!pmd_present(pmd))
1860*4882a593Smuzhiyun 		return NULL;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	page = vm_normal_page_pmd(vma, addr, pmd);
1863*4882a593Smuzhiyun 	if (!page)
1864*4882a593Smuzhiyun 		return NULL;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	if (PageReserved(page))
1867*4882a593Smuzhiyun 		return NULL;
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	nid = page_to_nid(page);
1870*4882a593Smuzhiyun 	if (!node_isset(nid, node_states[N_MEMORY]))
1871*4882a593Smuzhiyun 		return NULL;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	return page;
1874*4882a593Smuzhiyun }
1875*4882a593Smuzhiyun #endif
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
1878*4882a593Smuzhiyun 		unsigned long end, struct mm_walk *walk)
1879*4882a593Smuzhiyun {
1880*4882a593Smuzhiyun 	struct numa_maps *md = walk->private;
1881*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
1882*4882a593Smuzhiyun 	spinlock_t *ptl;
1883*4882a593Smuzhiyun 	pte_t *orig_pte;
1884*4882a593Smuzhiyun 	pte_t *pte;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1887*4882a593Smuzhiyun 	ptl = pmd_trans_huge_lock(pmd, vma);
1888*4882a593Smuzhiyun 	if (ptl) {
1889*4882a593Smuzhiyun 		struct page *page;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 		page = can_gather_numa_stats_pmd(*pmd, vma, addr);
1892*4882a593Smuzhiyun 		if (page)
1893*4882a593Smuzhiyun 			gather_stats(page, md, pmd_dirty(*pmd),
1894*4882a593Smuzhiyun 				     HPAGE_PMD_SIZE/PAGE_SIZE);
1895*4882a593Smuzhiyun 		spin_unlock(ptl);
1896*4882a593Smuzhiyun 		return 0;
1897*4882a593Smuzhiyun 	}
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	if (pmd_trans_unstable(pmd))
1900*4882a593Smuzhiyun 		return 0;
1901*4882a593Smuzhiyun #endif
1902*4882a593Smuzhiyun 	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
1903*4882a593Smuzhiyun 	do {
1904*4882a593Smuzhiyun 		struct page *page = can_gather_numa_stats(*pte, vma, addr);
1905*4882a593Smuzhiyun 		if (!page)
1906*4882a593Smuzhiyun 			continue;
1907*4882a593Smuzhiyun 		gather_stats(page, md, pte_dirty(*pte), 1);
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	} while (pte++, addr += PAGE_SIZE, addr != end);
1910*4882a593Smuzhiyun 	pte_unmap_unlock(orig_pte, ptl);
1911*4882a593Smuzhiyun 	cond_resched();
1912*4882a593Smuzhiyun 	return 0;
1913*4882a593Smuzhiyun }
1914*4882a593Smuzhiyun #ifdef CONFIG_HUGETLB_PAGE
1915*4882a593Smuzhiyun static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1916*4882a593Smuzhiyun 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1917*4882a593Smuzhiyun {
1918*4882a593Smuzhiyun 	pte_t huge_pte = huge_ptep_get(pte);
1919*4882a593Smuzhiyun 	struct numa_maps *md;
1920*4882a593Smuzhiyun 	struct page *page;
1921*4882a593Smuzhiyun 
1922*4882a593Smuzhiyun 	if (!pte_present(huge_pte))
1923*4882a593Smuzhiyun 		return 0;
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	page = pte_page(huge_pte);
1926*4882a593Smuzhiyun 	if (!page)
1927*4882a593Smuzhiyun 		return 0;
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	md = walk->private;
1930*4882a593Smuzhiyun 	gather_stats(page, md, pte_dirty(huge_pte), 1);
1931*4882a593Smuzhiyun 	return 0;
1932*4882a593Smuzhiyun }
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun #else
1935*4882a593Smuzhiyun static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
1936*4882a593Smuzhiyun 		unsigned long addr, unsigned long end, struct mm_walk *walk)
1937*4882a593Smuzhiyun {
1938*4882a593Smuzhiyun 	return 0;
1939*4882a593Smuzhiyun }
1940*4882a593Smuzhiyun #endif
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun static const struct mm_walk_ops show_numa_ops = {
1943*4882a593Smuzhiyun 	.hugetlb_entry = gather_hugetlb_stats,
1944*4882a593Smuzhiyun 	.pmd_entry = gather_pte_stats,
1945*4882a593Smuzhiyun };
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun /*
1948*4882a593Smuzhiyun  * Display pages allocated per node and memory policy via /proc.
1949*4882a593Smuzhiyun  */
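/*
 * Example output line (illustrative; optional fields appear only when
 * non-zero):
 *
 *	7f0e98000000 default file=/usr/lib/libc.so.6 mapped=12 mapmax=3 N0=10 N1=2 kernelpagesize_kB=4
 */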
1950*4882a593Smuzhiyun static int show_numa_map(struct seq_file *m, void *v)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun 	struct numa_maps_private *numa_priv = m->private;
1953*4882a593Smuzhiyun 	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
1954*4882a593Smuzhiyun 	struct vm_area_struct *vma = v;
1955*4882a593Smuzhiyun 	struct numa_maps *md = &numa_priv->md;
1956*4882a593Smuzhiyun 	struct file *file = vma->vm_file;
1957*4882a593Smuzhiyun 	struct mm_struct *mm = vma->vm_mm;
1958*4882a593Smuzhiyun 	struct mempolicy *pol;
1959*4882a593Smuzhiyun 	char buffer[64];
1960*4882a593Smuzhiyun 	int nid;
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	if (!mm)
1963*4882a593Smuzhiyun 		return 0;
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun 	/* Ensure we start with an empty set of numa_maps statistics. */
1966*4882a593Smuzhiyun 	memset(md, 0, sizeof(*md));
1967*4882a593Smuzhiyun 
1968*4882a593Smuzhiyun 	pol = __get_vma_policy(vma, vma->vm_start);
1969*4882a593Smuzhiyun 	if (pol) {
1970*4882a593Smuzhiyun 		mpol_to_str(buffer, sizeof(buffer), pol);
1971*4882a593Smuzhiyun 		mpol_cond_put(pol);
1972*4882a593Smuzhiyun 	} else {
1973*4882a593Smuzhiyun 		mpol_to_str(buffer, sizeof(buffer), proc_priv->task_mempolicy);
1974*4882a593Smuzhiyun 	}
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	seq_printf(m, "%08lx %s", vma->vm_start, buffer);
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	if (file) {
1979*4882a593Smuzhiyun 		seq_puts(m, " file=");
1980*4882a593Smuzhiyun 		seq_file_path(m, file, "\n\t= ");
1981*4882a593Smuzhiyun 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
1982*4882a593Smuzhiyun 		seq_puts(m, " heap");
1983*4882a593Smuzhiyun 	} else if (is_stack(vma)) {
1984*4882a593Smuzhiyun 		seq_puts(m, " stack");
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	if (is_vm_hugetlb_page(vma))
1988*4882a593Smuzhiyun 		seq_puts(m, " huge");
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	/* mmap_lock is held by m_start */
1991*4882a593Smuzhiyun 	walk_page_vma(vma, &show_numa_ops, md);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	if (!md->pages)
1994*4882a593Smuzhiyun 		goto out;
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	if (md->anon)
1997*4882a593Smuzhiyun 		seq_printf(m, " anon=%lu", md->anon);
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 	if (md->dirty)
2000*4882a593Smuzhiyun 		seq_printf(m, " dirty=%lu", md->dirty);
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	if (md->pages != md->anon && md->pages != md->dirty)
2003*4882a593Smuzhiyun 		seq_printf(m, " mapped=%lu", md->pages);
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	if (md->mapcount_max > 1)
2006*4882a593Smuzhiyun 		seq_printf(m, " mapmax=%lu", md->mapcount_max);
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	if (md->swapcache)
2009*4882a593Smuzhiyun 		seq_printf(m, " swapcache=%lu", md->swapcache);
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
2012*4882a593Smuzhiyun 		seq_printf(m, " active=%lu", md->active);
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	if (md->writeback)
2015*4882a593Smuzhiyun 		seq_printf(m, " writeback=%lu", md->writeback);
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	for_each_node_state(nid, N_MEMORY)
2018*4882a593Smuzhiyun 		if (md->node[nid])
2019*4882a593Smuzhiyun 			seq_printf(m, " N%d=%lu", nid, md->node[nid]);
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10);
2022*4882a593Smuzhiyun out:
2023*4882a593Smuzhiyun 	seq_putc(m, '\n');
2024*4882a593Smuzhiyun 	return 0;
2025*4882a593Smuzhiyun }
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun static const struct seq_operations proc_pid_numa_maps_op = {
2028*4882a593Smuzhiyun 	.start  = m_start,
2029*4882a593Smuzhiyun 	.next   = m_next,
2030*4882a593Smuzhiyun 	.stop   = m_stop,
2031*4882a593Smuzhiyun 	.show   = show_numa_map,
2032*4882a593Smuzhiyun };
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun static int pid_numa_maps_open(struct inode *inode, struct file *file)
2035*4882a593Smuzhiyun {
2036*4882a593Smuzhiyun 	return proc_maps_open(inode, file, &proc_pid_numa_maps_op,
2037*4882a593Smuzhiyun 				sizeof(struct numa_maps_private));
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun const struct file_operations proc_pid_numa_maps_operations = {
2041*4882a593Smuzhiyun 	.open		= pid_numa_maps_open,
2042*4882a593Smuzhiyun 	.read		= seq_read,
2043*4882a593Smuzhiyun 	.llseek		= seq_lseek,
2044*4882a593Smuzhiyun 	.release	= proc_map_release,
2045*4882a593Smuzhiyun };
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun #endif /* CONFIG_NUMA */