// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/vmstat.c
 *
 *  Manages VM statistics
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  zoned VM statistics
 *  Copyright (C) 2006 Silicon Graphics, Inc.,
 *		Christoph Lameter <christoph@lameter.com>
 *  Copyright (C) 2008-2014 Christoph Lameter
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmstat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/writeback.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/page_ext.h>
#include <linux/page_owner.h>

#include "internal.h"

#define NUMA_STATS_THRESHOLD (U16_MAX - 2)

#ifdef CONFIG_NUMA
int sysctl_vm_numa_stat = ENABLE_NUMA_STAT;

/* zero numa counters within a zone */
static void zero_zone_numa_counters(struct zone *zone)
{
	int item, cpu;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++) {
		atomic_long_set(&zone->vm_numa_stat[item], 0);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item]
						= 0;
	}
}

/* zero numa counters of all the populated zones */
static void zero_zones_numa_counters(void)
{
	struct zone *zone;

	for_each_populated_zone(zone)
		zero_zone_numa_counters(zone);
}

/* zero global numa counters */
static void zero_global_numa_counters(void)
{
	int item;

	for (item = 0; item < NR_VM_NUMA_STAT_ITEMS; item++)
		atomic_long_set(&vm_numa_stat[item], 0);
}

static void invalid_numa_statistics(void)
{
	zero_zones_numa_counters();
	zero_global_numa_counters();
}

static DEFINE_MUTEX(vm_numa_stat_lock);

int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int ret, oldval;

	mutex_lock(&vm_numa_stat_lock);
	if (write)
		oldval = sysctl_vm_numa_stat;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret || !write)
		goto out;

	if (oldval == sysctl_vm_numa_stat)
		goto out;
	else if (sysctl_vm_numa_stat == ENABLE_NUMA_STAT) {
		static_branch_enable(&vm_numa_stat_key);
		pr_info("enable numa statistics\n");
	} else {
		static_branch_disable(&vm_numa_stat_key);
		invalid_numa_statistics();
		pr_info("disable numa statistics, and clear numa counters\n");
	}

out:
	mutex_unlock(&vm_numa_stat_lock);
	return ret;
}
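
/*
 * Illustrative usage: this handler backs the vm.numa_stat sysctl, so NUMA
 * statistics collection can be toggled from userspace, e.g.:
 *
 *	sysctl -w vm.numa_stat=0	(disable collection, clear counters)
 *	sysctl -w vm.numa_stat=1	(re-enable collection)
 *
 * Disabling flips the vm_numa_stat_key static branch, reducing the
 * per-allocation accounting cost to a patched-out no-op.
 */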
#endif

#ifdef CONFIG_VM_EVENT_COUNTERS
DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
EXPORT_PER_CPU_SYMBOL(vm_event_states);

static void sum_vm_events(unsigned long *ret)
{
	int cpu;
	int i;

	memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));

	for_each_online_cpu(cpu) {
		struct vm_event_state *this = &per_cpu(vm_event_states, cpu);

		for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
			ret[i] += this->event[i];
	}
}

/*
 * Accumulate the vm event counters across all CPUs.
 * The result is unavoidably approximate - it can change
 * during and after execution of this function.
 */
void all_vm_events(unsigned long *ret)
{
	get_online_cpus();
	sum_vm_events(ret);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(all_vm_events);
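
/*
 * Minimal usage sketch (illustrative): callers pass an array sized for all
 * event items and read the snapshot from it, e.g.:
 *
 *	unsigned long events[NR_VM_EVENT_ITEMS];
 *
 *	all_vm_events(events);
 *	faults = events[PGFAULT];
 *
 * Since the per-cpu counters keep ticking while the sum is taken, the
 * result is a point-in-time approximation, not an exact snapshot.
 */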

/*
 * Fold the foreign cpu events into our own.
 *
 * This is adding to the events on one processor
 * but keeps the global counts constant.
 */
void vm_events_fold_cpu(int cpu)
{
	struct vm_event_state *fold_state = &per_cpu(vm_event_states, cpu);
	int i;

	for (i = 0; i < NR_VM_EVENT_ITEMS; i++) {
		count_vm_events(i, fold_state->event[i]);
		fold_state->event[i] = 0;
	}
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

/*
 * Manage combined zone based / global counters
 *
 * vm_stat contains the global counters
 */
atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS] __cacheline_aligned_in_smp;
atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_zone_stat);
EXPORT_SYMBOL(vm_numa_stat);
EXPORT_SYMBOL(vm_node_stat);

#ifdef CONFIG_SMP

int calculate_pressure_threshold(struct zone *zone)
{
	int threshold;
	int watermark_distance;

	/*
	 * As vmstats are not up to date, there is drift between the estimated
	 * and real values. For high thresholds and a high number of CPUs, it
	 * is possible for the min watermark to be breached while the estimated
	 * value looks fine. The pressure threshold is a reduced value such
	 * that even the maximum amount of drift will not accidentally breach
	 * the min watermark.
	 */
	watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
	threshold = max(1, (int)(watermark_distance / num_online_cpus()));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
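
/*
 * Worked example (illustrative numbers): with low_wmark - min_wmark = 512
 * pages and 8 online CPUs, the pressure threshold is max(1, 512 / 8) = 64,
 * below the 125 cap. Even if every CPU holds a differential just under 64
 * pages, the total hidden drift (8 * 64 = 512) cannot silently push the
 * zone below its min watermark.
 */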

int calculate_normal_threshold(struct zone *zone)
{
	int threshold;
	int mem;	/* memory in 128 MB units */

	/*
	 * The threshold scales with the number of processors and the amount
	 * of memory per zone. More memory means that we can defer updates for
	 * longer, more processors could lead to more contention.
	 * fls() is used to have a cheap way of logarithmic scaling.
	 *
	 * Some sample thresholds:
	 *
	 * Threshold	Processors	(fls)	Zonesize	fls(mem+1)
	 * ------------------------------------------------------------------
	 * 8		1		1	0.9-1 GB	4
	 * 16		2		2	0.9-1 GB	4
	 * 20		2		2	1-2 GB		5
	 * 24		2		2	2-4 GB		6
	 * 28		2		2	4-8 GB		7
	 * 32		2		2	8-16 GB		8
	 * 4		2		2	<128M		1
	 * 30		4		3	2-4 GB		5
	 * 48		4		3	8-16 GB		8
	 * 32		8		4	1-2 GB		4
	 * 32		8		4	0.9-1GB		4
	 * 10		16		5	<128M		1
	 * 40		16		5	900M		4
	 * 70		64		7	2-4 GB		5
	 * 84		64		7	4-8 GB		6
	 * 108		512		9	4-8 GB		6
	 * 125		1024		10	8-16 GB		8
	 * 125		1024		10	16-32 GB	9
	 */

	mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);

	threshold = 2 * fls(num_online_cpus()) * (1 + fls(mem));

	/*
	 * Maximum threshold is 125
	 */
	threshold = min(125, threshold);

	return threshold;
}
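
/*
 * Worked example (illustrative): a ~0.9 GB zone on a 2-CPU machine gives
 * mem = 7 (in 128 MB units), so threshold = 2 * fls(2) * (1 + fls(7)) =
 * 2 * 2 * 4 = 16, matching the sample table above. Because both factors are
 * fls() terms, doubling the CPU count or the zone size bumps the threshold
 * by one step at most, keeping it logarithmic in machine size.
 */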

/*
 * Refresh the thresholds for each zone.
 */
void refresh_zone_stat_thresholds(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int cpu;
	int threshold;

	/* Zero current pgdat thresholds */
	for_each_online_pgdat(pgdat) {
		for_each_online_cpu(cpu) {
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;
		}
	}

	for_each_populated_zone(zone) {
		struct pglist_data *pgdat = zone->zone_pgdat;
		unsigned long max_drift, tolerate_drift;

		threshold = calculate_normal_threshold(zone);

		for_each_online_cpu(cpu) {
			int pgdat_threshold;

			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;

			/* Base nodestat threshold on the largest populated zone. */
			pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;
			per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold
				= max(threshold, pgdat_threshold);
		}

		/*
		 * Only set percpu_drift_mark if there is a danger that
		 * NR_FREE_PAGES reports the low watermark is ok when in fact
		 * the min watermark could be breached by an allocation
		 */
		tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
		max_drift = num_online_cpus() * threshold;
		if (max_drift > tolerate_drift)
			zone->percpu_drift_mark = high_wmark_pages(zone) +
					max_drift;
	}
}
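
/*
 * Worked example (illustrative numbers): with 64 CPUs and a per-cpu
 * threshold of 125, the global NR_FREE_PAGES may lag reality by up to
 * 64 * 125 = 8000 pages. If low_wmark - min_wmark is only 1000 pages, that
 * drift could hide a min-watermark breach, so percpu_drift_mark is set to
 * high_wmark + 8000; readers that see the estimate fall below this mark are
 * expected to take the slower exact snapshot instead.
 */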

void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_online_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}

/*
 * For use when we know that interrupts are disabled,
 * or when we know that preemption is disabled and that
 * particular counter cannot be updated from interrupt context.
 */
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			   long delta)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long x;
	long t;

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		zone_page_state_add(x, zone, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_zone_page_state);
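
/*
 * Usage sketch (illustrative): the page allocator batches free-page
 * accounting through this helper with interrupts already disabled, e.g.:
 *
 *	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
 *
 * The delta accumulates in the per-cpu vm_stat_diff and is folded into the
 * zone and global atomics only once it exceeds stat_threshold, so the
 * common case touches no shared cachelines.
 */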

void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
				long delta)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long x;
	long t;

	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	x = delta + __this_cpu_read(*p);

	t = __this_cpu_read(pcp->stat_threshold);

	if (unlikely(abs(x) > t)) {
		node_page_state_add(x, pgdat, item);
		x = 0;
	}
	__this_cpu_write(*p, x);
}
EXPORT_SYMBOL(__mod_node_page_state);

/*
 * Optimized increment and decrement functions.
 *
 * These are only for a single page and therefore can take a struct page *
 * argument instead of struct zone *. This allows the inclusion of the code
 * generated for page_zone(page) into the optimized functions.
 *
 * No overflow check is necessary and therefore the differential can be
 * incremented or decremented in place which may allow the compilers to
 * generate better code.
 * The increment or decrement is known and therefore one boundary check can
 * be omitted.
 *
 * NOTE: These functions are very performance sensitive. Change only
 * with care.
 *
 * Some processors have inc/dec instructions that are atomic vs an interrupt.
 * However, the code must first determine the differential location in a zone
 * based on the processor number and then inc/dec the counter. There is no
 * guarantee without disabling preemption that the processor will not change
 * in between and therefore the atomicity vs. interrupt cannot be exploited
 * in a useful way here.
 */
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v + overstep, zone, item);
		__this_cpu_write(*p, -overstep);
	}
}
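
/*
 * Worked example (illustrative): with stat_threshold t = 6, the seventh
 * consecutive increment makes v = 7 > t. overstep = t >> 1 = 3, so
 * v + overstep = 10 is folded into the zone and global counters and the
 * per-cpu differential restarts at -3. The net effect is still +7, but the
 * counter now sits mid-range, so a following run of decrements also gets
 * threshold-many steps before touching shared state again.
 */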

void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	v = __this_cpu_inc_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v > t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v + overstep, pgdat, item);
		__this_cpu_write(*p, -overstep);
	}
}

void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__inc_zone_page_state);

void __inc_node_page_state(struct page *page, enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__inc_node_page_state);

void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	s8 v, t;

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		zone_page_state_add(v - overstep, zone, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	s8 v, t;

	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	v = __this_cpu_dec_return(*p);
	t = __this_cpu_read(pcp->stat_threshold);
	if (unlikely(v < -t)) {
		s8 overstep = t >> 1;

		node_page_state_add(v - overstep, pgdat, item);
		__this_cpu_write(*p, overstep);
	}
}

void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
EXPORT_SYMBOL(__dec_zone_page_state);

void __dec_node_page_state(struct page *page, enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
EXPORT_SYMBOL(__dec_node_page_state);

#ifdef CONFIG_HAVE_CMPXCHG_LOCAL
/*
 * If we have cmpxchg_local support then we do not need to incur the overhead
 * that comes with local_irq_save/restore if we use this_cpu_cmpxchg.
 *
 * mod_state() modifies the zone counter state through atomic per cpu
 * operations.
 *
 * Overstep mode specifies how overstep should be handled:
 *     0       No overstepping
 *     1       Overstepping half of threshold
 *     -1      Overstepping minus half of threshold
 */
static inline void mod_zone_state(struct zone *zone,
       enum zone_stat_item item, long delta, int overstep_mode)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	s8 __percpu *p = pcp->vm_stat_diff + item;
	long o, n, t, z;

	do {
		z = 0;  /* overflow to zone counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a zone.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to zone counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		zone_page_state_add(z, zone, item);
}
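
/*
 * Worked example (illustrative): take t = 32, a per-cpu value o = 30 and
 * delta = +5 with overstep_mode = 1. Then n = 35 > t, os = 32 >> 1 = 16,
 * z = 35 + 16 = 51 is pushed to the zone/global counters and the per-cpu
 * slot becomes -16, so the folded total still advances by exactly
 * o + delta (51 - 16 = 35). If another context changes the slot between
 * this_cpu_read() and this_cpu_cmpxchg(), the cmpxchg fails and the loop
 * recomputes from the fresh value.
 */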

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	mod_zone_state(zone, item, delta, 0);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	mod_zone_state(page_zone(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_zone_page_state);

static inline void mod_node_state(struct pglist_data *pgdat,
       enum node_stat_item item, int delta, int overstep_mode)
{
	struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
	s8 __percpu *p = pcp->vm_node_stat_diff + item;
	long o, n, t, z;

	if (vmstat_item_in_bytes(item)) {
		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
		delta >>= PAGE_SHIFT;
	}

	do {
		z = 0;  /* overflow to node counters */

		/*
		 * The fetching of the stat_threshold is racy. We may apply
		 * a counter threshold to the wrong cpu if we get
		 * rescheduled while executing here. However, the next
		 * counter update will apply the threshold again and
		 * therefore bring the counter under the threshold again.
		 *
		 * Most of the time the thresholds are the same anyway
		 * for all cpus in a node.
		 */
		t = this_cpu_read(pcp->stat_threshold);

		o = this_cpu_read(*p);
		n = delta + o;

		if (abs(n) > t) {
			int os = overstep_mode * (t >> 1);

			/* Overflow must be added to node counters */
			z = n + os;
			n = -os;
		}
	} while (this_cpu_cmpxchg(*p, o, n) != o);

	if (z)
		node_page_state_add(z, pgdat, item);
}

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	mod_node_state(pgdat, item, delta, 0);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	mod_node_state(pgdat, item, 1, 1);
}

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, 1, 1);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	mod_node_state(page_pgdat(page), item, -1, -1);
}
EXPORT_SYMBOL(dec_node_page_state);
#else
/*
 * Use interrupt disable to serialize counter updates
 */
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_zone_page_state);

void inc_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;
	struct zone *zone;

	zone = page_zone(page);
	local_irq_save(flags);
	__inc_zone_state(zone, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_zone_page_state);

void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_zone_page_state);

void inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_state);

void mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
					long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_node_page_state(pgdat, item, delta);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(mod_node_page_state);

void inc_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;
	struct pglist_data *pgdat;

	pgdat = page_pgdat(page);
	local_irq_save(flags);
	__inc_node_state(pgdat, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(inc_node_page_state);

void dec_node_page_state(struct page *page, enum node_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_node_page_state(page, item);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(dec_node_page_state);
#endif

/*
 * Fold a differential into the global counters.
 * Returns the number of counters updated.
 */
#ifdef CONFIG_NUMA
static int fold_diff(int *zone_diff, int *numa_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (numa_diff[i]) {
			atomic_long_add(numa_diff[i], &vm_numa_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#else
static int fold_diff(int *zone_diff, int *node_diff)
{
	int i;
	int changes = 0;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (zone_diff[i]) {
			atomic_long_add(zone_diff[i], &vm_zone_stat[i]);
			changes++;
		}

	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
		if (node_diff[i]) {
			atomic_long_add(node_diff[i], &vm_node_stat[i]);
			changes++;
		}
	return changes;
}
#endif /* CONFIG_NUMA */

/*
 * Update the zone counters for the current cpu.
 *
 * Note that refresh_cpu_vm_stats strives to only access
 * node local memory. The per cpu pagesets on remote zones are placed
 * in the memory local to the processor using that pageset. So the
 * loop over all zones will access a series of cachelines local to
 * the processor.
 *
 * The call to zone_page_state_add updates the cachelines with the
 * statistics in the remote zone struct as well as the global cachelines
 * with the global counters. These could cause remote node cache line
 * bouncing and should only be done when necessary.
 *
 * The function returns the number of global counters updated.
 */
static int refresh_cpu_vm_stats(bool do_pagesets)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };
	int changes = 0;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset __percpu *p = zone->pageset;

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
#ifdef CONFIG_NUMA
				/* 3 seconds idle till flush */
				__this_cpu_write(p->expire, 3);
#endif
			}
		}
#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_numa_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
				__this_cpu_write(p->expire, 3);
			}
		}

		if (do_pagesets) {
			cond_resched();
			/*
			 * Deal with draining the remote pageset of this
			 * processor.
			 *
			 * Check if there are pages remaining in this pageset;
			 * if not then there is nothing to expire.
			 */
			if (!__this_cpu_read(p->expire) ||
			       !__this_cpu_read(p->pcp.count))
				continue;

			/*
			 * We never drain zones local to this processor.
			 */
			if (zone_to_nid(zone) == numa_node_id()) {
				__this_cpu_write(p->expire, 0);
				continue;
			}

			if (__this_cpu_dec_return(p->expire))
				continue;

			if (__this_cpu_read(p->pcp.count)) {
				drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
				changes++;
			}
		}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat __percpu *p = pgdat->per_cpu_nodestats;

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
			int v;

			v = this_cpu_xchg(p->vm_node_stat_diff[i], 0);
			if (v) {
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
		}
	}

#ifdef CONFIG_NUMA
	changes += fold_diff(global_zone_diff, global_numa_diff,
			     global_node_diff);
#else
	changes += fold_diff(global_zone_diff, global_node_diff);
#endif
	return changes;
}

/*
 * Fold the data for an offline cpu into the global array.
 * There cannot be any access by the offline cpu and therefore
 * synchronization is simplified.
 */
void cpu_vm_stats_fold(int cpu)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int i;
	int global_zone_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
#ifdef CONFIG_NUMA
	int global_numa_diff[NR_VM_NUMA_STAT_ITEMS] = { 0, };
#endif
	int global_node_diff[NR_VM_NODE_STAT_ITEMS] = { 0, };

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *p;

		p = per_cpu_ptr(zone->pageset, cpu);

		for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
			if (p->vm_stat_diff[i]) {
				int v;

				v = p->vm_stat_diff[i];
				p->vm_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_stat[i]);
				global_zone_diff[i] += v;
			}

#ifdef CONFIG_NUMA
		for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
			if (p->vm_numa_stat_diff[i]) {
				int v;

				v = p->vm_numa_stat_diff[i];
				p->vm_numa_stat_diff[i] = 0;
				atomic_long_add(v, &zone->vm_numa_stat[i]);
				global_numa_diff[i] += v;
			}
#endif
	}

	for_each_online_pgdat(pgdat) {
		struct per_cpu_nodestat *p;

		p = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu);

		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
			if (p->vm_node_stat_diff[i]) {
				int v;

				v = p->vm_node_stat_diff[i];
				p->vm_node_stat_diff[i] = 0;
				atomic_long_add(v, &pgdat->vm_stat[i]);
				global_node_diff[i] += v;
			}
	}

#ifdef CONFIG_NUMA
	fold_diff(global_zone_diff, global_numa_diff, global_node_diff);
#else
	fold_diff(global_zone_diff, global_node_diff);
#endif
}

/*
 * This is only called if !populated_zone(zone), which implies no other
 * users of pset->vm_stat_diff[] exist.
 */
void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
{
	int i;

	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
		if (pset->vm_stat_diff[i]) {
			int v = pset->vm_stat_diff[i];

			pset->vm_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_stat[i]);
			atomic_long_add(v, &vm_zone_stat[i]);
		}

#ifdef CONFIG_NUMA
	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
		if (pset->vm_numa_stat_diff[i]) {
			int v = pset->vm_numa_stat_diff[i];

			pset->vm_numa_stat_diff[i] = 0;
			atomic_long_add(v, &zone->vm_numa_stat[i]);
			atomic_long_add(v, &vm_numa_stat[i]);
		}
#endif
}
#endif

#ifdef CONFIG_NUMA
void __inc_numa_state(struct zone *zone,
				 enum numa_stat_item item)
{
	struct per_cpu_pageset __percpu *pcp = zone->pageset;
	u16 __percpu *p = pcp->vm_numa_stat_diff + item;
	u16 v;

	v = __this_cpu_inc_return(*p);

	if (unlikely(v > NUMA_STATS_THRESHOLD)) {
		zone_numa_state_add(v, zone, item);
		__this_cpu_write(*p, 0);
	}
}
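
/*
 * Note (illustrative): NUMA_STATS_THRESHOLD is U16_MAX - 2 = 65533, so each
 * per-cpu u16 differential can absorb roughly 64K events before being folded
 * into the zone counter and reset to zero. These event counters only move
 * upward, which is presumably why a much larger one-sided threshold is safe
 * here than for the signed s8 zone/node differentials.
 */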

/*
 * Determine the per node value of a stat item. This function
 * is called frequently in a NUMA machine, so try to be as
 * frugal as possible.
 */
unsigned long sum_zone_node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_page_state(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a numa stat item. To avoid deviation,
 * the per cpu stat number in vm_numa_stat_diff[] is also included.
 */
unsigned long sum_zone_numa_state(int node,
				 enum numa_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;
	int i;
	unsigned long count = 0;

	for (i = 0; i < MAX_NR_ZONES; i++)
		count += zone_numa_state_snapshot(zones + i, item);

	return count;
}

/*
 * Determine the per node value of a stat item.
 */
unsigned long node_page_state_pages(struct pglist_data *pgdat,
				    enum node_stat_item item)
{
	long x = atomic_long_read(&pgdat->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

unsigned long node_page_state(struct pglist_data *pgdat,
			      enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return node_page_state_pages(pgdat, item);
}
#endif

#ifdef CONFIG_COMPACTION

struct contig_page_info {
	unsigned long free_pages;
	unsigned long free_blocks_total;
	unsigned long free_blocks_suitable;
};

/*
 * Calculate the number of free pages in a zone, how many contiguous
 * pages are free and how many are large enough to satisfy an allocation of
 * the target size. Note that this function makes no attempt to estimate
 * how many suitable free blocks there *might* be if MOVABLE pages were
 * migrated. Calculating that is possible, but expensive and can be
 * figured out from userspace.
 */
static void fill_contig_page_info(struct zone *zone,
				unsigned int suitable_order,
				struct contig_page_info *info)
{
	unsigned int order;

	info->free_pages = 0;
	info->free_blocks_total = 0;
	info->free_blocks_suitable = 0;

	for (order = 0; order < MAX_ORDER; order++) {
		unsigned long blocks;

		/* Count number of free blocks */
		blocks = zone->free_area[order].nr_free;
		info->free_blocks_total += blocks;

		/* Count free base pages */
		info->free_pages += blocks << order;

		/* Count the suitable free blocks */
		if (order >= suitable_order)
			info->free_blocks_suitable += blocks <<
						(order - suitable_order);
	}
}

/*
 * A fragmentation index only makes sense if an allocation of a requested
 * size would fail. If that is true, the fragmentation index indicates
 * whether external fragmentation or a lack of memory was the problem.
 * The value can be used to determine if page reclaim or compaction
 * should be used.
 */
static int __fragmentation_index(unsigned int order, struct contig_page_info *info)
{
	unsigned long requested = 1UL << order;

	if (WARN_ON_ONCE(order >= MAX_ORDER))
		return 0;

	if (!info->free_blocks_total)
		return 0;

	/* Fragmentation index only makes sense when a request would fail */
	if (info->free_blocks_suitable)
		return -1000;

	/*
	 * Index is between 0 and 1 so return within 3 decimal places
	 *
	 * 0 => allocation would fail due to lack of memory
	 * 1 => allocation would fail due to fragmentation
	 */
	return 1000 - div_u64((1000 + div_u64(info->free_pages * 1000ULL, requested)), info->free_blocks_total);
}
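
/*
 * Worked example (illustrative numbers): suppose an order-4 request
 * (requested = 16 pages) finds free_pages = 100 spread over
 * free_blocks_total = 50 blocks, none of them suitable. The index is
 * 1000 - (1000 + 100 * 1000 / 16) / 50 = 1000 - 7250 / 50 = 855, i.e. 0.855:
 * memory is available but fragmented, so compaction is the better tool.
 * Small free totals push the index toward 0, where reclaim helps more.
 */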

/*
 * Calculates external fragmentation within a zone wrt the given order.
 * It is defined as the percentage of pages found in blocks of size
 * less than 1 << order. It returns values in range [0, 100].
 */
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
{
	struct contig_page_info info;

	fill_contig_page_info(zone, order, &info);
	if (info.free_pages == 0)
		return 0;

	return div_u64((info.free_pages -
			(info.free_blocks_suitable << order)) * 100,
			info.free_pages);
}
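
/*
 * Worked example (illustrative numbers): for order = 3, a zone with
 * free_pages = 1000 of which free_blocks_suitable = 100 blocks of at least
 * 8 pages gives (1000 - 100 * 8) * 100 / 1000 = 20, i.e. 20% of the free
 * memory sits in fragments too small to serve an order-3 request.
 */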
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun /* Same as __fragmentation index but allocs contig_page_info on stack */
fragmentation_index(struct zone * zone,unsigned int order)1118*4882a593Smuzhiyun int fragmentation_index(struct zone *zone, unsigned int order)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	struct contig_page_info info;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	fill_contig_page_info(zone, order, &info);
1123*4882a593Smuzhiyun 	return __fragmentation_index(order, &info);
1124*4882a593Smuzhiyun }
1125*4882a593Smuzhiyun #endif
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun #if defined(CONFIG_PROC_FS) || defined(CONFIG_SYSFS) || \
1128*4882a593Smuzhiyun     defined(CONFIG_NUMA) || defined(CONFIG_MEMCG)
1129*4882a593Smuzhiyun #ifdef CONFIG_ZONE_DMA
1130*4882a593Smuzhiyun #define TEXT_FOR_DMA(xx) xx "_dma",
1131*4882a593Smuzhiyun #else
1132*4882a593Smuzhiyun #define TEXT_FOR_DMA(xx)
1133*4882a593Smuzhiyun #endif
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun #ifdef CONFIG_ZONE_DMA32
1136*4882a593Smuzhiyun #define TEXT_FOR_DMA32(xx) xx "_dma32",
1137*4882a593Smuzhiyun #else
1138*4882a593Smuzhiyun #define TEXT_FOR_DMA32(xx)
1139*4882a593Smuzhiyun #endif
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun #ifdef CONFIG_HIGHMEM
1142*4882a593Smuzhiyun #define TEXT_FOR_HIGHMEM(xx) xx "_high",
1143*4882a593Smuzhiyun #else
1144*4882a593Smuzhiyun #define TEXT_FOR_HIGHMEM(xx)
1145*4882a593Smuzhiyun #endif
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun #define TEXTS_FOR_ZONES(xx) TEXT_FOR_DMA(xx) TEXT_FOR_DMA32(xx) xx "_normal", \
1148*4882a593Smuzhiyun 					TEXT_FOR_HIGHMEM(xx) xx "_movable",
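/*
 * Illustrative expansion (assuming CONFIG_ZONE_DMA, CONFIG_ZONE_DMA32
 * and CONFIG_HIGHMEM are all enabled):
 *
 *	TEXTS_FOR_ZONES("pgalloc")
 *
 * produces "pgalloc_dma", "pgalloc_dma32", "pgalloc_normal",
 * "pgalloc_high", "pgalloc_movable".
 */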
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun const char * const vmstat_text[] = {
1151*4882a593Smuzhiyun 	/* enum zone_stat_item counters */
1152*4882a593Smuzhiyun 	"nr_free_pages",
1153*4882a593Smuzhiyun 	"nr_zone_inactive_anon",
1154*4882a593Smuzhiyun 	"nr_zone_active_anon",
1155*4882a593Smuzhiyun 	"nr_zone_inactive_file",
1156*4882a593Smuzhiyun 	"nr_zone_active_file",
1157*4882a593Smuzhiyun 	"nr_zone_unevictable",
1158*4882a593Smuzhiyun 	"nr_zone_write_pending",
1159*4882a593Smuzhiyun 	"nr_mlock",
1160*4882a593Smuzhiyun 	"nr_page_table_pages",
1161*4882a593Smuzhiyun 	"nr_bounce",
1162*4882a593Smuzhiyun 	"nr_zspages",
1163*4882a593Smuzhiyun 	"nr_free_cma",
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	/* enum numa_stat_item counters */
1166*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1167*4882a593Smuzhiyun 	"numa_hit",
1168*4882a593Smuzhiyun 	"numa_miss",
1169*4882a593Smuzhiyun 	"numa_foreign",
1170*4882a593Smuzhiyun 	"numa_interleave",
1171*4882a593Smuzhiyun 	"numa_local",
1172*4882a593Smuzhiyun 	"numa_other",
1173*4882a593Smuzhiyun #endif
1174*4882a593Smuzhiyun 
1175*4882a593Smuzhiyun 	/* enum node_stat_item counters */
1176*4882a593Smuzhiyun 	"nr_inactive_anon",
1177*4882a593Smuzhiyun 	"nr_active_anon",
1178*4882a593Smuzhiyun 	"nr_inactive_file",
1179*4882a593Smuzhiyun 	"nr_active_file",
1180*4882a593Smuzhiyun 	"nr_unevictable",
1181*4882a593Smuzhiyun 	"nr_slab_reclaimable",
1182*4882a593Smuzhiyun 	"nr_slab_unreclaimable",
1183*4882a593Smuzhiyun 	"nr_isolated_anon",
1184*4882a593Smuzhiyun 	"nr_isolated_file",
1185*4882a593Smuzhiyun 	"workingset_nodes",
1186*4882a593Smuzhiyun 	"workingset_refault_anon",
1187*4882a593Smuzhiyun 	"workingset_refault_file",
1188*4882a593Smuzhiyun 	"workingset_activate_anon",
1189*4882a593Smuzhiyun 	"workingset_activate_file",
1190*4882a593Smuzhiyun 	"workingset_restore_anon",
1191*4882a593Smuzhiyun 	"workingset_restore_file",
1192*4882a593Smuzhiyun 	"workingset_nodereclaim",
1193*4882a593Smuzhiyun 	"nr_anon_pages",
1194*4882a593Smuzhiyun 	"nr_mapped",
1195*4882a593Smuzhiyun 	"nr_file_pages",
1196*4882a593Smuzhiyun 	"nr_dirty",
1197*4882a593Smuzhiyun 	"nr_writeback",
1198*4882a593Smuzhiyun 	"nr_writeback_temp",
1199*4882a593Smuzhiyun 	"nr_shmem",
1200*4882a593Smuzhiyun 	"nr_shmem_hugepages",
1201*4882a593Smuzhiyun 	"nr_shmem_pmdmapped",
1202*4882a593Smuzhiyun 	"nr_file_hugepages",
1203*4882a593Smuzhiyun 	"nr_file_pmdmapped",
1204*4882a593Smuzhiyun 	"nr_anon_transparent_hugepages",
1205*4882a593Smuzhiyun 	"nr_vmscan_write",
1206*4882a593Smuzhiyun 	"nr_vmscan_immediate_reclaim",
1207*4882a593Smuzhiyun 	"nr_dirtied",
1208*4882a593Smuzhiyun 	"nr_written",
1209*4882a593Smuzhiyun 	"nr_kernel_misc_reclaimable",
1210*4882a593Smuzhiyun 	"nr_foll_pin_acquired",
1211*4882a593Smuzhiyun 	"nr_foll_pin_released",
1212*4882a593Smuzhiyun 	"nr_kernel_stack",
1213*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_SHADOW_CALL_STACK)
1214*4882a593Smuzhiyun 	"nr_shadow_call_stack",
1215*4882a593Smuzhiyun #endif
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	/* enum writeback_stat_item counters */
1218*4882a593Smuzhiyun 	"nr_dirty_threshold",
1219*4882a593Smuzhiyun 	"nr_dirty_background_threshold",
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
1222*4882a593Smuzhiyun 	/* enum vm_event_item counters */
1223*4882a593Smuzhiyun 	"pgpgin",
1224*4882a593Smuzhiyun 	"pgpgout",
1225*4882a593Smuzhiyun 	"pswpin",
1226*4882a593Smuzhiyun 	"pswpout",
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	TEXTS_FOR_ZONES("pgalloc")
1229*4882a593Smuzhiyun 	TEXTS_FOR_ZONES("allocstall")
1230*4882a593Smuzhiyun 	TEXTS_FOR_ZONES("pgskip")
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	"pgfree",
1233*4882a593Smuzhiyun 	"pgactivate",
1234*4882a593Smuzhiyun 	"pgdeactivate",
1235*4882a593Smuzhiyun 	"pglazyfree",
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	"pgfault",
1238*4882a593Smuzhiyun 	"pgmajfault",
1239*4882a593Smuzhiyun 	"pglazyfreed",
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	"pgrefill",
1242*4882a593Smuzhiyun 	"pgreuse",
1243*4882a593Smuzhiyun 	"pgsteal_kswapd",
1244*4882a593Smuzhiyun 	"pgsteal_direct",
1245*4882a593Smuzhiyun 	"pgscan_kswapd",
1246*4882a593Smuzhiyun 	"pgscan_direct",
1247*4882a593Smuzhiyun 	"pgscan_direct_throttle",
1248*4882a593Smuzhiyun 	"pgscan_anon",
1249*4882a593Smuzhiyun 	"pgscan_file",
1250*4882a593Smuzhiyun 	"pgsteal_anon",
1251*4882a593Smuzhiyun 	"pgsteal_file",
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1254*4882a593Smuzhiyun 	"zone_reclaim_failed",
1255*4882a593Smuzhiyun #endif
1256*4882a593Smuzhiyun 	"pginodesteal",
1257*4882a593Smuzhiyun 	"slabs_scanned",
1258*4882a593Smuzhiyun 	"kswapd_inodesteal",
1259*4882a593Smuzhiyun 	"kswapd_low_wmark_hit_quickly",
1260*4882a593Smuzhiyun 	"kswapd_high_wmark_hit_quickly",
1261*4882a593Smuzhiyun 	"pageoutrun",
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun 	"pgrotated",
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	"drop_pagecache",
1266*4882a593Smuzhiyun 	"drop_slab",
1267*4882a593Smuzhiyun 	"oom_kill",
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun #ifdef CONFIG_NUMA_BALANCING
1270*4882a593Smuzhiyun 	"numa_pte_updates",
1271*4882a593Smuzhiyun 	"numa_huge_pte_updates",
1272*4882a593Smuzhiyun 	"numa_hint_faults",
1273*4882a593Smuzhiyun 	"numa_hint_faults_local",
1274*4882a593Smuzhiyun 	"numa_pages_migrated",
1275*4882a593Smuzhiyun #endif
1276*4882a593Smuzhiyun #ifdef CONFIG_MIGRATION
1277*4882a593Smuzhiyun 	"pgmigrate_success",
1278*4882a593Smuzhiyun 	"pgmigrate_fail",
1279*4882a593Smuzhiyun 	"thp_migration_success",
1280*4882a593Smuzhiyun 	"thp_migration_fail",
1281*4882a593Smuzhiyun 	"thp_migration_split",
1282*4882a593Smuzhiyun #endif
1283*4882a593Smuzhiyun #ifdef CONFIG_COMPACTION
1284*4882a593Smuzhiyun 	"compact_migrate_scanned",
1285*4882a593Smuzhiyun 	"compact_free_scanned",
1286*4882a593Smuzhiyun 	"compact_isolated",
1287*4882a593Smuzhiyun 	"compact_stall",
1288*4882a593Smuzhiyun 	"compact_fail",
1289*4882a593Smuzhiyun 	"compact_success",
1290*4882a593Smuzhiyun 	"compact_daemon_wake",
1291*4882a593Smuzhiyun 	"compact_daemon_migrate_scanned",
1292*4882a593Smuzhiyun 	"compact_daemon_free_scanned",
1293*4882a593Smuzhiyun #endif
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun #ifdef CONFIG_HUGETLB_PAGE
1296*4882a593Smuzhiyun 	"htlb_buddy_alloc_success",
1297*4882a593Smuzhiyun 	"htlb_buddy_alloc_fail",
1298*4882a593Smuzhiyun #endif
1299*4882a593Smuzhiyun #ifdef CONFIG_CMA
1300*4882a593Smuzhiyun 	"cma_alloc_success",
1301*4882a593Smuzhiyun 	"cma_alloc_fail",
1302*4882a593Smuzhiyun #endif
1303*4882a593Smuzhiyun 	"unevictable_pgs_culled",
1304*4882a593Smuzhiyun 	"unevictable_pgs_scanned",
1305*4882a593Smuzhiyun 	"unevictable_pgs_rescued",
1306*4882a593Smuzhiyun 	"unevictable_pgs_mlocked",
1307*4882a593Smuzhiyun 	"unevictable_pgs_munlocked",
1308*4882a593Smuzhiyun 	"unevictable_pgs_cleared",
1309*4882a593Smuzhiyun 	"unevictable_pgs_stranded",
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1312*4882a593Smuzhiyun 	"thp_fault_alloc",
1313*4882a593Smuzhiyun 	"thp_fault_fallback",
1314*4882a593Smuzhiyun 	"thp_fault_fallback_charge",
1315*4882a593Smuzhiyun 	"thp_collapse_alloc",
1316*4882a593Smuzhiyun 	"thp_collapse_alloc_failed",
1317*4882a593Smuzhiyun 	"thp_file_alloc",
1318*4882a593Smuzhiyun 	"thp_file_fallback",
1319*4882a593Smuzhiyun 	"thp_file_fallback_charge",
1320*4882a593Smuzhiyun 	"thp_file_mapped",
1321*4882a593Smuzhiyun 	"thp_split_page",
1322*4882a593Smuzhiyun 	"thp_split_page_failed",
1323*4882a593Smuzhiyun 	"thp_deferred_split_page",
1324*4882a593Smuzhiyun 	"thp_split_pmd",
1325*4882a593Smuzhiyun #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1326*4882a593Smuzhiyun 	"thp_split_pud",
1327*4882a593Smuzhiyun #endif
1328*4882a593Smuzhiyun 	"thp_zero_page_alloc",
1329*4882a593Smuzhiyun 	"thp_zero_page_alloc_failed",
1330*4882a593Smuzhiyun 	"thp_swpout",
1331*4882a593Smuzhiyun 	"thp_swpout_fallback",
1332*4882a593Smuzhiyun #endif
1333*4882a593Smuzhiyun #ifdef CONFIG_MEMORY_BALLOON
1334*4882a593Smuzhiyun 	"balloon_inflate",
1335*4882a593Smuzhiyun 	"balloon_deflate",
1336*4882a593Smuzhiyun #ifdef CONFIG_BALLOON_COMPACTION
1337*4882a593Smuzhiyun 	"balloon_migrate",
1338*4882a593Smuzhiyun #endif
1339*4882a593Smuzhiyun #endif /* CONFIG_MEMORY_BALLOON */
1340*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_TLBFLUSH
1341*4882a593Smuzhiyun 	"nr_tlb_remote_flush",
1342*4882a593Smuzhiyun 	"nr_tlb_remote_flush_received",
1343*4882a593Smuzhiyun 	"nr_tlb_local_flush_all",
1344*4882a593Smuzhiyun 	"nr_tlb_local_flush_one",
1345*4882a593Smuzhiyun #endif /* CONFIG_DEBUG_TLBFLUSH */
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_VM_VMACACHE
1348*4882a593Smuzhiyun 	"vmacache_find_calls",
1349*4882a593Smuzhiyun 	"vmacache_find_hits",
1350*4882a593Smuzhiyun #endif
1351*4882a593Smuzhiyun #ifdef CONFIG_SWAP
1352*4882a593Smuzhiyun 	"swap_ra",
1353*4882a593Smuzhiyun 	"swap_ra_hit",
1354*4882a593Smuzhiyun #endif
1355*4882a593Smuzhiyun #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
1356*4882a593Smuzhiyun 	"speculative_pgfault",
1357*4882a593Smuzhiyun 	"speculative_pgfault_file"
1358*4882a593Smuzhiyun #endif
1359*4882a593Smuzhiyun #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
1360*4882a593Smuzhiyun };
1361*4882a593Smuzhiyun #endif /* CONFIG_PROC_FS || CONFIG_SYSFS || CONFIG_NUMA || CONFIG_MEMCG */
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun #if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)) || \
1364*4882a593Smuzhiyun      defined(CONFIG_PROC_FS)
1365*4882a593Smuzhiyun static void *frag_start(struct seq_file *m, loff_t *pos)
1366*4882a593Smuzhiyun {
1367*4882a593Smuzhiyun 	pg_data_t *pgdat;
1368*4882a593Smuzhiyun 	loff_t node = *pos;
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	for (pgdat = first_online_pgdat();
1371*4882a593Smuzhiyun 	     pgdat && node;
1372*4882a593Smuzhiyun 	     pgdat = next_online_pgdat(pgdat))
1373*4882a593Smuzhiyun 		--node;
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 	return pgdat;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun static void *frag_next(struct seq_file *m, void *arg, loff_t *pos)
1379*4882a593Smuzhiyun {
1380*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	(*pos)++;
1383*4882a593Smuzhiyun 	return next_online_pgdat(pgdat);
1384*4882a593Smuzhiyun }
1385*4882a593Smuzhiyun 
1386*4882a593Smuzhiyun static void frag_stop(struct seq_file *m, void *arg)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun }
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun /*
1391*4882a593Smuzhiyun  * Walk zones in a node and print using a callback.
1392*4882a593Smuzhiyun  * If @assert_populated is true, only use callback for zones that are populated.
1393*4882a593Smuzhiyun  */
1394*4882a593Smuzhiyun static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
1395*4882a593Smuzhiyun 		bool assert_populated, bool nolock,
1396*4882a593Smuzhiyun 		void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
1397*4882a593Smuzhiyun {
1398*4882a593Smuzhiyun 	struct zone *zone;
1399*4882a593Smuzhiyun 	struct zone *node_zones = pgdat->node_zones;
1400*4882a593Smuzhiyun 	unsigned long flags;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
1403*4882a593Smuzhiyun 		if (assert_populated && !populated_zone(zone))
1404*4882a593Smuzhiyun 			continue;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 		if (!nolock)
1407*4882a593Smuzhiyun 			spin_lock_irqsave(&zone->lock, flags);
1408*4882a593Smuzhiyun 		print(m, pgdat, zone);
1409*4882a593Smuzhiyun 		if (!nolock)
1410*4882a593Smuzhiyun 			spin_unlock_irqrestore(&zone->lock, flags);
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun }
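/*
 * Usage sketch for walk_zones_in_node() (illustrative only; the
 * callback name below is hypothetical):
 *
 *	static void zone_name_print(struct seq_file *m, pg_data_t *pgdat,
 *				    struct zone *zone)
 *	{
 *		seq_printf(m, "node %d zone %s\n", pgdat->node_id, zone->name);
 *	}
 *
 *	walk_zones_in_node(m, pgdat, true, false, zone_name_print);
 *
 * With nolock == false the callback runs under zone->lock with IRQs
 * disabled, so it must not sleep.
 */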
1413*4882a593Smuzhiyun #endif
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
1416*4882a593Smuzhiyun static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
1417*4882a593Smuzhiyun 						struct zone *zone)
1418*4882a593Smuzhiyun {
1419*4882a593Smuzhiyun 	int order;
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1422*4882a593Smuzhiyun 	for (order = 0; order < MAX_ORDER; ++order)
1423*4882a593Smuzhiyun 		seq_printf(m, "%6lu ", zone->free_area[order].nr_free);
1424*4882a593Smuzhiyun 	seq_putc(m, '\n');
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun /*
1428*4882a593Smuzhiyun  * This walks the free areas for each zone.
1429*4882a593Smuzhiyun  */
1430*4882a593Smuzhiyun static int frag_show(struct seq_file *m, void *arg)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1433*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, false, frag_show_print);
1434*4882a593Smuzhiyun 	return 0;
1435*4882a593Smuzhiyun }
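/*
 * A /proc/buddyinfo line emitted by frag_show_print() might look like
 * this (illustrative counts, one column per order):
 *
 *	Node 0, zone   Normal    210     87     45     12      3      1 ...
 */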
1436*4882a593Smuzhiyun 
1437*4882a593Smuzhiyun static void pagetypeinfo_showfree_print(struct seq_file *m,
1438*4882a593Smuzhiyun 					pg_data_t *pgdat, struct zone *zone)
1439*4882a593Smuzhiyun {
1440*4882a593Smuzhiyun 	int order, mtype;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++) {
1443*4882a593Smuzhiyun 		seq_printf(m, "Node %4d, zone %8s, type %12s ",
1444*4882a593Smuzhiyun 					pgdat->node_id,
1445*4882a593Smuzhiyun 					zone->name,
1446*4882a593Smuzhiyun 					migratetype_names[mtype]);
1447*4882a593Smuzhiyun 		for (order = 0; order < MAX_ORDER; ++order) {
1448*4882a593Smuzhiyun 			unsigned long freecount = 0;
1449*4882a593Smuzhiyun 			struct free_area *area;
1450*4882a593Smuzhiyun 			struct list_head *curr;
1451*4882a593Smuzhiyun 			bool overflow = false;
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 			area = &(zone->free_area[order]);
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 			list_for_each(curr, &area->free_list[mtype]) {
1456*4882a593Smuzhiyun 				/*
1457*4882a593Smuzhiyun 				 * Cap the free_list iteration: it might be
1458*4882a593Smuzhiyun 				 * really large, and since we are under a
1459*4882a593Smuzhiyun 				 * spinlock a long time spent here could
1460*4882a593Smuzhiyun 				 * trigger the hard lockup detector. This is
1461*4882a593Smuzhiyun 				 * a debugging tool anyway, so knowing there
1462*4882a593Smuzhiyun 				 * is a handful of pages of this order should
1463*4882a593Smuzhiyun 				 * be more than sufficient.
1464*4882a593Smuzhiyun 				 */
1465*4882a593Smuzhiyun 				if (++freecount >= 100000) {
1466*4882a593Smuzhiyun 					overflow = true;
1467*4882a593Smuzhiyun 					break;
1468*4882a593Smuzhiyun 				}
1469*4882a593Smuzhiyun 			}
1470*4882a593Smuzhiyun 			seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
1471*4882a593Smuzhiyun 			spin_unlock_irq(&zone->lock);
1472*4882a593Smuzhiyun 			cond_resched();
1473*4882a593Smuzhiyun 			spin_lock_irq(&zone->lock);
1474*4882a593Smuzhiyun 		}
1475*4882a593Smuzhiyun 		seq_putc(m, '\n');
1476*4882a593Smuzhiyun 	}
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun /* Print out the free pages at each order for each migratetype */
1480*4882a593Smuzhiyun static int pagetypeinfo_showfree(struct seq_file *m, void *arg)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun 	int order;
1483*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	/* Print header */
1486*4882a593Smuzhiyun 	seq_printf(m, "%-43s ", "Free pages count per migrate type at order");
1487*4882a593Smuzhiyun 	for (order = 0; order < MAX_ORDER; ++order)
1488*4882a593Smuzhiyun 		seq_printf(m, "%6d ", order);
1489*4882a593Smuzhiyun 	seq_putc(m, '\n');
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, false, pagetypeinfo_showfree_print);
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	return 0;
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun static void pagetypeinfo_showblockcount_print(struct seq_file *m,
1497*4882a593Smuzhiyun 					pg_data_t *pgdat, struct zone *zone)
1498*4882a593Smuzhiyun {
1499*4882a593Smuzhiyun 	int mtype;
1500*4882a593Smuzhiyun 	unsigned long pfn;
1501*4882a593Smuzhiyun 	unsigned long start_pfn = zone->zone_start_pfn;
1502*4882a593Smuzhiyun 	unsigned long end_pfn = zone_end_pfn(zone);
1503*4882a593Smuzhiyun 	unsigned long count[MIGRATE_TYPES] = { 0, };
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
1506*4882a593Smuzhiyun 		struct page *page;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 		page = pfn_to_online_page(pfn);
1509*4882a593Smuzhiyun 		if (!page)
1510*4882a593Smuzhiyun 			continue;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 		if (page_zone(page) != zone)
1513*4882a593Smuzhiyun 			continue;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		mtype = get_pageblock_migratetype(page);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 		if (mtype < MIGRATE_TYPES)
1518*4882a593Smuzhiyun 			count[mtype]++;
1519*4882a593Smuzhiyun 	}
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	/* Print counts */
1522*4882a593Smuzhiyun 	seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
1523*4882a593Smuzhiyun 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1524*4882a593Smuzhiyun 		seq_printf(m, "%12lu ", count[mtype]);
1525*4882a593Smuzhiyun 	seq_putc(m, '\n');
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun /* Print out the number of pageblocks for each migratetype */
1529*4882a593Smuzhiyun static int pagetypeinfo_showblockcount(struct seq_file *m, void *arg)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	int mtype;
1532*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	seq_printf(m, "\n%-23s", "Number of blocks type ");
1535*4882a593Smuzhiyun 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1536*4882a593Smuzhiyun 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1537*4882a593Smuzhiyun 	seq_putc(m, '\n');
1538*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, false,
1539*4882a593Smuzhiyun 		pagetypeinfo_showblockcount_print);
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	return 0;
1542*4882a593Smuzhiyun }
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun /*
1545*4882a593Smuzhiyun  * Print out the number of pageblocks for each migratetype that contain pages
1546*4882a593Smuzhiyun  * of other types. This gives an indication of how well fallbacks are being
1547*4882a593Smuzhiyun  * contained by rmqueue_fallback(). It requires information from PAGE_OWNER
1548*4882a593Smuzhiyun  * to determine what is going on.
1549*4882a593Smuzhiyun  */
1550*4882a593Smuzhiyun static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
1551*4882a593Smuzhiyun {
1552*4882a593Smuzhiyun #ifdef CONFIG_PAGE_OWNER
1553*4882a593Smuzhiyun 	int mtype;
1554*4882a593Smuzhiyun 
1555*4882a593Smuzhiyun 	if (!static_branch_unlikely(&page_owner_inited))
1556*4882a593Smuzhiyun 		return;
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 	drain_all_pages(NULL);
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	seq_printf(m, "\n%-23s", "Number of mixed blocks ");
1561*4882a593Smuzhiyun 	for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
1562*4882a593Smuzhiyun 		seq_printf(m, "%12s ", migratetype_names[mtype]);
1563*4882a593Smuzhiyun 	seq_putc(m, '\n');
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, true,
1566*4882a593Smuzhiyun 		pagetypeinfo_showmixedcount_print);
1567*4882a593Smuzhiyun #endif /* CONFIG_PAGE_OWNER */
1568*4882a593Smuzhiyun }
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun /*
1571*4882a593Smuzhiyun  * This prints out statistics in relation to grouping pages by mobility.
1572*4882a593Smuzhiyun  * It is expensive to collect, so do not constantly read the file.
1573*4882a593Smuzhiyun  */
1574*4882a593Smuzhiyun static int pagetypeinfo_show(struct seq_file *m, void *arg)
1575*4882a593Smuzhiyun {
1576*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	/* check memoryless node */
1579*4882a593Smuzhiyun 	if (!node_state(pgdat->node_id, N_MEMORY))
1580*4882a593Smuzhiyun 		return 0;
1581*4882a593Smuzhiyun 
1582*4882a593Smuzhiyun 	seq_printf(m, "Page block order: %d\n", pageblock_order);
1583*4882a593Smuzhiyun 	seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
1584*4882a593Smuzhiyun 	seq_putc(m, '\n');
1585*4882a593Smuzhiyun 	pagetypeinfo_showfree(m, pgdat);
1586*4882a593Smuzhiyun 	pagetypeinfo_showblockcount(m, pgdat);
1587*4882a593Smuzhiyun 	pagetypeinfo_showmixedcount(m, pgdat);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	return 0;
1590*4882a593Smuzhiyun }
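/*
 * An illustrative /proc/pagetypeinfo preamble as emitted above (the
 * numbers depend on the architecture and configuration):
 *
 *	Page block order: 9
 *	Pages per block:  512
 *
 * followed by the per-migratetype free counts and block counts.
 */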
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun static const struct seq_operations fragmentation_op = {
1593*4882a593Smuzhiyun 	.start	= frag_start,
1594*4882a593Smuzhiyun 	.next	= frag_next,
1595*4882a593Smuzhiyun 	.stop	= frag_stop,
1596*4882a593Smuzhiyun 	.show	= frag_show,
1597*4882a593Smuzhiyun };
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun static const struct seq_operations pagetypeinfo_op = {
1600*4882a593Smuzhiyun 	.start	= frag_start,
1601*4882a593Smuzhiyun 	.next	= frag_next,
1602*4882a593Smuzhiyun 	.stop	= frag_stop,
1603*4882a593Smuzhiyun 	.show	= pagetypeinfo_show,
1604*4882a593Smuzhiyun };
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
1607*4882a593Smuzhiyun {
1608*4882a593Smuzhiyun 	int zid;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1611*4882a593Smuzhiyun 		struct zone *compare = &pgdat->node_zones[zid];
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 		if (populated_zone(compare))
1614*4882a593Smuzhiyun 			return zone == compare;
1615*4882a593Smuzhiyun 	}
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	return false;
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1621*4882a593Smuzhiyun 							struct zone *zone)
1622*4882a593Smuzhiyun {
1623*4882a593Smuzhiyun 	int i;
1624*4882a593Smuzhiyun 	seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
1625*4882a593Smuzhiyun 	if (is_zone_first_populated(pgdat, zone)) {
1626*4882a593Smuzhiyun 		seq_printf(m, "\n  per-node stats");
1627*4882a593Smuzhiyun 		for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
1628*4882a593Smuzhiyun 			seq_printf(m, "\n      %-12s %lu", node_stat_name(i),
1629*4882a593Smuzhiyun 				   node_page_state_pages(pgdat, i));
1630*4882a593Smuzhiyun 		}
1631*4882a593Smuzhiyun 	}
1632*4882a593Smuzhiyun 	seq_printf(m,
1633*4882a593Smuzhiyun 		   "\n  pages free     %lu"
1634*4882a593Smuzhiyun 		   "\n        min      %lu"
1635*4882a593Smuzhiyun 		   "\n        low      %lu"
1636*4882a593Smuzhiyun 		   "\n        high     %lu"
1637*4882a593Smuzhiyun 		   "\n        spanned  %lu"
1638*4882a593Smuzhiyun 		   "\n        present  %lu"
1639*4882a593Smuzhiyun 		   "\n        managed  %lu"
1640*4882a593Smuzhiyun 		   "\n        cma      %lu",
1641*4882a593Smuzhiyun 		   zone_page_state(zone, NR_FREE_PAGES),
1642*4882a593Smuzhiyun 		   min_wmark_pages(zone),
1643*4882a593Smuzhiyun 		   low_wmark_pages(zone),
1644*4882a593Smuzhiyun 		   high_wmark_pages(zone),
1645*4882a593Smuzhiyun 		   zone->spanned_pages,
1646*4882a593Smuzhiyun 		   zone->present_pages,
1647*4882a593Smuzhiyun 		   zone_managed_pages(zone),
1648*4882a593Smuzhiyun 		   zone_cma_pages(zone));
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	seq_printf(m,
1651*4882a593Smuzhiyun 		   "\n        protection: (%ld",
1652*4882a593Smuzhiyun 		   zone->lowmem_reserve[0]);
1653*4882a593Smuzhiyun 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
1654*4882a593Smuzhiyun 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
1655*4882a593Smuzhiyun 	seq_putc(m, ')');
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	/* If unpopulated, no other information is useful */
1658*4882a593Smuzhiyun 	if (!populated_zone(zone)) {
1659*4882a593Smuzhiyun 		seq_putc(m, '\n');
1660*4882a593Smuzhiyun 		return;
1661*4882a593Smuzhiyun 	}
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1664*4882a593Smuzhiyun 		seq_printf(m, "\n      %-12s %lu", zone_stat_name(i),
1665*4882a593Smuzhiyun 			   zone_page_state(zone, i));
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1668*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1669*4882a593Smuzhiyun 		seq_printf(m, "\n      %-12s %lu", numa_stat_name(i),
1670*4882a593Smuzhiyun 			   zone_numa_state_snapshot(zone, i));
1671*4882a593Smuzhiyun #endif
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	seq_printf(m, "\n  pagesets");
1674*4882a593Smuzhiyun 	for_each_online_cpu(i) {
1675*4882a593Smuzhiyun 		struct per_cpu_pageset *pageset;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 		pageset = per_cpu_ptr(zone->pageset, i);
1678*4882a593Smuzhiyun 		seq_printf(m,
1679*4882a593Smuzhiyun 			   "\n    cpu: %i"
1680*4882a593Smuzhiyun 			   "\n              count: %i"
1681*4882a593Smuzhiyun 			   "\n              high:  %i"
1682*4882a593Smuzhiyun 			   "\n              batch: %i",
1683*4882a593Smuzhiyun 			   i,
1684*4882a593Smuzhiyun 			   pageset->pcp.count,
1685*4882a593Smuzhiyun 			   pageset->pcp.high,
1686*4882a593Smuzhiyun 			   pageset->pcp.batch);
1687*4882a593Smuzhiyun #ifdef CONFIG_SMP
1688*4882a593Smuzhiyun 		seq_printf(m, "\n  vm stats threshold: %d",
1689*4882a593Smuzhiyun 				pageset->stat_threshold);
1690*4882a593Smuzhiyun #endif
1691*4882a593Smuzhiyun 	}
1692*4882a593Smuzhiyun 	seq_printf(m,
1693*4882a593Smuzhiyun 		   "\n  node_unreclaimable:  %u"
1694*4882a593Smuzhiyun 		   "\n  start_pfn:           %lu",
1695*4882a593Smuzhiyun 		   pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1696*4882a593Smuzhiyun 		   zone->zone_start_pfn);
1697*4882a593Smuzhiyun 	seq_putc(m, '\n');
1698*4882a593Smuzhiyun }
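/*
 * An excerpt of the /proc/zoneinfo output built above might look like
 * this (illustrative numbers):
 *
 *	Node 0, zone   Normal
 *	  pages free     12345
 *	        min      1024
 *	        low      1280
 *	        high     1536
 *	...
 */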
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun /*
1701*4882a593Smuzhiyun  * Output information about zones in @pgdat.  All zones are printed regardless
1702*4882a593Smuzhiyun  * of whether they are populated or not: lowmem_reserve_ratio operates on the
1703*4882a593Smuzhiyun  * set of all zones and userspace would not be aware of such zones if they are
1704*4882a593Smuzhiyun  * suppressed here (zoneinfo displays the effect of lowmem_reserve_ratio).
1705*4882a593Smuzhiyun  */
1706*4882a593Smuzhiyun static int zoneinfo_show(struct seq_file *m, void *arg)
1707*4882a593Smuzhiyun {
1708*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
1709*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, false, false, zoneinfo_show_print);
1710*4882a593Smuzhiyun 	return 0;
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun static const struct seq_operations zoneinfo_op = {
1714*4882a593Smuzhiyun 	.start	= frag_start, /* iterate over all zones. The same as in
1715*4882a593Smuzhiyun 			       * fragmentation. */
1716*4882a593Smuzhiyun 	.next	= frag_next,
1717*4882a593Smuzhiyun 	.stop	= frag_stop,
1718*4882a593Smuzhiyun 	.show	= zoneinfo_show,
1719*4882a593Smuzhiyun };
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun #define NR_VMSTAT_ITEMS (NR_VM_ZONE_STAT_ITEMS + \
1722*4882a593Smuzhiyun 			 NR_VM_NUMA_STAT_ITEMS + \
1723*4882a593Smuzhiyun 			 NR_VM_NODE_STAT_ITEMS + \
1724*4882a593Smuzhiyun 			 NR_VM_WRITEBACK_STAT_ITEMS + \
1725*4882a593Smuzhiyun 			 (IS_ENABLED(CONFIG_VM_EVENT_COUNTERS) ? \
1726*4882a593Smuzhiyun 			  NR_VM_EVENT_ITEMS : 0))
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun static void *vmstat_start(struct seq_file *m, loff_t *pos)
1729*4882a593Smuzhiyun {
1730*4882a593Smuzhiyun 	unsigned long *v;
1731*4882a593Smuzhiyun 	int i;
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	if (*pos >= NR_VMSTAT_ITEMS)
1734*4882a593Smuzhiyun 		return NULL;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	BUILD_BUG_ON(ARRAY_SIZE(vmstat_text) < NR_VMSTAT_ITEMS);
1737*4882a593Smuzhiyun 	v = kmalloc_array(NR_VMSTAT_ITEMS, sizeof(unsigned long), GFP_KERNEL);
1738*4882a593Smuzhiyun 	m->private = v;
1739*4882a593Smuzhiyun 	if (!v)
1740*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1741*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
1742*4882a593Smuzhiyun 		v[i] = global_zone_page_state(i);
1743*4882a593Smuzhiyun 	v += NR_VM_ZONE_STAT_ITEMS;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1746*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
1747*4882a593Smuzhiyun 		v[i] = global_numa_state(i);
1748*4882a593Smuzhiyun 	v += NR_VM_NUMA_STAT_ITEMS;
1749*4882a593Smuzhiyun #endif
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
1752*4882a593Smuzhiyun 		v[i] = global_node_page_state_pages(i);
1753*4882a593Smuzhiyun 	v += NR_VM_NODE_STAT_ITEMS;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	global_dirty_limits(v + NR_DIRTY_BG_THRESHOLD,
1756*4882a593Smuzhiyun 			    v + NR_DIRTY_THRESHOLD);
1757*4882a593Smuzhiyun 	v += NR_VM_WRITEBACK_STAT_ITEMS;
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun #ifdef CONFIG_VM_EVENT_COUNTERS
1760*4882a593Smuzhiyun 	all_vm_events(v);
1761*4882a593Smuzhiyun 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
1762*4882a593Smuzhiyun 	v[PGPGOUT] /= 2;
1763*4882a593Smuzhiyun #endif
1764*4882a593Smuzhiyun 	return (unsigned long *)m->private + *pos;
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
1768*4882a593Smuzhiyun {
1769*4882a593Smuzhiyun 	(*pos)++;
1770*4882a593Smuzhiyun 	if (*pos >= NR_VMSTAT_ITEMS)
1771*4882a593Smuzhiyun 		return NULL;
1772*4882a593Smuzhiyun 	return (unsigned long *)m->private + *pos;
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun static int vmstat_show(struct seq_file *m, void *arg)
1776*4882a593Smuzhiyun {
1777*4882a593Smuzhiyun 	unsigned long *l = arg;
1778*4882a593Smuzhiyun 	unsigned long off = l - (unsigned long *)m->private;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 	seq_puts(m, vmstat_text[off]);
1781*4882a593Smuzhiyun 	seq_put_decimal_ull(m, " ", *l);
1782*4882a593Smuzhiyun 	seq_putc(m, '\n');
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun 	if (off == NR_VMSTAT_ITEMS - 1) {
1785*4882a593Smuzhiyun 		/*
1786*4882a593Smuzhiyun 		 * We've come to the end - add any deprecated counters to avoid
1787*4882a593Smuzhiyun 		 * breaking userspace which might depend on them being present.
1788*4882a593Smuzhiyun 		 */
1789*4882a593Smuzhiyun 		seq_puts(m, "nr_unstable 0\n");
1790*4882a593Smuzhiyun 	}
1791*4882a593Smuzhiyun 	return 0;
1792*4882a593Smuzhiyun }
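/*
 * Each /proc/vmstat row is "<name> <value>", e.g. (illustrative
 * values):
 *
 *	nr_free_pages 123456
 *	nr_zone_inactive_anon 7890
 *
 * with the deprecated "nr_unstable 0" appended as the final row.
 */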
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun static void vmstat_stop(struct seq_file *m, void *arg)
1795*4882a593Smuzhiyun {
1796*4882a593Smuzhiyun 	kfree(m->private);
1797*4882a593Smuzhiyun 	m->private = NULL;
1798*4882a593Smuzhiyun }
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun static const struct seq_operations vmstat_op = {
1801*4882a593Smuzhiyun 	.start	= vmstat_start,
1802*4882a593Smuzhiyun 	.next	= vmstat_next,
1803*4882a593Smuzhiyun 	.stop	= vmstat_stop,
1804*4882a593Smuzhiyun 	.show	= vmstat_show,
1805*4882a593Smuzhiyun };
1806*4882a593Smuzhiyun #endif /* CONFIG_PROC_FS */
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun #ifdef CONFIG_SMP
1809*4882a593Smuzhiyun static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
1810*4882a593Smuzhiyun int sysctl_stat_interval __read_mostly = HZ;
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
1813*4882a593Smuzhiyun static void refresh_vm_stats(struct work_struct *work)
1814*4882a593Smuzhiyun {
1815*4882a593Smuzhiyun 	refresh_cpu_vm_stats(true);
1816*4882a593Smuzhiyun }
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun int vmstat_refresh(struct ctl_table *table, int write,
1819*4882a593Smuzhiyun 		   void *buffer, size_t *lenp, loff_t *ppos)
1820*4882a593Smuzhiyun {
1821*4882a593Smuzhiyun 	long val;
1822*4882a593Smuzhiyun 	int err;
1823*4882a593Smuzhiyun 	int i;
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 	/*
1826*4882a593Smuzhiyun 	 * The regular update, every sysctl_stat_interval, may come later
1827*4882a593Smuzhiyun 	 * than expected: leaving a significant amount in per_cpu buckets.
1828*4882a593Smuzhiyun 	 * This is particularly misleading when checking a quantity of HUGE
1829*4882a593Smuzhiyun 	 * pages, immediately after running a test.  /proc/sys/vm/stat_refresh,
1830*4882a593Smuzhiyun 	 * which can equally be echo'ed to or cat'ted from (by root),
1831*4882a593Smuzhiyun 	 * can be used to update the stats just before reading them.
1832*4882a593Smuzhiyun 	 *
1833*4882a593Smuzhiyun 	 * Oh, and since global_zone_page_state() etc. are so careful to hide
1834*4882a593Smuzhiyun 	 * transiently negative values, report an error here if any of
1835*4882a593Smuzhiyun 	 * the stats is negative, so we know to go looking for imbalance.
1836*4882a593Smuzhiyun 	 */
1837*4882a593Smuzhiyun 	err = schedule_on_each_cpu(refresh_vm_stats);
1838*4882a593Smuzhiyun 	if (err)
1839*4882a593Smuzhiyun 		return err;
1840*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
1841*4882a593Smuzhiyun 		val = atomic_long_read(&vm_zone_stat[i]);
1842*4882a593Smuzhiyun 		if (val < 0) {
1843*4882a593Smuzhiyun 			pr_warn("%s: %s %ld\n",
1844*4882a593Smuzhiyun 				__func__, zone_stat_name(i), val);
1845*4882a593Smuzhiyun 			err = -EINVAL;
1846*4882a593Smuzhiyun 		}
1847*4882a593Smuzhiyun 	}
1848*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1849*4882a593Smuzhiyun 	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++) {
1850*4882a593Smuzhiyun 		val = atomic_long_read(&vm_numa_stat[i]);
1851*4882a593Smuzhiyun 		if (val < 0) {
1852*4882a593Smuzhiyun 			pr_warn("%s: %s %ld\n",
1853*4882a593Smuzhiyun 				__func__, numa_stat_name(i), val);
1854*4882a593Smuzhiyun 			err = -EINVAL;
1855*4882a593Smuzhiyun 		}
1856*4882a593Smuzhiyun 	}
1857*4882a593Smuzhiyun #endif
1858*4882a593Smuzhiyun 	if (err)
1859*4882a593Smuzhiyun 		return err;
1860*4882a593Smuzhiyun 	if (write)
1861*4882a593Smuzhiyun 		*ppos += *lenp;
1862*4882a593Smuzhiyun 	else
1863*4882a593Smuzhiyun 		*lenp = 0;
1864*4882a593Smuzhiyun 	return 0;
1865*4882a593Smuzhiyun }
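/*
 * Illustrative usage (as root): "echo 1 > /proc/sys/vm/stat_refresh"
 * flushes the per-cpu diffs and makes the write fail with -EINVAL if
 * any global counter is found negative; "cat /proc/sys/vm/stat_refresh"
 * flushes in the same way.
 */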
1866*4882a593Smuzhiyun #endif /* CONFIG_PROC_FS */
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun static void vmstat_update(struct work_struct *w)
1869*4882a593Smuzhiyun {
1870*4882a593Smuzhiyun 	if (refresh_cpu_vm_stats(true)) {
1871*4882a593Smuzhiyun 		/*
1872*4882a593Smuzhiyun 		 * Counters were updated so we expect more updates
1873*4882a593Smuzhiyun 		 * to occur in the future. Keep on running the
1874*4882a593Smuzhiyun 		 * update worker thread.
1875*4882a593Smuzhiyun 		 */
1876*4882a593Smuzhiyun 		queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
1877*4882a593Smuzhiyun 				this_cpu_ptr(&vmstat_work),
1878*4882a593Smuzhiyun 				round_jiffies_relative(sysctl_stat_interval));
1879*4882a593Smuzhiyun 	}
1880*4882a593Smuzhiyun }
1881*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun /*
1888*4882a593Smuzhiyun  * Check if the diffs for a certain cpu indicate that
1889*4882a593Smuzhiyun  * an update is needed.
1890*4882a593Smuzhiyun  */
1891*4882a593Smuzhiyun static bool need_update(int cpu)
1892*4882a593Smuzhiyun {
1893*4882a593Smuzhiyun 	struct zone *zone;
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	for_each_populated_zone(zone) {
1896*4882a593Smuzhiyun 		struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu);
1897*4882a593Smuzhiyun 
1898*4882a593Smuzhiyun 		BUILD_BUG_ON(sizeof(p->vm_stat_diff[0]) != 1);
1899*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1900*4882a593Smuzhiyun 		BUILD_BUG_ON(sizeof(p->vm_numa_stat_diff[0]) != 2);
1901*4882a593Smuzhiyun #endif
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 		/*
1904*4882a593Smuzhiyun 		 * The fast way of checking if there are any vmstat diffs.
1905*4882a593Smuzhiyun 		 */
1906*4882a593Smuzhiyun 		if (memchr_inv(p->vm_stat_diff, 0, NR_VM_ZONE_STAT_ITEMS *
1907*4882a593Smuzhiyun 			       sizeof(p->vm_stat_diff[0])))
1908*4882a593Smuzhiyun 			return true;
1909*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1910*4882a593Smuzhiyun 		if (memchr_inv(p->vm_numa_stat_diff, 0, NR_VM_NUMA_STAT_ITEMS *
1911*4882a593Smuzhiyun 			       sizeof(p->vm_numa_stat_diff[0])))
1912*4882a593Smuzhiyun 			return true;
1913*4882a593Smuzhiyun #endif
1914*4882a593Smuzhiyun 	}
1915*4882a593Smuzhiyun 	return false;
1916*4882a593Smuzhiyun }
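/*
 * memchr_inv() returns the address of the first byte that differs from
 * the given value, or NULL when all bytes match, so the checks above
 * detect any pending per-cpu delta without testing the items one by one.
 */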
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun /*
1919*4882a593Smuzhiyun  * Switch off vmstat processing and then fold all the remaining differentials
1920*4882a593Smuzhiyun  * until the diffs stay at zero. The function is used by NOHZ and can only be
1921*4882a593Smuzhiyun  * invoked when tick processing is not active.
1922*4882a593Smuzhiyun  */
1923*4882a593Smuzhiyun void quiet_vmstat(void)
1924*4882a593Smuzhiyun {
1925*4882a593Smuzhiyun 	if (system_state != SYSTEM_RUNNING)
1926*4882a593Smuzhiyun 		return;
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	if (!delayed_work_pending(this_cpu_ptr(&vmstat_work)))
1929*4882a593Smuzhiyun 		return;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	if (!need_update(smp_processor_id()))
1932*4882a593Smuzhiyun 		return;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	/*
1935*4882a593Smuzhiyun 	 * Just refresh counters and do not care about the pending delayed
1936*4882a593Smuzhiyun 	 * vmstat_update. It doesn't fire that often to matter and canceling
1937*4882a593Smuzhiyun 	 * it would be too expensive from this path.
1938*4882a593Smuzhiyun 	 * vmstat_shepherd will take care about that for us.
1939*4882a593Smuzhiyun 	 */
1940*4882a593Smuzhiyun 	refresh_cpu_vm_stats(false);
1941*4882a593Smuzhiyun }
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun /*
1944*4882a593Smuzhiyun  * Shepherd worker thread that checks the
1945*4882a593Smuzhiyun  * differentials of processors whose vmstat
1946*4882a593Smuzhiyun  * update workers have been disabled due to
1947*4882a593Smuzhiyun  * inactivity.
1948*4882a593Smuzhiyun  */
1949*4882a593Smuzhiyun static void vmstat_shepherd(struct work_struct *w);
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun static void vmstat_shepherd(struct work_struct *w)
1954*4882a593Smuzhiyun {
1955*4882a593Smuzhiyun 	int cpu;
1956*4882a593Smuzhiyun 
1957*4882a593Smuzhiyun 	get_online_cpus();
1958*4882a593Smuzhiyun 	/* Check processors whose vmstat worker threads have been disabled */
1959*4882a593Smuzhiyun 	for_each_online_cpu(cpu) {
1960*4882a593Smuzhiyun 		struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 		if (!delayed_work_pending(dw) && need_update(cpu))
1963*4882a593Smuzhiyun 			queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0);
1964*4882a593Smuzhiyun 	}
1965*4882a593Smuzhiyun 	put_online_cpus();
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun 	schedule_delayed_work(&shepherd,
1968*4882a593Smuzhiyun 		round_jiffies_relative(sysctl_stat_interval));
1969*4882a593Smuzhiyun }
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun static void __init start_shepherd_timer(void)
1972*4882a593Smuzhiyun {
1973*4882a593Smuzhiyun 	int cpu;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	for_each_possible_cpu(cpu)
1976*4882a593Smuzhiyun 		INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
1977*4882a593Smuzhiyun 			vmstat_update);
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	schedule_delayed_work(&shepherd,
1980*4882a593Smuzhiyun 		round_jiffies_relative(sysctl_stat_interval));
1981*4882a593Smuzhiyun }
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun static void __init init_cpu_node_state(void)
1984*4882a593Smuzhiyun {
1985*4882a593Smuzhiyun 	int node;
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 	for_each_online_node(node) {
1988*4882a593Smuzhiyun 		if (cpumask_weight(cpumask_of_node(node)) > 0)
1989*4882a593Smuzhiyun 			node_set_state(node, N_CPU);
1990*4882a593Smuzhiyun 	}
1991*4882a593Smuzhiyun }
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun static int vmstat_cpu_online(unsigned int cpu)
1994*4882a593Smuzhiyun {
1995*4882a593Smuzhiyun 	refresh_zone_stat_thresholds();
1996*4882a593Smuzhiyun 	node_set_state(cpu_to_node(cpu), N_CPU);
1997*4882a593Smuzhiyun 	return 0;
1998*4882a593Smuzhiyun }
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun static int vmstat_cpu_down_prep(unsigned int cpu)
2001*4882a593Smuzhiyun {
2002*4882a593Smuzhiyun 	cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
2003*4882a593Smuzhiyun 	return 0;
2004*4882a593Smuzhiyun }
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun static int vmstat_cpu_dead(unsigned int cpu)
2007*4882a593Smuzhiyun {
2008*4882a593Smuzhiyun 	const struct cpumask *node_cpus;
2009*4882a593Smuzhiyun 	int node;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	node = cpu_to_node(cpu);
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	refresh_zone_stat_thresholds();
2014*4882a593Smuzhiyun 	node_cpus = cpumask_of_node(node);
2015*4882a593Smuzhiyun 	if (cpumask_weight(node_cpus) > 0)
2016*4882a593Smuzhiyun 		return 0;
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	node_clear_state(node, N_CPU);
2019*4882a593Smuzhiyun 	return 0;
2020*4882a593Smuzhiyun }
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun #endif
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun struct workqueue_struct *mm_percpu_wq;
2025*4882a593Smuzhiyun 
2026*4882a593Smuzhiyun void __init init_mm_internals(void)
2027*4882a593Smuzhiyun {
2028*4882a593Smuzhiyun 	int ret __maybe_unused;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	mm_percpu_wq = alloc_workqueue("mm_percpu_wq", WQ_MEM_RECLAIM, 0);
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun #ifdef CONFIG_SMP
2033*4882a593Smuzhiyun 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
2034*4882a593Smuzhiyun 					NULL, vmstat_cpu_dead);
2035*4882a593Smuzhiyun 	if (ret < 0)
2036*4882a593Smuzhiyun 		pr_err("vmstat: failed to register 'dead' hotplug state\n");
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
2039*4882a593Smuzhiyun 					vmstat_cpu_online,
2040*4882a593Smuzhiyun 					vmstat_cpu_down_prep);
2041*4882a593Smuzhiyun 	if (ret < 0)
2042*4882a593Smuzhiyun 		pr_err("vmstat: failed to register 'online' hotplug state\n");
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	get_online_cpus();
2045*4882a593Smuzhiyun 	init_cpu_node_state();
2046*4882a593Smuzhiyun 	put_online_cpus();
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	start_shepherd_timer();
2049*4882a593Smuzhiyun #endif
2050*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
2051*4882a593Smuzhiyun 	proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
2052*4882a593Smuzhiyun 	proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
2053*4882a593Smuzhiyun 	proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
2054*4882a593Smuzhiyun 	proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
2055*4882a593Smuzhiyun #endif
2056*4882a593Smuzhiyun }
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun /*
2061*4882a593Smuzhiyun  * Return an index indicating how much of the available free memory is
2062*4882a593Smuzhiyun  * unusable for an allocation of the requested size.
2063*4882a593Smuzhiyun  */
2064*4882a593Smuzhiyun static int unusable_free_index(unsigned int order,
2065*4882a593Smuzhiyun 				struct contig_page_info *info)
2066*4882a593Smuzhiyun {
2067*4882a593Smuzhiyun 	/* No free memory is interpreted as all free memory is unusable */
2068*4882a593Smuzhiyun 	if (info->free_pages == 0)
2069*4882a593Smuzhiyun 		return 1000;
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 	/*
2072*4882a593Smuzhiyun 	 * Index should be a value between 0 and 1. Return a value to 3
2073*4882a593Smuzhiyun 	 * decimal places.
2074*4882a593Smuzhiyun 	 *
2075*4882a593Smuzhiyun 	 * 0 => no fragmentation
2076*4882a593Smuzhiyun 	 * 1 => high fragmentation
2077*4882a593Smuzhiyun 	 */
2078*4882a593Smuzhiyun 	return div_u64((info->free_pages - (info->free_blocks_suitable << order)) * 1000ULL, info->free_pages);
2080*4882a593Smuzhiyun }
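/*
 * Worked example (illustrative values): with order = 4,
 * info->free_pages = 1000 and info->free_blocks_suitable = 30, the
 * result is (1000 - (30 << 4)) * 1000 / 1000 = 520, displayed by
 * unusable_show_print() as 0.520.
 */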
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun static void unusable_show_print(struct seq_file *m,
2083*4882a593Smuzhiyun 					pg_data_t *pgdat, struct zone *zone)
2084*4882a593Smuzhiyun {
2085*4882a593Smuzhiyun 	unsigned int order;
2086*4882a593Smuzhiyun 	int index;
2087*4882a593Smuzhiyun 	struct contig_page_info info;
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	seq_printf(m, "Node %d, zone %8s ",
2090*4882a593Smuzhiyun 				pgdat->node_id,
2091*4882a593Smuzhiyun 				zone->name);
2092*4882a593Smuzhiyun 	for (order = 0; order < MAX_ORDER; ++order) {
2093*4882a593Smuzhiyun 		fill_contig_page_info(zone, order, &info);
2094*4882a593Smuzhiyun 		index = unusable_free_index(order, &info);
2095*4882a593Smuzhiyun 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2096*4882a593Smuzhiyun 	}
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	seq_putc(m, '\n');
2099*4882a593Smuzhiyun }
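/*
 * A debugfs line from the above might look like this (illustrative
 * indices, one per order):
 *
 *	Node 0, zone   Normal 0.000 0.012 0.045 0.120 0.520 ...
 */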
2100*4882a593Smuzhiyun 
2101*4882a593Smuzhiyun /*
2102*4882a593Smuzhiyun  * Display unusable free space index
2103*4882a593Smuzhiyun  *
2104*4882a593Smuzhiyun  * The unusable free space index measures how much of the available free
2105*4882a593Smuzhiyun  * memory cannot be used to satisfy an allocation of a given size and is a
2106*4882a593Smuzhiyun  * value between 0 and 1. The higher the value, the more of free memory is
2107*4882a593Smuzhiyun  * unusable and by implication, the worse the external fragmentation is. This
2108*4882a593Smuzhiyun  * can be expressed as a percentage by multiplying by 100.
2109*4882a593Smuzhiyun  */
2110*4882a593Smuzhiyun static int unusable_show(struct seq_file *m, void *arg)
2111*4882a593Smuzhiyun {
2112*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	/* check memoryless node */
2115*4882a593Smuzhiyun 	if (!node_state(pgdat->node_id, N_MEMORY))
2116*4882a593Smuzhiyun 		return 0;
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, false, unusable_show_print);
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	return 0;
2121*4882a593Smuzhiyun }
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun static const struct seq_operations unusable_sops = {
2124*4882a593Smuzhiyun 	.start	= frag_start,
2125*4882a593Smuzhiyun 	.next	= frag_next,
2126*4882a593Smuzhiyun 	.stop	= frag_stop,
2127*4882a593Smuzhiyun 	.show	= unusable_show,
2128*4882a593Smuzhiyun };
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(unusable);
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun static void extfrag_show_print(struct seq_file *m,
2133*4882a593Smuzhiyun 					pg_data_t *pgdat, struct zone *zone)
2134*4882a593Smuzhiyun {
2135*4882a593Smuzhiyun 	unsigned int order;
2136*4882a593Smuzhiyun 	int index;
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 	/* Alloc on stack as interrupts are disabled for zone walk */
2139*4882a593Smuzhiyun 	struct contig_page_info info;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 	seq_printf(m, "Node %d, zone %8s ",
2142*4882a593Smuzhiyun 				pgdat->node_id,
2143*4882a593Smuzhiyun 				zone->name);
2144*4882a593Smuzhiyun 	for (order = 0; order < MAX_ORDER; ++order) {
2145*4882a593Smuzhiyun 		fill_contig_page_info(zone, order, &info);
2146*4882a593Smuzhiyun 		index = __fragmentation_index(order, &info);
2147*4882a593Smuzhiyun 		seq_printf(m, "%d.%03d ", index / 1000, index % 1000);
2148*4882a593Smuzhiyun 	}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	seq_putc(m, '\n');
2151*4882a593Smuzhiyun }
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun /*
2154*4882a593Smuzhiyun  * Display fragmentation index for orders that allocations would fail for
2155*4882a593Smuzhiyun  */
2156*4882a593Smuzhiyun static int extfrag_show(struct seq_file *m, void *arg)
2157*4882a593Smuzhiyun {
2158*4882a593Smuzhiyun 	pg_data_t *pgdat = (pg_data_t *)arg;
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	walk_zones_in_node(m, pgdat, true, false, extfrag_show_print);
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	return 0;
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun static const struct seq_operations extfrag_sops = {
2166*4882a593Smuzhiyun 	.start	= frag_start,
2167*4882a593Smuzhiyun 	.next	= frag_next,
2168*4882a593Smuzhiyun 	.stop	= frag_stop,
2169*4882a593Smuzhiyun 	.show	= extfrag_show,
2170*4882a593Smuzhiyun };
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(extfrag);
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun static int __init extfrag_debug_init(void)
2175*4882a593Smuzhiyun {
2176*4882a593Smuzhiyun 	struct dentry *extfrag_debug_root;
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
2179*4882a593Smuzhiyun 
2180*4882a593Smuzhiyun 	debugfs_create_file("unusable_index", 0444, extfrag_debug_root, NULL,
2181*4882a593Smuzhiyun 			    &unusable_fops);
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	debugfs_create_file("extfrag_index", 0444, extfrag_debug_root, NULL,
2184*4882a593Smuzhiyun 			    &extfrag_fops);
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	return 0;
2187*4882a593Smuzhiyun }
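/*
 * With debugfs mounted at its usual location, the two files created
 * above appear as /sys/kernel/debug/extfrag/unusable_index and
 * /sys/kernel/debug/extfrag/extfrag_index.
 */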
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun module_init(extfrag_debug_init);
2190*4882a593Smuzhiyun #endif