xref: /OK3568_Linux_fs/kernel/include/linux/vmstat.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _LINUX_VMSTAT_H
3*4882a593Smuzhiyun #define _LINUX_VMSTAT_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun #include <linux/types.h>
6*4882a593Smuzhiyun #include <linux/percpu.h>
7*4882a593Smuzhiyun #include <linux/mmzone.h>
8*4882a593Smuzhiyun #include <linux/vm_event_item.h>
9*4882a593Smuzhiyun #include <linux/atomic.h>
10*4882a593Smuzhiyun #include <linux/static_key.h>
11*4882a593Smuzhiyun #include <linux/mmdebug.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun extern int sysctl_stat_interval;
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun #ifdef CONFIG_NUMA
16*4882a593Smuzhiyun #define ENABLE_NUMA_STAT   1
17*4882a593Smuzhiyun #define DISABLE_NUMA_STAT   0
18*4882a593Smuzhiyun extern int sysctl_vm_numa_stat;
19*4882a593Smuzhiyun DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
20*4882a593Smuzhiyun int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
21*4882a593Smuzhiyun 		void *buffer, size_t *length, loff_t *ppos);
22*4882a593Smuzhiyun #endif
23*4882a593Smuzhiyun 
/*
 * Per-pass reclaim statistics; the nr_* fields are page counts.
 * NOTE(review): producers/consumers live outside this header (likely
 * mm/vmscan.c) — confirm exact field semantics there.
 */
struct reclaim_stat {
	unsigned nr_dirty;
	unsigned nr_unqueued_dirty;
	unsigned nr_congested;
	unsigned nr_writeback;
	unsigned nr_immediate;
	unsigned nr_pageout;
	unsigned nr_activate[ANON_AND_FILE];	/* one slot each for anon and file */
	unsigned nr_ref_keep;
	unsigned nr_unmap_fail;
	unsigned nr_lazyfree_fail;
};
36*4882a593Smuzhiyun 
/*
 * Writeback threshold stats whose names follow the zone/NUMA/node stat
 * names in vmstat_text[] (see writeback_stat_name()).
 * NR_VM_WRITEBACK_STAT_ITEMS must stay last: it doubles as the number
 * of entries in this enum.
 */
enum writeback_stat_item {
	NR_DIRTY_THRESHOLD,
	NR_DIRTY_BG_THRESHOLD,
	NR_VM_WRITEBACK_STAT_ITEMS,
};
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #ifdef CONFIG_VM_EVENT_COUNTERS
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun  * Light weight per cpu counter implementation.
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  * Counters should only be incremented and no critical kernel component
48*4882a593Smuzhiyun  * should rely on the counter values.
49*4882a593Smuzhiyun  *
50*4882a593Smuzhiyun  * Counters are handled completely inline. On many platforms the code
51*4882a593Smuzhiyun  * generated will simply be the increment of a global address.
52*4882a593Smuzhiyun  */
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun struct vm_event_state {
55*4882a593Smuzhiyun 	unsigned long event[NR_VM_EVENT_ITEMS];
56*4882a593Smuzhiyun };
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun /*
61*4882a593Smuzhiyun  * vm counters are allowed to be racy. Use raw_cpu_ops to avoid the
62*4882a593Smuzhiyun  * local_irq_disable overhead.
63*4882a593Smuzhiyun  */
/*
 * Increment one vm event counter on the current CPU.  Uses raw_cpu_inc,
 * so no local_irq_disable overhead; a racy lost increment is acceptable
 * per the comment above.
 */
static inline void __count_vm_event(enum vm_event_item item)
{
	raw_cpu_inc(vm_event_states.event[item]);
}
68*4882a593Smuzhiyun 
/*
 * Preemption-safe variant of __count_vm_event(): this_cpu_inc takes
 * care of the preemption handling itself.
 */
static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}
73*4882a593Smuzhiyun 
/*
 * Add @delta to one vm event counter on the current CPU without
 * disabling interrupts (racy by design, see comment above).
 */
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	raw_cpu_add(vm_event_states.event[item], delta);
}
78*4882a593Smuzhiyun 
/*
 * Preemption-safe variant of __count_vm_events().
 */
static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun extern void all_vm_events(unsigned long *);
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun extern void vm_events_fold_cpu(int cpu);
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #else
89*4882a593Smuzhiyun 
/* Disable counters: all vm event accounting compiles away to nothing. */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
/* NB: leaves @ret untouched — callers must not expect it to be filled. */
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun #endif /* CONFIG_VM_EVENT_COUNTERS */
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #ifdef CONFIG_NUMA_BALANCING
113*4882a593Smuzhiyun #define count_vm_numa_event(x)     count_vm_event(x)
114*4882a593Smuzhiyun #define count_vm_numa_events(x, y) count_vm_events(x, y)
115*4882a593Smuzhiyun #else
116*4882a593Smuzhiyun #define count_vm_numa_event(x) do {} while (0)
117*4882a593Smuzhiyun #define count_vm_numa_events(x, y) do { (void)(y); } while (0)
118*4882a593Smuzhiyun #endif /* CONFIG_NUMA_BALANCING */
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_TLBFLUSH
121*4882a593Smuzhiyun #define count_vm_tlb_event(x)	   count_vm_event(x)
122*4882a593Smuzhiyun #define count_vm_tlb_events(x, y)  count_vm_events(x, y)
123*4882a593Smuzhiyun #else
124*4882a593Smuzhiyun #define count_vm_tlb_event(x)     do {} while (0)
125*4882a593Smuzhiyun #define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
126*4882a593Smuzhiyun #endif
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_VM_VMACACHE
129*4882a593Smuzhiyun #define count_vm_vmacache_event(x) count_vm_event(x)
130*4882a593Smuzhiyun #else
131*4882a593Smuzhiyun #define count_vm_vmacache_event(x) do {} while (0)
132*4882a593Smuzhiyun #endif
133*4882a593Smuzhiyun 
/*
 * Count a per-zone vm event: the event items are laid out in zone
 * order, so offsetting item##_NORMAL by (zid - ZONE_NORMAL) selects
 * the entry for zone index @zid.
 */
#define __count_zid_vm_events(item, zid, delta) \
	__count_vm_events(item##_NORMAL - ZONE_NORMAL + zid, delta)
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun /*
138*4882a593Smuzhiyun  * Zone and node-based page accounting with per cpu differentials.
139*4882a593Smuzhiyun  */
140*4882a593Smuzhiyun extern atomic_long_t vm_zone_stat[NR_VM_ZONE_STAT_ITEMS];
141*4882a593Smuzhiyun extern atomic_long_t vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
142*4882a593Smuzhiyun extern atomic_long_t vm_node_stat[NR_VM_NODE_STAT_ITEMS];
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun #ifdef CONFIG_NUMA
/* Fold @x into both the zone-local and the global NUMA counter. */
static inline void zone_numa_state_add(long x, struct zone *zone,
				 enum numa_stat_item item)
{
	atomic_long_add(x, &zone->vm_numa_stat[item]);
	atomic_long_add(x, &vm_numa_stat[item]);
}
151*4882a593Smuzhiyun 
global_numa_state(enum numa_stat_item item)152*4882a593Smuzhiyun static inline unsigned long global_numa_state(enum numa_stat_item item)
153*4882a593Smuzhiyun {
154*4882a593Smuzhiyun 	long x = atomic_long_read(&vm_numa_stat[item]);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 	return x;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun 
/*
 * Snapshot of a zone's NUMA counter with the pending per-cpu
 * differentials folded in.  There is no synchronization with the
 * updaters, so the result is only approximately accurate.
 */
static inline unsigned long zone_numa_state_snapshot(struct zone *zone,
					enum numa_stat_item item)
{
	long x = atomic_long_read(&zone->vm_numa_stat[item]);
	int cpu;

	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_numa_stat_diff[item];

	return x;
}
170*4882a593Smuzhiyun #endif /* CONFIG_NUMA */
171*4882a593Smuzhiyun 
/* Fold @x into both the zone-local and the global zone counter. */
static inline void zone_page_state_add(long x, struct zone *zone,
				 enum zone_stat_item item)
{
	atomic_long_add(x, &zone->vm_stat[item]);
	atomic_long_add(x, &vm_zone_stat[item]);
}
178*4882a593Smuzhiyun 
/* Fold @x into both the node-local and the global node counter. */
static inline void node_page_state_add(long x, struct pglist_data *pgdat,
				 enum node_stat_item item)
{
	atomic_long_add(x, &pgdat->vm_stat[item]);
	atomic_long_add(x, &vm_node_stat[item]);
}
185*4882a593Smuzhiyun 
/*
 * Global zone counter for @item.  On SMP the unfolded per-cpu deltas
 * can transiently drive the sum negative; clamp to zero rather than
 * return a huge unsigned value.
 */
static inline unsigned long global_zone_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_zone_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun static inline
global_node_page_state_pages(enum node_stat_item item)197*4882a593Smuzhiyun unsigned long global_node_page_state_pages(enum node_stat_item item)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun 	long x = atomic_long_read(&vm_node_stat[item]);
200*4882a593Smuzhiyun #ifdef CONFIG_SMP
201*4882a593Smuzhiyun 	if (x < 0)
202*4882a593Smuzhiyun 		x = 0;
203*4882a593Smuzhiyun #endif
204*4882a593Smuzhiyun 	return x;
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun 
/*
 * Global node counter for page-based items only: warns if @item is a
 * byte-based item (use global_node_page_state_pages() for those).
 */
static inline unsigned long global_node_page_state(enum node_stat_item item)
{
	VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));

	return global_node_page_state_pages(item);
}
213*4882a593Smuzhiyun 
/*
 * Zone counter for @item without folding per-cpu deltas.  On SMP the
 * pending deltas can transiently drive the sum negative; clamp to zero.
 */
static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}
224*4882a593Smuzhiyun 
/*
 * More accurate version that also considers the currently pending
 * deltas. For that we need to loop over all cpus to find the current
 * deltas. There is no synchronization so the result cannot be
 * exactly accurate either.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	/* The racy fold above may still yield a transiently negative sum. */
	if (x < 0)
		x = 0;
#endif
	return x;
}
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun #ifdef CONFIG_NUMA
248*4882a593Smuzhiyun extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item);
249*4882a593Smuzhiyun extern unsigned long sum_zone_node_page_state(int node,
250*4882a593Smuzhiyun 					      enum zone_stat_item item);
251*4882a593Smuzhiyun extern unsigned long sum_zone_numa_state(int node, enum numa_stat_item item);
252*4882a593Smuzhiyun extern unsigned long node_page_state(struct pglist_data *pgdat,
253*4882a593Smuzhiyun 						enum node_stat_item item);
254*4882a593Smuzhiyun extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
255*4882a593Smuzhiyun 					   enum node_stat_item item);
256*4882a593Smuzhiyun #else
257*4882a593Smuzhiyun #define sum_zone_node_page_state(node, item) global_zone_page_state(item)
258*4882a593Smuzhiyun #define node_page_state(node, item) global_node_page_state(item)
259*4882a593Smuzhiyun #define node_page_state_pages(node, item) global_node_page_state_pages(item)
260*4882a593Smuzhiyun #endif /* CONFIG_NUMA */
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun #ifdef CONFIG_SMP
263*4882a593Smuzhiyun void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
264*4882a593Smuzhiyun void __inc_zone_page_state(struct page *, enum zone_stat_item);
265*4882a593Smuzhiyun void __dec_zone_page_state(struct page *, enum zone_stat_item);
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun void __mod_node_page_state(struct pglist_data *, enum node_stat_item item, long);
268*4882a593Smuzhiyun void __inc_node_page_state(struct page *, enum node_stat_item);
269*4882a593Smuzhiyun void __dec_node_page_state(struct page *, enum node_stat_item);
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
272*4882a593Smuzhiyun void inc_zone_page_state(struct page *, enum zone_stat_item);
273*4882a593Smuzhiyun void dec_zone_page_state(struct page *, enum zone_stat_item);
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun void mod_node_page_state(struct pglist_data *, enum node_stat_item, long);
276*4882a593Smuzhiyun void inc_node_page_state(struct page *, enum node_stat_item);
277*4882a593Smuzhiyun void dec_node_page_state(struct page *, enum node_stat_item);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun extern void inc_node_state(struct pglist_data *, enum node_stat_item);
280*4882a593Smuzhiyun extern void __inc_zone_state(struct zone *, enum zone_stat_item);
281*4882a593Smuzhiyun extern void __inc_node_state(struct pglist_data *, enum node_stat_item);
282*4882a593Smuzhiyun extern void dec_zone_state(struct zone *, enum zone_stat_item);
283*4882a593Smuzhiyun extern void __dec_zone_state(struct zone *, enum zone_stat_item);
284*4882a593Smuzhiyun extern void __dec_node_state(struct pglist_data *, enum node_stat_item);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun void quiet_vmstat(void);
287*4882a593Smuzhiyun void cpu_vm_stats_fold(int cpu);
288*4882a593Smuzhiyun void refresh_zone_stat_thresholds(void);
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun struct ctl_table;
291*4882a593Smuzhiyun int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
292*4882a593Smuzhiyun 		loff_t *ppos);
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun int calculate_pressure_threshold(struct zone *zone);
297*4882a593Smuzhiyun int calculate_normal_threshold(struct zone *zone);
298*4882a593Smuzhiyun void set_pgdat_percpu_threshold(pg_data_t *pgdat,
299*4882a593Smuzhiyun 				int (*calculate_pressure)(struct zone *));
300*4882a593Smuzhiyun #else /* CONFIG_SMP */
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun /*
303*4882a593Smuzhiyun  * We do not maintain differentials in a single processor configuration.
304*4882a593Smuzhiyun  * The functions directly modify the zone and global counters.
305*4882a593Smuzhiyun  */
/*
 * UP variant: with no per-cpu differentials, fold the delta straight
 * into the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, long delta)
{
	zone_page_state_add(delta, zone, item);
}
311*4882a593Smuzhiyun 
__mod_node_page_state(struct pglist_data * pgdat,enum node_stat_item item,int delta)312*4882a593Smuzhiyun static inline void __mod_node_page_state(struct pglist_data *pgdat,
313*4882a593Smuzhiyun 			enum node_stat_item item, int delta)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun 	if (vmstat_item_in_bytes(item)) {
316*4882a593Smuzhiyun 		VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1));
317*4882a593Smuzhiyun 		delta >>= PAGE_SHIFT;
318*4882a593Smuzhiyun 	}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	node_page_state_add(delta, pgdat, item);
321*4882a593Smuzhiyun }
322*4882a593Smuzhiyun 
/* UP variant: bump the zone-local and global zone counters directly. */
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_zone_stat[item]);
}
328*4882a593Smuzhiyun 
/* UP variant: bump the node-local and global node counters directly. */
static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_inc(&pgdat->vm_stat[item]);
	atomic_long_inc(&vm_node_stat[item]);
}
334*4882a593Smuzhiyun 
/* UP variant: decrement the zone-local and global zone counters. */
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_zone_stat[item]);
}
340*4882a593Smuzhiyun 
/* UP variant: decrement the node-local and global node counters. */
static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
{
	atomic_long_dec(&pgdat->vm_stat[item]);
	atomic_long_dec(&vm_node_stat[item]);
}
346*4882a593Smuzhiyun 
/* Increment the zone counter for the zone that @page belongs to. */
static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}
352*4882a593Smuzhiyun 
/* Increment the node counter for the node that @page belongs to. */
static inline void __inc_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__inc_node_state(page_pgdat(page), item);
}
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 
/* Decrement the zone counter for the zone that @page belongs to. */
static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}
365*4882a593Smuzhiyun 
/* Decrement the node counter for the node that @page belongs to. */
static inline void __dec_node_page_state(struct page *page,
			enum node_stat_item item)
{
	__dec_node_state(page_pgdat(page), item);
}
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun /*
374*4882a593Smuzhiyun  * We only use atomic operations to update counters. So there is no need to
375*4882a593Smuzhiyun  * disable interrupts.
376*4882a593Smuzhiyun  */
377*4882a593Smuzhiyun #define inc_zone_page_state __inc_zone_page_state
378*4882a593Smuzhiyun #define dec_zone_page_state __dec_zone_page_state
379*4882a593Smuzhiyun #define mod_zone_page_state __mod_zone_page_state
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun #define inc_node_page_state __inc_node_page_state
382*4882a593Smuzhiyun #define dec_node_page_state __dec_node_page_state
383*4882a593Smuzhiyun #define mod_node_page_state __mod_node_page_state
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun #define inc_zone_state __inc_zone_state
386*4882a593Smuzhiyun #define inc_node_state __inc_node_state
387*4882a593Smuzhiyun #define dec_zone_state __dec_zone_state
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun #define set_pgdat_percpu_threshold(pgdat, callback) { }
390*4882a593Smuzhiyun 
/* UP stubs: no per-cpu thresholds or differentials to maintain. */
static inline void refresh_zone_stat_thresholds(void) { }
static inline void cpu_vm_stats_fold(int cpu) { }
static inline void quiet_vmstat(void) { }
394*4882a593Smuzhiyun 
/* UP stub: there are no per-cpu zonestat differentials to drain. */
static inline void drain_zonestat(struct zone *zone,
			struct per_cpu_pageset *pset) { }
397*4882a593Smuzhiyun #endif		/* CONFIG_SMP */
398*4882a593Smuzhiyun 
/*
 * Adjust the free-page counter by @nr_pages; CMA pageblocks are
 * additionally tracked via NR_FREE_CMA_PAGES.
 */
static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
					     int migratetype)
{
	__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
	if (is_migrate_cma(migratetype))
		__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
}
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun extern const char * const vmstat_text[];
408*4882a593Smuzhiyun 
/* Zone stat names occupy the start of vmstat_text[]. */
static inline const char *zone_stat_name(enum zone_stat_item item)
{
	return vmstat_text[item];
}
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun #ifdef CONFIG_NUMA
numa_stat_name(enum numa_stat_item item)415*4882a593Smuzhiyun static inline const char *numa_stat_name(enum numa_stat_item item)
416*4882a593Smuzhiyun {
417*4882a593Smuzhiyun 	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
418*4882a593Smuzhiyun 			   item];
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun #endif /* CONFIG_NUMA */
421*4882a593Smuzhiyun 
/* Node stat names follow the zone and NUMA stat names in vmstat_text[]. */
static inline const char *node_stat_name(enum node_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   item];
}
428*4882a593Smuzhiyun 
lru_list_name(enum lru_list lru)429*4882a593Smuzhiyun static inline const char *lru_list_name(enum lru_list lru)
430*4882a593Smuzhiyun {
431*4882a593Smuzhiyun 	return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
432*4882a593Smuzhiyun }
433*4882a593Smuzhiyun 
/*
 * Writeback stat names follow the zone, NUMA and node stat names in
 * vmstat_text[].
 */
static inline const char *writeback_stat_name(enum writeback_stat_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   item];
}
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun #if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
/*
 * vm event names come last in vmstat_text[], after all zone, NUMA,
 * node and writeback stat names.
 */
static inline const char *vm_event_name(enum vm_event_item item)
{
	return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
			   NR_VM_NUMA_STAT_ITEMS +
			   NR_VM_NODE_STAT_ITEMS +
			   NR_VM_WRITEBACK_STAT_ITEMS +
			   item];
}
451*4882a593Smuzhiyun #endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun #endif /* _LINUX_VMSTAT_H */
454