xref: /OK3568_Linux_fs/kernel/include/linux/compaction.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _LINUX_COMPACTION_H
3*4882a593Smuzhiyun #define _LINUX_COMPACTION_H
4*4882a593Smuzhiyun 
/*
 * Determines how hard direct compaction should try to succeed.
 * Lower value means higher priority, analogically to reclaim priority.
 */
enum compact_priority {
	/* Fully synchronous compaction - the highest-effort mode. */
	COMPACT_PRIO_SYNC_FULL,
	MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_FULL,
	/* Synchronous compaction with reduced effort. */
	COMPACT_PRIO_SYNC_LIGHT,
	/* Costly (high-order) allocations never escalate beyond sync-light. */
	MIN_COMPACT_COSTLY_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
	/* Asynchronous compaction - lowest priority. */
	COMPACT_PRIO_ASYNC,
	/* Starting priority; callers escalate towards MIN_COMPACT_PRIORITY. */
	INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
};
18*4882a593Smuzhiyun 
/* Return values for compact_zone() and try_to_compact_pages() */
/* When adding new states, please adjust include/trace/events/compaction.h */
enum compact_result {
	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NOT_SUITABLE_ZONE,
	/*
	 * compaction didn't start as it was not possible or direct reclaim
	 * was more suitable
	 */
	COMPACT_SKIPPED,
	/* compaction didn't start as it was deferred due to past failures */
	COMPACT_DEFERRED,

	/* For more detailed tracepoint output - internal to compaction */
	COMPACT_NO_SUITABLE_PAGE,
	/* compaction should continue to another pageblock */
	COMPACT_CONTINUE,

	/*
	 * The full zone was scanned but compaction wasn't successful in
	 * compacting suitable pages.
	 */
	COMPACT_COMPLETE,
	/*
	 * direct compaction has scanned part of the zone but wasn't successful
	 * in compacting suitable pages.
	 */
	COMPACT_PARTIAL_SKIPPED,

	/* compaction terminated prematurely due to lock contention */
	COMPACT_CONTENDED,

	/*
	 * direct compaction terminated after concluding that the allocation
	 * should now succeed
	 */
	COMPACT_SUCCESS,
};
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun struct alloc_context; /* in mm/internal.h */
59*4882a593Smuzhiyun 
/*
 * Number of free order-0 pages that should be available above given watermark
 * to make sure compaction has reasonable chance of not running out of free
 * pages that it needs to isolate as migration target during its work.
 */
static inline unsigned long compact_gap(unsigned int order)
{
	/*
	 * Isolations for migration are temporary, but the free scanner may
	 * hold up to 1 << order pages on its list and then attempt to split
	 * an (order - 1) free page; a single 1 << order gap can then fall
	 * short, so demand double that. COMPACT_CLUSTER_MAX already caps
	 * what the migrate scanner can have isolated (and the free scanner
	 * only runs while isolated free pages stay below that), so a tighter
	 * formula is possible - but the extra headroom at higher orders only
	 * improves compaction's chances, so keep it simple.
	 */
	return 1UL << (order + 1);
}
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun #ifdef CONFIG_COMPACTION
/* sysctl tunables and their proc handlers for compaction. */
extern int sysctl_compact_memory;
extern unsigned int sysctl_compaction_proactiveness;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void *buffer, size_t *length, loff_t *ppos);
extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table,
		int write, void *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_compact_unevictable_allowed;

/* Fragmentation metrics and the direct-compaction entry points. */
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
		unsigned int order, unsigned int alloc_flags,
		const struct alloc_context *ac, enum compact_priority prio,
		struct page **page);
extern void reset_isolation_suitable(pg_data_t *pgdat);
extern enum compact_result compaction_suitable(struct zone *zone, int order,
		unsigned int alloc_flags, int highest_zoneidx);

/* Per-zone, per-order deferred-compaction bookkeeping. */
extern void defer_compaction(struct zone *zone, int order);
extern bool compaction_deferred(struct zone *zone, int order);
extern void compaction_defer_reset(struct zone *zone, int order,
				bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun /* Compaction has made some progress and retrying makes sense */
compaction_made_progress(enum compact_result result)110*4882a593Smuzhiyun static inline bool compaction_made_progress(enum compact_result result)
111*4882a593Smuzhiyun {
112*4882a593Smuzhiyun 	/*
113*4882a593Smuzhiyun 	 * Even though this might sound confusing this in fact tells us
114*4882a593Smuzhiyun 	 * that the compaction successfully isolated and migrated some
115*4882a593Smuzhiyun 	 * pageblocks.
116*4882a593Smuzhiyun 	 */
117*4882a593Smuzhiyun 	if (result == COMPACT_SUCCESS)
118*4882a593Smuzhiyun 		return true;
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	return false;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun /* Compaction has failed and it doesn't make much sense to keep retrying. */
compaction_failed(enum compact_result result)124*4882a593Smuzhiyun static inline bool compaction_failed(enum compact_result result)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	/* All zones were scanned completely and still not result. */
127*4882a593Smuzhiyun 	if (result == COMPACT_COMPLETE)
128*4882a593Smuzhiyun 		return true;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	return false;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun /* Compaction needs reclaim to be performed first, so it can continue. */
compaction_needs_reclaim(enum compact_result result)134*4882a593Smuzhiyun static inline bool compaction_needs_reclaim(enum compact_result result)
135*4882a593Smuzhiyun {
136*4882a593Smuzhiyun 	/*
137*4882a593Smuzhiyun 	 * Compaction backed off due to watermark checks for order-0
138*4882a593Smuzhiyun 	 * so the regular reclaim has to try harder and reclaim something.
139*4882a593Smuzhiyun 	 */
140*4882a593Smuzhiyun 	if (result == COMPACT_SKIPPED)
141*4882a593Smuzhiyun 		return true;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	return false;
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun /*
147*4882a593Smuzhiyun  * Compaction has backed off for some reason after doing some work or none
148*4882a593Smuzhiyun  * at all. It might be throttling or lock contention. Retrying might be still
149*4882a593Smuzhiyun  * worthwhile, but with a higher priority if allowed.
150*4882a593Smuzhiyun  */
compaction_withdrawn(enum compact_result result)151*4882a593Smuzhiyun static inline bool compaction_withdrawn(enum compact_result result)
152*4882a593Smuzhiyun {
153*4882a593Smuzhiyun 	/*
154*4882a593Smuzhiyun 	 * If compaction is deferred for high-order allocations, it is
155*4882a593Smuzhiyun 	 * because sync compaction recently failed. If this is the case
156*4882a593Smuzhiyun 	 * and the caller requested a THP allocation, we do not want
157*4882a593Smuzhiyun 	 * to heavily disrupt the system, so we fail the allocation
158*4882a593Smuzhiyun 	 * instead of entering direct reclaim.
159*4882a593Smuzhiyun 	 */
160*4882a593Smuzhiyun 	if (result == COMPACT_DEFERRED)
161*4882a593Smuzhiyun 		return true;
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 	/*
164*4882a593Smuzhiyun 	 * If compaction in async mode encounters contention or blocks higher
165*4882a593Smuzhiyun 	 * priority task we back off early rather than cause stalls.
166*4882a593Smuzhiyun 	 */
167*4882a593Smuzhiyun 	if (result == COMPACT_CONTENDED)
168*4882a593Smuzhiyun 		return true;
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	/*
171*4882a593Smuzhiyun 	 * Page scanners have met but we haven't scanned full zones so this
172*4882a593Smuzhiyun 	 * is a back off in fact.
173*4882a593Smuzhiyun 	 */
174*4882a593Smuzhiyun 	if (result == COMPACT_PARTIAL_SKIPPED)
175*4882a593Smuzhiyun 		return true;
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	return false;
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 
bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
					int alloc_flags);

/* Per-node kcompactd thread lifecycle and wakeup. */
extern int kcompactd_run(int nid);
extern void kcompactd_stop(int nid);
extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
extern unsigned long isolate_and_split_free_page(struct page *page,
				struct list_head *list);
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun #else
/* !CONFIG_COMPACTION: no-op stubs so callers build without compaction. */
static inline void reset_isolation_suitable(pg_data_t *pgdat)
{
}

/* With compaction compiled out, a zone is never suitable: always skip. */
static inline enum compact_result compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int highest_zoneidx)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

/* Report compaction as permanently deferred when it is compiled out. */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

/* No compaction means no progress to report. */
static inline bool compaction_made_progress(enum compact_result result)
{
	return false;
}

static inline bool compaction_failed(enum compact_result result)
{
	return false;
}

static inline bool compaction_needs_reclaim(enum compact_result result)
{
	return false;
}

/* Always report a back off so callers don't expect compaction to help. */
static inline bool compaction_withdrawn(enum compact_result result)
{
	return true;
}

/* No kcompactd thread to start; report success. */
static inline int kcompactd_run(int nid)
{
	return 0;
}
static inline void kcompactd_stop(int nid)
{
}

static inline void wakeup_kcompactd(pg_data_t *pgdat,
				int order, int highest_zoneidx)
{
}

/* No pages can be isolated without compaction support. */
static inline unsigned long isolate_and_split_free_page(struct page *page,
				struct list_head *list)
{
	return 0;
}
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun #endif /* CONFIG_COMPACTION */
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun struct node;
252*4882a593Smuzhiyun #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
253*4882a593Smuzhiyun extern int compaction_register_node(struct node *node);
254*4882a593Smuzhiyun extern void compaction_unregister_node(struct node *node);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun #else
257*4882a593Smuzhiyun 
/* Without COMPACTION+SYSFS+NUMA there are no per-node attributes to add. */
static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
266*4882a593Smuzhiyun #endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun #endif /* _LINUX_COMPACTION_H */
269