/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		 preferred_node; /* preferred */
		nodemask_t	 nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}
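
/*
 * Illustrative sketch (not part of this header): the duplication and
 * freeing rules described above.  mpol_dup() hands back a new object whose
 * single reference belongs to the caller, and mpol_put() drops it.  Error
 * handling is abbreviated; __mpol_dup() may hand back an ERR_PTR() on
 * allocation failure, hence the IS_ERR_OR_NULL() check.
 *
 *	struct mempolicy *copy = mpol_dup(get_task_policy(current));
 *
 *	if (!IS_ERR_OR_NULL(copy)) {
 *		... use or install 'copy' ...
 *		mpol_put(copy);
 *	}
 */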

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
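
/*
 * Illustrative sketch (not part of this header): looking up the policy that
 * covers one page of a shared object.  The tree is indexed in pages, so a
 * byte offset is converted with PAGE_SHIFT.  'sp' stands for a struct
 * shared_policy embedded in some object-private state (shmem, for example,
 * keeps one per inode); the name is only for illustration.
 *
 *	pgoff_t idx = offset >> PAGE_SHIFT;
 *	struct mempolicy *pol = mpol_shared_policy_lookup(sp, idx);
 *
 *	NULL means no policy was set for that range.  A non-NULL result is a
 *	shared policy returned with a reference held, so release it with:
 *
 *	mpol_cond_put(pol);
 */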

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);
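
/*
 * Illustrative sketch (not part of this header): the "VMA policy first,
 * then process policy" rule from the description above.  'vma' and 'addr'
 * are assumed to be in scope; a lookup that returns a shared policy holds
 * a reference, hence the mpol_cond_put() at the end.
 *
 *	struct mempolicy *pol = __get_vma_policy(vma, addr);
 *
 *	if (!pol)
 *		pol = get_task_policy(current);
 *
 *	... make placement decisions based on pol ...
 *
 *	mpol_cond_put(pol);
 */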

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	struct mempolicy *mpol = get_task_policy(current);

	return policy_nodemask(gfp, mpol);
}
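
/*
 * Illustrative sketch (not part of this header): constraining an allocation
 * with the current task's policy nodemask.  A NULL nodemask means "no
 * restriction".  __alloc_pages_nodemask() is assumed here as the page
 * allocator entry point of this kernel generation; treat the exact call as
 * an assumption rather than a fixed recipe.
 *
 *	nodemask_t *nodemask = policy_nodemask_current(gfp_mask);
 *	struct page *page = __alloc_pages_nodemask(gfp_mask, order,
 *						    numa_node_id(), nodemask);
 */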

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);
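
/*
 * Illustrative sketch (not part of this header): moving what an mm has on
 * node 0 over to node 1.  'mm' is assumed to be a valid, pinned mm_struct;
 * MPOL_MF_MOVE migrates only pages used solely by this mm, while
 * MPOL_MF_MOVE_ALL relaxes that but requires CAP_SYS_NICE.
 *
 *	nodemask_t from = nodemask_of_node(0);
 *	nodemask_t to   = nodemask_of_node(1);
 *	int ret = do_migrate_pages(mm, &from, &to, MPOL_MF_MOVE);
 */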


#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
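
/*
 * Illustrative sketch (not part of this header): round-tripping a
 * tmpfs-style policy string.  The accepted format is roughly
 * <mode>[=<flags>][:<nodelist>], e.g. "interleave:0-3"; mpol_parse_str()
 * modifies the string in place and returns 0 on success.
 *
 *	char str[] = "interleave:0-3";
 *	char buf[64];
 *	struct mempolicy *mpol;
 *
 *	if (!mpol_parse_str(str, &mpol)) {
 *		mpol_to_str(buf, sizeof(buf), mpol);
 *		mpol_put(mpol);
 *	}
 */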

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
	return NULL;
}
#endif /* CONFIG_NUMA */
#endif