xref: /OK3568_Linux_fs/kernel/mm/mempolicy.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Simple NUMA memory policy for the Linux kernel.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2003,2004 Andi Kleen, SuSE Labs.
6*4882a593Smuzhiyun  * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * NUMA policy allows the user to give hints in which node(s) memory should
9*4882a593Smuzhiyun  * be allocated.
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * Support four policies per VMA and per process:
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * The VMA policy has priority over the process policy for a page fault.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * interleave     Allocate memory interleaved over a set of nodes,
16*4882a593Smuzhiyun  *                with normal fallback if it fails.
17*4882a593Smuzhiyun  *                For VMA based allocations this interleaves based on the
18*4882a593Smuzhiyun  *                offset into the backing object or offset into the mapping
19*4882a593Smuzhiyun  *                for anonymous memory. For process policy a process counter
20*4882a593Smuzhiyun  *                is used.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * bind           Only allocate memory on a specific set of nodes,
23*4882a593Smuzhiyun  *                no fallback.
24*4882a593Smuzhiyun  *                FIXME: memory is allocated starting with the first node
25*4882a593Smuzhiyun  *                to the last. It would be better if bind would truly restrict
26*4882a593Smuzhiyun  *                the allocation to memory nodes instead
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * preferred       Try a specific node first before normal fallback.
29*4882a593Smuzhiyun  *                As a special case NUMA_NO_NODE here means do the allocation
30*4882a593Smuzhiyun  *                on the local CPU. This is normally identical to default,
31*4882a593Smuzhiyun  *                but useful to set in a VMA when you have a non default
32*4882a593Smuzhiyun  *                process policy.
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  * default        Allocate on the local node first, or when on a VMA
35*4882a593Smuzhiyun  *                use the process policy. This is what Linux always did
36*4882a593Smuzhiyun  *		  in a NUMA aware kernel and still does by, ahem, default.
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * The process policy is applied for most non interrupt memory allocations
39*4882a593Smuzhiyun  * in that process' context. Interrupts ignore the policies and always
40*4882a593Smuzhiyun  * try to allocate on the local CPU. The VMA policy is only applied for memory
41*4882a593Smuzhiyun  * allocations for a VMA in the VM.
42*4882a593Smuzhiyun  *
43*4882a593Smuzhiyun  * Currently there are a few corner cases in swapping where the policy
44*4882a593Smuzhiyun  * is not applied, but the majority should be handled. When process policy
45*4882a593Smuzhiyun  * is used it is not remembered over swap outs/swap ins.
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  * Only the highest zone in the zone hierarchy gets policied. Allocations
48*4882a593Smuzhiyun  * requesting a lower zone just use default policy. This implies that
49*4882a593Smuzhiyun  * on systems with highmem kernel lowmem allocations don't get policied.
50*4882a593Smuzhiyun  * Same with GFP_DMA allocations.
51*4882a593Smuzhiyun  *
52*4882a593Smuzhiyun  * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53*4882a593Smuzhiyun  * all users and remembered even when nobody has memory mapped.
54*4882a593Smuzhiyun  */
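/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * the policies above are normally selected through the set_mempolicy(2) and
 * mbind(2) system calls, e.g. via the <numaif.h> wrappers from libnuma.
 * The snippet only shows the API shape; error handling is omitted and the
 * node numbers, "addr" and "length" are assumptions made for the example.
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = (1UL << 0) | (1UL << 1);
 *	unsigned long node0   = 1UL << 0;
 *
 *	// Process policy: interleave new allocations across nodes 0 and 1.
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, sizeof(nodes01) * 8);
 *
 *	// VMA policy: bind an existing mapping to node 0 only.
 *	mbind(addr, length, MPOL_BIND, &node0, sizeof(node0) * 8, 0);
 *
 *	// Back to the default policy (local allocation).
 *	set_mempolicy(MPOL_DEFAULT, NULL, 0);
 */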
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /* Notebook:
57*4882a593Smuzhiyun    fix mmap readahead to honour policy and enable policy for any page cache
58*4882a593Smuzhiyun    object
59*4882a593Smuzhiyun    statistics for bigpages
60*4882a593Smuzhiyun    global policy for page cache? currently it uses process policy. Requires
61*4882a593Smuzhiyun    first item above.
62*4882a593Smuzhiyun    handle mremap for shared memory (currently ignored for the policy)
63*4882a593Smuzhiyun    grows down?
64*4882a593Smuzhiyun    make bind policy root only? It can trigger oom much faster and the
65*4882a593Smuzhiyun    kernel is not always grateful with that.
66*4882a593Smuzhiyun */
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun #include <linux/mempolicy.h>
71*4882a593Smuzhiyun #include <linux/pagewalk.h>
72*4882a593Smuzhiyun #include <linux/highmem.h>
73*4882a593Smuzhiyun #include <linux/hugetlb.h>
74*4882a593Smuzhiyun #include <linux/kernel.h>
75*4882a593Smuzhiyun #include <linux/sched.h>
76*4882a593Smuzhiyun #include <linux/sched/mm.h>
77*4882a593Smuzhiyun #include <linux/sched/numa_balancing.h>
78*4882a593Smuzhiyun #include <linux/sched/task.h>
79*4882a593Smuzhiyun #include <linux/nodemask.h>
80*4882a593Smuzhiyun #include <linux/cpuset.h>
81*4882a593Smuzhiyun #include <linux/slab.h>
82*4882a593Smuzhiyun #include <linux/string.h>
83*4882a593Smuzhiyun #include <linux/export.h>
84*4882a593Smuzhiyun #include <linux/nsproxy.h>
85*4882a593Smuzhiyun #include <linux/interrupt.h>
86*4882a593Smuzhiyun #include <linux/init.h>
87*4882a593Smuzhiyun #include <linux/compat.h>
88*4882a593Smuzhiyun #include <linux/ptrace.h>
89*4882a593Smuzhiyun #include <linux/swap.h>
90*4882a593Smuzhiyun #include <linux/seq_file.h>
91*4882a593Smuzhiyun #include <linux/proc_fs.h>
92*4882a593Smuzhiyun #include <linux/migrate.h>
93*4882a593Smuzhiyun #include <linux/ksm.h>
94*4882a593Smuzhiyun #include <linux/rmap.h>
95*4882a593Smuzhiyun #include <linux/security.h>
96*4882a593Smuzhiyun #include <linux/syscalls.h>
97*4882a593Smuzhiyun #include <linux/ctype.h>
98*4882a593Smuzhiyun #include <linux/mm_inline.h>
99*4882a593Smuzhiyun #include <linux/mmu_notifier.h>
100*4882a593Smuzhiyun #include <linux/printk.h>
101*4882a593Smuzhiyun #include <linux/swapops.h>
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun #include <asm/tlbflush.h>
104*4882a593Smuzhiyun #include <linux/uaccess.h>
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun #include "internal.h"
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /* Internal flags */
109*4882a593Smuzhiyun #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0)	/* Skip checks for continuous vmas */
110*4882a593Smuzhiyun #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1)		/* Invert check for nodemask */
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun static struct kmem_cache *policy_cache;
113*4882a593Smuzhiyun static struct kmem_cache *sn_cache;
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /* Highest zone. A specific allocation for a zone below that is not
116*4882a593Smuzhiyun    policied. */
117*4882a593Smuzhiyun enum zone_type policy_zone = 0;
118*4882a593Smuzhiyun 
119*4882a593Smuzhiyun /*
120*4882a593Smuzhiyun  * run-time system-wide default policy => local allocation
121*4882a593Smuzhiyun  */
122*4882a593Smuzhiyun static struct mempolicy default_policy = {
123*4882a593Smuzhiyun 	.refcnt = ATOMIC_INIT(1), /* never free it */
124*4882a593Smuzhiyun 	.mode = MPOL_PREFERRED,
125*4882a593Smuzhiyun 	.flags = MPOL_F_LOCAL,
126*4882a593Smuzhiyun };
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun static struct mempolicy preferred_node_policy[MAX_NUMNODES];
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /**
131*4882a593Smuzhiyun  * numa_map_to_online_node - Find closest online node
132*4882a593Smuzhiyun  * @node: Node id to start the search
133*4882a593Smuzhiyun  *
134*4882a593Smuzhiyun  * Lookup the next closest node by distance if @node is not online.
135*4882a593Smuzhiyun  */
136*4882a593Smuzhiyun int numa_map_to_online_node(int node)
137*4882a593Smuzhiyun {
138*4882a593Smuzhiyun 	int min_dist = INT_MAX, dist, n, min_node;
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	if (node == NUMA_NO_NODE || node_online(node))
141*4882a593Smuzhiyun 		return node;
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun 	min_node = node;
144*4882a593Smuzhiyun 	for_each_online_node(n) {
145*4882a593Smuzhiyun 		dist = node_distance(node, n);
146*4882a593Smuzhiyun 		if (dist < min_dist) {
147*4882a593Smuzhiyun 			min_dist = dist;
148*4882a593Smuzhiyun 			min_node = n;
149*4882a593Smuzhiyun 		}
150*4882a593Smuzhiyun 	}
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	return min_node;
153*4882a593Smuzhiyun }
154*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(numa_map_to_online_node);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun struct mempolicy *get_task_policy(struct task_struct *p)
157*4882a593Smuzhiyun {
158*4882a593Smuzhiyun 	struct mempolicy *pol = p->mempolicy;
159*4882a593Smuzhiyun 	int node;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	if (pol)
162*4882a593Smuzhiyun 		return pol;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	node = numa_node_id();
165*4882a593Smuzhiyun 	if (node != NUMA_NO_NODE) {
166*4882a593Smuzhiyun 		pol = &preferred_node_policy[node];
167*4882a593Smuzhiyun 		/* preferred_node_policy is not initialised early in boot */
168*4882a593Smuzhiyun 		if (pol->mode)
169*4882a593Smuzhiyun 			return pol;
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	return &default_policy;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun static const struct mempolicy_operations {
176*4882a593Smuzhiyun 	int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
177*4882a593Smuzhiyun 	void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
178*4882a593Smuzhiyun } mpol_ops[MPOL_MAX];
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	return pol->flags & MPOL_MODE_FLAGS;
183*4882a593Smuzhiyun }
184*4882a593Smuzhiyun 
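/*
 * Worked example (editorial addition, node numbers are assumed): with a
 * user-supplied relative mask *orig = {0,2} and an allowed mask
 * *rel = {3,4,5}, nodes_fold() folds *orig modulo nodes_weight(*rel) = 3,
 * which leaves {0,2}, and nodes_onto() then maps those ordinals onto the
 * set bits of *rel, so *ret = {3,5}.  This is how MPOL_F_RELATIVE_NODES
 * masks are remapped onto the currently allowed nodes.
 */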
185*4882a593Smuzhiyun static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
186*4882a593Smuzhiyun 				   const nodemask_t *rel)
187*4882a593Smuzhiyun {
188*4882a593Smuzhiyun 	nodemask_t tmp;
189*4882a593Smuzhiyun 	nodes_fold(tmp, *orig, nodes_weight(*rel));
190*4882a593Smuzhiyun 	nodes_onto(*ret, tmp, *rel);
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	if (nodes_empty(*nodes))
196*4882a593Smuzhiyun 		return -EINVAL;
197*4882a593Smuzhiyun 	pol->v.nodes = *nodes;
198*4882a593Smuzhiyun 	return 0;
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun 	if (!nodes)
204*4882a593Smuzhiyun 		pol->flags |= MPOL_F_LOCAL;	/* local allocation */
205*4882a593Smuzhiyun 	else if (nodes_empty(*nodes))
206*4882a593Smuzhiyun 		return -EINVAL;			/*  no allowed nodes */
207*4882a593Smuzhiyun 	else
208*4882a593Smuzhiyun 		pol->v.preferred_node = first_node(*nodes);
209*4882a593Smuzhiyun 	return 0;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
213*4882a593Smuzhiyun {
214*4882a593Smuzhiyun 	if (nodes_empty(*nodes))
215*4882a593Smuzhiyun 		return -EINVAL;
216*4882a593Smuzhiyun 	pol->v.nodes = *nodes;
217*4882a593Smuzhiyun 	return 0;
218*4882a593Smuzhiyun }
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun /*
221*4882a593Smuzhiyun  * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
222*4882a593Smuzhiyun  * any, for the new policy.  mpol_new() has already validated the nodes
223*4882a593Smuzhiyun  * parameter with respect to the policy mode and flags.  But, we need to
224*4882a593Smuzhiyun  * handle an empty nodemask with MPOL_PREFERRED here.
225*4882a593Smuzhiyun  *
226*4882a593Smuzhiyun  * Must be called holding task's alloc_lock to protect task's mems_allowed
227*4882a593Smuzhiyun  * and mempolicy.  May also be called holding the mmap_lock for write.
228*4882a593Smuzhiyun  */
229*4882a593Smuzhiyun static int mpol_set_nodemask(struct mempolicy *pol,
230*4882a593Smuzhiyun 		     const nodemask_t *nodes, struct nodemask_scratch *nsc)
231*4882a593Smuzhiyun {
232*4882a593Smuzhiyun 	int ret;
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	/* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
235*4882a593Smuzhiyun 	if (pol == NULL)
236*4882a593Smuzhiyun 		return 0;
237*4882a593Smuzhiyun 	/* Check N_MEMORY */
238*4882a593Smuzhiyun 	nodes_and(nsc->mask1,
239*4882a593Smuzhiyun 		  cpuset_current_mems_allowed, node_states[N_MEMORY]);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	VM_BUG_ON(!nodes);
242*4882a593Smuzhiyun 	if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
243*4882a593Smuzhiyun 		nodes = NULL;	/* explicit local allocation */
244*4882a593Smuzhiyun 	else {
245*4882a593Smuzhiyun 		if (pol->flags & MPOL_F_RELATIVE_NODES)
246*4882a593Smuzhiyun 			mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
247*4882a593Smuzhiyun 		else
248*4882a593Smuzhiyun 			nodes_and(nsc->mask2, *nodes, nsc->mask1);
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 		if (mpol_store_user_nodemask(pol))
251*4882a593Smuzhiyun 			pol->w.user_nodemask = *nodes;
252*4882a593Smuzhiyun 		else
253*4882a593Smuzhiyun 			pol->w.cpuset_mems_allowed =
254*4882a593Smuzhiyun 						cpuset_current_mems_allowed;
255*4882a593Smuzhiyun 	}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	if (nodes)
258*4882a593Smuzhiyun 		ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
259*4882a593Smuzhiyun 	else
260*4882a593Smuzhiyun 		ret = mpol_ops[pol->mode].create(pol, NULL);
261*4882a593Smuzhiyun 	return ret;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun /*
265*4882a593Smuzhiyun  * This function just creates a new policy, does some checks and simple
266*4882a593Smuzhiyun  * initialization. You must invoke mpol_set_nodemask() to set nodes.
267*4882a593Smuzhiyun  */
268*4882a593Smuzhiyun static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
269*4882a593Smuzhiyun 				  nodemask_t *nodes)
270*4882a593Smuzhiyun {
271*4882a593Smuzhiyun 	struct mempolicy *policy;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	pr_debug("setting mode %d flags %d nodes[0] %lx\n",
274*4882a593Smuzhiyun 		 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	if (mode == MPOL_DEFAULT) {
277*4882a593Smuzhiyun 		if (nodes && !nodes_empty(*nodes))
278*4882a593Smuzhiyun 			return ERR_PTR(-EINVAL);
279*4882a593Smuzhiyun 		return NULL;
280*4882a593Smuzhiyun 	}
281*4882a593Smuzhiyun 	VM_BUG_ON(!nodes);
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	/*
284*4882a593Smuzhiyun 	 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
285*4882a593Smuzhiyun 	 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
286*4882a593Smuzhiyun 	 * All other modes require a valid pointer to a non-empty nodemask.
287*4882a593Smuzhiyun 	 */
288*4882a593Smuzhiyun 	if (mode == MPOL_PREFERRED) {
289*4882a593Smuzhiyun 		if (nodes_empty(*nodes)) {
290*4882a593Smuzhiyun 			if (((flags & MPOL_F_STATIC_NODES) ||
291*4882a593Smuzhiyun 			     (flags & MPOL_F_RELATIVE_NODES)))
292*4882a593Smuzhiyun 				return ERR_PTR(-EINVAL);
293*4882a593Smuzhiyun 		}
294*4882a593Smuzhiyun 	} else if (mode == MPOL_LOCAL) {
295*4882a593Smuzhiyun 		if (!nodes_empty(*nodes) ||
296*4882a593Smuzhiyun 		    (flags & MPOL_F_STATIC_NODES) ||
297*4882a593Smuzhiyun 		    (flags & MPOL_F_RELATIVE_NODES))
298*4882a593Smuzhiyun 			return ERR_PTR(-EINVAL);
299*4882a593Smuzhiyun 		mode = MPOL_PREFERRED;
300*4882a593Smuzhiyun 	} else if (nodes_empty(*nodes))
301*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
302*4882a593Smuzhiyun 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
303*4882a593Smuzhiyun 	if (!policy)
304*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
305*4882a593Smuzhiyun 	atomic_set(&policy->refcnt, 1);
306*4882a593Smuzhiyun 	policy->mode = mode;
307*4882a593Smuzhiyun 	policy->flags = flags;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	return policy;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun /* Slow path of a mpol destructor. */
313*4882a593Smuzhiyun void __mpol_put(struct mempolicy *p)
314*4882a593Smuzhiyun {
315*4882a593Smuzhiyun 	if (!atomic_dec_and_test(&p->refcnt))
316*4882a593Smuzhiyun 		return;
317*4882a593Smuzhiyun 	kmem_cache_free(policy_cache, p);
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
325*4882a593Smuzhiyun {
326*4882a593Smuzhiyun 	nodemask_t tmp;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	if (pol->flags & MPOL_F_STATIC_NODES)
329*4882a593Smuzhiyun 		nodes_and(tmp, pol->w.user_nodemask, *nodes);
330*4882a593Smuzhiyun 	else if (pol->flags & MPOL_F_RELATIVE_NODES)
331*4882a593Smuzhiyun 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
332*4882a593Smuzhiyun 	else {
333*4882a593Smuzhiyun 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
334*4882a593Smuzhiyun 								*nodes);
335*4882a593Smuzhiyun 		pol->w.cpuset_mems_allowed = *nodes;
336*4882a593Smuzhiyun 	}
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	if (nodes_empty(tmp))
339*4882a593Smuzhiyun 		tmp = *nodes;
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 	pol->v.nodes = tmp;
342*4882a593Smuzhiyun }
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun static void mpol_rebind_preferred(struct mempolicy *pol,
345*4882a593Smuzhiyun 						const nodemask_t *nodes)
346*4882a593Smuzhiyun {
347*4882a593Smuzhiyun 	nodemask_t tmp;
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun 	if (pol->flags & MPOL_F_STATIC_NODES) {
350*4882a593Smuzhiyun 		int node = first_node(pol->w.user_nodemask);
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 		if (node_isset(node, *nodes)) {
353*4882a593Smuzhiyun 			pol->v.preferred_node = node;
354*4882a593Smuzhiyun 			pol->flags &= ~MPOL_F_LOCAL;
355*4882a593Smuzhiyun 		} else
356*4882a593Smuzhiyun 			pol->flags |= MPOL_F_LOCAL;
357*4882a593Smuzhiyun 	} else if (pol->flags & MPOL_F_RELATIVE_NODES) {
358*4882a593Smuzhiyun 		mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
359*4882a593Smuzhiyun 		pol->v.preferred_node = first_node(tmp);
360*4882a593Smuzhiyun 	} else if (!(pol->flags & MPOL_F_LOCAL)) {
361*4882a593Smuzhiyun 		pol->v.preferred_node = node_remap(pol->v.preferred_node,
362*4882a593Smuzhiyun 						   pol->w.cpuset_mems_allowed,
363*4882a593Smuzhiyun 						   *nodes);
364*4882a593Smuzhiyun 		pol->w.cpuset_mems_allowed = *nodes;
365*4882a593Smuzhiyun 	}
366*4882a593Smuzhiyun }
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun /*
369*4882a593Smuzhiyun  * mpol_rebind_policy - Migrate a policy to a different set of nodes
370*4882a593Smuzhiyun  *
371*4882a593Smuzhiyun  * Per-vma policies are protected by mmap_lock. Allocations using per-task
372*4882a593Smuzhiyun  * policies are protected by task->mems_allowed_seq to prevent a premature
373*4882a593Smuzhiyun  * OOM/allocation failure due to parallel nodemask modification.
374*4882a593Smuzhiyun  */
375*4882a593Smuzhiyun static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
376*4882a593Smuzhiyun {
377*4882a593Smuzhiyun 	if (!pol || pol->mode == MPOL_LOCAL)
378*4882a593Smuzhiyun 		return;
379*4882a593Smuzhiyun 	if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
380*4882a593Smuzhiyun 	    nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
381*4882a593Smuzhiyun 		return;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	mpol_ops[pol->mode].rebind(pol, newmask);
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun /*
387*4882a593Smuzhiyun  * Wrapper for mpol_rebind_policy() that just requires task
388*4882a593Smuzhiyun  * pointer, and updates task mempolicy.
389*4882a593Smuzhiyun  *
390*4882a593Smuzhiyun  * Called with task's alloc_lock held.
391*4882a593Smuzhiyun  */
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
394*4882a593Smuzhiyun {
395*4882a593Smuzhiyun 	mpol_rebind_policy(tsk->mempolicy, new);
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun /*
399*4882a593Smuzhiyun  * Rebind each vma in mm to new nodemask.
400*4882a593Smuzhiyun  *
401*4882a593Smuzhiyun  * Call holding a reference to mm.  Takes mm->mmap_lock during call.
402*4882a593Smuzhiyun  */
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	struct vm_area_struct *vma;
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	mmap_write_lock(mm);
409*4882a593Smuzhiyun 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
410*4882a593Smuzhiyun 		vm_write_begin(vma);
411*4882a593Smuzhiyun 		mpol_rebind_policy(vma->vm_policy, new);
412*4882a593Smuzhiyun 		vm_write_end(vma);
413*4882a593Smuzhiyun 	}
414*4882a593Smuzhiyun 	mmap_write_unlock(mm);
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
418*4882a593Smuzhiyun 	[MPOL_DEFAULT] = {
419*4882a593Smuzhiyun 		.rebind = mpol_rebind_default,
420*4882a593Smuzhiyun 	},
421*4882a593Smuzhiyun 	[MPOL_INTERLEAVE] = {
422*4882a593Smuzhiyun 		.create = mpol_new_interleave,
423*4882a593Smuzhiyun 		.rebind = mpol_rebind_nodemask,
424*4882a593Smuzhiyun 	},
425*4882a593Smuzhiyun 	[MPOL_PREFERRED] = {
426*4882a593Smuzhiyun 		.create = mpol_new_preferred,
427*4882a593Smuzhiyun 		.rebind = mpol_rebind_preferred,
428*4882a593Smuzhiyun 	},
429*4882a593Smuzhiyun 	[MPOL_BIND] = {
430*4882a593Smuzhiyun 		.create = mpol_new_bind,
431*4882a593Smuzhiyun 		.rebind = mpol_rebind_nodemask,
432*4882a593Smuzhiyun 	},
433*4882a593Smuzhiyun };
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun static int migrate_page_add(struct page *page, struct list_head *pagelist,
436*4882a593Smuzhiyun 				unsigned long flags);
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun struct queue_pages {
439*4882a593Smuzhiyun 	struct list_head *pagelist;
440*4882a593Smuzhiyun 	unsigned long flags;
441*4882a593Smuzhiyun 	nodemask_t *nmask;
442*4882a593Smuzhiyun 	unsigned long start;
443*4882a593Smuzhiyun 	unsigned long end;
444*4882a593Smuzhiyun 	struct vm_area_struct *first;
445*4882a593Smuzhiyun };
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun /*
448*4882a593Smuzhiyun  * Check if the page's nid is in qp->nmask.
449*4882a593Smuzhiyun  *
450*4882a593Smuzhiyun  * If MPOL_MF_INVERT is set in qp->flags, check if the nid is
451*4882a593Smuzhiyun  * in the invert of qp->nmask.
452*4882a593Smuzhiyun  */
453*4882a593Smuzhiyun static inline bool queue_pages_required(struct page *page,
454*4882a593Smuzhiyun 					struct queue_pages *qp)
455*4882a593Smuzhiyun {
456*4882a593Smuzhiyun 	int nid = page_to_nid(page);
457*4882a593Smuzhiyun 	unsigned long flags = qp->flags;
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
460*4882a593Smuzhiyun }
461*4882a593Smuzhiyun 
462*4882a593Smuzhiyun /*
463*4882a593Smuzhiyun  * queue_pages_pmd() has four possible return values:
464*4882a593Smuzhiyun  * 0 - pages are placed on the right node or queued successfully.
465*4882a593Smuzhiyun  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
466*4882a593Smuzhiyun  *     specified.
467*4882a593Smuzhiyun  * 2 - THP was split.
468*4882a593Smuzhiyun  * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an
469*4882a593Smuzhiyun  *        existing page was already on a node that does not follow the
470*4882a593Smuzhiyun  *        policy.
471*4882a593Smuzhiyun  */
472*4882a593Smuzhiyun static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
473*4882a593Smuzhiyun 				unsigned long end, struct mm_walk *walk)
474*4882a593Smuzhiyun 	__releases(ptl)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun 	int ret = 0;
477*4882a593Smuzhiyun 	struct page *page;
478*4882a593Smuzhiyun 	struct queue_pages *qp = walk->private;
479*4882a593Smuzhiyun 	unsigned long flags;
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	if (unlikely(is_pmd_migration_entry(*pmd))) {
482*4882a593Smuzhiyun 		ret = -EIO;
483*4882a593Smuzhiyun 		goto unlock;
484*4882a593Smuzhiyun 	}
485*4882a593Smuzhiyun 	page = pmd_page(*pmd);
486*4882a593Smuzhiyun 	if (is_huge_zero_page(page)) {
487*4882a593Smuzhiyun 		spin_unlock(ptl);
488*4882a593Smuzhiyun 		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
489*4882a593Smuzhiyun 		ret = 2;
490*4882a593Smuzhiyun 		goto out;
491*4882a593Smuzhiyun 	}
492*4882a593Smuzhiyun 	if (!queue_pages_required(page, qp))
493*4882a593Smuzhiyun 		goto unlock;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	flags = qp->flags;
496*4882a593Smuzhiyun 	/* go to thp migration */
497*4882a593Smuzhiyun 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
498*4882a593Smuzhiyun 		if (!vma_migratable(walk->vma) ||
499*4882a593Smuzhiyun 		    migrate_page_add(page, qp->pagelist, flags)) {
500*4882a593Smuzhiyun 			ret = 1;
501*4882a593Smuzhiyun 			goto unlock;
502*4882a593Smuzhiyun 		}
503*4882a593Smuzhiyun 	} else
504*4882a593Smuzhiyun 		ret = -EIO;
505*4882a593Smuzhiyun unlock:
506*4882a593Smuzhiyun 	spin_unlock(ptl);
507*4882a593Smuzhiyun out:
508*4882a593Smuzhiyun 	return ret;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun /*
512*4882a593Smuzhiyun  * Scan through pages checking if pages follow certain conditions,
513*4882a593Smuzhiyun  * and move them to the pagelist if they do.
514*4882a593Smuzhiyun  *
515*4882a593Smuzhiyun  * queue_pages_pte_range() has three possible return values:
516*4882a593Smuzhiyun  * 0 - pages are placed on the right node or queued successfully.
517*4882a593Smuzhiyun  * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
518*4882a593Smuzhiyun  *     specified.
519*4882a593Smuzhiyun  * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
520*4882a593Smuzhiyun  *        on a node that does not follow the policy.
521*4882a593Smuzhiyun  */
522*4882a593Smuzhiyun static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
523*4882a593Smuzhiyun 			unsigned long end, struct mm_walk *walk)
524*4882a593Smuzhiyun {
525*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
526*4882a593Smuzhiyun 	struct page *page;
527*4882a593Smuzhiyun 	struct queue_pages *qp = walk->private;
528*4882a593Smuzhiyun 	unsigned long flags = qp->flags;
529*4882a593Smuzhiyun 	int ret;
530*4882a593Smuzhiyun 	bool has_unmovable = false;
531*4882a593Smuzhiyun 	pte_t *pte, *mapped_pte;
532*4882a593Smuzhiyun 	spinlock_t *ptl;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	ptl = pmd_trans_huge_lock(pmd, vma);
535*4882a593Smuzhiyun 	if (ptl) {
536*4882a593Smuzhiyun 		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
537*4882a593Smuzhiyun 		if (ret != 2)
538*4882a593Smuzhiyun 			return ret;
539*4882a593Smuzhiyun 	}
540*4882a593Smuzhiyun 	/* THP was split, fall through to pte walk */
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	if (pmd_trans_unstable(pmd))
543*4882a593Smuzhiyun 		return 0;
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
546*4882a593Smuzhiyun 	for (; addr != end; pte++, addr += PAGE_SIZE) {
547*4882a593Smuzhiyun 		if (!pte_present(*pte))
548*4882a593Smuzhiyun 			continue;
549*4882a593Smuzhiyun 		page = vm_normal_page(vma, addr, *pte);
550*4882a593Smuzhiyun 		if (!page)
551*4882a593Smuzhiyun 			continue;
552*4882a593Smuzhiyun 		/*
553*4882a593Smuzhiyun 		 * vm_normal_page() filters out zero pages, but there might
554*4882a593Smuzhiyun 		 * still be PageReserved pages to skip, perhaps in a VDSO.
555*4882a593Smuzhiyun 		 */
556*4882a593Smuzhiyun 		if (PageReserved(page))
557*4882a593Smuzhiyun 			continue;
558*4882a593Smuzhiyun 		if (!queue_pages_required(page, qp))
559*4882a593Smuzhiyun 			continue;
560*4882a593Smuzhiyun 		if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
561*4882a593Smuzhiyun 			/* MPOL_MF_STRICT must be specified if we get here */
562*4882a593Smuzhiyun 			if (!vma_migratable(vma)) {
563*4882a593Smuzhiyun 				has_unmovable = true;
564*4882a593Smuzhiyun 				break;
565*4882a593Smuzhiyun 			}
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 			/*
568*4882a593Smuzhiyun 			 * Do not abort immediately since there may be
569*4882a593Smuzhiyun 			 * temporary off LRU pages in the range.  Still
570*4882a593Smuzhiyun 			 * need migrate other LRU pages.
571*4882a593Smuzhiyun 			 */
572*4882a593Smuzhiyun 			if (migrate_page_add(page, qp->pagelist, flags))
573*4882a593Smuzhiyun 				has_unmovable = true;
574*4882a593Smuzhiyun 		} else
575*4882a593Smuzhiyun 			break;
576*4882a593Smuzhiyun 	}
577*4882a593Smuzhiyun 	pte_unmap_unlock(mapped_pte, ptl);
578*4882a593Smuzhiyun 	cond_resched();
579*4882a593Smuzhiyun 
580*4882a593Smuzhiyun 	if (has_unmovable)
581*4882a593Smuzhiyun 		return 1;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	return addr != end ? -EIO : 0;
584*4882a593Smuzhiyun }
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
587*4882a593Smuzhiyun 			       unsigned long addr, unsigned long end,
588*4882a593Smuzhiyun 			       struct mm_walk *walk)
589*4882a593Smuzhiyun {
590*4882a593Smuzhiyun 	int ret = 0;
591*4882a593Smuzhiyun #ifdef CONFIG_HUGETLB_PAGE
592*4882a593Smuzhiyun 	struct queue_pages *qp = walk->private;
593*4882a593Smuzhiyun 	unsigned long flags = (qp->flags & MPOL_MF_VALID);
594*4882a593Smuzhiyun 	struct page *page;
595*4882a593Smuzhiyun 	spinlock_t *ptl;
596*4882a593Smuzhiyun 	pte_t entry;
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
599*4882a593Smuzhiyun 	entry = huge_ptep_get(pte);
600*4882a593Smuzhiyun 	if (!pte_present(entry))
601*4882a593Smuzhiyun 		goto unlock;
602*4882a593Smuzhiyun 	page = pte_page(entry);
603*4882a593Smuzhiyun 	if (!queue_pages_required(page, qp))
604*4882a593Smuzhiyun 		goto unlock;
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	if (flags == MPOL_MF_STRICT) {
607*4882a593Smuzhiyun 		/*
608*4882a593Smuzhiyun 		 * STRICT alone means only detecting misplaced pages and no
609*4882a593Smuzhiyun 		 * need to further check other vmas.
610*4882a593Smuzhiyun 		 */
611*4882a593Smuzhiyun 		ret = -EIO;
612*4882a593Smuzhiyun 		goto unlock;
613*4882a593Smuzhiyun 	}
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	if (!vma_migratable(walk->vma)) {
616*4882a593Smuzhiyun 		/*
617*4882a593Smuzhiyun 		 * Must be STRICT with MOVE*, otherwise .test_walk() would have
618*4882a593Smuzhiyun 		 * stopped walking the current vma.
619*4882a593Smuzhiyun 		 * Detect misplaced pages but allow migrating pages which
620*4882a593Smuzhiyun 		 * have been queued.
621*4882a593Smuzhiyun 		 */
622*4882a593Smuzhiyun 		ret = 1;
623*4882a593Smuzhiyun 		goto unlock;
624*4882a593Smuzhiyun 	}
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 	/* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
627*4882a593Smuzhiyun 	if (flags & (MPOL_MF_MOVE_ALL) ||
628*4882a593Smuzhiyun 	    (flags & MPOL_MF_MOVE && page_mapcount(page) == 1)) {
629*4882a593Smuzhiyun 		if (!isolate_huge_page(page, qp->pagelist) &&
630*4882a593Smuzhiyun 			(flags & MPOL_MF_STRICT))
631*4882a593Smuzhiyun 			/*
632*4882a593Smuzhiyun 			 * Failed to isolate page but allow migrating pages
633*4882a593Smuzhiyun 			 * which have been queued.
634*4882a593Smuzhiyun 			 */
635*4882a593Smuzhiyun 			ret = 1;
636*4882a593Smuzhiyun 	}
637*4882a593Smuzhiyun unlock:
638*4882a593Smuzhiyun 	spin_unlock(ptl);
639*4882a593Smuzhiyun #else
640*4882a593Smuzhiyun 	BUG();
641*4882a593Smuzhiyun #endif
642*4882a593Smuzhiyun 	return ret;
643*4882a593Smuzhiyun }
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun #ifdef CONFIG_NUMA_BALANCING
646*4882a593Smuzhiyun /*
647*4882a593Smuzhiyun  * This is used to mark a range of virtual addresses to be inaccessible.
648*4882a593Smuzhiyun  * These are later cleared by a NUMA hinting fault. Depending on these
649*4882a593Smuzhiyun  * faults, pages may be migrated for better NUMA placement.
650*4882a593Smuzhiyun  *
651*4882a593Smuzhiyun  * This is assuming that NUMA faults are handled using PROT_NONE. If
652*4882a593Smuzhiyun  * an architecture makes a different choice, it will need further
653*4882a593Smuzhiyun  * changes to the core.
654*4882a593Smuzhiyun  */
655*4882a593Smuzhiyun unsigned long change_prot_numa(struct vm_area_struct *vma,
656*4882a593Smuzhiyun 			unsigned long addr, unsigned long end)
657*4882a593Smuzhiyun {
658*4882a593Smuzhiyun 	int nr_updated;
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
661*4882a593Smuzhiyun 	if (nr_updated)
662*4882a593Smuzhiyun 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	return nr_updated;
665*4882a593Smuzhiyun }
666*4882a593Smuzhiyun #else
667*4882a593Smuzhiyun static unsigned long change_prot_numa(struct vm_area_struct *vma,
668*4882a593Smuzhiyun 			unsigned long addr, unsigned long end)
669*4882a593Smuzhiyun {
670*4882a593Smuzhiyun 	return 0;
671*4882a593Smuzhiyun }
672*4882a593Smuzhiyun #endif /* CONFIG_NUMA_BALANCING */
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun static int queue_pages_test_walk(unsigned long start, unsigned long end,
675*4882a593Smuzhiyun 				struct mm_walk *walk)
676*4882a593Smuzhiyun {
677*4882a593Smuzhiyun 	struct vm_area_struct *vma = walk->vma;
678*4882a593Smuzhiyun 	struct queue_pages *qp = walk->private;
679*4882a593Smuzhiyun 	unsigned long endvma = vma->vm_end;
680*4882a593Smuzhiyun 	unsigned long flags = qp->flags;
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	/* range check first */
683*4882a593Smuzhiyun 	VM_BUG_ON_VMA((vma->vm_start > start) || (vma->vm_end < end), vma);
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 	if (!qp->first) {
686*4882a593Smuzhiyun 		qp->first = vma;
687*4882a593Smuzhiyun 		if (!(flags & MPOL_MF_DISCONTIG_OK) &&
688*4882a593Smuzhiyun 			(qp->start < vma->vm_start))
689*4882a593Smuzhiyun 			/* hole at head side of range */
690*4882a593Smuzhiyun 			return -EFAULT;
691*4882a593Smuzhiyun 	}
692*4882a593Smuzhiyun 	if (!(flags & MPOL_MF_DISCONTIG_OK) &&
693*4882a593Smuzhiyun 		((vma->vm_end < qp->end) &&
694*4882a593Smuzhiyun 		(!vma->vm_next || vma->vm_end < vma->vm_next->vm_start)))
695*4882a593Smuzhiyun 		/* hole at middle or tail of range */
696*4882a593Smuzhiyun 		return -EFAULT;
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	/*
699*4882a593Smuzhiyun 	 * Need check MPOL_MF_STRICT to return -EIO if possible
700*4882a593Smuzhiyun 	 * regardless of vma_migratable
701*4882a593Smuzhiyun 	 */
702*4882a593Smuzhiyun 	if (!vma_migratable(vma) &&
703*4882a593Smuzhiyun 	    !(flags & MPOL_MF_STRICT))
704*4882a593Smuzhiyun 		return 1;
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	if (endvma > end)
707*4882a593Smuzhiyun 		endvma = end;
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun 	if (flags & MPOL_MF_LAZY) {
710*4882a593Smuzhiyun 		/* Similar to task_numa_work, skip inaccessible VMAs */
711*4882a593Smuzhiyun 		if (!is_vm_hugetlb_page(vma) && vma_is_accessible(vma) &&
712*4882a593Smuzhiyun 			!(vma->vm_flags & VM_MIXEDMAP))
713*4882a593Smuzhiyun 			change_prot_numa(vma, start, endvma);
714*4882a593Smuzhiyun 		return 1;
715*4882a593Smuzhiyun 	}
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	/* queue pages from current vma */
718*4882a593Smuzhiyun 	if (flags & MPOL_MF_VALID)
719*4882a593Smuzhiyun 		return 0;
720*4882a593Smuzhiyun 	return 1;
721*4882a593Smuzhiyun }
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun static const struct mm_walk_ops queue_pages_walk_ops = {
724*4882a593Smuzhiyun 	.hugetlb_entry		= queue_pages_hugetlb,
725*4882a593Smuzhiyun 	.pmd_entry		= queue_pages_pte_range,
726*4882a593Smuzhiyun 	.test_walk		= queue_pages_test_walk,
727*4882a593Smuzhiyun };
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun /*
730*4882a593Smuzhiyun  * Walk through page tables and collect pages to be migrated.
731*4882a593Smuzhiyun  *
732*4882a593Smuzhiyun  * If pages found in a given range are on a set of nodes (determined by
733*4882a593Smuzhiyun  * @nodes and @flags), they are isolated and queued to the pagelist which is
734*4882a593Smuzhiyun  * passed via @private.
735*4882a593Smuzhiyun  *
736*4882a593Smuzhiyun  * queue_pages_range() has three possible return values:
737*4882a593Smuzhiyun  * 1 - there is an unmovable page, but MPOL_MF_MOVE* & MPOL_MF_STRICT were
738*4882a593Smuzhiyun  *     specified.
739*4882a593Smuzhiyun  * 0 - queue pages successfully or no misplaced page.
740*4882a593Smuzhiyun  * errno - i.e. misplaced pages with MPOL_MF_STRICT specified (-EIO) or
741*4882a593Smuzhiyun  *         memory range specified by nodemask and maxnode points outside
742*4882a593Smuzhiyun  *         your accessible address space (-EFAULT)
743*4882a593Smuzhiyun  */
744*4882a593Smuzhiyun static int
745*4882a593Smuzhiyun queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
746*4882a593Smuzhiyun 		nodemask_t *nodes, unsigned long flags,
747*4882a593Smuzhiyun 		struct list_head *pagelist)
748*4882a593Smuzhiyun {
749*4882a593Smuzhiyun 	int err;
750*4882a593Smuzhiyun 	struct queue_pages qp = {
751*4882a593Smuzhiyun 		.pagelist = pagelist,
752*4882a593Smuzhiyun 		.flags = flags,
753*4882a593Smuzhiyun 		.nmask = nodes,
754*4882a593Smuzhiyun 		.start = start,
755*4882a593Smuzhiyun 		.end = end,
756*4882a593Smuzhiyun 		.first = NULL,
757*4882a593Smuzhiyun 	};
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp);
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun 	if (!qp.first)
762*4882a593Smuzhiyun 		/* whole range in hole */
763*4882a593Smuzhiyun 		err = -EFAULT;
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	return err;
766*4882a593Smuzhiyun }
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun /*
769*4882a593Smuzhiyun  * Apply policy to a single VMA
770*4882a593Smuzhiyun  * This must be called with the mmap_lock held for writing.
771*4882a593Smuzhiyun  */
772*4882a593Smuzhiyun static int vma_replace_policy(struct vm_area_struct *vma,
773*4882a593Smuzhiyun 						struct mempolicy *pol)
774*4882a593Smuzhiyun {
775*4882a593Smuzhiyun 	int err;
776*4882a593Smuzhiyun 	struct mempolicy *old;
777*4882a593Smuzhiyun 	struct mempolicy *new;
778*4882a593Smuzhiyun 
779*4882a593Smuzhiyun 	pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
780*4882a593Smuzhiyun 		 vma->vm_start, vma->vm_end, vma->vm_pgoff,
781*4882a593Smuzhiyun 		 vma->vm_ops, vma->vm_file,
782*4882a593Smuzhiyun 		 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	new = mpol_dup(pol);
785*4882a593Smuzhiyun 	if (IS_ERR(new))
786*4882a593Smuzhiyun 		return PTR_ERR(new);
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	vm_write_begin(vma);
789*4882a593Smuzhiyun 	if (vma->vm_ops && vma->vm_ops->set_policy) {
790*4882a593Smuzhiyun 		err = vma->vm_ops->set_policy(vma, new);
791*4882a593Smuzhiyun 		if (err)
792*4882a593Smuzhiyun 			goto err_out;
793*4882a593Smuzhiyun 	}
794*4882a593Smuzhiyun 
795*4882a593Smuzhiyun 	old = vma->vm_policy;
796*4882a593Smuzhiyun 	/*
797*4882a593Smuzhiyun 	 * The speculative page fault handler accesses this field without
798*4882a593Smuzhiyun 	 * holding the mmap_sem.
799*4882a593Smuzhiyun 	 */
800*4882a593Smuzhiyun 	WRITE_ONCE(vma->vm_policy, new);
801*4882a593Smuzhiyun 	vm_write_end(vma);
802*4882a593Smuzhiyun 	mpol_put(old);
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun 	return 0;
805*4882a593Smuzhiyun  err_out:
806*4882a593Smuzhiyun 	vm_write_end(vma);
807*4882a593Smuzhiyun 	mpol_put(new);
808*4882a593Smuzhiyun 	return err;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun 
811*4882a593Smuzhiyun /* Step 2: apply policy to a range and do splits. */
812*4882a593Smuzhiyun static int mbind_range(struct mm_struct *mm, unsigned long start,
813*4882a593Smuzhiyun 		       unsigned long end, struct mempolicy *new_pol)
814*4882a593Smuzhiyun {
815*4882a593Smuzhiyun 	struct vm_area_struct *prev;
816*4882a593Smuzhiyun 	struct vm_area_struct *vma;
817*4882a593Smuzhiyun 	int err = 0;
818*4882a593Smuzhiyun 	pgoff_t pgoff;
819*4882a593Smuzhiyun 	unsigned long vmstart;
820*4882a593Smuzhiyun 	unsigned long vmend;
821*4882a593Smuzhiyun 
822*4882a593Smuzhiyun 	vma = find_vma(mm, start);
823*4882a593Smuzhiyun 	VM_BUG_ON(!vma);
824*4882a593Smuzhiyun 
825*4882a593Smuzhiyun 	prev = vma->vm_prev;
826*4882a593Smuzhiyun 	if (start > vma->vm_start)
827*4882a593Smuzhiyun 		prev = vma;
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	for (; vma && vma->vm_start < end; prev = vma, vma = vma->vm_next) {
830*4882a593Smuzhiyun 		vmstart = max(start, vma->vm_start);
831*4882a593Smuzhiyun 		vmend   = min(end, vma->vm_end);
832*4882a593Smuzhiyun 
833*4882a593Smuzhiyun 		if (mpol_equal(vma_policy(vma), new_pol))
834*4882a593Smuzhiyun 			continue;
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 		pgoff = vma->vm_pgoff +
837*4882a593Smuzhiyun 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
838*4882a593Smuzhiyun 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
839*4882a593Smuzhiyun 				 vma->anon_vma, vma->vm_file, pgoff,
840*4882a593Smuzhiyun 				 new_pol, vma->vm_userfaultfd_ctx,
841*4882a593Smuzhiyun 				 vma_get_anon_name(vma));
842*4882a593Smuzhiyun 		if (prev) {
843*4882a593Smuzhiyun 			vma = prev;
844*4882a593Smuzhiyun 			goto replace;
845*4882a593Smuzhiyun 		}
846*4882a593Smuzhiyun 		if (vma->vm_start != vmstart) {
847*4882a593Smuzhiyun 			err = split_vma(vma->vm_mm, vma, vmstart, 1);
848*4882a593Smuzhiyun 			if (err)
849*4882a593Smuzhiyun 				goto out;
850*4882a593Smuzhiyun 		}
851*4882a593Smuzhiyun 		if (vma->vm_end != vmend) {
852*4882a593Smuzhiyun 			err = split_vma(vma->vm_mm, vma, vmend, 0);
853*4882a593Smuzhiyun 			if (err)
854*4882a593Smuzhiyun 				goto out;
855*4882a593Smuzhiyun 		}
856*4882a593Smuzhiyun  replace:
857*4882a593Smuzhiyun 		err = vma_replace_policy(vma, new_pol);
858*4882a593Smuzhiyun 		if (err)
859*4882a593Smuzhiyun 			goto out;
860*4882a593Smuzhiyun 	}
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun  out:
863*4882a593Smuzhiyun 	return err;
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun 
866*4882a593Smuzhiyun /* Set the process memory policy */
867*4882a593Smuzhiyun static long do_set_mempolicy(unsigned short mode, unsigned short flags,
868*4882a593Smuzhiyun 			     nodemask_t *nodes)
869*4882a593Smuzhiyun {
870*4882a593Smuzhiyun 	struct mempolicy *new, *old;
871*4882a593Smuzhiyun 	NODEMASK_SCRATCH(scratch);
872*4882a593Smuzhiyun 	int ret;
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	if (!scratch)
875*4882a593Smuzhiyun 		return -ENOMEM;
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	new = mpol_new(mode, flags, nodes);
878*4882a593Smuzhiyun 	if (IS_ERR(new)) {
879*4882a593Smuzhiyun 		ret = PTR_ERR(new);
880*4882a593Smuzhiyun 		goto out;
881*4882a593Smuzhiyun 	}
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 	ret = mpol_set_nodemask(new, nodes, scratch);
884*4882a593Smuzhiyun 	if (ret) {
885*4882a593Smuzhiyun 		mpol_put(new);
886*4882a593Smuzhiyun 		goto out;
887*4882a593Smuzhiyun 	}
888*4882a593Smuzhiyun 	task_lock(current);
889*4882a593Smuzhiyun 	old = current->mempolicy;
890*4882a593Smuzhiyun 	current->mempolicy = new;
891*4882a593Smuzhiyun 	if (new && new->mode == MPOL_INTERLEAVE)
892*4882a593Smuzhiyun 		current->il_prev = MAX_NUMNODES-1;
893*4882a593Smuzhiyun 	task_unlock(current);
894*4882a593Smuzhiyun 	mpol_put(old);
895*4882a593Smuzhiyun 	ret = 0;
896*4882a593Smuzhiyun out:
897*4882a593Smuzhiyun 	NODEMASK_SCRATCH_FREE(scratch);
898*4882a593Smuzhiyun 	return ret;
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun 
901*4882a593Smuzhiyun /*
902*4882a593Smuzhiyun  * Return nodemask for policy for get_mempolicy() query
903*4882a593Smuzhiyun  *
904*4882a593Smuzhiyun  * Called with task's alloc_lock held
905*4882a593Smuzhiyun  */
906*4882a593Smuzhiyun static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun 	nodes_clear(*nodes);
909*4882a593Smuzhiyun 	if (p == &default_policy)
910*4882a593Smuzhiyun 		return;
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	switch (p->mode) {
913*4882a593Smuzhiyun 	case MPOL_BIND:
914*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
915*4882a593Smuzhiyun 		*nodes = p->v.nodes;
916*4882a593Smuzhiyun 		break;
917*4882a593Smuzhiyun 	case MPOL_PREFERRED:
918*4882a593Smuzhiyun 		if (!(p->flags & MPOL_F_LOCAL))
919*4882a593Smuzhiyun 			node_set(p->v.preferred_node, *nodes);
920*4882a593Smuzhiyun 		/* else return empty node mask for local allocation */
921*4882a593Smuzhiyun 		break;
922*4882a593Smuzhiyun 	default:
923*4882a593Smuzhiyun 		BUG();
924*4882a593Smuzhiyun 	}
925*4882a593Smuzhiyun }
926*4882a593Smuzhiyun 
927*4882a593Smuzhiyun static int lookup_node(struct mm_struct *mm, unsigned long addr)
928*4882a593Smuzhiyun {
929*4882a593Smuzhiyun 	struct page *p = NULL;
930*4882a593Smuzhiyun 	int err;
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	int locked = 1;
933*4882a593Smuzhiyun 	err = get_user_pages_locked(addr & PAGE_MASK, 1, 0, &p, &locked);
934*4882a593Smuzhiyun 	if (err > 0) {
935*4882a593Smuzhiyun 		err = page_to_nid(p);
936*4882a593Smuzhiyun 		put_page(p);
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 	if (locked)
939*4882a593Smuzhiyun 		mmap_read_unlock(mm);
940*4882a593Smuzhiyun 	return err;
941*4882a593Smuzhiyun }
942*4882a593Smuzhiyun 
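/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * do_get_mempolicy() below backs the get_mempolicy(2) system call.  A query
 * might look like the following, where "buf" is an assumed mapping address
 * used only for the example:
 *
 *	#include <numaif.h>
 *
 *	int mode, nid;
 *	unsigned long nodes = 0;
 *
 *	// Which policy and nodemask govern this process?
 *	get_mempolicy(&mode, &nodes, sizeof(nodes) * 8, NULL, 0);
 *
 *	// On which node does the page backing "buf" currently reside?
 *	get_mempolicy(&nid, NULL, 0, buf, MPOL_F_NODE | MPOL_F_ADDR);
 */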
943*4882a593Smuzhiyun /* Retrieve NUMA policy */
944*4882a593Smuzhiyun static long do_get_mempolicy(int *policy, nodemask_t *nmask,
945*4882a593Smuzhiyun 			     unsigned long addr, unsigned long flags)
946*4882a593Smuzhiyun {
947*4882a593Smuzhiyun 	int err;
948*4882a593Smuzhiyun 	struct mm_struct *mm = current->mm;
949*4882a593Smuzhiyun 	struct vm_area_struct *vma = NULL;
950*4882a593Smuzhiyun 	struct mempolicy *pol = current->mempolicy, *pol_refcount = NULL;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	if (flags &
953*4882a593Smuzhiyun 		~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
954*4882a593Smuzhiyun 		return -EINVAL;
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	if (flags & MPOL_F_MEMS_ALLOWED) {
957*4882a593Smuzhiyun 		if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
958*4882a593Smuzhiyun 			return -EINVAL;
959*4882a593Smuzhiyun 		*policy = 0;	/* just so it's initialized */
960*4882a593Smuzhiyun 		task_lock(current);
961*4882a593Smuzhiyun 		*nmask  = cpuset_current_mems_allowed;
962*4882a593Smuzhiyun 		task_unlock(current);
963*4882a593Smuzhiyun 		return 0;
964*4882a593Smuzhiyun 	}
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 	if (flags & MPOL_F_ADDR) {
967*4882a593Smuzhiyun 		/*
968*4882a593Smuzhiyun 		 * Do NOT fall back to task policy if the
969*4882a593Smuzhiyun 		 * vma/shared policy at addr is NULL.  We
970*4882a593Smuzhiyun 		 * want to return MPOL_DEFAULT in this case.
971*4882a593Smuzhiyun 		 */
972*4882a593Smuzhiyun 		mmap_read_lock(mm);
973*4882a593Smuzhiyun 		vma = find_vma_intersection(mm, addr, addr+1);
974*4882a593Smuzhiyun 		if (!vma) {
975*4882a593Smuzhiyun 			mmap_read_unlock(mm);
976*4882a593Smuzhiyun 			return -EFAULT;
977*4882a593Smuzhiyun 		}
978*4882a593Smuzhiyun 		if (vma->vm_ops && vma->vm_ops->get_policy)
979*4882a593Smuzhiyun 			pol = vma->vm_ops->get_policy(vma, addr);
980*4882a593Smuzhiyun 		else
981*4882a593Smuzhiyun 			pol = vma->vm_policy;
982*4882a593Smuzhiyun 	} else if (addr)
983*4882a593Smuzhiyun 		return -EINVAL;
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	if (!pol)
986*4882a593Smuzhiyun 		pol = &default_policy;	/* indicates default behavior */
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun 	if (flags & MPOL_F_NODE) {
989*4882a593Smuzhiyun 		if (flags & MPOL_F_ADDR) {
990*4882a593Smuzhiyun 			/*
991*4882a593Smuzhiyun 			 * Take a refcount on the mpol, lookup_node()
992*4882a593Smuzhiyun 			 * will drop the mmap_lock, so after calling
993*4882a593Smuzhiyun 			 * lookup_node() only "pol" remains valid, "vma"
994*4882a593Smuzhiyun 			 * is stale.
995*4882a593Smuzhiyun 			 */
996*4882a593Smuzhiyun 			pol_refcount = pol;
997*4882a593Smuzhiyun 			vma = NULL;
998*4882a593Smuzhiyun 			mpol_get(pol);
999*4882a593Smuzhiyun 			err = lookup_node(mm, addr);
1000*4882a593Smuzhiyun 			if (err < 0)
1001*4882a593Smuzhiyun 				goto out;
1002*4882a593Smuzhiyun 			*policy = err;
1003*4882a593Smuzhiyun 		} else if (pol == current->mempolicy &&
1004*4882a593Smuzhiyun 				pol->mode == MPOL_INTERLEAVE) {
1005*4882a593Smuzhiyun 			*policy = next_node_in(current->il_prev, pol->v.nodes);
1006*4882a593Smuzhiyun 		} else {
1007*4882a593Smuzhiyun 			err = -EINVAL;
1008*4882a593Smuzhiyun 			goto out;
1009*4882a593Smuzhiyun 		}
1010*4882a593Smuzhiyun 	} else {
1011*4882a593Smuzhiyun 		*policy = pol == &default_policy ? MPOL_DEFAULT :
1012*4882a593Smuzhiyun 						pol->mode;
1013*4882a593Smuzhiyun 		/*
1014*4882a593Smuzhiyun 		 * Internal mempolicy flags must be masked off before exposing
1015*4882a593Smuzhiyun 		 * the policy to userspace.
1016*4882a593Smuzhiyun 		 */
1017*4882a593Smuzhiyun 		*policy |= (pol->flags & MPOL_MODE_FLAGS);
1018*4882a593Smuzhiyun 	}
1019*4882a593Smuzhiyun 
1020*4882a593Smuzhiyun 	err = 0;
1021*4882a593Smuzhiyun 	if (nmask) {
1022*4882a593Smuzhiyun 		if (mpol_store_user_nodemask(pol)) {
1023*4882a593Smuzhiyun 			*nmask = pol->w.user_nodemask;
1024*4882a593Smuzhiyun 		} else {
1025*4882a593Smuzhiyun 			task_lock(current);
1026*4882a593Smuzhiyun 			get_policy_nodemask(pol, nmask);
1027*4882a593Smuzhiyun 			task_unlock(current);
1028*4882a593Smuzhiyun 		}
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun  out:
1032*4882a593Smuzhiyun 	mpol_cond_put(pol);
1033*4882a593Smuzhiyun 	if (vma)
1034*4882a593Smuzhiyun 		mmap_read_unlock(mm);
1035*4882a593Smuzhiyun 	if (pol_refcount)
1036*4882a593Smuzhiyun 		mpol_put(pol_refcount);
1037*4882a593Smuzhiyun 	return err;
1038*4882a593Smuzhiyun }
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun #ifdef CONFIG_MIGRATION
1041*4882a593Smuzhiyun /*
1042*4882a593Smuzhiyun  * page migration, thp tail pages can be passed.
1043*4882a593Smuzhiyun  */
1044*4882a593Smuzhiyun static int migrate_page_add(struct page *page, struct list_head *pagelist,
1045*4882a593Smuzhiyun 				unsigned long flags)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	struct page *head = compound_head(page);
1048*4882a593Smuzhiyun 	/*
1049*4882a593Smuzhiyun 	 * Avoid migrating a page that is shared with others.
1050*4882a593Smuzhiyun 	 */
1051*4882a593Smuzhiyun 	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
1052*4882a593Smuzhiyun 		if (!isolate_lru_page(head)) {
1053*4882a593Smuzhiyun 			list_add_tail(&head->lru, pagelist);
1054*4882a593Smuzhiyun 			mod_node_page_state(page_pgdat(head),
1055*4882a593Smuzhiyun 				NR_ISOLATED_ANON + page_is_file_lru(head),
1056*4882a593Smuzhiyun 				thp_nr_pages(head));
1057*4882a593Smuzhiyun 		} else if (flags & MPOL_MF_STRICT) {
1058*4882a593Smuzhiyun 			/*
1059*4882a593Smuzhiyun 			 * Non-movable page may reach here.  And, there may be
1060*4882a593Smuzhiyun 			 * temporary off LRU pages or non-LRU movable pages.
1061*4882a593Smuzhiyun 			 * Treat them as unmovable pages since they can't be
1062*4882a593Smuzhiyun 			 * isolated, so they can't be moved at the moment.  It
1063*4882a593Smuzhiyun 			 * should return -EIO for this case too.
1064*4882a593Smuzhiyun 			 */
1065*4882a593Smuzhiyun 			return -EIO;
1066*4882a593Smuzhiyun 		}
1067*4882a593Smuzhiyun 	}
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	return 0;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun /*
1073*4882a593Smuzhiyun  * Migrate pages from one node to a target node.
1074*4882a593Smuzhiyun  * Returns error or the number of pages not migrated.
1075*4882a593Smuzhiyun  */
1076*4882a593Smuzhiyun static int migrate_to_node(struct mm_struct *mm, int source, int dest,
1077*4882a593Smuzhiyun 			   int flags)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun 	nodemask_t nmask;
1080*4882a593Smuzhiyun 	LIST_HEAD(pagelist);
1081*4882a593Smuzhiyun 	int err = 0;
1082*4882a593Smuzhiyun 	struct migration_target_control mtc = {
1083*4882a593Smuzhiyun 		.nid = dest,
1084*4882a593Smuzhiyun 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1085*4882a593Smuzhiyun 	};
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun 	nodes_clear(nmask);
1088*4882a593Smuzhiyun 	node_set(source, nmask);
1089*4882a593Smuzhiyun 
1090*4882a593Smuzhiyun 	/*
1091*4882a593Smuzhiyun 	 * This does not "check" the range but isolates all pages that
1092*4882a593Smuzhiyun 	 * need migration.  Between passing in the full user address
1093*4882a593Smuzhiyun 	 * space range and MPOL_MF_DISCONTIG_OK, this call can not fail.
1094*4882a593Smuzhiyun 	 */
1095*4882a593Smuzhiyun 	VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
1096*4882a593Smuzhiyun 	queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
1097*4882a593Smuzhiyun 			flags | MPOL_MF_DISCONTIG_OK, &pagelist);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 	if (!list_empty(&pagelist)) {
1100*4882a593Smuzhiyun 		err = migrate_pages(&pagelist, alloc_migration_target, NULL,
1101*4882a593Smuzhiyun 				(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL);
1102*4882a593Smuzhiyun 		if (err)
1103*4882a593Smuzhiyun 			putback_movable_pages(&pagelist);
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	return err;
1107*4882a593Smuzhiyun }
1108*4882a593Smuzhiyun 
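/*
 * Illustrative userspace sketch (editorial addition, not part of this file):
 * do_migrate_pages() below does the real work behind the migrate_pages(2)
 * system call.  Moving a task's pages from node 0 to node 1 might look like
 * this, where "pid" is an assumed target task:
 *
 *	#include <numaif.h>
 *
 *	unsigned long from = 1UL << 0;	// source: node 0
 *	unsigned long to   = 1UL << 1;	// destination: node 1
 *
 *	migrate_pages(pid, sizeof(from) * 8, &from, &to);
 */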
1109*4882a593Smuzhiyun /*
1110*4882a593Smuzhiyun  * Move pages between the two nodesets so as to preserve the physical
1111*4882a593Smuzhiyun  * layout as much as possible.
1112*4882a593Smuzhiyun  *
1113*4882a593Smuzhiyun  * Returns the number of pages that could not be moved.
1114*4882a593Smuzhiyun  */
1115*4882a593Smuzhiyun int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1116*4882a593Smuzhiyun 		     const nodemask_t *to, int flags)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	int busy = 0;
1119*4882a593Smuzhiyun 	int err = 0;
1120*4882a593Smuzhiyun 	nodemask_t tmp;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	lru_cache_disable();
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	mmap_read_lock(mm);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	/*
1127*4882a593Smuzhiyun 	 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1128*4882a593Smuzhiyun 	 * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
1129*4882a593Smuzhiyun 	 * bit in 'tmp', and return that <source, dest> pair for migration.
1130*4882a593Smuzhiyun 	 * The pair of nodemasks 'to' and 'from' define the map.
1131*4882a593Smuzhiyun 	 *
1132*4882a593Smuzhiyun 	 * If no pair of bits is found that way, fallback to picking some
1133*4882a593Smuzhiyun 	 * pair of 'source' and 'dest' bits that are not the same.  If the
1134*4882a593Smuzhiyun 	 * 'source' and 'dest' bits are the same, this represents a node
1135*4882a593Smuzhiyun 	 * that will be migrating to itself, so no pages need move.
1136*4882a593Smuzhiyun 	 *
1137*4882a593Smuzhiyun 	 * If no bits are left in 'tmp', or if all remaining bits left
1138*4882a593Smuzhiyun 	 * in 'tmp' correspond to the same bit in 'to', return false
1139*4882a593Smuzhiyun 	 * (nothing left to migrate).
1140*4882a593Smuzhiyun 	 *
1141*4882a593Smuzhiyun 	 * This lets us pick a pair of nodes to migrate between, such that
1142*4882a593Smuzhiyun 	 * if possible the dest node is not already occupied by some other
1143*4882a593Smuzhiyun 	 * source node, minimizing the risk of overloading the memory on a
1144*4882a593Smuzhiyun 	 * node that would happen if we migrated incoming memory to a node
1145*4882a593Smuzhiyun 	 * before migrating outgoing memory sourced from that same node.
1146*4882a593Smuzhiyun 	 *
1147*4882a593Smuzhiyun 	 * A single scan of tmp is sufficient.  As we go, we remember the
1148*4882a593Smuzhiyun 	 * most recent <s, d> pair that moved (s != d).  If we find a pair
1149*4882a593Smuzhiyun 	 * that not only moved, but what's better, moved to an empty slot
1150*4882a593Smuzhiyun 	 * (d is not set in tmp), then we break out then, with that pair.
1151*4882a593Smuzhiyun 	 * Otherwise, when we finish scanning tmp, we at least have the
1152*4882a593Smuzhiyun 	 * most recent <s, d> pair that moved.  If we get all the way through
1153*4882a593Smuzhiyun 	 * the scan of tmp without finding any node that moved, much less
1154*4882a593Smuzhiyun 	 * moved to an empty node, then there is nothing left worth migrating.
1155*4882a593Smuzhiyun 	 */
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	tmp = *from;
1158*4882a593Smuzhiyun 	while (!nodes_empty(tmp)) {
1159*4882a593Smuzhiyun 		int s, d;
1160*4882a593Smuzhiyun 		int source = NUMA_NO_NODE;
1161*4882a593Smuzhiyun 		int dest = 0;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 		for_each_node_mask(s, tmp) {
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 			/*
1166*4882a593Smuzhiyun 			 * do_migrate_pages() tries to maintain the relative
1167*4882a593Smuzhiyun 			 * node relationship of the pages established between
1168*4882a593Smuzhiyun 			 * threads and memory areas.
1169*4882a593Smuzhiyun 			 *
1170*4882a593Smuzhiyun 			 * However, if the number of source nodes is not equal to
1171*4882a593Smuzhiyun 			 * the number of destination nodes we cannot preserve
1172*4882a593Smuzhiyun 			 * this relative node relationship.  In that case, skip
1173*4882a593Smuzhiyun 			 * copying memory from a node that is in the destination
1174*4882a593Smuzhiyun 			 * mask.
1175*4882a593Smuzhiyun 			 *
1176*4882a593Smuzhiyun 			 * Example: [2,3,4] -> [3,4,5] moves everything.
1177*4882a593Smuzhiyun 			 *          [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1178*4882a593Smuzhiyun 			 */
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 			if ((nodes_weight(*from) != nodes_weight(*to)) &&
1181*4882a593Smuzhiyun 						(node_isset(s, *to)))
1182*4882a593Smuzhiyun 				continue;
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 			d = node_remap(s, *from, *to);
1185*4882a593Smuzhiyun 			if (s == d)
1186*4882a593Smuzhiyun 				continue;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 			source = s;	/* Node moved. Memorize */
1189*4882a593Smuzhiyun 			dest = d;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 			/* dest not in remaining from nodes? */
1192*4882a593Smuzhiyun 			if (!node_isset(dest, tmp))
1193*4882a593Smuzhiyun 				break;
1194*4882a593Smuzhiyun 		}
1195*4882a593Smuzhiyun 		if (source == NUMA_NO_NODE)
1196*4882a593Smuzhiyun 			break;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 		node_clear(source, tmp);
1199*4882a593Smuzhiyun 		err = migrate_to_node(mm, source, dest, flags);
1200*4882a593Smuzhiyun 		if (err > 0)
1201*4882a593Smuzhiyun 			busy += err;
1202*4882a593Smuzhiyun 		if (err < 0)
1203*4882a593Smuzhiyun 			break;
1204*4882a593Smuzhiyun 	}
1205*4882a593Smuzhiyun 	mmap_read_unlock(mm);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	lru_cache_enable();
1208*4882a593Smuzhiyun 	if (err < 0)
1209*4882a593Smuzhiyun 		return err;
1210*4882a593Smuzhiyun 	return busy;
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun }
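
/*
 * Worked example of the pair selection above (illustrative only; the node
 * numbers are hypothetical).  Assume from = {0,1} and to = {2,3}:
 *
 *	pass 1: tmp = {0,1}; s = 0 remaps to d = 2; node 2 is not in tmp,
 *		so the scan breaks early, 0 -> 2 is migrated and node 0 is
 *		cleared from tmp.
 *	pass 2: tmp = {1}; s = 1 remaps to d = 3; 1 -> 3 is migrated.
 *
 * With from = [0-7] and to = [3,4,5] (the example in the comment above),
 * source nodes 3, 4 and 5 are skipped because they already sit in the
 * destination mask, so only 0, 1, 2, 6 and 7 are migrated.
 */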
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun /*
1215*4882a593Smuzhiyun  * Allocate a new page for page migration based on vma policy.
1216*4882a593Smuzhiyun  * Start by assuming the page is mapped by the same vma as contains @start.
1217*4882a593Smuzhiyun  * Search forward from there, if not.  N.B., this assumes that the
1218*4882a593Smuzhiyun  * list of pages handed to migrate_pages()--which is how we get here--
1219*4882a593Smuzhiyun  * is in virtual address order.
1220*4882a593Smuzhiyun  */
1221*4882a593Smuzhiyun static struct page *new_page(struct page *page, unsigned long start)
1222*4882a593Smuzhiyun {
1223*4882a593Smuzhiyun 	struct vm_area_struct *vma;
1224*4882a593Smuzhiyun 	unsigned long address;
1225*4882a593Smuzhiyun 
1226*4882a593Smuzhiyun 	vma = find_vma(current->mm, start);
1227*4882a593Smuzhiyun 	while (vma) {
1228*4882a593Smuzhiyun 		address = page_address_in_vma(page, vma);
1229*4882a593Smuzhiyun 		if (address != -EFAULT)
1230*4882a593Smuzhiyun 			break;
1231*4882a593Smuzhiyun 		vma = vma->vm_next;
1232*4882a593Smuzhiyun 	}
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 	if (PageHuge(page)) {
1235*4882a593Smuzhiyun 		return alloc_huge_page_vma(page_hstate(compound_head(page)),
1236*4882a593Smuzhiyun 				vma, address);
1237*4882a593Smuzhiyun 	} else if (PageTransHuge(page)) {
1238*4882a593Smuzhiyun 		struct page *thp;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
1241*4882a593Smuzhiyun 					 HPAGE_PMD_ORDER);
1242*4882a593Smuzhiyun 		if (!thp)
1243*4882a593Smuzhiyun 			return NULL;
1244*4882a593Smuzhiyun 		prep_transhuge_page(thp);
1245*4882a593Smuzhiyun 		return thp;
1246*4882a593Smuzhiyun 	}
1247*4882a593Smuzhiyun 	/*
1248*4882a593Smuzhiyun 	 * if !vma, alloc_page_vma() will use task or system default policy
1249*4882a593Smuzhiyun 	 */
1250*4882a593Smuzhiyun 	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
1251*4882a593Smuzhiyun 			vma, address);
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun #else
1254*4882a593Smuzhiyun 
1255*4882a593Smuzhiyun static int migrate_page_add(struct page *page, struct list_head *pagelist,
1256*4882a593Smuzhiyun 				unsigned long flags)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun 	return -EIO;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1262*4882a593Smuzhiyun 		     const nodemask_t *to, int flags)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	return -ENOSYS;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun static struct page *new_page(struct page *page, unsigned long start)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	return NULL;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun #endif
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun static long do_mbind(unsigned long start, unsigned long len,
1274*4882a593Smuzhiyun 		     unsigned short mode, unsigned short mode_flags,
1275*4882a593Smuzhiyun 		     nodemask_t *nmask, unsigned long flags)
1276*4882a593Smuzhiyun {
1277*4882a593Smuzhiyun 	struct mm_struct *mm = current->mm;
1278*4882a593Smuzhiyun 	struct mempolicy *new;
1279*4882a593Smuzhiyun 	unsigned long end;
1280*4882a593Smuzhiyun 	int err;
1281*4882a593Smuzhiyun 	int ret;
1282*4882a593Smuzhiyun 	LIST_HEAD(pagelist);
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	if (flags & ~(unsigned long)MPOL_MF_VALID)
1285*4882a593Smuzhiyun 		return -EINVAL;
1286*4882a593Smuzhiyun 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1287*4882a593Smuzhiyun 		return -EPERM;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	if (start & ~PAGE_MASK)
1290*4882a593Smuzhiyun 		return -EINVAL;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 	if (mode == MPOL_DEFAULT)
1293*4882a593Smuzhiyun 		flags &= ~MPOL_MF_STRICT;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1296*4882a593Smuzhiyun 	end = start + len;
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	if (end < start)
1299*4882a593Smuzhiyun 		return -EINVAL;
1300*4882a593Smuzhiyun 	if (end == start)
1301*4882a593Smuzhiyun 		return 0;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	new = mpol_new(mode, mode_flags, nmask);
1304*4882a593Smuzhiyun 	if (IS_ERR(new))
1305*4882a593Smuzhiyun 		return PTR_ERR(new);
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	if (flags & MPOL_MF_LAZY)
1308*4882a593Smuzhiyun 		new->flags |= MPOL_F_MOF;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	/*
1311*4882a593Smuzhiyun 	 * If we are using the default policy then operation
1312*4882a593Smuzhiyun 	 * on discontinuous address spaces is okay after all
1313*4882a593Smuzhiyun 	 */
1314*4882a593Smuzhiyun 	if (!new)
1315*4882a593Smuzhiyun 		flags |= MPOL_MF_DISCONTIG_OK;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1318*4882a593Smuzhiyun 		 start, start + len, mode, mode_flags,
1319*4882a593Smuzhiyun 		 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 		lru_cache_disable();
1324*4882a593Smuzhiyun 	}
1325*4882a593Smuzhiyun 	{
1326*4882a593Smuzhiyun 		NODEMASK_SCRATCH(scratch);
1327*4882a593Smuzhiyun 		if (scratch) {
1328*4882a593Smuzhiyun 			mmap_write_lock(mm);
1329*4882a593Smuzhiyun 			err = mpol_set_nodemask(new, nmask, scratch);
1330*4882a593Smuzhiyun 			if (err)
1331*4882a593Smuzhiyun 				mmap_write_unlock(mm);
1332*4882a593Smuzhiyun 		} else
1333*4882a593Smuzhiyun 			err = -ENOMEM;
1334*4882a593Smuzhiyun 		NODEMASK_SCRATCH_FREE(scratch);
1335*4882a593Smuzhiyun 	}
1336*4882a593Smuzhiyun 	if (err)
1337*4882a593Smuzhiyun 		goto mpol_out;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	ret = queue_pages_range(mm, start, end, nmask,
1340*4882a593Smuzhiyun 			  flags | MPOL_MF_INVERT, &pagelist);
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	if (ret < 0) {
1343*4882a593Smuzhiyun 		err = ret;
1344*4882a593Smuzhiyun 		goto up_out;
1345*4882a593Smuzhiyun 	}
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	err = mbind_range(mm, start, end, new);
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	if (!err) {
1350*4882a593Smuzhiyun 		int nr_failed = 0;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 		if (!list_empty(&pagelist)) {
1353*4882a593Smuzhiyun 			WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1354*4882a593Smuzhiyun 			nr_failed = migrate_pages(&pagelist, new_page, NULL,
1355*4882a593Smuzhiyun 				start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1356*4882a593Smuzhiyun 			if (nr_failed)
1357*4882a593Smuzhiyun 				putback_movable_pages(&pagelist);
1358*4882a593Smuzhiyun 		}
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 		if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
1361*4882a593Smuzhiyun 			err = -EIO;
1362*4882a593Smuzhiyun 	} else {
1363*4882a593Smuzhiyun up_out:
1364*4882a593Smuzhiyun 		if (!list_empty(&pagelist))
1365*4882a593Smuzhiyun 			putback_movable_pages(&pagelist);
1366*4882a593Smuzhiyun 	}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	mmap_write_unlock(mm);
1369*4882a593Smuzhiyun mpol_out:
1370*4882a593Smuzhiyun 	mpol_put(new);
1371*4882a593Smuzhiyun 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
1372*4882a593Smuzhiyun 		lru_cache_enable();
1373*4882a593Smuzhiyun 	return err;
1374*4882a593Smuzhiyun }
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun /*
1377*4882a593Smuzhiyun  * User space interface with variable sized bitmaps for nodelists.
1378*4882a593Smuzhiyun  */
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun /* Copy a node mask from user space. */
1381*4882a593Smuzhiyun static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1382*4882a593Smuzhiyun 		     unsigned long maxnode)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun 	unsigned long k;
1385*4882a593Smuzhiyun 	unsigned long t;
1386*4882a593Smuzhiyun 	unsigned long nlongs;
1387*4882a593Smuzhiyun 	unsigned long endmask;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	--maxnode;
1390*4882a593Smuzhiyun 	nodes_clear(*nodes);
1391*4882a593Smuzhiyun 	if (maxnode == 0 || !nmask)
1392*4882a593Smuzhiyun 		return 0;
1393*4882a593Smuzhiyun 	if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1394*4882a593Smuzhiyun 		return -EINVAL;
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	nlongs = BITS_TO_LONGS(maxnode);
1397*4882a593Smuzhiyun 	if ((maxnode % BITS_PER_LONG) == 0)
1398*4882a593Smuzhiyun 		endmask = ~0UL;
1399*4882a593Smuzhiyun 	else
1400*4882a593Smuzhiyun 		endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	/*
1403*4882a593Smuzhiyun 	 * When the user specified more nodes than supported, just check
1404*4882a593Smuzhiyun 	 * whether the unsupported part is all zero.
1405*4882a593Smuzhiyun 	 *
1406*4882a593Smuzhiyun 	 * If maxnode has more longs than MAX_NUMNODES, check
1407*4882a593Smuzhiyun 	 * the bits in that area first, and then go on to check
1408*4882a593Smuzhiyun 	 * the remaining bits that are equal to or bigger than MAX_NUMNODES.
1409*4882a593Smuzhiyun 	 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1410*4882a593Smuzhiyun 	 */
1411*4882a593Smuzhiyun 	if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1412*4882a593Smuzhiyun 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1413*4882a593Smuzhiyun 			if (get_user(t, nmask + k))
1414*4882a593Smuzhiyun 				return -EFAULT;
1415*4882a593Smuzhiyun 			if (k == nlongs - 1) {
1416*4882a593Smuzhiyun 				if (t & endmask)
1417*4882a593Smuzhiyun 					return -EINVAL;
1418*4882a593Smuzhiyun 			} else if (t)
1419*4882a593Smuzhiyun 				return -EINVAL;
1420*4882a593Smuzhiyun 		}
1421*4882a593Smuzhiyun 		nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1422*4882a593Smuzhiyun 		endmask = ~0UL;
1423*4882a593Smuzhiyun 	}
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1426*4882a593Smuzhiyun 		unsigned long valid_mask = endmask;
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 		valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1429*4882a593Smuzhiyun 		if (get_user(t, nmask + nlongs - 1))
1430*4882a593Smuzhiyun 			return -EFAULT;
1431*4882a593Smuzhiyun 		if (t & valid_mask)
1432*4882a593Smuzhiyun 			return -EINVAL;
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1436*4882a593Smuzhiyun 		return -EFAULT;
1437*4882a593Smuzhiyun 	nodes_addr(*nodes)[nlongs-1] &= endmask;
1438*4882a593Smuzhiyun 	return 0;
1439*4882a593Smuzhiyun }
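
/*
 * Worked example of the bounds handling above, assuming a 64-bit kernel with
 * MAX_NUMNODES == 64 and a caller passing maxnode = 129 (numbers are
 * illustrative; MAX_NUMNODES is a config-dependent constant):
 *
 *	--maxnode			-> 128
 *	nlongs = BITS_TO_LONGS(128)	-> 2
 *	endmask				-> ~0UL (128 is a multiple of 64)
 *
 * Since 2 > BITS_TO_LONGS(64), the second user long (bits 64..127) must be
 * all zero or -EINVAL is returned; nlongs is then clamped to 1, endmask is
 * reset to ~0UL, and only the first long is copied into *nodes.
 */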
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun /* Copy a kernel node mask to user space */
1442*4882a593Smuzhiyun static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1443*4882a593Smuzhiyun 			      nodemask_t *nodes)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1446*4882a593Smuzhiyun 	unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	if (copy > nbytes) {
1449*4882a593Smuzhiyun 		if (copy > PAGE_SIZE)
1450*4882a593Smuzhiyun 			return -EINVAL;
1451*4882a593Smuzhiyun 		if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1452*4882a593Smuzhiyun 			return -EFAULT;
1453*4882a593Smuzhiyun 		copy = nbytes;
1454*4882a593Smuzhiyun 	}
1455*4882a593Smuzhiyun 	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun 
1458*4882a593Smuzhiyun static long kernel_mbind(unsigned long start, unsigned long len,
1459*4882a593Smuzhiyun 			 unsigned long mode, const unsigned long __user *nmask,
1460*4882a593Smuzhiyun 			 unsigned long maxnode, unsigned int flags)
1461*4882a593Smuzhiyun {
1462*4882a593Smuzhiyun 	nodemask_t nodes;
1463*4882a593Smuzhiyun 	int err;
1464*4882a593Smuzhiyun 	unsigned short mode_flags;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	start = untagged_addr(start);
1467*4882a593Smuzhiyun 	mode_flags = mode & MPOL_MODE_FLAGS;
1468*4882a593Smuzhiyun 	mode &= ~MPOL_MODE_FLAGS;
1469*4882a593Smuzhiyun 	if (mode >= MPOL_MAX)
1470*4882a593Smuzhiyun 		return -EINVAL;
1471*4882a593Smuzhiyun 	if ((mode_flags & MPOL_F_STATIC_NODES) &&
1472*4882a593Smuzhiyun 	    (mode_flags & MPOL_F_RELATIVE_NODES))
1473*4882a593Smuzhiyun 		return -EINVAL;
1474*4882a593Smuzhiyun 	err = get_nodes(&nodes, nmask, maxnode);
1475*4882a593Smuzhiyun 	if (err)
1476*4882a593Smuzhiyun 		return err;
1477*4882a593Smuzhiyun 	return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1481*4882a593Smuzhiyun 		unsigned long, mode, const unsigned long __user *, nmask,
1482*4882a593Smuzhiyun 		unsigned long, maxnode, unsigned int, flags)
1483*4882a593Smuzhiyun {
1484*4882a593Smuzhiyun 	return kernel_mbind(start, len, mode, nmask, maxnode, flags);
1485*4882a593Smuzhiyun }
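
/*
 * Illustrative user-space sketch of driving the mbind() syscall above through
 * the libnuma <numaif.h> wrapper.  Not part of this file; the node number and
 * sizes are made up, and mmap() error checking is omitted.  maxnode is passed
 * one larger than the bit count because get_nodes() decrements it first.
 *
 *	#include <numaif.h>
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = 1UL << 0;		// bind to node 0
 *	size_t len = 16 * 4096;
 *	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (mbind(buf, len, MPOL_BIND, &nodemask,
 *		  8 * sizeof(nodemask) + 1,
 *		  MPOL_MF_MOVE | MPOL_MF_STRICT))
 *		perror("mbind");
 */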
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun /* Set the process memory policy */
1488*4882a593Smuzhiyun static long kernel_set_mempolicy(int mode, const unsigned long __user *nmask,
1489*4882a593Smuzhiyun 				 unsigned long maxnode)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun 	int err;
1492*4882a593Smuzhiyun 	nodemask_t nodes;
1493*4882a593Smuzhiyun 	unsigned short flags;
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	flags = mode & MPOL_MODE_FLAGS;
1496*4882a593Smuzhiyun 	mode &= ~MPOL_MODE_FLAGS;
1497*4882a593Smuzhiyun 	if ((unsigned int)mode >= MPOL_MAX)
1498*4882a593Smuzhiyun 		return -EINVAL;
1499*4882a593Smuzhiyun 	if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1500*4882a593Smuzhiyun 		return -EINVAL;
1501*4882a593Smuzhiyun 	err = get_nodes(&nodes, nmask, maxnode);
1502*4882a593Smuzhiyun 	if (err)
1503*4882a593Smuzhiyun 		return err;
1504*4882a593Smuzhiyun 	return do_set_mempolicy(mode, flags, &nodes);
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1508*4882a593Smuzhiyun 		unsigned long, maxnode)
1509*4882a593Smuzhiyun {
1510*4882a593Smuzhiyun 	return kernel_set_mempolicy(mode, nmask, maxnode);
1511*4882a593Smuzhiyun }
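
/*
 * Illustrative user-space sketch of the syscall above via the <numaif.h>
 * wrapper (values are made up): interleave all future allocations of the
 * calling thread across nodes 0 and 1.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	unsigned long nodemask = (1UL << 0) | (1UL << 1);
 *
 *	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask,
 *			  8 * sizeof(nodemask) + 1))
 *		perror("set_mempolicy");
 */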
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun static int kernel_migrate_pages(pid_t pid, unsigned long maxnode,
1514*4882a593Smuzhiyun 				const unsigned long __user *old_nodes,
1515*4882a593Smuzhiyun 				const unsigned long __user *new_nodes)
1516*4882a593Smuzhiyun {
1517*4882a593Smuzhiyun 	struct mm_struct *mm = NULL;
1518*4882a593Smuzhiyun 	struct task_struct *task;
1519*4882a593Smuzhiyun 	nodemask_t task_nodes;
1520*4882a593Smuzhiyun 	int err;
1521*4882a593Smuzhiyun 	nodemask_t *old;
1522*4882a593Smuzhiyun 	nodemask_t *new;
1523*4882a593Smuzhiyun 	NODEMASK_SCRATCH(scratch);
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	if (!scratch)
1526*4882a593Smuzhiyun 		return -ENOMEM;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	old = &scratch->mask1;
1529*4882a593Smuzhiyun 	new = &scratch->mask2;
1530*4882a593Smuzhiyun 
1531*4882a593Smuzhiyun 	err = get_nodes(old, old_nodes, maxnode);
1532*4882a593Smuzhiyun 	if (err)
1533*4882a593Smuzhiyun 		goto out;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	err = get_nodes(new, new_nodes, maxnode);
1536*4882a593Smuzhiyun 	if (err)
1537*4882a593Smuzhiyun 		goto out;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	/* Find the mm_struct */
1540*4882a593Smuzhiyun 	rcu_read_lock();
1541*4882a593Smuzhiyun 	task = pid ? find_task_by_vpid(pid) : current;
1542*4882a593Smuzhiyun 	if (!task) {
1543*4882a593Smuzhiyun 		rcu_read_unlock();
1544*4882a593Smuzhiyun 		err = -ESRCH;
1545*4882a593Smuzhiyun 		goto out;
1546*4882a593Smuzhiyun 	}
1547*4882a593Smuzhiyun 	get_task_struct(task);
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	err = -EINVAL;
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	/*
1552*4882a593Smuzhiyun 	 * Check if this process has the right to modify the specified process.
1553*4882a593Smuzhiyun 	 * Use the regular "ptrace_may_access()" checks.
1554*4882a593Smuzhiyun 	 */
1555*4882a593Smuzhiyun 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
1556*4882a593Smuzhiyun 		rcu_read_unlock();
1557*4882a593Smuzhiyun 		err = -EPERM;
1558*4882a593Smuzhiyun 		goto out_put;
1559*4882a593Smuzhiyun 	}
1560*4882a593Smuzhiyun 	rcu_read_unlock();
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	task_nodes = cpuset_mems_allowed(task);
1563*4882a593Smuzhiyun 	/* Is the user allowed to access the target nodes? */
1564*4882a593Smuzhiyun 	if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1565*4882a593Smuzhiyun 		err = -EPERM;
1566*4882a593Smuzhiyun 		goto out_put;
1567*4882a593Smuzhiyun 	}
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	task_nodes = cpuset_mems_allowed(current);
1570*4882a593Smuzhiyun 	nodes_and(*new, *new, task_nodes);
1571*4882a593Smuzhiyun 	if (nodes_empty(*new))
1572*4882a593Smuzhiyun 		goto out_put;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	err = security_task_movememory(task);
1575*4882a593Smuzhiyun 	if (err)
1576*4882a593Smuzhiyun 		goto out_put;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	mm = get_task_mm(task);
1579*4882a593Smuzhiyun 	put_task_struct(task);
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	if (!mm) {
1582*4882a593Smuzhiyun 		err = -EINVAL;
1583*4882a593Smuzhiyun 		goto out;
1584*4882a593Smuzhiyun 	}
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	err = do_migrate_pages(mm, old, new,
1587*4882a593Smuzhiyun 		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	mmput(mm);
1590*4882a593Smuzhiyun out:
1591*4882a593Smuzhiyun 	NODEMASK_SCRATCH_FREE(scratch);
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	return err;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun out_put:
1596*4882a593Smuzhiyun 	put_task_struct(task);
1597*4882a593Smuzhiyun 	goto out;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1602*4882a593Smuzhiyun 		const unsigned long __user *, old_nodes,
1603*4882a593Smuzhiyun 		const unsigned long __user *, new_nodes)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	return kernel_migrate_pages(pid, maxnode, old_nodes, new_nodes);
1606*4882a593Smuzhiyun }
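
/*
 * Illustrative user-space sketch of the syscall above via the <numaif.h>
 * wrapper (node numbers are hypothetical): ask the kernel to move the calling
 * process' pages from node 0 to node 1.  A positive return value is the
 * number of pages that could not be moved, mirroring do_migrate_pages().
 *
 *	#include <numaif.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	unsigned long old_nodes = 1UL << 0;
 *	unsigned long new_nodes = 1UL << 1;
 *
 *	long left = migrate_pages(getpid(), 8 * sizeof(unsigned long) + 1,
 *				  &old_nodes, &new_nodes);
 *	if (left < 0)
 *		perror("migrate_pages");
 */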
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun /* Retrieve NUMA policy */
1610*4882a593Smuzhiyun static int kernel_get_mempolicy(int __user *policy,
1611*4882a593Smuzhiyun 				unsigned long __user *nmask,
1612*4882a593Smuzhiyun 				unsigned long maxnode,
1613*4882a593Smuzhiyun 				unsigned long addr,
1614*4882a593Smuzhiyun 				unsigned long flags)
1615*4882a593Smuzhiyun {
1616*4882a593Smuzhiyun 	int err;
1617*4882a593Smuzhiyun 	int pval;
1618*4882a593Smuzhiyun 	nodemask_t nodes;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	if (nmask != NULL && maxnode < nr_node_ids)
1621*4882a593Smuzhiyun 		return -EINVAL;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	addr = untagged_addr(addr);
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	err = do_get_mempolicy(&pval, &nodes, addr, flags);
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	if (err)
1628*4882a593Smuzhiyun 		return err;
1629*4882a593Smuzhiyun 
1630*4882a593Smuzhiyun 	if (policy && put_user(pval, policy))
1631*4882a593Smuzhiyun 		return -EFAULT;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	if (nmask)
1634*4882a593Smuzhiyun 		err = copy_nodes_to_user(nmask, maxnode, &nodes);
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	return err;
1637*4882a593Smuzhiyun }
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1640*4882a593Smuzhiyun 		unsigned long __user *, nmask, unsigned long, maxnode,
1641*4882a593Smuzhiyun 		unsigned long, addr, unsigned long, flags)
1642*4882a593Smuzhiyun {
1643*4882a593Smuzhiyun 	return kernel_get_mempolicy(policy, nmask, maxnode, addr, flags);
1644*4882a593Smuzhiyun }
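
/*
 * Illustrative user-space sketch of the syscall above via the <numaif.h>
 * wrapper: query the calling thread's policy.  The nodemask buffer is sized
 * generously because maxnode must cover at least nr_node_ids (see the check
 * in kernel_get_mempolicy() above); 16 longs (1024 nodes) here is just an
 * assumed upper bound for illustration.
 *
 *	#include <numaif.h>
 *	#include <stdio.h>
 *
 *	int mode;
 *	unsigned long nodes[16] = { 0 };
 *
 *	if (get_mempolicy(&mode, nodes, 8 * sizeof(nodes), NULL, 0))
 *		perror("get_mempolicy");
 *	else
 *		printf("mode %d\n", mode);
 */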
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1649*4882a593Smuzhiyun 		       compat_ulong_t __user *, nmask,
1650*4882a593Smuzhiyun 		       compat_ulong_t, maxnode,
1651*4882a593Smuzhiyun 		       compat_ulong_t, addr, compat_ulong_t, flags)
1652*4882a593Smuzhiyun {
1653*4882a593Smuzhiyun 	long err;
1654*4882a593Smuzhiyun 	unsigned long __user *nm = NULL;
1655*4882a593Smuzhiyun 	unsigned long nr_bits, alloc_size;
1656*4882a593Smuzhiyun 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
1659*4882a593Smuzhiyun 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1660*4882a593Smuzhiyun 
1661*4882a593Smuzhiyun 	if (nmask)
1662*4882a593Smuzhiyun 		nm = compat_alloc_user_space(alloc_size);
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun 	err = kernel_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1665*4882a593Smuzhiyun 
1666*4882a593Smuzhiyun 	if (!err && nmask) {
1667*4882a593Smuzhiyun 		unsigned long copy_size;
1668*4882a593Smuzhiyun 		copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1669*4882a593Smuzhiyun 		err = copy_from_user(bm, nm, copy_size);
1670*4882a593Smuzhiyun 		/* ensure entire bitmap is zeroed */
1671*4882a593Smuzhiyun 		err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1672*4882a593Smuzhiyun 		err |= compat_put_bitmap(nmask, bm, nr_bits);
1673*4882a593Smuzhiyun 	}
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	return err;
1676*4882a593Smuzhiyun }
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1679*4882a593Smuzhiyun 		       compat_ulong_t, maxnode)
1680*4882a593Smuzhiyun {
1681*4882a593Smuzhiyun 	unsigned long __user *nm = NULL;
1682*4882a593Smuzhiyun 	unsigned long nr_bits, alloc_size;
1683*4882a593Smuzhiyun 	DECLARE_BITMAP(bm, MAX_NUMNODES);
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1686*4882a593Smuzhiyun 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun 	if (nmask) {
1689*4882a593Smuzhiyun 		if (compat_get_bitmap(bm, nmask, nr_bits))
1690*4882a593Smuzhiyun 			return -EFAULT;
1691*4882a593Smuzhiyun 		nm = compat_alloc_user_space(alloc_size);
1692*4882a593Smuzhiyun 		if (copy_to_user(nm, bm, alloc_size))
1693*4882a593Smuzhiyun 			return -EFAULT;
1694*4882a593Smuzhiyun 	}
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	return kernel_set_mempolicy(mode, nm, nr_bits+1);
1697*4882a593Smuzhiyun }
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1700*4882a593Smuzhiyun 		       compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1701*4882a593Smuzhiyun 		       compat_ulong_t, maxnode, compat_ulong_t, flags)
1702*4882a593Smuzhiyun {
1703*4882a593Smuzhiyun 	unsigned long __user *nm = NULL;
1704*4882a593Smuzhiyun 	unsigned long nr_bits, alloc_size;
1705*4882a593Smuzhiyun 	nodemask_t bm;
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1708*4882a593Smuzhiyun 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	if (nmask) {
1711*4882a593Smuzhiyun 		if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1712*4882a593Smuzhiyun 			return -EFAULT;
1713*4882a593Smuzhiyun 		nm = compat_alloc_user_space(alloc_size);
1714*4882a593Smuzhiyun 		if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1715*4882a593Smuzhiyun 			return -EFAULT;
1716*4882a593Smuzhiyun 	}
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	return kernel_mbind(start, len, mode, nm, nr_bits+1, flags);
1719*4882a593Smuzhiyun }
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun COMPAT_SYSCALL_DEFINE4(migrate_pages, compat_pid_t, pid,
1722*4882a593Smuzhiyun 		       compat_ulong_t, maxnode,
1723*4882a593Smuzhiyun 		       const compat_ulong_t __user *, old_nodes,
1724*4882a593Smuzhiyun 		       const compat_ulong_t __user *, new_nodes)
1725*4882a593Smuzhiyun {
1726*4882a593Smuzhiyun 	unsigned long __user *old = NULL;
1727*4882a593Smuzhiyun 	unsigned long __user *new = NULL;
1728*4882a593Smuzhiyun 	nodemask_t tmp_mask;
1729*4882a593Smuzhiyun 	unsigned long nr_bits;
1730*4882a593Smuzhiyun 	unsigned long size;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
1733*4882a593Smuzhiyun 	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1734*4882a593Smuzhiyun 	if (old_nodes) {
1735*4882a593Smuzhiyun 		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
1736*4882a593Smuzhiyun 			return -EFAULT;
1737*4882a593Smuzhiyun 		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
1738*4882a593Smuzhiyun 		if (new_nodes)
1739*4882a593Smuzhiyun 			new = old + size / sizeof(unsigned long);
1740*4882a593Smuzhiyun 		if (copy_to_user(old, nodes_addr(tmp_mask), size))
1741*4882a593Smuzhiyun 			return -EFAULT;
1742*4882a593Smuzhiyun 	}
1743*4882a593Smuzhiyun 	if (new_nodes) {
1744*4882a593Smuzhiyun 		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
1745*4882a593Smuzhiyun 			return -EFAULT;
1746*4882a593Smuzhiyun 		if (new == NULL)
1747*4882a593Smuzhiyun 			new = compat_alloc_user_space(size);
1748*4882a593Smuzhiyun 		if (copy_to_user(new, nodes_addr(tmp_mask), size))
1749*4882a593Smuzhiyun 			return -EFAULT;
1750*4882a593Smuzhiyun 	}
1751*4882a593Smuzhiyun 	return kernel_migrate_pages(pid, nr_bits + 1, old, new);
1752*4882a593Smuzhiyun }
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun #endif /* CONFIG_COMPAT */
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun bool vma_migratable(struct vm_area_struct *vma)
1757*4882a593Smuzhiyun {
1758*4882a593Smuzhiyun 	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
1759*4882a593Smuzhiyun 		return false;
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	/*
1762*4882a593Smuzhiyun 	 * DAX device mappings require predictable access latency, so avoid
1763*4882a593Smuzhiyun 	 * incurring periodic faults.
1764*4882a593Smuzhiyun 	 */
1765*4882a593Smuzhiyun 	if (vma_is_dax(vma))
1766*4882a593Smuzhiyun 		return false;
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun 	if (is_vm_hugetlb_page(vma) &&
1769*4882a593Smuzhiyun 		!hugepage_migration_supported(hstate_vma(vma)))
1770*4882a593Smuzhiyun 		return false;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	/*
1773*4882a593Smuzhiyun 	 * Migration allocates pages in the highest zone. If we cannot
1774*4882a593Smuzhiyun 	 * do so then migration (at least from node to node) is not
1775*4882a593Smuzhiyun 	 * possible.
1776*4882a593Smuzhiyun 	 */
1777*4882a593Smuzhiyun 	if (vma->vm_file &&
1778*4882a593Smuzhiyun 		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
1779*4882a593Smuzhiyun 			< policy_zone)
1780*4882a593Smuzhiyun 		return false;
1781*4882a593Smuzhiyun 	return true;
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun 
1784*4882a593Smuzhiyun struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1785*4882a593Smuzhiyun 						unsigned long addr)
1786*4882a593Smuzhiyun {
1787*4882a593Smuzhiyun 	struct mempolicy *pol;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	if (!vma)
1790*4882a593Smuzhiyun 		return NULL;
1791*4882a593Smuzhiyun 
1792*4882a593Smuzhiyun 	if (vma->vm_ops && vma->vm_ops->get_policy)
1793*4882a593Smuzhiyun 		return vma->vm_ops->get_policy(vma, addr);
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	/*
1796*4882a593Smuzhiyun 	 * This could be called without holding the mmap_sem in the
1797*4882a593Smuzhiyun 	 * speculative page fault handler's path.
1798*4882a593Smuzhiyun 	 */
1799*4882a593Smuzhiyun 	pol = READ_ONCE(vma->vm_policy);
1800*4882a593Smuzhiyun 	if (pol) {
1801*4882a593Smuzhiyun 		/*
1802*4882a593Smuzhiyun 		 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1803*4882a593Smuzhiyun 		 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1804*4882a593Smuzhiyun 		 * count on these policies which will be dropped by
1805*4882a593Smuzhiyun 		 * mpol_cond_put() later
1806*4882a593Smuzhiyun 		 */
1807*4882a593Smuzhiyun 		if (mpol_needs_cond_ref(pol))
1808*4882a593Smuzhiyun 			mpol_get(pol);
1809*4882a593Smuzhiyun 	}
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	return pol;
1812*4882a593Smuzhiyun }
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun /*
1815*4882a593Smuzhiyun  * get_vma_policy(@vma, @addr)
1816*4882a593Smuzhiyun  * @vma: virtual memory area whose policy is sought
1817*4882a593Smuzhiyun  * @addr: address in @vma for shared policy lookup
1818*4882a593Smuzhiyun  *
1819*4882a593Smuzhiyun  * Returns effective policy for a VMA at specified address.
1820*4882a593Smuzhiyun  * Falls back to current->mempolicy or system default policy, as necessary.
1821*4882a593Smuzhiyun  * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1822*4882a593Smuzhiyun  * count--added by the get_policy() vm_op, as appropriate--to protect against
1823*4882a593Smuzhiyun  * freeing by another task.  It is the caller's responsibility to free the
1824*4882a593Smuzhiyun  * extra reference for shared policies.
1825*4882a593Smuzhiyun  */
1826*4882a593Smuzhiyun static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1827*4882a593Smuzhiyun 						unsigned long addr)
1828*4882a593Smuzhiyun {
1829*4882a593Smuzhiyun 	struct mempolicy *pol = __get_vma_policy(vma, addr);
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	if (!pol)
1832*4882a593Smuzhiyun 		pol = get_task_policy(current);
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 	return pol;
1835*4882a593Smuzhiyun }
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun bool vma_policy_mof(struct vm_area_struct *vma)
1838*4882a593Smuzhiyun {
1839*4882a593Smuzhiyun 	struct mempolicy *pol;
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	if (vma->vm_ops && vma->vm_ops->get_policy) {
1842*4882a593Smuzhiyun 		bool ret = false;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 		pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1845*4882a593Smuzhiyun 		if (pol && (pol->flags & MPOL_F_MOF))
1846*4882a593Smuzhiyun 			ret = true;
1847*4882a593Smuzhiyun 		mpol_cond_put(pol);
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 		return ret;
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	pol = vma->vm_policy;
1853*4882a593Smuzhiyun 	if (!pol)
1854*4882a593Smuzhiyun 		pol = get_task_policy(current);
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun 	return pol->flags & MPOL_F_MOF;
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1860*4882a593Smuzhiyun {
1861*4882a593Smuzhiyun 	enum zone_type dynamic_policy_zone = policy_zone;
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1864*4882a593Smuzhiyun 
1865*4882a593Smuzhiyun 	/*
1866*4882a593Smuzhiyun 	 * If policy->v.nodes only contains movable memory, we apply the
1867*4882a593Smuzhiyun 	 * policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1868*4882a593Smuzhiyun 	 *
1869*4882a593Smuzhiyun 	 * policy->v.nodes is intersected with node_states[N_MEMORY],
1870*4882a593Smuzhiyun 	 * so if the following test fails, it implies that
1871*4882a593Smuzhiyun 	 * policy->v.nodes only has movable memory.
1872*4882a593Smuzhiyun 	 */
1873*4882a593Smuzhiyun 	if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1874*4882a593Smuzhiyun 		dynamic_policy_zone = ZONE_MOVABLE;
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	return zone >= dynamic_policy_zone;
1877*4882a593Smuzhiyun }
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun /*
1880*4882a593Smuzhiyun  * Return a nodemask representing a mempolicy for filtering nodes for
1881*4882a593Smuzhiyun  * page allocation
1882*4882a593Smuzhiyun  */
1883*4882a593Smuzhiyun nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1884*4882a593Smuzhiyun {
1885*4882a593Smuzhiyun 	/* Lower zones don't get a nodemask applied for MPOL_BIND */
1886*4882a593Smuzhiyun 	if (unlikely(policy->mode == MPOL_BIND) &&
1887*4882a593Smuzhiyun 			apply_policy_zone(policy, gfp_zone(gfp)) &&
1888*4882a593Smuzhiyun 			cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1889*4882a593Smuzhiyun 		return &policy->v.nodes;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	return NULL;
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun /* Return the node id preferred by the given mempolicy, or the given id */
1895*4882a593Smuzhiyun static int policy_node(gfp_t gfp, struct mempolicy *policy, int nd)
1896*4882a593Smuzhiyun {
1897*4882a593Smuzhiyun 	if (policy->mode == MPOL_PREFERRED && !(policy->flags & MPOL_F_LOCAL))
1898*4882a593Smuzhiyun 		nd = policy->v.preferred_node;
1899*4882a593Smuzhiyun 	else {
1900*4882a593Smuzhiyun 		/*
1901*4882a593Smuzhiyun 		 * __GFP_THISNODE shouldn't even be used with the bind policy
1902*4882a593Smuzhiyun 		 * because we might easily break the expectation to stay on the
1903*4882a593Smuzhiyun 		 * requested node and not break the policy.
1904*4882a593Smuzhiyun 		 */
1905*4882a593Smuzhiyun 		WARN_ON_ONCE(policy->mode == MPOL_BIND && (gfp & __GFP_THISNODE));
1906*4882a593Smuzhiyun 	}
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	return nd;
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun 
1911*4882a593Smuzhiyun /* Do dynamic interleaving for a process */
1912*4882a593Smuzhiyun static unsigned interleave_nodes(struct mempolicy *policy)
1913*4882a593Smuzhiyun {
1914*4882a593Smuzhiyun 	unsigned next;
1915*4882a593Smuzhiyun 	struct task_struct *me = current;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	next = next_node_in(me->il_prev, policy->v.nodes);
1918*4882a593Smuzhiyun 	if (next < MAX_NUMNODES)
1919*4882a593Smuzhiyun 		me->il_prev = next;
1920*4882a593Smuzhiyun 	return next;
1921*4882a593Smuzhiyun }
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun /*
1924*4882a593Smuzhiyun  * Depending on the memory policy provide a node from which to allocate the
1925*4882a593Smuzhiyun  * next slab entry.
1926*4882a593Smuzhiyun  */
1927*4882a593Smuzhiyun unsigned int mempolicy_slab_node(void)
1928*4882a593Smuzhiyun {
1929*4882a593Smuzhiyun 	struct mempolicy *policy;
1930*4882a593Smuzhiyun 	int node = numa_mem_id();
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	if (in_interrupt())
1933*4882a593Smuzhiyun 		return node;
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun 	policy = current->mempolicy;
1936*4882a593Smuzhiyun 	if (!policy || policy->flags & MPOL_F_LOCAL)
1937*4882a593Smuzhiyun 		return node;
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	switch (policy->mode) {
1940*4882a593Smuzhiyun 	case MPOL_PREFERRED:
1941*4882a593Smuzhiyun 		/*
1942*4882a593Smuzhiyun 		 * handled MPOL_F_LOCAL above
1943*4882a593Smuzhiyun 		 */
1944*4882a593Smuzhiyun 		return policy->v.preferred_node;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
1947*4882a593Smuzhiyun 		return interleave_nodes(policy);
1948*4882a593Smuzhiyun 
1949*4882a593Smuzhiyun 	case MPOL_BIND: {
1950*4882a593Smuzhiyun 		struct zoneref *z;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 		/*
1953*4882a593Smuzhiyun 		 * Follow bind policy behavior and start allocation at the
1954*4882a593Smuzhiyun 		 * first node.
1955*4882a593Smuzhiyun 		 */
1956*4882a593Smuzhiyun 		struct zonelist *zonelist;
1957*4882a593Smuzhiyun 		enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1958*4882a593Smuzhiyun 		zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1959*4882a593Smuzhiyun 		z = first_zones_zonelist(zonelist, highest_zoneidx,
1960*4882a593Smuzhiyun 							&policy->v.nodes);
1961*4882a593Smuzhiyun 		return z->zone ? zone_to_nid(z->zone) : node;
1962*4882a593Smuzhiyun 	}
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	default:
1965*4882a593Smuzhiyun 		BUG();
1966*4882a593Smuzhiyun 	}
1967*4882a593Smuzhiyun }
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun /*
1970*4882a593Smuzhiyun  * Do static interleaving for a VMA with known offset @n.  Returns the n'th
1971*4882a593Smuzhiyun  * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1972*4882a593Smuzhiyun  * number of present nodes.
1973*4882a593Smuzhiyun  */
1974*4882a593Smuzhiyun static unsigned offset_il_node(struct mempolicy *pol, unsigned long n)
1975*4882a593Smuzhiyun {
1976*4882a593Smuzhiyun 	unsigned nnodes = nodes_weight(pol->v.nodes);
1977*4882a593Smuzhiyun 	unsigned target;
1978*4882a593Smuzhiyun 	int i;
1979*4882a593Smuzhiyun 	int nid;
1980*4882a593Smuzhiyun 
1981*4882a593Smuzhiyun 	if (!nnodes)
1982*4882a593Smuzhiyun 		return numa_node_id();
1983*4882a593Smuzhiyun 	target = (unsigned int)n % nnodes;
1984*4882a593Smuzhiyun 	nid = first_node(pol->v.nodes);
1985*4882a593Smuzhiyun 	for (i = 0; i < target; i++)
1986*4882a593Smuzhiyun 		nid = next_node(nid, pol->v.nodes);
1987*4882a593Smuzhiyun 	return nid;
1988*4882a593Smuzhiyun }
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun /* Determine a node number for interleave */
1991*4882a593Smuzhiyun static inline unsigned interleave_nid(struct mempolicy *pol,
1992*4882a593Smuzhiyun 		 struct vm_area_struct *vma, unsigned long addr, int shift)
1993*4882a593Smuzhiyun {
1994*4882a593Smuzhiyun 	if (vma) {
1995*4882a593Smuzhiyun 		unsigned long off;
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 		/*
1998*4882a593Smuzhiyun 		 * for small pages, there is no difference between
1999*4882a593Smuzhiyun 		 * shift and PAGE_SHIFT, so the bit-shift is safe.
2000*4882a593Smuzhiyun 		 * for huge pages, since vm_pgoff is in units of small
2001*4882a593Smuzhiyun 		 * pages, we need to shift off the always 0 bits to get
2002*4882a593Smuzhiyun 		 * a useful offset.
2003*4882a593Smuzhiyun 		 */
2004*4882a593Smuzhiyun 		BUG_ON(shift < PAGE_SHIFT);
2005*4882a593Smuzhiyun 		off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
2006*4882a593Smuzhiyun 		off += (addr - vma->vm_start) >> shift;
2007*4882a593Smuzhiyun 		return offset_il_node(pol, off);
2008*4882a593Smuzhiyun 	} else
2009*4882a593Smuzhiyun 		return interleave_nodes(pol);
2010*4882a593Smuzhiyun }
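
/*
 * Worked example of the interleave math above, with a hypothetical policy
 * over nodes {0,3,7} (weight 3) and shift == PAGE_SHIFT:
 *
 *	off = vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT)
 *	target = off % 3
 *
 * so successive pages of the mapping land on nodes 0, 3, 7, 0, 3, 7, ...
 * offset_il_node() walks 'target' steps from first_node() to translate the
 * ordinal into the actual node id.
 */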
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun #ifdef CONFIG_HUGETLBFS
2013*4882a593Smuzhiyun /*
2014*4882a593Smuzhiyun  * huge_node(@vma, @addr, @gfp_flags, @mpol)
2015*4882a593Smuzhiyun  * @vma: virtual memory area whose policy is sought
2016*4882a593Smuzhiyun  * @addr: address in @vma for shared policy lookup and interleave policy
2017*4882a593Smuzhiyun  * @gfp_flags: for requested zone
2018*4882a593Smuzhiyun  * @mpol: pointer to mempolicy pointer for reference counted mempolicy
2019*4882a593Smuzhiyun  * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
2020*4882a593Smuzhiyun  *
2021*4882a593Smuzhiyun  * Returns a nid suitable for a huge page allocation and a pointer
2022*4882a593Smuzhiyun  * to the struct mempolicy for conditional unref after allocation.
2023*4882a593Smuzhiyun  * If the effective policy is 'BIND', returns a pointer to the mempolicy's
2024*4882a593Smuzhiyun  * @nodemask for filtering the zonelist.
2025*4882a593Smuzhiyun  *
2026*4882a593Smuzhiyun  * Must be protected by read_mems_allowed_begin()
2027*4882a593Smuzhiyun  */
2028*4882a593Smuzhiyun int huge_node(struct vm_area_struct *vma, unsigned long addr, gfp_t gfp_flags,
2029*4882a593Smuzhiyun 				struct mempolicy **mpol, nodemask_t **nodemask)
2030*4882a593Smuzhiyun {
2031*4882a593Smuzhiyun 	int nid;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	*mpol = get_vma_policy(vma, addr);
2034*4882a593Smuzhiyun 	*nodemask = NULL;	/* assume !MPOL_BIND */
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 	if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
2037*4882a593Smuzhiyun 		nid = interleave_nid(*mpol, vma, addr,
2038*4882a593Smuzhiyun 					huge_page_shift(hstate_vma(vma)));
2039*4882a593Smuzhiyun 	} else {
2040*4882a593Smuzhiyun 		nid = policy_node(gfp_flags, *mpol, numa_node_id());
2041*4882a593Smuzhiyun 		if ((*mpol)->mode == MPOL_BIND)
2042*4882a593Smuzhiyun 			*nodemask = &(*mpol)->v.nodes;
2043*4882a593Smuzhiyun 	}
2044*4882a593Smuzhiyun 	return nid;
2045*4882a593Smuzhiyun }
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun /*
2048*4882a593Smuzhiyun  * init_nodemask_of_mempolicy
2049*4882a593Smuzhiyun  *
2050*4882a593Smuzhiyun  * If the current task's mempolicy is "default" [NULL], return 'false'
2051*4882a593Smuzhiyun  * to indicate default policy.  Otherwise, extract the policy nodemask
2052*4882a593Smuzhiyun  * for 'bind' or 'interleave' policy into the argument nodemask, or
2053*4882a593Smuzhiyun  * initialize the argument nodemask to contain the single node for
2054*4882a593Smuzhiyun  * 'preferred' or 'local' policy and return 'true' to indicate presence
2055*4882a593Smuzhiyun  * of non-default mempolicy.
2056*4882a593Smuzhiyun  *
2057*4882a593Smuzhiyun  * We don't bother with reference counting the mempolicy [mpol_get/put]
2058*4882a593Smuzhiyun  * because the current task is examining its own mempolicy and a task's
2059*4882a593Smuzhiyun  * mempolicy is only ever changed by the task itself.
2060*4882a593Smuzhiyun  *
2061*4882a593Smuzhiyun  * N.B., it is the caller's responsibility to free a returned nodemask.
2062*4882a593Smuzhiyun  */
2063*4882a593Smuzhiyun bool init_nodemask_of_mempolicy(nodemask_t *mask)
2064*4882a593Smuzhiyun {
2065*4882a593Smuzhiyun 	struct mempolicy *mempolicy;
2066*4882a593Smuzhiyun 	int nid;
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	if (!(mask && current->mempolicy))
2069*4882a593Smuzhiyun 		return false;
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun 	task_lock(current);
2072*4882a593Smuzhiyun 	mempolicy = current->mempolicy;
2073*4882a593Smuzhiyun 	switch (mempolicy->mode) {
2074*4882a593Smuzhiyun 	case MPOL_PREFERRED:
2075*4882a593Smuzhiyun 		if (mempolicy->flags & MPOL_F_LOCAL)
2076*4882a593Smuzhiyun 			nid = numa_node_id();
2077*4882a593Smuzhiyun 		else
2078*4882a593Smuzhiyun 			nid = mempolicy->v.preferred_node;
2079*4882a593Smuzhiyun 		init_nodemask_of_node(mask, nid);
2080*4882a593Smuzhiyun 		break;
2081*4882a593Smuzhiyun 
2082*4882a593Smuzhiyun 	case MPOL_BIND:
2083*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
2084*4882a593Smuzhiyun 		*mask = mempolicy->v.nodes;
2085*4882a593Smuzhiyun 		break;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	default:
2088*4882a593Smuzhiyun 		BUG();
2089*4882a593Smuzhiyun 	}
2090*4882a593Smuzhiyun 	task_unlock(current);
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	return true;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun #endif
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun /*
2097*4882a593Smuzhiyun  * mempolicy_nodemask_intersects
2098*4882a593Smuzhiyun  *
2099*4882a593Smuzhiyun  * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
2100*4882a593Smuzhiyun  * policy.  Otherwise, check for intersection between mask and the policy
2101*4882a593Smuzhiyun  * nodemask for 'bind' or 'interleave' policy.  For 'preferred' or 'local'
2102*4882a593Smuzhiyun  * policy, always return true since it may allocate elsewhere on fallback.
2103*4882a593Smuzhiyun  *
2104*4882a593Smuzhiyun  * Takes task_lock(tsk) to prevent freeing of its mempolicy.
2105*4882a593Smuzhiyun  */
2106*4882a593Smuzhiyun bool mempolicy_nodemask_intersects(struct task_struct *tsk,
2107*4882a593Smuzhiyun 					const nodemask_t *mask)
2108*4882a593Smuzhiyun {
2109*4882a593Smuzhiyun 	struct mempolicy *mempolicy;
2110*4882a593Smuzhiyun 	bool ret = true;
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	if (!mask)
2113*4882a593Smuzhiyun 		return ret;
2114*4882a593Smuzhiyun 	task_lock(tsk);
2115*4882a593Smuzhiyun 	mempolicy = tsk->mempolicy;
2116*4882a593Smuzhiyun 	if (!mempolicy)
2117*4882a593Smuzhiyun 		goto out;
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	switch (mempolicy->mode) {
2120*4882a593Smuzhiyun 	case MPOL_PREFERRED:
2121*4882a593Smuzhiyun 		/*
2122*4882a593Smuzhiyun 		 * MPOL_PREFERRED and MPOL_F_LOCAL only express preferred nodes
2123*4882a593Smuzhiyun 		 * to allocate from; they may fall back to other nodes when OOM.
2124*4882a593Smuzhiyun 		 * Thus, it's possible for tsk to have allocated memory from
2125*4882a593Smuzhiyun 		 * nodes in mask.
2126*4882a593Smuzhiyun 		 */
2127*4882a593Smuzhiyun 		break;
2128*4882a593Smuzhiyun 	case MPOL_BIND:
2129*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
2130*4882a593Smuzhiyun 		ret = nodes_intersects(mempolicy->v.nodes, *mask);
2131*4882a593Smuzhiyun 		break;
2132*4882a593Smuzhiyun 	default:
2133*4882a593Smuzhiyun 		BUG();
2134*4882a593Smuzhiyun 	}
2135*4882a593Smuzhiyun out:
2136*4882a593Smuzhiyun 	task_unlock(tsk);
2137*4882a593Smuzhiyun 	return ret;
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun /* Allocate a page using the interleave policy.
2141*4882a593Smuzhiyun    Separate path because it needs to do special accounting. */
2142*4882a593Smuzhiyun static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
2143*4882a593Smuzhiyun 					unsigned nid)
2144*4882a593Smuzhiyun {
2145*4882a593Smuzhiyun 	struct page *page;
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun 	page = __alloc_pages(gfp, order, nid);
2148*4882a593Smuzhiyun 	/* skip NUMA_INTERLEAVE_HIT counter update if numa stats is disabled */
2149*4882a593Smuzhiyun 	if (!static_branch_likely(&vm_numa_stat_key))
2150*4882a593Smuzhiyun 		return page;
2151*4882a593Smuzhiyun 	if (page && page_to_nid(page) == nid) {
2152*4882a593Smuzhiyun 		preempt_disable();
2153*4882a593Smuzhiyun 		__inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT);
2154*4882a593Smuzhiyun 		preempt_enable();
2155*4882a593Smuzhiyun 	}
2156*4882a593Smuzhiyun 	return page;
2157*4882a593Smuzhiyun }
2158*4882a593Smuzhiyun 
2159*4882a593Smuzhiyun /**
2160*4882a593Smuzhiyun  * 	alloc_pages_vma	- Allocate a page for a VMA.
2161*4882a593Smuzhiyun  *
2162*4882a593Smuzhiyun  * 	@gfp:
2163*4882a593Smuzhiyun  *      %GFP_USER    user allocation.
2164*4882a593Smuzhiyun  *      %GFP_KERNEL  kernel allocations,
2165*4882a593Smuzhiyun  *      %GFP_HIGHMEM highmem/user allocations,
2166*4882a593Smuzhiyun  *      %GFP_FS      allocation should not call back into a file system.
2167*4882a593Smuzhiyun  *      %GFP_ATOMIC  don't sleep.
2168*4882a593Smuzhiyun  *
2169*4882a593Smuzhiyun  *	@order:Order of the GFP allocation.
2170*4882a593Smuzhiyun  * 	@vma:  Pointer to VMA or NULL if not available.
2171*4882a593Smuzhiyun  *	@addr: Virtual Address of the allocation. Must be inside the VMA.
2172*4882a593Smuzhiyun  *	@node: Which node to prefer for allocation (modulo policy).
2173*4882a593Smuzhiyun  *	@hugepage: for hugepages try only the preferred node if possible
2174*4882a593Smuzhiyun  *
2175*4882a593Smuzhiyun  * 	This function allocates a page from the kernel page pool and applies
2176*4882a593Smuzhiyun  *	a NUMA policy associated with the VMA or the current process.
2177*4882a593Smuzhiyun  *	When VMA is not NULL caller must read-lock the mmap_lock of the
2178*4882a593Smuzhiyun  *	mm_struct of the VMA to prevent it from going away. Should be used for
2179*4882a593Smuzhiyun  *	all allocations for pages that will be mapped into user space. Returns
2180*4882a593Smuzhiyun  *	NULL when no page can be allocated.
2181*4882a593Smuzhiyun  */
2182*4882a593Smuzhiyun struct page *
2183*4882a593Smuzhiyun alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
2184*4882a593Smuzhiyun 		unsigned long addr, int node, bool hugepage)
2185*4882a593Smuzhiyun {
2186*4882a593Smuzhiyun 	struct mempolicy *pol;
2187*4882a593Smuzhiyun 	struct page *page;
2188*4882a593Smuzhiyun 	int preferred_nid;
2189*4882a593Smuzhiyun 	nodemask_t *nmask;
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	pol = get_vma_policy(vma, addr);
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	if (pol->mode == MPOL_INTERLEAVE) {
2194*4882a593Smuzhiyun 		unsigned nid;
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun 		nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2197*4882a593Smuzhiyun 		mpol_cond_put(pol);
2198*4882a593Smuzhiyun 		page = alloc_page_interleave(gfp, order, nid);
2199*4882a593Smuzhiyun 		goto out;
2200*4882a593Smuzhiyun 	}
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2203*4882a593Smuzhiyun 		int hpage_node = node;
2204*4882a593Smuzhiyun 
2205*4882a593Smuzhiyun 		/*
2206*4882a593Smuzhiyun 		 * For hugepage allocation and non-interleave policy which
2207*4882a593Smuzhiyun 		 * allows the current node (or other explicitly preferred
2208*4882a593Smuzhiyun 		 * node) we only try to allocate from the current/preferred
2209*4882a593Smuzhiyun 		 * node and don't fall back to other nodes, as the cost of
2210*4882a593Smuzhiyun 		 * remote accesses would likely offset THP benefits.
2211*4882a593Smuzhiyun 		 *
2212*4882a593Smuzhiyun 		 * If the policy is interleave, or does not allow the current
2213*4882a593Smuzhiyun 		 * node in its nodemask, we allocate the standard way.
2214*4882a593Smuzhiyun 		 */
2215*4882a593Smuzhiyun 		if (pol->mode == MPOL_PREFERRED && !(pol->flags & MPOL_F_LOCAL))
2216*4882a593Smuzhiyun 			hpage_node = pol->v.preferred_node;
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 		nmask = policy_nodemask(gfp, pol);
2219*4882a593Smuzhiyun 		if (!nmask || node_isset(hpage_node, *nmask)) {
2220*4882a593Smuzhiyun 			mpol_cond_put(pol);
2221*4882a593Smuzhiyun 			/*
2222*4882a593Smuzhiyun 			 * First, try to allocate THP only on local node, but
2223*4882a593Smuzhiyun 			 * don't reclaim unnecessarily, just compact.
2224*4882a593Smuzhiyun 			 */
2225*4882a593Smuzhiyun 			page = __alloc_pages_node(hpage_node,
2226*4882a593Smuzhiyun 				gfp | __GFP_THISNODE | __GFP_NORETRY, order);
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 			/*
2229*4882a593Smuzhiyun 			 * If hugepage allocations are configured to always
2230*4882a593Smuzhiyun 			 * synchronous compact or the vma has been madvised
2231*4882a593Smuzhiyun 			 * to prefer hugepage backing, retry allowing remote
2232*4882a593Smuzhiyun 			 * memory with both reclaim and compact as well.
2233*4882a593Smuzhiyun 			 */
2234*4882a593Smuzhiyun 			if (!page && (gfp & __GFP_DIRECT_RECLAIM))
2235*4882a593Smuzhiyun 				page = __alloc_pages_nodemask(gfp, order,
2236*4882a593Smuzhiyun 							hpage_node, nmask);
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 			goto out;
2239*4882a593Smuzhiyun 		}
2240*4882a593Smuzhiyun 	}
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	nmask = policy_nodemask(gfp, pol);
2243*4882a593Smuzhiyun 	preferred_nid = policy_node(gfp, pol, node);
2244*4882a593Smuzhiyun 	page = __alloc_pages_nodemask(gfp, order, preferred_nid, nmask);
2245*4882a593Smuzhiyun 	mpol_cond_put(pol);
2246*4882a593Smuzhiyun out:
2247*4882a593Smuzhiyun 	return page;
2248*4882a593Smuzhiyun }
2249*4882a593Smuzhiyun EXPORT_SYMBOL(alloc_pages_vma);
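/*
 * Illustrative sketch (not part of this file): a hypothetical fault-path
 * helper showing how alloc_pages_vma() is meant to be called, per the
 * comment above.  The caller is assumed to hold the mmap_lock for read;
 * the helper name is an example only.
 */
static struct page *example_alloc_user_page(struct vm_area_struct *vma,
					    unsigned long addr)
{
	/* Order-0 movable user page, preferring the local node (modulo policy). */
	return alloc_pages_vma(GFP_HIGHUSER_MOVABLE, 0, vma, addr,
			       numa_node_id(), false);
}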
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun /**
2252*4882a593Smuzhiyun  * 	alloc_pages_current - Allocate pages.
2253*4882a593Smuzhiyun  *
2254*4882a593Smuzhiyun  *	@gfp:
2255*4882a593Smuzhiyun  *		%GFP_USER   user allocation,
2256*4882a593Smuzhiyun  *      	%GFP_KERNEL kernel allocation,
2257*4882a593Smuzhiyun  *      	%GFP_HIGHMEM highmem allocation,
2258*4882a593Smuzhiyun  *      	%GFP_FS     don't call back into a file system.
2259*4882a593Smuzhiyun  *      	%GFP_ATOMIC don't sleep.
2260*4882a593Smuzhiyun  *	@order: Order (log2) of the allocation size in pages. 0 is a single page.
2261*4882a593Smuzhiyun  *
2262*4882a593Smuzhiyun  *	Allocate a page from the kernel page pool.  When not in
2263*4882a593Smuzhiyun  *	interrupt context, apply the current process' NUMA policy.
2264*4882a593Smuzhiyun  *	Returns NULL when no page can be allocated.
2265*4882a593Smuzhiyun  */
2266*4882a593Smuzhiyun struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2267*4882a593Smuzhiyun {
2268*4882a593Smuzhiyun 	struct mempolicy *pol = &default_policy;
2269*4882a593Smuzhiyun 	struct page *page;
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2272*4882a593Smuzhiyun 		pol = get_task_policy(current);
2273*4882a593Smuzhiyun 
2274*4882a593Smuzhiyun 	/*
2275*4882a593Smuzhiyun 	 * No reference counting needed for current->mempolicy
2276*4882a593Smuzhiyun 	 * nor system default_policy
2277*4882a593Smuzhiyun 	 */
2278*4882a593Smuzhiyun 	if (pol->mode == MPOL_INTERLEAVE)
2279*4882a593Smuzhiyun 		page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2280*4882a593Smuzhiyun 	else
2281*4882a593Smuzhiyun 		page = __alloc_pages_nodemask(gfp, order,
2282*4882a593Smuzhiyun 				policy_node(gfp, pol, numa_node_id()),
2283*4882a593Smuzhiyun 				policy_nodemask(gfp, pol));
2284*4882a593Smuzhiyun 
2285*4882a593Smuzhiyun 	return page;
2286*4882a593Smuzhiyun }
2287*4882a593Smuzhiyun EXPORT_SYMBOL(alloc_pages_current);
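/*
 * Illustrative sketch (not part of this file): on CONFIG_NUMA kernels the
 * generic alloc_pages() wrapper typically resolves to alloc_pages_current(),
 * so an ordinary kernel allocation like the hypothetical one below picks up
 * the calling task's mempolicy (interleave, bind, ...) unless it runs in
 * interrupt context or passes __GFP_THISNODE.
 */
static void *example_alloc_buffer(void)
{
	/* Order-1 allocation (two contiguous pages), placed per task policy. */
	struct page *page = alloc_pages_current(GFP_KERNEL, 1);

	return page ? page_address(page) : NULL;
}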
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2290*4882a593Smuzhiyun {
2291*4882a593Smuzhiyun 	struct mempolicy *pol = mpol_dup(vma_policy(src));
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun 	if (IS_ERR(pol))
2294*4882a593Smuzhiyun 		return PTR_ERR(pol);
2295*4882a593Smuzhiyun 	dst->vm_policy = pol;
2296*4882a593Smuzhiyun 	return 0;
2297*4882a593Smuzhiyun }
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun /*
2300*4882a593Smuzhiyun  * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2301*4882a593Smuzhiyun  * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
2302*4882a593Smuzhiyun  * with the mems_allowed returned by cpuset_mems_allowed().  This
2303*4882a593Smuzhiyun  * keeps mempolicies cpuset relative after its cpuset moves.  See
2304*4882a593Smuzhiyun  * further kernel/cpuset.c update_nodemask().
2305*4882a593Smuzhiyun  *
2306*4882a593Smuzhiyun  * current's mempolicy may be rebound by another task (the task that changes
2307*4882a593Smuzhiyun  * the cpuset's mems), so we need not do the rebind work for the current task.
2308*4882a593Smuzhiyun  */
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun /* Slow path of a mempolicy duplicate */
2311*4882a593Smuzhiyun struct mempolicy *__mpol_dup(struct mempolicy *old)
2312*4882a593Smuzhiyun {
2313*4882a593Smuzhiyun 	struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 	if (!new)
2316*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	/* task's mempolicy is protected by alloc_lock */
2319*4882a593Smuzhiyun 	if (old == current->mempolicy) {
2320*4882a593Smuzhiyun 		task_lock(current);
2321*4882a593Smuzhiyun 		*new = *old;
2322*4882a593Smuzhiyun 		task_unlock(current);
2323*4882a593Smuzhiyun 	} else
2324*4882a593Smuzhiyun 		*new = *old;
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	if (current_cpuset_is_being_rebound()) {
2327*4882a593Smuzhiyun 		nodemask_t mems = cpuset_mems_allowed(current);
2328*4882a593Smuzhiyun 		mpol_rebind_policy(new, &mems);
2329*4882a593Smuzhiyun 	}
2330*4882a593Smuzhiyun 	atomic_set(&new->refcnt, 1);
2331*4882a593Smuzhiyun 	return new;
2332*4882a593Smuzhiyun }
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun /* Slow path of a mempolicy comparison */
2335*4882a593Smuzhiyun bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2336*4882a593Smuzhiyun {
2337*4882a593Smuzhiyun 	if (!a || !b)
2338*4882a593Smuzhiyun 		return false;
2339*4882a593Smuzhiyun 	if (a->mode != b->mode)
2340*4882a593Smuzhiyun 		return false;
2341*4882a593Smuzhiyun 	if (a->flags != b->flags)
2342*4882a593Smuzhiyun 		return false;
2343*4882a593Smuzhiyun 	if (mpol_store_user_nodemask(a))
2344*4882a593Smuzhiyun 		if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2345*4882a593Smuzhiyun 			return false;
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 	switch (a->mode) {
2348*4882a593Smuzhiyun 	case MPOL_BIND:
2349*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
2350*4882a593Smuzhiyun 		return !!nodes_equal(a->v.nodes, b->v.nodes);
2351*4882a593Smuzhiyun 	case MPOL_PREFERRED:
2352*4882a593Smuzhiyun 		/* a's ->flags is the same as b's */
2353*4882a593Smuzhiyun 		if (a->flags & MPOL_F_LOCAL)
2354*4882a593Smuzhiyun 			return true;
2355*4882a593Smuzhiyun 		return a->v.preferred_node == b->v.preferred_node;
2356*4882a593Smuzhiyun 	default:
2357*4882a593Smuzhiyun 		BUG();
2358*4882a593Smuzhiyun 		return false;
2359*4882a593Smuzhiyun 	}
2360*4882a593Smuzhiyun }
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun /*
2363*4882a593Smuzhiyun  * Shared memory backing store policy support.
2364*4882a593Smuzhiyun  *
2365*4882a593Smuzhiyun  * Remember policies even when nobody has shared memory mapped.
2366*4882a593Smuzhiyun  * The policies are kept in Red-Black tree linked from the inode.
2367*4882a593Smuzhiyun  * They are protected by the sp->lock rwlock, which should be held
2368*4882a593Smuzhiyun  * for any accesses to the tree.
2369*4882a593Smuzhiyun  */
2370*4882a593Smuzhiyun 
2371*4882a593Smuzhiyun /*
2372*4882a593Smuzhiyun  * Look up the first element intersecting start-end.  Caller holds sp->lock
2373*4882a593Smuzhiyun  * for reading or for writing.
2374*4882a593Smuzhiyun  */
2375*4882a593Smuzhiyun static struct sp_node *
2376*4882a593Smuzhiyun sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2377*4882a593Smuzhiyun {
2378*4882a593Smuzhiyun 	struct rb_node *n = sp->root.rb_node;
2379*4882a593Smuzhiyun 
2380*4882a593Smuzhiyun 	while (n) {
2381*4882a593Smuzhiyun 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 		if (start >= p->end)
2384*4882a593Smuzhiyun 			n = n->rb_right;
2385*4882a593Smuzhiyun 		else if (end <= p->start)
2386*4882a593Smuzhiyun 			n = n->rb_left;
2387*4882a593Smuzhiyun 		else
2388*4882a593Smuzhiyun 			break;
2389*4882a593Smuzhiyun 	}
2390*4882a593Smuzhiyun 	if (!n)
2391*4882a593Smuzhiyun 		return NULL;
2392*4882a593Smuzhiyun 	for (;;) {
2393*4882a593Smuzhiyun 		struct sp_node *w = NULL;
2394*4882a593Smuzhiyun 		struct rb_node *prev = rb_prev(n);
2395*4882a593Smuzhiyun 		if (!prev)
2396*4882a593Smuzhiyun 			break;
2397*4882a593Smuzhiyun 		w = rb_entry(prev, struct sp_node, nd);
2398*4882a593Smuzhiyun 		if (w->end <= start)
2399*4882a593Smuzhiyun 			break;
2400*4882a593Smuzhiyun 		n = prev;
2401*4882a593Smuzhiyun 	}
2402*4882a593Smuzhiyun 	return rb_entry(n, struct sp_node, nd);
2403*4882a593Smuzhiyun }
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun /*
2406*4882a593Smuzhiyun  * Insert a new shared policy into the list.  Caller holds sp->lock for
2407*4882a593Smuzhiyun  * writing.
2408*4882a593Smuzhiyun  */
2409*4882a593Smuzhiyun static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2410*4882a593Smuzhiyun {
2411*4882a593Smuzhiyun 	struct rb_node **p = &sp->root.rb_node;
2412*4882a593Smuzhiyun 	struct rb_node *parent = NULL;
2413*4882a593Smuzhiyun 	struct sp_node *nd;
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 	while (*p) {
2416*4882a593Smuzhiyun 		parent = *p;
2417*4882a593Smuzhiyun 		nd = rb_entry(parent, struct sp_node, nd);
2418*4882a593Smuzhiyun 		if (new->start < nd->start)
2419*4882a593Smuzhiyun 			p = &(*p)->rb_left;
2420*4882a593Smuzhiyun 		else if (new->end > nd->end)
2421*4882a593Smuzhiyun 			p = &(*p)->rb_right;
2422*4882a593Smuzhiyun 		else
2423*4882a593Smuzhiyun 			BUG();
2424*4882a593Smuzhiyun 	}
2425*4882a593Smuzhiyun 	rb_link_node(&new->nd, parent, p);
2426*4882a593Smuzhiyun 	rb_insert_color(&new->nd, &sp->root);
2427*4882a593Smuzhiyun 	pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2428*4882a593Smuzhiyun 		 new->policy ? new->policy->mode : 0);
2429*4882a593Smuzhiyun }
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun /* Find shared policy intersecting idx */
2432*4882a593Smuzhiyun struct mempolicy *
2433*4882a593Smuzhiyun mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2434*4882a593Smuzhiyun {
2435*4882a593Smuzhiyun 	struct mempolicy *pol = NULL;
2436*4882a593Smuzhiyun 	struct sp_node *sn;
2437*4882a593Smuzhiyun 
2438*4882a593Smuzhiyun 	if (!sp->root.rb_node)
2439*4882a593Smuzhiyun 		return NULL;
2440*4882a593Smuzhiyun 	read_lock(&sp->lock);
2441*4882a593Smuzhiyun 	sn = sp_lookup(sp, idx, idx+1);
2442*4882a593Smuzhiyun 	if (sn) {
2443*4882a593Smuzhiyun 		mpol_get(sn->policy);
2444*4882a593Smuzhiyun 		pol = sn->policy;
2445*4882a593Smuzhiyun 	}
2446*4882a593Smuzhiyun 	read_unlock(&sp->lock);
2447*4882a593Smuzhiyun 	return pol;
2448*4882a593Smuzhiyun }
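/*
 * Illustrative sketch (not part of this file): roughly how a shared-memory
 * filesystem's ->get_policy() callback can translate a faulting address into
 * a file index and consult the inode's shared policy tree.  example_sp_of()
 * is a hypothetical accessor for the inode's struct shared_policy.
 */
static struct mempolicy *example_get_policy(struct vm_area_struct *vma,
					    unsigned long addr)
{
	struct shared_policy *sp = example_sp_of(file_inode(vma->vm_file));
	pgoff_t index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	/* Returns a referenced policy, or NULL to fall back to the default. */
	return mpol_shared_policy_lookup(sp, index);
}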
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun static void sp_free(struct sp_node *n)
2451*4882a593Smuzhiyun {
2452*4882a593Smuzhiyun 	mpol_put(n->policy);
2453*4882a593Smuzhiyun 	kmem_cache_free(sn_cache, n);
2454*4882a593Smuzhiyun }
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun /**
2457*4882a593Smuzhiyun  * mpol_misplaced - check whether current page node is valid in policy
2458*4882a593Smuzhiyun  *
2459*4882a593Smuzhiyun  * @page: page to be checked
2460*4882a593Smuzhiyun  * @vma: vm area where page mapped
2461*4882a593Smuzhiyun  * @addr: virtual address where page mapped
2462*4882a593Smuzhiyun  *
2463*4882a593Smuzhiyun  * Look up the current policy node id for vma,addr and compare it to the
2464*4882a593Smuzhiyun  * page's node id.
2465*4882a593Smuzhiyun  *
2466*4882a593Smuzhiyun  * Returns:
2467*4882a593Smuzhiyun  *	-1	- not misplaced, page is in the right node
2468*4882a593Smuzhiyun  *	node	- node id where the page should be
2469*4882a593Smuzhiyun  *
2470*4882a593Smuzhiyun  * Policy determination "mimics" alloc_page_vma().
2471*4882a593Smuzhiyun  * Called from fault path where we know the vma and faulting address.
2472*4882a593Smuzhiyun  */
2473*4882a593Smuzhiyun int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2474*4882a593Smuzhiyun {
2475*4882a593Smuzhiyun 	struct mempolicy *pol;
2476*4882a593Smuzhiyun 	struct zoneref *z;
2477*4882a593Smuzhiyun 	int curnid = page_to_nid(page);
2478*4882a593Smuzhiyun 	unsigned long pgoff;
2479*4882a593Smuzhiyun 	int thiscpu = raw_smp_processor_id();
2480*4882a593Smuzhiyun 	int thisnid = cpu_to_node(thiscpu);
2481*4882a593Smuzhiyun 	int polnid = NUMA_NO_NODE;
2482*4882a593Smuzhiyun 	int ret = -1;
2483*4882a593Smuzhiyun 
2484*4882a593Smuzhiyun 	pol = get_vma_policy(vma, addr);
2485*4882a593Smuzhiyun 	if (!(pol->flags & MPOL_F_MOF))
2486*4882a593Smuzhiyun 		goto out;
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	switch (pol->mode) {
2489*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
2490*4882a593Smuzhiyun 		pgoff = vma->vm_pgoff;
2491*4882a593Smuzhiyun 		pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2492*4882a593Smuzhiyun 		polnid = offset_il_node(pol, pgoff);
2493*4882a593Smuzhiyun 		break;
2494*4882a593Smuzhiyun 
2495*4882a593Smuzhiyun 	case MPOL_PREFERRED:
2496*4882a593Smuzhiyun 		if (pol->flags & MPOL_F_LOCAL)
2497*4882a593Smuzhiyun 			polnid = numa_node_id();
2498*4882a593Smuzhiyun 		else
2499*4882a593Smuzhiyun 			polnid = pol->v.preferred_node;
2500*4882a593Smuzhiyun 		break;
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 	case MPOL_BIND:
2503*4882a593Smuzhiyun 
2504*4882a593Smuzhiyun 		/*
2505*4882a593Smuzhiyun 		 * allows binding to multiple nodes.
2506*4882a593Smuzhiyun 		 * MPOL_BIND allows binding to multiple nodes.
2507*4882a593Smuzhiyun 		 * Use the current page's node if it is in the policy nodemask;
2508*4882a593Smuzhiyun 		 * else select the nearest allowed node, if any.
2509*4882a593Smuzhiyun 		 * If there are no allowed nodes, use the current node [!misplaced].
2510*4882a593Smuzhiyun 		if (node_isset(curnid, pol->v.nodes))
2511*4882a593Smuzhiyun 			goto out;
2512*4882a593Smuzhiyun 		z = first_zones_zonelist(
2513*4882a593Smuzhiyun 				node_zonelist(numa_node_id(), GFP_HIGHUSER),
2514*4882a593Smuzhiyun 				gfp_zone(GFP_HIGHUSER),
2515*4882a593Smuzhiyun 				&pol->v.nodes);
2516*4882a593Smuzhiyun 		polnid = zone_to_nid(z->zone);
2517*4882a593Smuzhiyun 		break;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	default:
2520*4882a593Smuzhiyun 		BUG();
2521*4882a593Smuzhiyun 	}
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 	/* Migrate the page towards the node whose CPU is referencing it */
2524*4882a593Smuzhiyun 	if (pol->flags & MPOL_F_MORON) {
2525*4882a593Smuzhiyun 		polnid = thisnid;
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 		if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2528*4882a593Smuzhiyun 			goto out;
2529*4882a593Smuzhiyun 	}
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	if (curnid != polnid)
2532*4882a593Smuzhiyun 		ret = polnid;
2533*4882a593Smuzhiyun out:
2534*4882a593Smuzhiyun 	mpol_cond_put(pol);
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	return ret;
2537*4882a593Smuzhiyun }
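/*
 * Illustrative sketch (not part of this file): how a NUMA hinting fault
 * handler can use mpol_misplaced().  A return of -1 means the page already
 * sits on an acceptable node; otherwise the returned node id is a migration
 * target.  example_migrate() stands in for the actual migration call and is
 * a hypothetical name.
 */
static void example_numa_hinting_fault(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long addr)
{
	int target_nid = mpol_misplaced(page, vma, addr);

	if (target_nid != -1)
		example_migrate(page, vma, target_nid);	/* hypothetical */
}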
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun /*
2540*4882a593Smuzhiyun  * Drop the (possibly final) reference to task->mempolicy.  It needs to be
2541*4882a593Smuzhiyun  * dropped after task->mempolicy is set to NULL so that any allocation done as
2542*4882a593Smuzhiyun  * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2543*4882a593Smuzhiyun  * policy.
2544*4882a593Smuzhiyun  */
2545*4882a593Smuzhiyun void mpol_put_task_policy(struct task_struct *task)
2546*4882a593Smuzhiyun {
2547*4882a593Smuzhiyun 	struct mempolicy *pol;
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	task_lock(task);
2550*4882a593Smuzhiyun 	pol = task->mempolicy;
2551*4882a593Smuzhiyun 	task->mempolicy = NULL;
2552*4882a593Smuzhiyun 	task_unlock(task);
2553*4882a593Smuzhiyun 	mpol_put(pol);
2554*4882a593Smuzhiyun }
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2557*4882a593Smuzhiyun {
2558*4882a593Smuzhiyun 	pr_debug("deleting %lx-%lx\n", n->start, n->end);
2559*4882a593Smuzhiyun 	rb_erase(&n->nd, &sp->root);
2560*4882a593Smuzhiyun 	sp_free(n);
2561*4882a593Smuzhiyun }
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun static void sp_node_init(struct sp_node *node, unsigned long start,
2564*4882a593Smuzhiyun 			unsigned long end, struct mempolicy *pol)
2565*4882a593Smuzhiyun {
2566*4882a593Smuzhiyun 	node->start = start;
2567*4882a593Smuzhiyun 	node->end = end;
2568*4882a593Smuzhiyun 	node->policy = pol;
2569*4882a593Smuzhiyun }
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2572*4882a593Smuzhiyun 				struct mempolicy *pol)
2573*4882a593Smuzhiyun {
2574*4882a593Smuzhiyun 	struct sp_node *n;
2575*4882a593Smuzhiyun 	struct mempolicy *newpol;
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun 	n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2578*4882a593Smuzhiyun 	if (!n)
2579*4882a593Smuzhiyun 		return NULL;
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	newpol = mpol_dup(pol);
2582*4882a593Smuzhiyun 	if (IS_ERR(newpol)) {
2583*4882a593Smuzhiyun 		kmem_cache_free(sn_cache, n);
2584*4882a593Smuzhiyun 		return NULL;
2585*4882a593Smuzhiyun 	}
2586*4882a593Smuzhiyun 	newpol->flags |= MPOL_F_SHARED;
2587*4882a593Smuzhiyun 	sp_node_init(n, start, end, newpol);
2588*4882a593Smuzhiyun 
2589*4882a593Smuzhiyun 	return n;
2590*4882a593Smuzhiyun }
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun /* Replace a policy range. */
2593*4882a593Smuzhiyun static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2594*4882a593Smuzhiyun 				 unsigned long end, struct sp_node *new)
2595*4882a593Smuzhiyun {
2596*4882a593Smuzhiyun 	struct sp_node *n;
2597*4882a593Smuzhiyun 	struct sp_node *n_new = NULL;
2598*4882a593Smuzhiyun 	struct mempolicy *mpol_new = NULL;
2599*4882a593Smuzhiyun 	int ret = 0;
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun restart:
2602*4882a593Smuzhiyun 	write_lock(&sp->lock);
2603*4882a593Smuzhiyun 	n = sp_lookup(sp, start, end);
2604*4882a593Smuzhiyun 	/* Take care of old policies in the same range. */
2605*4882a593Smuzhiyun 	while (n && n->start < end) {
2606*4882a593Smuzhiyun 		struct rb_node *next = rb_next(&n->nd);
2607*4882a593Smuzhiyun 		if (n->start >= start) {
2608*4882a593Smuzhiyun 			if (n->end <= end)
2609*4882a593Smuzhiyun 				sp_delete(sp, n);
2610*4882a593Smuzhiyun 			else
2611*4882a593Smuzhiyun 				n->start = end;
2612*4882a593Smuzhiyun 		} else {
2613*4882a593Smuzhiyun 			/* Old policy spanning whole new range. */
2614*4882a593Smuzhiyun 			if (n->end > end) {
2615*4882a593Smuzhiyun 				if (!n_new)
2616*4882a593Smuzhiyun 					goto alloc_new;
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 				*mpol_new = *n->policy;
2619*4882a593Smuzhiyun 				atomic_set(&mpol_new->refcnt, 1);
2620*4882a593Smuzhiyun 				sp_node_init(n_new, end, n->end, mpol_new);
2621*4882a593Smuzhiyun 				n->end = start;
2622*4882a593Smuzhiyun 				sp_insert(sp, n_new);
2623*4882a593Smuzhiyun 				n_new = NULL;
2624*4882a593Smuzhiyun 				mpol_new = NULL;
2625*4882a593Smuzhiyun 				break;
2626*4882a593Smuzhiyun 			} else
2627*4882a593Smuzhiyun 				n->end = start;
2628*4882a593Smuzhiyun 		}
2629*4882a593Smuzhiyun 		if (!next)
2630*4882a593Smuzhiyun 			break;
2631*4882a593Smuzhiyun 		n = rb_entry(next, struct sp_node, nd);
2632*4882a593Smuzhiyun 	}
2633*4882a593Smuzhiyun 	if (new)
2634*4882a593Smuzhiyun 		sp_insert(sp, new);
2635*4882a593Smuzhiyun 	write_unlock(&sp->lock);
2636*4882a593Smuzhiyun 	ret = 0;
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun err_out:
2639*4882a593Smuzhiyun 	if (mpol_new)
2640*4882a593Smuzhiyun 		mpol_put(mpol_new);
2641*4882a593Smuzhiyun 	if (n_new)
2642*4882a593Smuzhiyun 		kmem_cache_free(sn_cache, n_new);
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 	return ret;
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun alloc_new:
2647*4882a593Smuzhiyun 	write_unlock(&sp->lock);
2648*4882a593Smuzhiyun 	ret = -ENOMEM;
2649*4882a593Smuzhiyun 	n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2650*4882a593Smuzhiyun 	if (!n_new)
2651*4882a593Smuzhiyun 		goto err_out;
2652*4882a593Smuzhiyun 	mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2653*4882a593Smuzhiyun 	if (!mpol_new)
2654*4882a593Smuzhiyun 		goto err_out;
2655*4882a593Smuzhiyun 	atomic_set(&mpol_new->refcnt, 1);
2656*4882a593Smuzhiyun 	goto restart;
2657*4882a593Smuzhiyun }
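/*
 * Worked example (illustrative, not part of this file): suppose the tree
 * holds one policy node covering file pages [0, 100) and a new policy is
 * installed for [40, 60).  The loop above trims the old node to [0, 40),
 * uses n_new/mpol_new to re-create the tail as [60, 100) with a copy of the
 * old policy, and then inserts the new [40, 60) node, leaving three
 * non-overlapping ranges in the rb-tree.
 */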
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun /**
2660*4882a593Smuzhiyun  * mpol_shared_policy_init - initialize shared policy for inode
2661*4882a593Smuzhiyun  * @sp: pointer to inode shared policy
2662*4882a593Smuzhiyun  * @mpol:  struct mempolicy to install
2663*4882a593Smuzhiyun  *
2664*4882a593Smuzhiyun  * Install non-NULL @mpol in inode's shared policy rb-tree.
2665*4882a593Smuzhiyun  * On entry, the current task has a reference on a non-NULL @mpol.
2666*4882a593Smuzhiyun  * This must be released on exit.
2667*4882a593Smuzhiyun  * This is called from get_inode() calls, so GFP_KERNEL allocations may be used.
2668*4882a593Smuzhiyun  */
2669*4882a593Smuzhiyun void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2670*4882a593Smuzhiyun {
2671*4882a593Smuzhiyun 	int ret;
2672*4882a593Smuzhiyun 
2673*4882a593Smuzhiyun 	sp->root = RB_ROOT;		/* empty tree == default mempolicy */
2674*4882a593Smuzhiyun 	rwlock_init(&sp->lock);
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	if (mpol) {
2677*4882a593Smuzhiyun 		struct vm_area_struct pvma;
2678*4882a593Smuzhiyun 		struct mempolicy *new;
2679*4882a593Smuzhiyun 		NODEMASK_SCRATCH(scratch);
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 		if (!scratch)
2682*4882a593Smuzhiyun 			goto put_mpol;
2683*4882a593Smuzhiyun 		/* contextualize the tmpfs mount point mempolicy */
2684*4882a593Smuzhiyun 		new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2685*4882a593Smuzhiyun 		if (IS_ERR(new))
2686*4882a593Smuzhiyun 			goto free_scratch; /* no valid nodemask intersection */
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 		task_lock(current);
2689*4882a593Smuzhiyun 		ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2690*4882a593Smuzhiyun 		task_unlock(current);
2691*4882a593Smuzhiyun 		if (ret)
2692*4882a593Smuzhiyun 			goto put_new;
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 		/* Create pseudo-vma that contains just the policy */
2695*4882a593Smuzhiyun 		vma_init(&pvma, NULL);
2696*4882a593Smuzhiyun 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
2697*4882a593Smuzhiyun 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2698*4882a593Smuzhiyun 
2699*4882a593Smuzhiyun put_new:
2700*4882a593Smuzhiyun 		mpol_put(new);			/* drop initial ref */
2701*4882a593Smuzhiyun free_scratch:
2702*4882a593Smuzhiyun 		NODEMASK_SCRATCH_FREE(scratch);
2703*4882a593Smuzhiyun put_mpol:
2704*4882a593Smuzhiyun 		mpol_put(mpol);	/* drop our incoming ref on sb mpol */
2705*4882a593Smuzhiyun 	}
2706*4882a593Smuzhiyun }
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun int mpol_set_shared_policy(struct shared_policy *info,
2709*4882a593Smuzhiyun 			struct vm_area_struct *vma, struct mempolicy *npol)
2710*4882a593Smuzhiyun {
2711*4882a593Smuzhiyun 	int err;
2712*4882a593Smuzhiyun 	struct sp_node *new = NULL;
2713*4882a593Smuzhiyun 	unsigned long sz = vma_pages(vma);
2714*4882a593Smuzhiyun 
2715*4882a593Smuzhiyun 	pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2716*4882a593Smuzhiyun 		 vma->vm_pgoff,
2717*4882a593Smuzhiyun 		 sz, npol ? npol->mode : -1,
2718*4882a593Smuzhiyun 		 npol ? npol->flags : -1,
2719*4882a593Smuzhiyun 		 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 	if (npol) {
2722*4882a593Smuzhiyun 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2723*4882a593Smuzhiyun 		if (!new)
2724*4882a593Smuzhiyun 			return -ENOMEM;
2725*4882a593Smuzhiyun 	}
2726*4882a593Smuzhiyun 	err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2727*4882a593Smuzhiyun 	if (err && new)
2728*4882a593Smuzhiyun 		sp_free(new);
2729*4882a593Smuzhiyun 	return err;
2730*4882a593Smuzhiyun }
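/*
 * Illustrative sketch (not part of this file): a filesystem's ->set_policy()
 * callback (e.g. when mbind() is applied to a mapped shared file) typically
 * just forwards to mpol_set_shared_policy() with the inode's policy tree.
 * example_sp_of() is the same hypothetical accessor used in the earlier sketch.
 */
static int example_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	return mpol_set_shared_policy(example_sp_of(file_inode(vma->vm_file)),
				      vma, mpol);
}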
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun /* Free a backing policy store on inode delete. */
2733*4882a593Smuzhiyun void mpol_free_shared_policy(struct shared_policy *p)
2734*4882a593Smuzhiyun {
2735*4882a593Smuzhiyun 	struct sp_node *n;
2736*4882a593Smuzhiyun 	struct rb_node *next;
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	if (!p->root.rb_node)
2739*4882a593Smuzhiyun 		return;
2740*4882a593Smuzhiyun 	write_lock(&p->lock);
2741*4882a593Smuzhiyun 	next = rb_first(&p->root);
2742*4882a593Smuzhiyun 	while (next) {
2743*4882a593Smuzhiyun 		n = rb_entry(next, struct sp_node, nd);
2744*4882a593Smuzhiyun 		next = rb_next(&n->nd);
2745*4882a593Smuzhiyun 		sp_delete(p, n);
2746*4882a593Smuzhiyun 	}
2747*4882a593Smuzhiyun 	write_unlock(&p->lock);
2748*4882a593Smuzhiyun }
2749*4882a593Smuzhiyun 
2750*4882a593Smuzhiyun #ifdef CONFIG_NUMA_BALANCING
2751*4882a593Smuzhiyun static int __initdata numabalancing_override;
2752*4882a593Smuzhiyun 
2753*4882a593Smuzhiyun static void __init check_numabalancing_enable(void)
2754*4882a593Smuzhiyun {
2755*4882a593Smuzhiyun 	bool numabalancing_default = false;
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2758*4882a593Smuzhiyun 		numabalancing_default = true;
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 	/* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2761*4882a593Smuzhiyun 	if (numabalancing_override)
2762*4882a593Smuzhiyun 		set_numabalancing_state(numabalancing_override == 1);
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 	if (num_online_nodes() > 1 && !numabalancing_override) {
2765*4882a593Smuzhiyun 		pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2766*4882a593Smuzhiyun 			numabalancing_default ? "Enabling" : "Disabling");
2767*4882a593Smuzhiyun 		set_numabalancing_state(numabalancing_default);
2768*4882a593Smuzhiyun 	}
2769*4882a593Smuzhiyun }
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun static int __init setup_numabalancing(char *str)
2772*4882a593Smuzhiyun {
2773*4882a593Smuzhiyun 	int ret = 0;
2774*4882a593Smuzhiyun 	if (!str)
2775*4882a593Smuzhiyun 		goto out;
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 	if (!strcmp(str, "enable")) {
2778*4882a593Smuzhiyun 		numabalancing_override = 1;
2779*4882a593Smuzhiyun 		ret = 1;
2780*4882a593Smuzhiyun 	} else if (!strcmp(str, "disable")) {
2781*4882a593Smuzhiyun 		numabalancing_override = -1;
2782*4882a593Smuzhiyun 		ret = 1;
2783*4882a593Smuzhiyun 	}
2784*4882a593Smuzhiyun out:
2785*4882a593Smuzhiyun 	if (!ret)
2786*4882a593Smuzhiyun 		pr_warn("Unable to parse numa_balancing=\n");
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	return ret;
2789*4882a593Smuzhiyun }
2790*4882a593Smuzhiyun __setup("numa_balancing=", setup_numabalancing);
2791*4882a593Smuzhiyun #else
2792*4882a593Smuzhiyun static inline void __init check_numabalancing_enable(void)
2793*4882a593Smuzhiyun {
2794*4882a593Smuzhiyun }
2795*4882a593Smuzhiyun #endif /* CONFIG_NUMA_BALANCING */
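/*
 * Illustrative usage note (not part of this file): per the setup handler
 * above, automatic NUMA balancing can be toggled at boot with
 * "numa_balancing=enable" or "numa_balancing=disable" on the kernel command
 * line, or later through the kernel.numa_balancing sysctl mentioned in the
 * pr_info() message.
 */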
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun /* assumes fs == KERNEL_DS */
2798*4882a593Smuzhiyun void __init numa_policy_init(void)
2799*4882a593Smuzhiyun {
2800*4882a593Smuzhiyun 	nodemask_t interleave_nodes;
2801*4882a593Smuzhiyun 	unsigned long largest = 0;
2802*4882a593Smuzhiyun 	int nid, prefer = 0;
2803*4882a593Smuzhiyun 
2804*4882a593Smuzhiyun 	policy_cache = kmem_cache_create("numa_policy",
2805*4882a593Smuzhiyun 					 sizeof(struct mempolicy),
2806*4882a593Smuzhiyun 					 0, SLAB_PANIC, NULL);
2807*4882a593Smuzhiyun 
2808*4882a593Smuzhiyun 	sn_cache = kmem_cache_create("shared_policy_node",
2809*4882a593Smuzhiyun 				     sizeof(struct sp_node),
2810*4882a593Smuzhiyun 				     0, SLAB_PANIC, NULL);
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun 	for_each_node(nid) {
2813*4882a593Smuzhiyun 		preferred_node_policy[nid] = (struct mempolicy) {
2814*4882a593Smuzhiyun 			.refcnt = ATOMIC_INIT(1),
2815*4882a593Smuzhiyun 			.mode = MPOL_PREFERRED,
2816*4882a593Smuzhiyun 			.flags = MPOL_F_MOF | MPOL_F_MORON,
2817*4882a593Smuzhiyun 			.v = { .preferred_node = nid, },
2818*4882a593Smuzhiyun 		};
2819*4882a593Smuzhiyun 	}
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	/*
2822*4882a593Smuzhiyun 	 * Set interleaving policy for system init. Interleaving is only
2823*4882a593Smuzhiyun 	 * enabled across suitably sized nodes (default is >= 16MB), or
2824*4882a593Smuzhiyun 	 * fall back to the largest node if they're all smaller.
2825*4882a593Smuzhiyun 	 */
2826*4882a593Smuzhiyun 	nodes_clear(interleave_nodes);
2827*4882a593Smuzhiyun 	for_each_node_state(nid, N_MEMORY) {
2828*4882a593Smuzhiyun 		unsigned long total_pages = node_present_pages(nid);
2829*4882a593Smuzhiyun 
2830*4882a593Smuzhiyun 		/* Preserve the largest node */
2831*4882a593Smuzhiyun 		if (largest < total_pages) {
2832*4882a593Smuzhiyun 			largest = total_pages;
2833*4882a593Smuzhiyun 			prefer = nid;
2834*4882a593Smuzhiyun 		}
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 		/* Interleave this node? */
2837*4882a593Smuzhiyun 		if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2838*4882a593Smuzhiyun 			node_set(nid, interleave_nodes);
2839*4882a593Smuzhiyun 	}
2840*4882a593Smuzhiyun 
2841*4882a593Smuzhiyun 	/* All too small, use the largest */
2842*4882a593Smuzhiyun 	if (unlikely(nodes_empty(interleave_nodes)))
2843*4882a593Smuzhiyun 		node_set(prefer, interleave_nodes);
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 	if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2846*4882a593Smuzhiyun 		pr_err("%s: interleaving failed\n", __func__);
2847*4882a593Smuzhiyun 
2848*4882a593Smuzhiyun 	check_numabalancing_enable();
2849*4882a593Smuzhiyun }
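/*
 * Illustrative arithmetic (not part of this file): with 4 KiB pages the
 * "suitably sized" test above, (total_pages << PAGE_SHIFT) >= (16 << 20),
 * admits a node into the boot-time interleave set once it has at least
 * 16 MiB of present memory, i.e. 4096 pages; smaller nodes are skipped
 * unless every node is that small, in which case only the largest one is used.
 */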
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun /* Reset policy of current process to default */
2852*4882a593Smuzhiyun void numa_default_policy(void)
2853*4882a593Smuzhiyun {
2854*4882a593Smuzhiyun 	do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2855*4882a593Smuzhiyun }
2856*4882a593Smuzhiyun 
2857*4882a593Smuzhiyun /*
2858*4882a593Smuzhiyun  * Parse and format mempolicy from/to strings
2859*4882a593Smuzhiyun  */
2860*4882a593Smuzhiyun 
2861*4882a593Smuzhiyun /*
2862*4882a593Smuzhiyun  * "local" is implemented internally by MPOL_PREFERRED with MPOL_F_LOCAL flag.
2863*4882a593Smuzhiyun  */
2864*4882a593Smuzhiyun static const char * const policy_modes[] =
2865*4882a593Smuzhiyun {
2866*4882a593Smuzhiyun 	[MPOL_DEFAULT]    = "default",
2867*4882a593Smuzhiyun 	[MPOL_PREFERRED]  = "prefer",
2868*4882a593Smuzhiyun 	[MPOL_BIND]       = "bind",
2869*4882a593Smuzhiyun 	[MPOL_INTERLEAVE] = "interleave",
2870*4882a593Smuzhiyun 	[MPOL_LOCAL]      = "local",
2871*4882a593Smuzhiyun };
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun #ifdef CONFIG_TMPFS
2875*4882a593Smuzhiyun /**
2876*4882a593Smuzhiyun  * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2877*4882a593Smuzhiyun  * @str:  string containing mempolicy to parse
2878*4882a593Smuzhiyun  * @mpol:  pointer to struct mempolicy pointer, returned on success.
2879*4882a593Smuzhiyun  *
2880*4882a593Smuzhiyun  * Format of input:
2881*4882a593Smuzhiyun  *	<mode>[=<flags>][:<nodelist>]
2882*4882a593Smuzhiyun  *
2883*4882a593Smuzhiyun  * On success, returns 0, else 1
2884*4882a593Smuzhiyun  */
2885*4882a593Smuzhiyun int mpol_parse_str(char *str, struct mempolicy **mpol)
2886*4882a593Smuzhiyun {
2887*4882a593Smuzhiyun 	struct mempolicy *new = NULL;
2888*4882a593Smuzhiyun 	unsigned short mode_flags;
2889*4882a593Smuzhiyun 	nodemask_t nodes;
2890*4882a593Smuzhiyun 	char *nodelist = strchr(str, ':');
2891*4882a593Smuzhiyun 	char *flags = strchr(str, '=');
2892*4882a593Smuzhiyun 	int err = 1, mode;
2893*4882a593Smuzhiyun 
2894*4882a593Smuzhiyun 	if (flags)
2895*4882a593Smuzhiyun 		*flags++ = '\0';	/* terminate mode string */
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 	if (nodelist) {
2898*4882a593Smuzhiyun 		/* NUL-terminate mode or flags string */
2899*4882a593Smuzhiyun 		*nodelist++ = '\0';
2900*4882a593Smuzhiyun 		if (nodelist_parse(nodelist, nodes))
2901*4882a593Smuzhiyun 			goto out;
2902*4882a593Smuzhiyun 		if (!nodes_subset(nodes, node_states[N_MEMORY]))
2903*4882a593Smuzhiyun 			goto out;
2904*4882a593Smuzhiyun 	} else
2905*4882a593Smuzhiyun 		nodes_clear(nodes);
2906*4882a593Smuzhiyun 
2907*4882a593Smuzhiyun 	mode = match_string(policy_modes, MPOL_MAX, str);
2908*4882a593Smuzhiyun 	if (mode < 0)
2909*4882a593Smuzhiyun 		goto out;
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun 	switch (mode) {
2912*4882a593Smuzhiyun 	case MPOL_PREFERRED:
2913*4882a593Smuzhiyun 		/*
2914*4882a593Smuzhiyun 		 * Insist on a nodelist of one node only, although later
2915*4882a593Smuzhiyun 		 * we use first_node(nodes) to grab a single node, so here
2916*4882a593Smuzhiyun 		 * nodelist (or nodes) cannot be empty.
2917*4882a593Smuzhiyun 		 */
2918*4882a593Smuzhiyun 		if (nodelist) {
2919*4882a593Smuzhiyun 			char *rest = nodelist;
2920*4882a593Smuzhiyun 			while (isdigit(*rest))
2921*4882a593Smuzhiyun 				rest++;
2922*4882a593Smuzhiyun 			if (*rest)
2923*4882a593Smuzhiyun 				goto out;
2924*4882a593Smuzhiyun 			if (nodes_empty(nodes))
2925*4882a593Smuzhiyun 				goto out;
2926*4882a593Smuzhiyun 		}
2927*4882a593Smuzhiyun 		break;
2928*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
2929*4882a593Smuzhiyun 		/*
2930*4882a593Smuzhiyun 		 * Default to online nodes with memory if no nodelist
2931*4882a593Smuzhiyun 		 */
2932*4882a593Smuzhiyun 		if (!nodelist)
2933*4882a593Smuzhiyun 			nodes = node_states[N_MEMORY];
2934*4882a593Smuzhiyun 		break;
2935*4882a593Smuzhiyun 	case MPOL_LOCAL:
2936*4882a593Smuzhiyun 		/*
2937*4882a593Smuzhiyun 		 * Don't allow a nodelist;  mpol_new() checks flags
2938*4882a593Smuzhiyun 		 */
2939*4882a593Smuzhiyun 		if (nodelist)
2940*4882a593Smuzhiyun 			goto out;
2941*4882a593Smuzhiyun 		mode = MPOL_PREFERRED;
2942*4882a593Smuzhiyun 		break;
2943*4882a593Smuzhiyun 	case MPOL_DEFAULT:
2944*4882a593Smuzhiyun 		/*
2945*4882a593Smuzhiyun 		 * Insist on an empty nodelist
2946*4882a593Smuzhiyun 		 */
2947*4882a593Smuzhiyun 		if (!nodelist)
2948*4882a593Smuzhiyun 			err = 0;
2949*4882a593Smuzhiyun 		goto out;
2950*4882a593Smuzhiyun 	case MPOL_BIND:
2951*4882a593Smuzhiyun 		/*
2952*4882a593Smuzhiyun 		 * Insist on a nodelist
2953*4882a593Smuzhiyun 		 */
2954*4882a593Smuzhiyun 		if (!nodelist)
2955*4882a593Smuzhiyun 			goto out;
2956*4882a593Smuzhiyun 	}
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 	mode_flags = 0;
2959*4882a593Smuzhiyun 	if (flags) {
2960*4882a593Smuzhiyun 		/*
2961*4882a593Smuzhiyun 		 * Currently, we only support two mutually exclusive
2962*4882a593Smuzhiyun 		 * mode flags.
2963*4882a593Smuzhiyun 		 */
2964*4882a593Smuzhiyun 		if (!strcmp(flags, "static"))
2965*4882a593Smuzhiyun 			mode_flags |= MPOL_F_STATIC_NODES;
2966*4882a593Smuzhiyun 		else if (!strcmp(flags, "relative"))
2967*4882a593Smuzhiyun 			mode_flags |= MPOL_F_RELATIVE_NODES;
2968*4882a593Smuzhiyun 		else
2969*4882a593Smuzhiyun 			goto out;
2970*4882a593Smuzhiyun 	}
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	new = mpol_new(mode, mode_flags, &nodes);
2973*4882a593Smuzhiyun 	if (IS_ERR(new))
2974*4882a593Smuzhiyun 		goto out;
2975*4882a593Smuzhiyun 
2976*4882a593Smuzhiyun 	/*
2977*4882a593Smuzhiyun 	 * Save nodes for mpol_to_str() to show the tmpfs mount options
2978*4882a593Smuzhiyun 	 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2979*4882a593Smuzhiyun 	 */
2980*4882a593Smuzhiyun 	if (mode != MPOL_PREFERRED)
2981*4882a593Smuzhiyun 		new->v.nodes = nodes;
2982*4882a593Smuzhiyun 	else if (nodelist)
2983*4882a593Smuzhiyun 		new->v.preferred_node = first_node(nodes);
2984*4882a593Smuzhiyun 	else
2985*4882a593Smuzhiyun 		new->flags |= MPOL_F_LOCAL;
2986*4882a593Smuzhiyun 
2987*4882a593Smuzhiyun 	/*
2988*4882a593Smuzhiyun 	 * Save nodes for contextualization: this will be used to "clone"
2989*4882a593Smuzhiyun 	 * the mempolicy in a specific context [cpuset] at a later time.
2990*4882a593Smuzhiyun 	 */
2991*4882a593Smuzhiyun 	new->w.user_nodemask = nodes;
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 	err = 0;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun out:
2996*4882a593Smuzhiyun 	/* Restore string for error message */
2997*4882a593Smuzhiyun 	if (nodelist)
2998*4882a593Smuzhiyun 		*--nodelist = ':';
2999*4882a593Smuzhiyun 	if (flags)
3000*4882a593Smuzhiyun 		*--flags = '=';
3001*4882a593Smuzhiyun 	if (!err)
3002*4882a593Smuzhiyun 		*mpol = new;
3003*4882a593Smuzhiyun 	return err;
3004*4882a593Smuzhiyun }
3005*4882a593Smuzhiyun #endif /* CONFIG_TMPFS */
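/*
 * Illustrative sketch (not part of this file): parsing a tmpfs-style mount
 * option with mpol_parse_str().  The documented input format is
 * <mode>[=<flags>][:<nodelist>], e.g. "interleave:0-3" or "prefer=static:1".
 * The buffer must be writable because separators are temporarily
 * NUL-terminated (and restored) while parsing; 0 means success, 1 failure.
 * The helper name is an example only.
 */
static struct mempolicy *example_parse_mpol(char *opt)
{
	struct mempolicy *mpol = NULL;

	if (mpol_parse_str(opt, &mpol))
		return NULL;		/* parse error */
	return mpol;			/* caller presumably drops it with mpol_put() */
}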
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun /**
3008*4882a593Smuzhiyun  * mpol_to_str - format a mempolicy structure for printing
3009*4882a593Smuzhiyun  * @buffer:  to contain formatted mempolicy string
3010*4882a593Smuzhiyun  * @maxlen:  length of @buffer
3011*4882a593Smuzhiyun  * @pol:  pointer to mempolicy to be formatted
3012*4882a593Smuzhiyun  *
3013*4882a593Smuzhiyun  * Convert @pol into a string.  If @buffer is too short, truncate the string.
3014*4882a593Smuzhiyun  * Recommend a @maxlen of at least 32 for the longest mode, "interleave", the
3015*4882a593Smuzhiyun  * longest flag, "relative", and to display at least a few node ids.
3016*4882a593Smuzhiyun  */
3017*4882a593Smuzhiyun void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
3018*4882a593Smuzhiyun {
3019*4882a593Smuzhiyun 	char *p = buffer;
3020*4882a593Smuzhiyun 	nodemask_t nodes = NODE_MASK_NONE;
3021*4882a593Smuzhiyun 	unsigned short mode = MPOL_DEFAULT;
3022*4882a593Smuzhiyun 	unsigned short flags = 0;
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun 	if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
3025*4882a593Smuzhiyun 		mode = pol->mode;
3026*4882a593Smuzhiyun 		flags = pol->flags;
3027*4882a593Smuzhiyun 	}
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	switch (mode) {
3030*4882a593Smuzhiyun 	case MPOL_DEFAULT:
3031*4882a593Smuzhiyun 		break;
3032*4882a593Smuzhiyun 	case MPOL_PREFERRED:
3033*4882a593Smuzhiyun 		if (flags & MPOL_F_LOCAL)
3034*4882a593Smuzhiyun 			mode = MPOL_LOCAL;
3035*4882a593Smuzhiyun 		else
3036*4882a593Smuzhiyun 			node_set(pol->v.preferred_node, nodes);
3037*4882a593Smuzhiyun 		break;
3038*4882a593Smuzhiyun 	case MPOL_BIND:
3039*4882a593Smuzhiyun 	case MPOL_INTERLEAVE:
3040*4882a593Smuzhiyun 		nodes = pol->v.nodes;
3041*4882a593Smuzhiyun 		break;
3042*4882a593Smuzhiyun 	default:
3043*4882a593Smuzhiyun 		WARN_ON_ONCE(1);
3044*4882a593Smuzhiyun 		snprintf(p, maxlen, "unknown");
3045*4882a593Smuzhiyun 		return;
3046*4882a593Smuzhiyun 	}
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 	p += snprintf(p, maxlen, "%s", policy_modes[mode]);
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun 	if (flags & MPOL_MODE_FLAGS) {
3051*4882a593Smuzhiyun 		p += snprintf(p, buffer + maxlen - p, "=");
3052*4882a593Smuzhiyun 
3053*4882a593Smuzhiyun 		/*
3054*4882a593Smuzhiyun 		 * Currently, the only defined flags are mutually exclusive
3055*4882a593Smuzhiyun 		 */
3056*4882a593Smuzhiyun 		if (flags & MPOL_F_STATIC_NODES)
3057*4882a593Smuzhiyun 			p += snprintf(p, buffer + maxlen - p, "static");
3058*4882a593Smuzhiyun 		else if (flags & MPOL_F_RELATIVE_NODES)
3059*4882a593Smuzhiyun 			p += snprintf(p, buffer + maxlen - p, "relative");
3060*4882a593Smuzhiyun 	}
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 	if (!nodes_empty(nodes))
3063*4882a593Smuzhiyun 		p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
3064*4882a593Smuzhiyun 			       nodemask_pr_args(&nodes));
3065*4882a593Smuzhiyun }
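/*
 * Illustrative sketch (not part of this file): formatting a mempolicy for a
 * seq_file, in the style of /proc/<pid>/numa_maps output.  A 64-byte buffer
 * comfortably exceeds the recommended minimum of 32 bytes noted above; the
 * helper name is an example only.
 */
static void example_show_policy(struct seq_file *m, struct mempolicy *pol)
{
	char buf[64];

	mpol_to_str(buf, sizeof(buf), pol);
	seq_printf(m, "policy=%s\n", buf);
}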
3066