// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple threads pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/bc_xprt.h>

#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP

static void svc_unregister(const struct svc_serv *serv, struct net *net);

#define svc_serv_is_pooled(serv)    ((serv)->sv_ops->svo_function)

#define SVC_POOL_DEFAULT	SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
struct svc_pool_map svc_pool_map = {
	.mode = SVC_POOL_DEFAULT
};
EXPORT_SYMBOL_GPL(svc_pool_map);

static DEFINE_MUTEX(svc_pool_map_mutex);/* protects svc_pool_map.count only */

static int
param_set_pool_mode(const char *val, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;
	struct svc_pool_map *m = &svc_pool_map;
	int err;

	mutex_lock(&svc_pool_map_mutex);

	err = -EBUSY;
	if (m->count)
		goto out;

	err = 0;
	if (!strncmp(val, "auto", 4))
		*ip = SVC_POOL_AUTO;
	else if (!strncmp(val, "global", 6))
		*ip = SVC_POOL_GLOBAL;
	else if (!strncmp(val, "percpu", 6))
		*ip = SVC_POOL_PERCPU;
	else if (!strncmp(val, "pernode", 7))
		*ip = SVC_POOL_PERNODE;
	else
		err = -EINVAL;

out:
	mutex_unlock(&svc_pool_map_mutex);
	return err;
}

static int
param_get_pool_mode(char *buf, const struct kernel_param *kp)
{
	int *ip = (int *)kp->arg;

	switch (*ip)
	{
	case SVC_POOL_AUTO:
		return strlcpy(buf, "auto\n", 20);
	case SVC_POOL_GLOBAL:
		return strlcpy(buf, "global\n", 20);
	case SVC_POOL_PERCPU:
		return strlcpy(buf, "percpu\n", 20);
	case SVC_POOL_PERNODE:
		return strlcpy(buf, "pernode\n", 20);
	default:
		return sprintf(buf, "%d\n", *ip);
	}
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
		 &svc_pool_map.mode, 0644);
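/*
 * Note: pool_mode above is a writable (0644) module parameter, so on a
 * typical build it can be read or changed through the usual module
 * parameter interface (e.g. /sys/module/sunrpc/parameters/pool_mode;
 * exact path depends on how sunrpc is built), accepting "auto", "global",
 * "percpu" or "pernode".  Changing it while a pooled service still holds
 * the map fails with -EBUSY, as enforced in param_set_pool_mode().
 */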

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (nr_online_nodes > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = first_online_node;
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
	m->to_pool = NULL;
fail:
	return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_cpu_ids;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx >= maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
};


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = nr_node_ids;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
unsigned int
svc_pool_map_get(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	mutex_lock(&svc_pool_map_mutex);

	if (m->count++) {
		mutex_unlock(&svc_pool_map_mutex);
		return m->npools;
	}

	if (m->mode == SVC_POOL_AUTO)
		m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	mutex_unlock(&svc_pool_map_mutex);
	return m->npools;
}
EXPORT_SYMBOL_GPL(svc_pool_map_get);

/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
void
svc_pool_map_put(void)
{
	struct svc_pool_map *m = &svc_pool_map;

	mutex_lock(&svc_pool_map_mutex);

	if (!--m->count) {
		kfree(m->to_pool);
		m->to_pool = NULL;
		kfree(m->pool_to);
		m->pool_to = NULL;
		m->npools = 0;
	}

	mutex_unlock(&svc_pool_map_mutex);
}
EXPORT_SYMBOL_GPL(svc_pool_map_put);

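/*
 * Map a pool index to the NUMA node its threads should prefer for
 * allocations.  Returns NUMA_NO_NODE when the map is unused or the
 * current mode carries no node affinity.
 */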
static int svc_pool_map_get_node(unsigned int pidx)
{
	const struct svc_pool_map *m = &svc_pool_map;

	if (m->count) {
		if (m->mode == SVC_POOL_PERCPU)
			return cpu_to_node(m->pool_to[pidx]);
		if (m->mode == SVC_POOL_PERNODE)
			return m->pool_to[pidx];
	}
	return NUMA_NO_NODE;
}
/*
 * Set the given thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 */
static inline void
svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node = m->pool_to[pidx];

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized.
	 */
	WARN_ON_ONCE(m->count == 0);
	if (m->count == 0)
		return;

	switch (m->mode) {
	case SVC_POOL_PERCPU:
	{
		set_cpus_allowed_ptr(task, cpumask_of(node));
		break;
	}
	case SVC_POOL_PERNODE:
	{
		set_cpus_allowed_ptr(task, cpumask_of_node(node));
		break;
	}
	}
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * An uninitialised map happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	if (svc_serv_is_pooled(serv)) {
		switch (m->mode) {
		case SVC_POOL_PERCPU:
			pidx = m->to_pool[cpu];
			break;
		case SVC_POOL_PERNODE:
			pidx = m->to_pool[cpu_to_node(cpu)];
			break;
		}
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}

int svc_rpcb_setup(struct svc_serv *serv, struct net *net)
{
	int err;

	err = rpcb_create_local(net);
	if (err)
		return err;

	/* Remove any stale portmap registrations */
	svc_unregister(serv, net);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_rpcb_setup);

void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net)
{
	svc_unregister(serv, net);
	rpcb_put_local(net);
}
EXPORT_SYMBOL_GPL(svc_rpcb_cleanup);

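/*
 * A service needs rpcbind registration only if at least one of its
 * program versions is visible, i.e. not marked vs_hidden.
 */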
static int svc_uses_rpcbind(struct svc_serv *serv)
{
	struct svc_program	*progp;
	unsigned int		i;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (!progp->pg_vers[i]->vs_hidden)
				return 1;
		}
	}

	return 0;
}

int svc_bind(struct svc_serv *serv, struct net *net)
{
	if (!svc_uses_rpcbind(serv))
		return 0;
	return svc_rpcb_setup(serv, net);
}
EXPORT_SYMBOL_GPL(svc_bind);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
static void
__svc_init_bc(struct svc_serv *serv)
{
	INIT_LIST_HEAD(&serv->sv_cb_list);
	spin_lock_init(&serv->sv_cb_lock);
	init_waitqueue_head(&serv->sv_cb_waitq);
}
#else
static void
__svc_init_bc(struct svc_serv *serv)
{
}
#endif

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	     const struct svc_serv_ops *ops)
{
	struct svc_serv	*serv;
	unsigned int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	if (bufsize > RPCSVC_MAXPAYLOAD)
		bufsize = RPCSVC_MAXPAYLOAD;
	serv->sv_max_payload = bufsize? bufsize : 4096;
	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
	serv->sv_ops = ops;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers=0; vers<prog->pg_nvers ; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize   = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	timer_setup(&serv->sv_temptimer, NULL, 0);
	spin_lock_init(&serv->sv_lock);

	__svc_init_bc(serv);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("svc: initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
	   const struct svc_serv_ops *ops)
{
	return __svc_create(prog, bufsize, /*npools*/1, ops);
}
EXPORT_SYMBOL_GPL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		  const struct svc_serv_ops *ops)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_get();

	serv = __svc_create(prog, bufsize, npools, ops);
	if (!serv)
		goto out_err;
	return serv;
out_err:
	svc_pool_map_put();
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_create_pooled);

void svc_shutdown_net(struct svc_serv *serv, struct net *net)
{
	svc_close_net(serv, net);

	if (serv->sv_ops->svo_shutdown)
		serv->sv_ops->svo_shutdown(serv, net);
}
EXPORT_SYMBOL_GPL(svc_shutdown_net);

/*
 * Destroy an RPC service. Should be called with appropriate locking to
 * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
 */
void
svc_destroy(struct svc_serv *serv)
{
	dprintk("svc: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	/*
	 * The last user is gone, so all sockets should have been destroyed
	 * by this point.  Check this.
	 */
	BUG_ON(!list_empty(&serv->sv_permsocks));
	BUG_ON(!list_empty(&serv->sv_tempsocks));

	cache_clean_deferred(serv);

	if (svc_serv_is_pooled(serv))
		svc_pool_map_put();

	kfree(serv->sv_pools);
	kfree(serv);
}
EXPORT_SYMBOL_GPL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
{
	unsigned int pages, arghi;

	/* bc_xprt uses fore channel allocated buffers */
	if (svc_is_backchannel(rqstp))
		return 1;

	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
				       * We assume one is at most one page
				       */
	arghi = 0;
	WARN_ON_ONCE(pages > RPCSVC_MAXPAGES);
	if (pages > RPCSVC_MAXPAGES)
		pages = RPCSVC_MAXPAGES;
	while (pages) {
		struct page *p = alloc_pages_node(node, GFP_KERNEL, 0);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return pages == 0;
}

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}

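/*
 * Allocate an svc_rqst plus its XDR argument/result buffers and page
 * array on the given NUMA node.  On failure any partial allocation is
 * released via svc_rqst_free() and NULL is returned.
 */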
struct svc_rqst *
svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = kzalloc_node(sizeof(*rqstp), GFP_KERNEL, node);
	if (!rqstp)
		return rqstp;

	__set_bit(RQ_BUSY, &rqstp->rq_flags);
	spin_lock_init(&rqstp->rq_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	rqstp->rq_argp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_argp)
		goto out_enomem;

	rqstp->rq_resp = kmalloc_node(serv->sv_xdrsize, GFP_KERNEL, node);
	if (!rqstp->rq_resp)
		goto out_enomem;

	if (!svc_init_buffer(rqstp, serv->sv_max_mesg, node))
		goto out_enomem;

	return rqstp;
out_enomem:
	svc_rqst_free(rqstp);
	return NULL;
}
EXPORT_SYMBOL_GPL(svc_rqst_alloc);

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
{
	struct svc_rqst	*rqstp;

	rqstp = svc_rqst_alloc(serv, pool, node);
	if (!rqstp)
		return ERR_PTR(-ENOMEM);

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	return rqstp;
}
EXPORT_SYMBOL_GPL(svc_prepare_thread);

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		set_bit(RQ_VICTIM, &rqstp->rq_flags);
		list_del_rcu(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}

/* create new threads */
static int
svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct svc_rqst	*rqstp;
	struct task_struct *task;
	struct svc_pool *chosen_pool;
	unsigned int state = serv->sv_nrthreads-1;
	int node;

	do {
		nrservs--;
		chosen_pool = choose_pool(serv, pool, &state);

		node = svc_pool_map_get_node(chosen_pool->sp_id);
		rqstp = svc_prepare_thread(serv, chosen_pool, node);
		if (IS_ERR(rqstp))
			return PTR_ERR(rqstp);

		__module_get(serv->sv_ops->svo_module);
		task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
					      node, "%s", serv->sv_name);
		if (IS_ERR(task)) {
			module_put(serv->sv_ops->svo_module);
			svc_exit_thread(rqstp);
			return PTR_ERR(task);
		}

		rqstp->rq_task = task;
		if (serv->sv_nrpools > 1)
			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);

		svc_sock_update_bufs(serv);
		wake_up_process(task);
	} while (nrservs > 0);

	return 0;
}


/* destroy old threads */
static int
svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		send_sig(SIGINT, task, 1);
		nrservs++;
	} while (nrservs < 0);

	return 0;
}

/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Caller must ensure mutual exclusion between this and
 * server startup or shutdown.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_signal_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads);

/* destroy old threads */
static int
svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *task;
	unsigned int state = serv->sv_nrthreads-1;

	/* destroy old threads */
	do {
		task = choose_victim(serv, pool, &state);
		if (task == NULL)
			break;
		kthread_stop(task);
		nrservs++;
	} while (nrservs < 0);
	return 0;
}

int
svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	if (nrservs > 0)
		return svc_start_kthreads(serv, pool, nrservs);
	if (nrservs < 0)
		return svc_stop_kthreads(serv, pool, nrservs);
	return 0;
}
EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);

/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
EXPORT_SYMBOL_GPL(svc_rqst_free);

void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	if (!test_and_set_bit(RQ_VICTIM, &rqstp->rq_flags))
		list_del_rcu(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	svc_rqst_free(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
EXPORT_SYMBOL_GPL(svc_exit_thread);

/*
 * Register an "inet" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register4(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= htonl(INADDR_ANY),
		.sin_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
					(const struct sockaddr *)&sin, netid);

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * registration request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, protocol, port);

	return error;
}

#if IS_ENABLED(CONFIG_IPV6)
/*
 * Register an "inet6" protocol family netid with the local
 * rpcbind daemon via an rpcbind v4 SET request.
 *
 * No netconfig infrastructure is available in the kernel, so
 * we map IP_ protocol numbers to netids by hand.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_rpcb_register6(struct net *net, const u32 program,
				const u32 version,
				const unsigned short protocol,
				const unsigned short port)
{
	const struct sockaddr_in6 sin6 = {
		.sin6_family		= AF_INET6,
		.sin6_addr		= IN6ADDR_ANY_INIT,
		.sin6_port		= htons(port),
	};
	const char *netid;
	int error;

	switch (protocol) {
	case IPPROTO_UDP:
		netid = RPCBIND_NETID_UDP6;
		break;
	case IPPROTO_TCP:
		netid = RPCBIND_NETID_TCP6;
		break;
	default:
		return -ENOPROTOOPT;
	}

	error = rpcb_v4_register(net, program, version,
					(const struct sockaddr *)&sin6, netid);

	/*
	 * User space didn't support rpcbind version 4, so we won't
	 * use a PF_INET6 listener.
	 */
	if (error == -EPROTONOSUPPORT)
		error = -EAFNOSUPPORT;

	return error;
}
#endif	/* IS_ENABLED(CONFIG_IPV6) */

/*
 * Register a kernel RPC service via rpcbind version 4.
 *
 * Returns zero on success; a negative errno value is returned
 * if any error occurs.
 */
static int __svc_register(struct net *net, const char *progname,
			  const u32 program, const u32 version,
			  const int family,
			  const unsigned short protocol,
			  const unsigned short port)
{
	int error = -EAFNOSUPPORT;

	switch (family) {
	case PF_INET:
		error = __svc_rpcb_register4(net, program, version,
						protocol, port);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case PF_INET6:
		error = __svc_rpcb_register6(net, program, version,
						protocol, port);
#endif
	}

	trace_svc_register(progname, version, protocol, port, family, error);
	return error;
}

int svc_rpcbind_set_version(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	return __svc_register(net, progp->pg_name, progp->pg_prog,
				version, family, proto, port);

}
EXPORT_SYMBOL_GPL(svc_rpcbind_set_version);

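/*
 * Default rpcbind_set callback: skip versions that are absent or hidden,
 * refuse to advertise UDP for versions that require congestion control,
 * and otherwise register through svc_rpcbind_set_version().  When
 * vs_rpcb_optnl is set, a registration failure is not treated as fatal.
 */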
int svc_generic_rpcbind_set(struct net *net,
			    const struct svc_program *progp,
			    u32 version, int family,
			    unsigned short proto,
			    unsigned short port)
{
	const struct svc_version *vers = progp->pg_vers[version];
	int error;

	if (vers == NULL)
		return 0;

	if (vers->vs_hidden) {
		trace_svc_noregister(progp->pg_name, version, proto,
				     port, family, 0);
		return 0;
	}

	/*
	 * Don't register a UDP port if we need congestion
	 * control.
	 */
	if (vers->vs_need_cong_ctrl && proto == IPPROTO_UDP)
		return 0;

	error = svc_rpcbind_set_version(net, progp, version,
					family, proto, port);

	return (vers->vs_rpcb_optnl) ? 0 : error;
}
EXPORT_SYMBOL_GPL(svc_generic_rpcbind_set);

/**
 * svc_register - register an RPC service with the local portmapper
 * @serv: svc_serv struct for the service to register
 * @net: net namespace for the service to register
 * @family: protocol family of service's listener socket
 * @proto: transport protocol number to advertise
 * @port: port to advertise
 *
 * Service is registered for any address in the passed-in protocol family
 */
int svc_register(const struct svc_serv *serv, struct net *net,
		 const int family, const unsigned short proto,
		 const unsigned short port)
{
	struct svc_program	*progp;
	unsigned int		i;
	int			error = 0;

	WARN_ON_ONCE(proto == 0 && port == 0);
	if (proto == 0 && port == 0)
		return -EINVAL;

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {

			error = progp->pg_rpcbind_set(net, progp, i,
					family, proto, port);
			if (error < 0) {
				printk(KERN_WARNING "svc: failed to register "
					"%sv%u RPC service (errno %d).\n",
					progp->pg_name, i, -error);
				break;
			}
		}
	}

	return error;
}

/*
 * If user space is running rpcbind, it should take the v4 UNSET
 * and clear everything for this [program, version].  If user space
 * is running portmap, it will reject the v4 UNSET, but won't have
 * any "inet6" entries anyway.  So a PMAP_UNSET should be sufficient
 * in this case to clear all existing entries for [program, version].
 */
static void __svc_unregister(struct net *net, const u32 program, const u32 version,
			     const char *progname)
{
	int error;

	error = rpcb_v4_register(net, program, version, NULL, "");

	/*
	 * User space didn't support rpcbind v4, so retry this
	 * request with the legacy rpcbind v2 protocol.
	 */
	if (error == -EPROTONOSUPPORT)
		error = rpcb_register(net, program, version, 0, 0);

	trace_svc_unregister(progname, version, error);
}

/*
 * All netids, bind addresses and ports registered for [program, version]
 * are removed from the local rpcbind database (if the service is not
 * hidden) to make way for a new instance of the service.
 *
 * The result of unregistration is reported via dprintk for those who want
 * verification of the result, but is otherwise not important.
 */
static void svc_unregister(const struct svc_serv *serv, struct net *net)
{
	struct svc_program *progp;
	unsigned long flags;
	unsigned int i;

	clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;
			if (progp->pg_vers[i]->vs_hidden)
				continue;
			__svc_unregister(net, progp->pg_prog, i, progp->pg_name);
		}
	}

	spin_lock_irqsave(&current->sighand->siglock, flags);
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/*
 * dprintk the given error with the address of the client that caused it.
 */
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static __printf(2, 3)
void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	char 	buf[RPC_MAX_ADDRBUFLEN];

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	dprintk("svc: %s: %pV", svc_print_addr(rqstp, buf, sizeof(buf)), &vaf);

	va_end(args);
}
#else
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif

__be32
svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err)
{
	set_bit(RQ_AUTHERR, &rqstp->rq_flags);
	return auth_err;
}
EXPORT_SYMBOL_GPL(svc_return_autherr);

static __be32
svc_get_autherr(struct svc_rqst *rqstp, __be32 *statp)
{
	if (test_and_clear_bit(RQ_AUTHERR, &rqstp->rq_flags))
		return *statp;
	return rpc_auth_ok;
}

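/*
 * Default dispatch: decode the arguments, call the procedure, then encode
 * the reply.  Returns 1 when a reply (possibly an error status) should be
 * sent, or 0 when the request is to be dropped.
 */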
1176*4882a593Smuzhiyun static int
svc_generic_dispatch(struct svc_rqst * rqstp,__be32 * statp)1177*4882a593Smuzhiyun svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	struct kvec *argv = &rqstp->rq_arg.head[0];
1180*4882a593Smuzhiyun 	struct kvec *resv = &rqstp->rq_res.head[0];
1181*4882a593Smuzhiyun 	const struct svc_procedure *procp = rqstp->rq_procinfo;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	/*
1184*4882a593Smuzhiyun 	 * Decode arguments
1185*4882a593Smuzhiyun 	 * XXX: why do we ignore the return value?
1186*4882a593Smuzhiyun 	 */
1187*4882a593Smuzhiyun 	if (procp->pc_decode &&
1188*4882a593Smuzhiyun 	    !procp->pc_decode(rqstp, argv->iov_base)) {
1189*4882a593Smuzhiyun 		*statp = rpc_garbage_args;
1190*4882a593Smuzhiyun 		return 1;
1191*4882a593Smuzhiyun 	}
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	*statp = procp->pc_func(rqstp);
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	if (*statp == rpc_drop_reply ||
1196*4882a593Smuzhiyun 	    test_bit(RQ_DROPME, &rqstp->rq_flags))
1197*4882a593Smuzhiyun 		return 0;
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	if (test_bit(RQ_AUTHERR, &rqstp->rq_flags))
1200*4882a593Smuzhiyun 		return 1;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	if (*statp != rpc_success)
1203*4882a593Smuzhiyun 		return 1;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/* Encode reply */
1206*4882a593Smuzhiyun 	if (procp->pc_encode &&
1207*4882a593Smuzhiyun 	    !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
1208*4882a593Smuzhiyun 		dprintk("svc: failed to encode reply\n");
1209*4882a593Smuzhiyun 		/* serv->sv_stats->rpcsystemerr++; */
1210*4882a593Smuzhiyun 		*statp = rpc_system_err;
1211*4882a593Smuzhiyun 	}
1212*4882a593Smuzhiyun 	return 1;
1213*4882a593Smuzhiyun }
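
/*
 * Illustrative sketch (editorial addition; all values are hypothetical):
 * svc_generic_dispatch() drives a procedure through the hooks in its
 * struct svc_procedure, so a minimal entry consumed by this path looks
 * roughly like:
 *
 *	static const struct svc_procedure my_proc = {
 *		.pc_func	= my_handler,
 *		.pc_decode	= my_decode_args,
 *		.pc_encode	= my_encode_result,
 *		.pc_release	= my_release,
 *		.pc_argsize	= sizeof(struct my_args),
 *		.pc_ressize	= sizeof(struct my_result),
 *		.pc_xdrressize	= MY_XDR_RESSIZE,
 *	};
 */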
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun __be32
1216*4882a593Smuzhiyun svc_generic_init_request(struct svc_rqst *rqstp,
1217*4882a593Smuzhiyun 		const struct svc_program *progp,
1218*4882a593Smuzhiyun 		struct svc_process_info *ret)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun 	const struct svc_version *versp = NULL;	/* compiler food */
1221*4882a593Smuzhiyun 	const struct svc_procedure *procp = NULL;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	if (rqstp->rq_vers >= progp->pg_nvers)
1224*4882a593Smuzhiyun 		goto err_bad_vers;
1225*4882a593Smuzhiyun 	versp = progp->pg_vers[rqstp->rq_vers];
1226*4882a593Smuzhiyun 	if (!versp)
1227*4882a593Smuzhiyun 		goto err_bad_vers;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	/*
1230*4882a593Smuzhiyun 	 * Some protocol versions (namely NFSv4) require some form of
1231*4882a593Smuzhiyun 	 * congestion control.  (See RFC 7530 section 3.1 paragraph 2)
1232*4882a593Smuzhiyun 	 * In other words, UDP is not allowed. We mark those when setting
1233*4882a593Smuzhiyun 	 * up the svc_xprt, and verify that here.
1234*4882a593Smuzhiyun 	 *
1235*4882a593Smuzhiyun 	 * The spec is not very clear about what error should be returned
1236*4882a593Smuzhiyun 	 * when someone tries to access a server that is listening on UDP
1237*4882a593Smuzhiyun 	 * for lower versions. RPC_PROG_MISMATCH seems to be the closest
1238*4882a593Smuzhiyun 	 * fit.
1239*4882a593Smuzhiyun 	 */
1240*4882a593Smuzhiyun 	if (versp->vs_need_cong_ctrl && rqstp->rq_xprt &&
1241*4882a593Smuzhiyun 	    !test_bit(XPT_CONG_CTRL, &rqstp->rq_xprt->xpt_flags))
1242*4882a593Smuzhiyun 		goto err_bad_vers;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 	if (rqstp->rq_proc >= versp->vs_nproc)
1245*4882a593Smuzhiyun 		goto err_bad_proc;
1246*4882a593Smuzhiyun 	rqstp->rq_procinfo = procp = &versp->vs_proc[rqstp->rq_proc];
1247*4882a593Smuzhiyun 	if (!procp)
1248*4882a593Smuzhiyun 		goto err_bad_proc;
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun 	/* Initialize storage for argp and resp */
1251*4882a593Smuzhiyun 	memset(rqstp->rq_argp, 0, procp->pc_argsize);
1252*4882a593Smuzhiyun 	memset(rqstp->rq_resp, 0, procp->pc_ressize);
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	/* Bump per-procedure stats counter */
1255*4882a593Smuzhiyun 	versp->vs_count[rqstp->rq_proc]++;
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	ret->dispatch = versp->vs_dispatch;
1258*4882a593Smuzhiyun 	return rpc_success;
1259*4882a593Smuzhiyun err_bad_vers:
1260*4882a593Smuzhiyun 	ret->mismatch.lovers = progp->pg_lovers;
1261*4882a593Smuzhiyun 	ret->mismatch.hivers = progp->pg_hivers;
1262*4882a593Smuzhiyun 	return rpc_prog_mismatch;
1263*4882a593Smuzhiyun err_bad_proc:
1264*4882a593Smuzhiyun 	return rpc_proc_unavail;
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_generic_init_request);
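
/*
 * Illustrative sketch (editorial addition; identifiers are hypothetical):
 * a program with no special version or procedure policy simply points its
 * pg_init_request hook at the generic helper above:
 *
 *	static struct svc_program my_program = {
 *		.pg_prog	 = MY_PROGRAM,
 *		.pg_nvers	 = ARRAY_SIZE(my_versions),
 *		.pg_vers	 = my_versions,
 *		.pg_name	 = "my_prog",
 *		.pg_authenticate = my_authenticate,
 *		.pg_init_request = svc_generic_init_request,
 *	};
 */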
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun /*
1269*4882a593Smuzhiyun  * Common routine for processing the RPC request.
1270*4882a593Smuzhiyun  */
1271*4882a593Smuzhiyun static int
1272*4882a593Smuzhiyun svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1273*4882a593Smuzhiyun {
1274*4882a593Smuzhiyun 	struct svc_program	*progp;
1275*4882a593Smuzhiyun 	const struct svc_procedure *procp = NULL;
1276*4882a593Smuzhiyun 	struct svc_serv		*serv = rqstp->rq_server;
1277*4882a593Smuzhiyun 	struct svc_process_info process;
1278*4882a593Smuzhiyun 	__be32			*statp;
1279*4882a593Smuzhiyun 	u32			prog, vers;
1280*4882a593Smuzhiyun 	__be32			auth_stat, rpc_stat;
1281*4882a593Smuzhiyun 	int			auth_res;
1282*4882a593Smuzhiyun 	__be32			*reply_statp;
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	rpc_stat = rpc_success;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	if (argv->iov_len < 6*4)
1287*4882a593Smuzhiyun 		goto err_short_len;
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun 	/* Will be turned off by GSS integrity and privacy services */
1290*4882a593Smuzhiyun 	set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
1291*4882a593Smuzhiyun 	/* Will be turned off only when NFSv4 Sessions are used */
1292*4882a593Smuzhiyun 	set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
1293*4882a593Smuzhiyun 	clear_bit(RQ_DROPME, &rqstp->rq_flags);
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	svc_putu32(resv, rqstp->rq_xid);
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	vers = svc_getnl(argv);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	/* First words of reply: */
1300*4882a593Smuzhiyun 	svc_putnl(resv, 1);		/* REPLY */
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun 	if (vers != 2)		/* RPC version number */
1303*4882a593Smuzhiyun 		goto err_bad_rpc;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 	/* Save position in case we later decide to reject: */
1306*4882a593Smuzhiyun 	reply_statp = resv->iov_base + resv->iov_len;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	svc_putnl(resv, 0);		/* ACCEPT */
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
1311*4882a593Smuzhiyun 	rqstp->rq_vers = svc_getnl(argv);	/* version number */
1312*4882a593Smuzhiyun 	rqstp->rq_proc = svc_getnl(argv);	/* procedure number */
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	for (progp = serv->sv_program; progp; progp = progp->pg_next)
1315*4882a593Smuzhiyun 		if (prog == progp->pg_prog)
1316*4882a593Smuzhiyun 			break;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	/*
1319*4882a593Smuzhiyun 	 * Decode auth data, and add verifier to reply buffer.
1320*4882a593Smuzhiyun 	 * We do this before anything else in order to get a decent
1321*4882a593Smuzhiyun 	 * auth verifier.
1322*4882a593Smuzhiyun 	 */
1323*4882a593Smuzhiyun 	auth_res = svc_authenticate(rqstp, &auth_stat);
1324*4882a593Smuzhiyun 	/* Also give the program a chance to reject this call: */
1325*4882a593Smuzhiyun 	if (auth_res == SVC_OK && progp) {
1326*4882a593Smuzhiyun 		auth_stat = rpc_autherr_badcred;
1327*4882a593Smuzhiyun 		auth_res = progp->pg_authenticate(rqstp);
1328*4882a593Smuzhiyun 	}
1329*4882a593Smuzhiyun 	if (auth_res != SVC_OK)
1330*4882a593Smuzhiyun 		trace_svc_authenticate(rqstp, auth_res, auth_stat);
1331*4882a593Smuzhiyun 	switch (auth_res) {
1332*4882a593Smuzhiyun 	case SVC_OK:
1333*4882a593Smuzhiyun 		break;
1334*4882a593Smuzhiyun 	case SVC_GARBAGE:
1335*4882a593Smuzhiyun 		goto err_garbage;
1336*4882a593Smuzhiyun 	case SVC_SYSERR:
1337*4882a593Smuzhiyun 		rpc_stat = rpc_system_err;
1338*4882a593Smuzhiyun 		goto err_bad;
1339*4882a593Smuzhiyun 	case SVC_DENIED:
1340*4882a593Smuzhiyun 		goto err_bad_auth;
1341*4882a593Smuzhiyun 	case SVC_CLOSE:
1342*4882a593Smuzhiyun 		goto close;
1343*4882a593Smuzhiyun 	case SVC_DROP:
1344*4882a593Smuzhiyun 		goto dropit;
1345*4882a593Smuzhiyun 	case SVC_COMPLETE:
1346*4882a593Smuzhiyun 		goto sendit;
1347*4882a593Smuzhiyun 	}
1348*4882a593Smuzhiyun 
1349*4882a593Smuzhiyun 	if (progp == NULL)
1350*4882a593Smuzhiyun 		goto err_bad_prog;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	rpc_stat = progp->pg_init_request(rqstp, progp, &process);
1353*4882a593Smuzhiyun 	switch (rpc_stat) {
1354*4882a593Smuzhiyun 	case rpc_success:
1355*4882a593Smuzhiyun 		break;
1356*4882a593Smuzhiyun 	case rpc_prog_unavail:
1357*4882a593Smuzhiyun 		goto err_bad_prog;
1358*4882a593Smuzhiyun 	case rpc_prog_mismatch:
1359*4882a593Smuzhiyun 		goto err_bad_vers;
1360*4882a593Smuzhiyun 	case rpc_proc_unavail:
1361*4882a593Smuzhiyun 		goto err_bad_proc;
1362*4882a593Smuzhiyun 	}
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	procp = rqstp->rq_procinfo;
1365*4882a593Smuzhiyun 	/* Should this check go into the dispatcher? */
1366*4882a593Smuzhiyun 	if (!procp || !procp->pc_func)
1367*4882a593Smuzhiyun 		goto err_bad_proc;
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	/* Syntactic check complete */
1370*4882a593Smuzhiyun 	serv->sv_stats->rpccnt++;
1371*4882a593Smuzhiyun 	trace_svc_process(rqstp, progp->pg_name);
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 	/* Build the reply header. */
1374*4882a593Smuzhiyun 	statp = resv->iov_base + resv->iov_len;
1375*4882a593Smuzhiyun 	svc_putnl(resv, RPC_SUCCESS);
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	/* un-reserve some of the out-queue now that we have a
1378*4882a593Smuzhiyun 	 * better idea of reply size
1379*4882a593Smuzhiyun 	 */
1380*4882a593Smuzhiyun 	if (procp->pc_xdrressize)
1381*4882a593Smuzhiyun 		svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	/* Call the function that processes the request. */
1384*4882a593Smuzhiyun 	if (!process.dispatch) {
1385*4882a593Smuzhiyun 		if (!svc_generic_dispatch(rqstp, statp))
1386*4882a593Smuzhiyun 			goto release_dropit;
1387*4882a593Smuzhiyun 		if (*statp == rpc_garbage_args)
1388*4882a593Smuzhiyun 			goto err_garbage;
1389*4882a593Smuzhiyun 		auth_stat = svc_get_autherr(rqstp, statp);
1390*4882a593Smuzhiyun 		if (auth_stat != rpc_auth_ok)
1391*4882a593Smuzhiyun 			goto err_release_bad_auth;
1392*4882a593Smuzhiyun 	} else {
1393*4882a593Smuzhiyun 		dprintk("svc: calling dispatcher\n");
1394*4882a593Smuzhiyun 		if (!process.dispatch(rqstp, statp))
1395*4882a593Smuzhiyun 			goto release_dropit; /* Release reply info */
1396*4882a593Smuzhiyun 	}
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun 	/* Check RPC status result */
1399*4882a593Smuzhiyun 	if (*statp != rpc_success)
1400*4882a593Smuzhiyun 		resv->iov_len = ((void *)statp) - resv->iov_base + 4;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	/* Release reply info */
1403*4882a593Smuzhiyun 	if (procp->pc_release)
1404*4882a593Smuzhiyun 		procp->pc_release(rqstp);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	if (procp->pc_encode == NULL)
1407*4882a593Smuzhiyun 		goto dropit;
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun  sendit:
1410*4882a593Smuzhiyun 	if (svc_authorise(rqstp))
1411*4882a593Smuzhiyun 		goto close_xprt;
1412*4882a593Smuzhiyun 	return 1;		/* Caller can now send it */
1413*4882a593Smuzhiyun 
1414*4882a593Smuzhiyun release_dropit:
1415*4882a593Smuzhiyun 	if (procp->pc_release)
1416*4882a593Smuzhiyun 		procp->pc_release(rqstp);
1417*4882a593Smuzhiyun  dropit:
1418*4882a593Smuzhiyun 	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
1419*4882a593Smuzhiyun 	dprintk("svc: svc_process dropit\n");
1420*4882a593Smuzhiyun 	return 0;
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun  close:
1423*4882a593Smuzhiyun 	svc_authorise(rqstp);
1424*4882a593Smuzhiyun close_xprt:
1425*4882a593Smuzhiyun 	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
1426*4882a593Smuzhiyun 		svc_close_xprt(rqstp->rq_xprt);
1427*4882a593Smuzhiyun 	dprintk("svc: svc_process close\n");
1428*4882a593Smuzhiyun 	return 0;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun err_short_len:
1431*4882a593Smuzhiyun 	svc_printk(rqstp, "short len %zd, dropping request\n",
1432*4882a593Smuzhiyun 			argv->iov_len);
1433*4882a593Smuzhiyun 	goto close_xprt;
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun err_bad_rpc:
1436*4882a593Smuzhiyun 	serv->sv_stats->rpcbadfmt++;
1437*4882a593Smuzhiyun 	svc_putnl(resv, 1);	/* REJECT */
1438*4882a593Smuzhiyun 	svc_putnl(resv, 0);	/* RPC_MISMATCH */
1439*4882a593Smuzhiyun 	svc_putnl(resv, 2);	/* Only RPCv2 supported */
1440*4882a593Smuzhiyun 	svc_putnl(resv, 2);
1441*4882a593Smuzhiyun 	goto sendit;
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun err_release_bad_auth:
1444*4882a593Smuzhiyun 	if (procp->pc_release)
1445*4882a593Smuzhiyun 		procp->pc_release(rqstp);
1446*4882a593Smuzhiyun err_bad_auth:
1447*4882a593Smuzhiyun 	dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
1448*4882a593Smuzhiyun 	serv->sv_stats->rpcbadauth++;
1449*4882a593Smuzhiyun 	/* Restore write pointer to location of accept status: */
1450*4882a593Smuzhiyun 	xdr_ressize_check(rqstp, reply_statp);
1451*4882a593Smuzhiyun 	svc_putnl(resv, 1);	/* REJECT */
1452*4882a593Smuzhiyun 	svc_putnl(resv, 1);	/* AUTH_ERROR */
1453*4882a593Smuzhiyun 	svc_putnl(resv, ntohl(auth_stat));	/* status */
1454*4882a593Smuzhiyun 	goto sendit;
1455*4882a593Smuzhiyun 
1456*4882a593Smuzhiyun err_bad_prog:
1457*4882a593Smuzhiyun 	dprintk("svc: unknown program %d\n", prog);
1458*4882a593Smuzhiyun 	serv->sv_stats->rpcbadfmt++;
1459*4882a593Smuzhiyun 	svc_putnl(resv, RPC_PROG_UNAVAIL);
1460*4882a593Smuzhiyun 	goto sendit;
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun err_bad_vers:
1463*4882a593Smuzhiyun 	svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
1464*4882a593Smuzhiyun 		       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	serv->sv_stats->rpcbadfmt++;
1467*4882a593Smuzhiyun 	svc_putnl(resv, RPC_PROG_MISMATCH);
1468*4882a593Smuzhiyun 	svc_putnl(resv, process.mismatch.lovers);
1469*4882a593Smuzhiyun 	svc_putnl(resv, process.mismatch.hivers);
1470*4882a593Smuzhiyun 	goto sendit;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun err_bad_proc:
1473*4882a593Smuzhiyun 	svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	serv->sv_stats->rpcbadfmt++;
1476*4882a593Smuzhiyun 	svc_putnl(resv, RPC_PROC_UNAVAIL);
1477*4882a593Smuzhiyun 	goto sendit;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun err_garbage:
1480*4882a593Smuzhiyun 	svc_printk(rqstp, "failed to decode args\n");
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	rpc_stat = rpc_garbage_args;
1483*4882a593Smuzhiyun err_bad:
1484*4882a593Smuzhiyun 	serv->sv_stats->rpcbadfmt++;
1485*4882a593Smuzhiyun 	svc_putnl(resv, ntohl(rpc_stat));
1486*4882a593Smuzhiyun 	goto sendit;
1487*4882a593Smuzhiyun }
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun /*
1490*4882a593Smuzhiyun  * Process the RPC request.
1491*4882a593Smuzhiyun  */
1492*4882a593Smuzhiyun int
1493*4882a593Smuzhiyun svc_process(struct svc_rqst *rqstp)
1494*4882a593Smuzhiyun {
1495*4882a593Smuzhiyun 	struct kvec		*argv = &rqstp->rq_arg.head[0];
1496*4882a593Smuzhiyun 	struct kvec		*resv = &rqstp->rq_res.head[0];
1497*4882a593Smuzhiyun 	struct svc_serv		*serv = rqstp->rq_server;
1498*4882a593Smuzhiyun 	u32			dir;
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	/*
1501*4882a593Smuzhiyun 	 * Set up the response xdr_buf.
1502*4882a593Smuzhiyun 	 * Initially it has just one page.
1503*4882a593Smuzhiyun 	 */
1504*4882a593Smuzhiyun 	rqstp->rq_next_page = &rqstp->rq_respages[1];
1505*4882a593Smuzhiyun 	resv->iov_base = page_address(rqstp->rq_respages[0]);
1506*4882a593Smuzhiyun 	resv->iov_len = 0;
1507*4882a593Smuzhiyun 	rqstp->rq_res.pages = rqstp->rq_respages + 1;
1508*4882a593Smuzhiyun 	rqstp->rq_res.len = 0;
1509*4882a593Smuzhiyun 	rqstp->rq_res.page_base = 0;
1510*4882a593Smuzhiyun 	rqstp->rq_res.page_len = 0;
1511*4882a593Smuzhiyun 	rqstp->rq_res.buflen = PAGE_SIZE;
1512*4882a593Smuzhiyun 	rqstp->rq_res.tail[0].iov_base = NULL;
1513*4882a593Smuzhiyun 	rqstp->rq_res.tail[0].iov_len = 0;
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	dir  = svc_getnl(argv);
1516*4882a593Smuzhiyun 	if (dir != 0) {
1517*4882a593Smuzhiyun 		/* direction != CALL */
1518*4882a593Smuzhiyun 		svc_printk(rqstp, "bad direction %d, dropping request\n", dir);
1519*4882a593Smuzhiyun 		serv->sv_stats->rpcbadfmt++;
1520*4882a593Smuzhiyun 		goto out_drop;
1521*4882a593Smuzhiyun 	}
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	/* Returns 1 for send, 0 for drop */
1524*4882a593Smuzhiyun 	if (likely(svc_process_common(rqstp, argv, resv)))
1525*4882a593Smuzhiyun 		return svc_send(rqstp);
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun out_drop:
1528*4882a593Smuzhiyun 	svc_drop(rqstp);
1529*4882a593Smuzhiyun 	return 0;
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_process);
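
/*
 * Illustrative sketch (editorial addition, assuming an nfsd-style pooled
 * server thread; error handling is omitted): svc_process() is normally the
 * second half of a receive/process loop driven by each service thread:
 *
 *	while (!kthread_should_stop()) {
 *		err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
 *		if (err == -EAGAIN || err == -EINTR)
 *			continue;
 *		svc_process(rqstp);
 *	}
 */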
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun #if defined(CONFIG_SUNRPC_BACKCHANNEL)
1534*4882a593Smuzhiyun /*
1535*4882a593Smuzhiyun  * Process a backchannel RPC request that arrived over an existing
1536*4882a593Smuzhiyun  * outbound connection
1537*4882a593Smuzhiyun  */
1538*4882a593Smuzhiyun int
1539*4882a593Smuzhiyun bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1540*4882a593Smuzhiyun 	       struct svc_rqst *rqstp)
1541*4882a593Smuzhiyun {
1542*4882a593Smuzhiyun 	struct kvec	*argv = &rqstp->rq_arg.head[0];
1543*4882a593Smuzhiyun 	struct kvec	*resv = &rqstp->rq_res.head[0];
1544*4882a593Smuzhiyun 	struct rpc_task *task;
1545*4882a593Smuzhiyun 	int proc_error;
1546*4882a593Smuzhiyun 	int error;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	dprintk("svc: %s(%p)\n", __func__, req);
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun 	/* Build the svc_rqst used by the common processing routine */
1551*4882a593Smuzhiyun 	rqstp->rq_xid = req->rq_xid;
1552*4882a593Smuzhiyun 	rqstp->rq_prot = req->rq_xprt->prot;
1553*4882a593Smuzhiyun 	rqstp->rq_server = serv;
1554*4882a593Smuzhiyun 	rqstp->rq_bc_net = req->rq_xprt->xprt_net;
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	rqstp->rq_addrlen = sizeof(req->rq_xprt->addr);
1557*4882a593Smuzhiyun 	memcpy(&rqstp->rq_addr, &req->rq_xprt->addr, rqstp->rq_addrlen);
1558*4882a593Smuzhiyun 	memcpy(&rqstp->rq_arg, &req->rq_rcv_buf, sizeof(rqstp->rq_arg));
1559*4882a593Smuzhiyun 	memcpy(&rqstp->rq_res, &req->rq_snd_buf, sizeof(rqstp->rq_res));
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	/* Adjust the argument buffer length */
1562*4882a593Smuzhiyun 	rqstp->rq_arg.len = req->rq_private_buf.len;
1563*4882a593Smuzhiyun 	if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len) {
1564*4882a593Smuzhiyun 		rqstp->rq_arg.head[0].iov_len = rqstp->rq_arg.len;
1565*4882a593Smuzhiyun 		rqstp->rq_arg.page_len = 0;
1566*4882a593Smuzhiyun 	} else if (rqstp->rq_arg.len <= rqstp->rq_arg.head[0].iov_len +
1567*4882a593Smuzhiyun 			rqstp->rq_arg.page_len)
1568*4882a593Smuzhiyun 		rqstp->rq_arg.page_len = rqstp->rq_arg.len -
1569*4882a593Smuzhiyun 			rqstp->rq_arg.head[0].iov_len;
1570*4882a593Smuzhiyun 	else
1571*4882a593Smuzhiyun 		rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
1572*4882a593Smuzhiyun 			rqstp->rq_arg.page_len;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	/* reset result send buffer "put" position */
1575*4882a593Smuzhiyun 	resv->iov_len = 0;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	/*
1578*4882a593Smuzhiyun 	 * Skip the next two words because they've already been
1579*4882a593Smuzhiyun 	 * processed in the transport
1580*4882a593Smuzhiyun 	 */
1581*4882a593Smuzhiyun 	svc_getu32(argv);	/* XID */
1582*4882a593Smuzhiyun 	svc_getnl(argv);	/* CALLDIR */
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	/* Parse and execute the bc call */
1585*4882a593Smuzhiyun 	proc_error = svc_process_common(rqstp, argv, resv);
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	atomic_dec(&req->rq_xprt->bc_slot_count);
1588*4882a593Smuzhiyun 	if (!proc_error) {
1589*4882a593Smuzhiyun 		/* Processing error: drop the request */
1590*4882a593Smuzhiyun 		xprt_free_bc_request(req);
1591*4882a593Smuzhiyun 		error = -EINVAL;
1592*4882a593Smuzhiyun 		goto out;
1593*4882a593Smuzhiyun 	}
1594*4882a593Smuzhiyun 	/* Finally, send the reply synchronously */
1595*4882a593Smuzhiyun 	memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf));
1596*4882a593Smuzhiyun 	task = rpc_run_bc_task(req);
1597*4882a593Smuzhiyun 	if (IS_ERR(task)) {
1598*4882a593Smuzhiyun 		error = PTR_ERR(task);
1599*4882a593Smuzhiyun 		goto out;
1600*4882a593Smuzhiyun 	}
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	WARN_ON_ONCE(atomic_read(&task->tk_count) != 1);
1603*4882a593Smuzhiyun 	error = task->tk_status;
1604*4882a593Smuzhiyun 	rpc_put_task(task);
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun out:
1607*4882a593Smuzhiyun 	dprintk("svc: %s(), error=%d\n", __func__, error);
1608*4882a593Smuzhiyun 	return error;
1609*4882a593Smuzhiyun }
1610*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(bc_svc_process);
1611*4882a593Smuzhiyun #endif /* CONFIG_SUNRPC_BACKCHANNEL */
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun /*
1614*4882a593Smuzhiyun  * Return (transport-specific) limit on the rpc payload.
1615*4882a593Smuzhiyun  */
1616*4882a593Smuzhiyun u32 svc_max_payload(const struct svc_rqst *rqstp)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	if (rqstp->rq_server->sv_max_payload < max)
1621*4882a593Smuzhiyun 		max = rqstp->rq_server->sv_max_payload;
1622*4882a593Smuzhiyun 	return max;
1623*4882a593Smuzhiyun }
1624*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_max_payload);
1625*4882a593Smuzhiyun 
1626*4882a593Smuzhiyun /**
1627*4882a593Smuzhiyun  * svc_encode_read_payload - mark a range of bytes as a READ payload
1628*4882a593Smuzhiyun  * @rqstp: svc_rqst to operate on
1629*4882a593Smuzhiyun  * @offset: payload's byte offset in rqstp->rq_res
1630*4882a593Smuzhiyun  * @length: size of payload, in bytes
1631*4882a593Smuzhiyun  *
1632*4882a593Smuzhiyun  * Returns zero on success, or a negative errno if a permanent
1633*4882a593Smuzhiyun  * error occurred.
1634*4882a593Smuzhiyun  */
1635*4882a593Smuzhiyun int svc_encode_read_payload(struct svc_rqst *rqstp, unsigned int offset,
1636*4882a593Smuzhiyun 			    unsigned int length)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	return rqstp->rq_xprt->xpt_ops->xpo_read_payload(rqstp, offset, length);
1639*4882a593Smuzhiyun }
1640*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_encode_read_payload);
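
/*
 * Illustrative sketch (editorial addition; payload_offset and payload_len
 * are hypothetical bookkeeping kept by the encoder): a READ reply encoder
 * calls this after placing file data in rq_res, so that payload-aware
 * transports (e.g. svcrdma) can send that byte range directly:
 *
 *	err = svc_encode_read_payload(rqstp, payload_offset, payload_len);
 */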
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun /**
1643*4882a593Smuzhiyun  * svc_fill_write_vector - Construct data argument for VFS write call
1644*4882a593Smuzhiyun  * @rqstp: svc_rqst to operate on
1645*4882a593Smuzhiyun  * @pages: list of pages containing data payload
1646*4882a593Smuzhiyun  * @first: buffer containing first section of write payload
1647*4882a593Smuzhiyun  * @total: total number of bytes of write payload
1648*4882a593Smuzhiyun  *
1649*4882a593Smuzhiyun  * Fills in rqstp::rq_vec, and returns the number of elements.
1650*4882a593Smuzhiyun  */
1651*4882a593Smuzhiyun unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
1652*4882a593Smuzhiyun 				   struct kvec *first, size_t total)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun 	struct kvec *vec = rqstp->rq_vec;
1655*4882a593Smuzhiyun 	unsigned int i;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 	/* Some types of transport can present the write payload
1658*4882a593Smuzhiyun 	 * entirely in rq_arg.pages. In this case, @first is empty.
1659*4882a593Smuzhiyun 	 */
1660*4882a593Smuzhiyun 	i = 0;
1661*4882a593Smuzhiyun 	if (first->iov_len) {
1662*4882a593Smuzhiyun 		vec[i].iov_base = first->iov_base;
1663*4882a593Smuzhiyun 		vec[i].iov_len = min_t(size_t, total, first->iov_len);
1664*4882a593Smuzhiyun 		total -= vec[i].iov_len;
1665*4882a593Smuzhiyun 		++i;
1666*4882a593Smuzhiyun 	}
1667*4882a593Smuzhiyun 
1668*4882a593Smuzhiyun 	while (total) {
1669*4882a593Smuzhiyun 		vec[i].iov_base = page_address(*pages);
1670*4882a593Smuzhiyun 		vec[i].iov_len = min_t(size_t, total, PAGE_SIZE);
1671*4882a593Smuzhiyun 		total -= vec[i].iov_len;
1672*4882a593Smuzhiyun 		++i;
1673*4882a593Smuzhiyun 		++pages;
1674*4882a593Smuzhiyun 	}
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	WARN_ON_ONCE(i > ARRAY_SIZE(rqstp->rq_vec));
1677*4882a593Smuzhiyun 	return i;
1678*4882a593Smuzhiyun }
1679*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_fill_write_vector);
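
/*
 * Illustrative sketch (editorial addition, loosely following how a WRITE
 * handler consumes rq_vec; variable names are hypothetical):
 *
 *	nvecs = svc_fill_write_vector(rqstp, payload_pages, &payload_head, cnt);
 *	iov_iter_kvec(&iter, WRITE, rqstp->rq_vec, nvecs, cnt);
 *	ret = vfs_iter_write(file, &iter, &pos, flags);
 */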
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun /**
1682*4882a593Smuzhiyun  * svc_fill_symlink_pathname - Construct pathname argument for VFS symlink call
1683*4882a593Smuzhiyun  * @rqstp: svc_rqst to operate on
1684*4882a593Smuzhiyun  * @first: buffer containing first section of pathname
1685*4882a593Smuzhiyun  * @p: buffer containing remaining section of pathname
1686*4882a593Smuzhiyun  * @total: total length of the pathname argument
1687*4882a593Smuzhiyun  *
1688*4882a593Smuzhiyun  * The VFS symlink API demands a NUL-terminated pathname in mapped memory.
1689*4882a593Smuzhiyun  * Returns pointer to a NUL-terminated string, or an ERR_PTR. Caller must free
1690*4882a593Smuzhiyun  * the returned string.
1691*4882a593Smuzhiyun  */
1692*4882a593Smuzhiyun char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first,
1693*4882a593Smuzhiyun 				void *p, size_t total)
1694*4882a593Smuzhiyun {
1695*4882a593Smuzhiyun 	size_t len, remaining;
1696*4882a593Smuzhiyun 	char *result, *dst;
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun 	result = kmalloc(total + 1, GFP_KERNEL);
1699*4882a593Smuzhiyun 	if (!result)
1700*4882a593Smuzhiyun 		return ERR_PTR(-ESERVERFAULT);
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	dst = result;
1703*4882a593Smuzhiyun 	remaining = total;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	len = min_t(size_t, total, first->iov_len);
1706*4882a593Smuzhiyun 	if (len) {
1707*4882a593Smuzhiyun 		memcpy(dst, first->iov_base, len);
1708*4882a593Smuzhiyun 		dst += len;
1709*4882a593Smuzhiyun 		remaining -= len;
1710*4882a593Smuzhiyun 	}
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	if (remaining) {
1713*4882a593Smuzhiyun 		len = min_t(size_t, remaining, PAGE_SIZE);
1714*4882a593Smuzhiyun 		memcpy(dst, p, len);
1715*4882a593Smuzhiyun 		dst += len;
1716*4882a593Smuzhiyun 	}
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	*dst = '\0';
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun 	/* Sanity check: Linux doesn't allow the pathname argument to
1721*4882a593Smuzhiyun 	 * contain a NUL byte.
1722*4882a593Smuzhiyun 	 */
1723*4882a593Smuzhiyun 	if (strlen(result) != total) {
1724*4882a593Smuzhiyun 		kfree(result);
1725*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
1726*4882a593Smuzhiyun 	}
1727*4882a593Smuzhiyun 	return result;
1728*4882a593Smuzhiyun }
1729*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(svc_fill_symlink_pathname);
1730