xref: /OK3568_Linux_fs/kernel/net/rxrpc/conn_client.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There is one flag of relevance to the cache:
 *
 *  (1) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
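
/* With the defaults above, an idle client connection normally lives for two
 * minutes before it is reaped; once more than 900 client connections exist,
 * idle connections are reaped after only two seconds.  A sketch of the
 * selection done in rxrpc_discard_expired_client_conns() below:
 *
 *	expiry = rxrpc_conn_idle_client_expiry;			// 2 * 60 * HZ
 *	if (nr_conns > rxrpc_reap_client_connections)		// > 900
 *		expiry = rxrpc_conn_idle_client_fast_expiry;	// 2 * HZ
 */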

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
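
/* For illustration, assuming RXRPC_CIDSHIFT is 2 (i.e. RXRPC_MAXCALLS == 4
 * channels per connection, per the rxrpc protocol headers): an IDR value of
 * 0x1234 yields conn->proto.cid == 0x48d0, and the four channels on that
 * connection then carry calls with CIDs 0x48d0 through 0x48d3 - the channel
 * number occupies the bottom RXRPC_CIDSHIFT bits of the CID.
 */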

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
					       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->params = *cp;
		rxrpc_get_peer(bundle->params.peer);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		spin_lock_init(&bundle->channel_lock);
		INIT_LIST_HEAD(&bundle->waiting_calls);
	}
	return bundle;
}

struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
{
	refcount_inc(&bundle->ref);
	return bundle;
}

static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	rxrpc_put_peer(bundle->params.peer);
	kfree(bundle);
}

void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
{
	unsigned int d = bundle->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&bundle->ref, &r);

	_debug("PUT B=%x %d", d, r - 1);
	if (dead)
		rxrpc_free_bundle(bundle);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	refcount_set(&conn->ref, 1);
	conn->bundle		= bundle;
	conn->params		= bundle->params;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= conn->params.service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_get_bundle(bundle);
	rxrpc_get_peer(conn->params.peer);
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 refcount_read(&conn->ref),
			 __builtin_return_address(0));

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		goto dont_reuse;

	rxnet = conn->params.local->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->state != RXRPC_CONN_CLIENT ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}
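
/* Worked example of the heuristic above: with 300 connections in total,
 * limit = max(300 * 4, 1024) = 1200, so a connection whose ID sits more than
 * 1200 slots from the IDR cursor is marked DONT_REUSE and gets replaced
 * rather than reused, keeping the ID space compact.
 */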

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
						 gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;

	_enter("{%px,%x,%u,%u}",
	       cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);

	if (cp->exclusive)
		return rxrpc_alloc_bundle(cp, gfp);

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't.  We need to add one. */
	candidate = rxrpc_alloc_bundle(cp, gfp);
	if (!candidate)
		return NULL;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X) ((long)bundle->params.X - (long)cp->X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	rxrpc_get_bundle(candidate);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [new]", candidate->debug_id);
	return candidate;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	rxrpc_get_bundle(bundle);
	atomic_inc(&bundle->active);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = %u [found]", bundle->debug_id);
	return bundle;
}
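
/* A note on the cmp() chains above: the GNU "a ?: b" extension evaluates to
 * a when a is non-zero and to b otherwise, so the first field that differs
 * decides the rb-tree direction.  Expanded, the comparison is equivalent to:
 *
 *	diff = cmp(peer);
 *	if (diff == 0)
 *		diff = cmp(key);
 *	if (diff == 0)
 *		diff = cmp(security_level);
 *	if (diff == 0)
 *		diff = cmp(upgrade);
 */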

/*
 * Create or find a client bundle to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
					    struct rxrpc_call *call,
					    struct rxrpc_conn_parameters *cp,
					    struct sockaddr_rxrpc *srx,
					    gfp_t gfp)
{
	struct rxrpc_bundle *bundle;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;
	if (cp->upgrade)
		__set_bit(RXRPC_CALL_UPGRADE, &call->flags);

	/* Find the client connection bundle. */
	bundle = rxrpc_look_up_bundle(cp, gfp);
	if (!bundle)
		goto error;

	/* Get this call queued.  Someone else may activate it whilst we're
	 * lining up a new connection, but that's fine.
	 */
	spin_lock(&bundle->channel_lock);
	list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
	spin_unlock(&bundle->channel_lock);

	_leave(" = [B=%x]", bundle->debug_id);
	return bundle;

error:
	_leave(" = -ENOMEM");
	return ERR_PTR(-ENOMEM);
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
	__releases(bundle->channel_lock)
{
	struct rxrpc_connection *candidate = NULL, *old = NULL;
	bool conflict;
	int i;

	_enter("");

	conflict = bundle->alloc_conn;
	if (!conflict)
		bundle->alloc_conn = true;
	spin_unlock(&bundle->channel_lock);
	if (conflict) {
		_leave(" [conf]");
		return;
	}

	candidate = rxrpc_alloc_client_connection(bundle, gfp);

	spin_lock(&bundle->channel_lock);
	bundle->alloc_conn = false;

	if (IS_ERR(candidate)) {
		bundle->alloc_error = PTR_ERR(candidate);
		spin_unlock(&bundle->channel_lock);
		_leave(" [err %ld]", PTR_ERR(candidate));
		return;
	}

	bundle->alloc_error = 0;

	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		unsigned int shift = i * RXRPC_MAXCALLS;
		int j;

		old = bundle->conns[i];
		if (!rxrpc_may_reuse_conn(old)) {
			if (old)
				trace_rxrpc_client(old, -1, rxrpc_client_replace);
			candidate->bundle_shift = shift;
			atomic_inc(&bundle->active);
			bundle->conns[i] = candidate;
			for (j = 0; j < RXRPC_MAXCALLS; j++)
				set_bit(shift + j, &bundle->avail_chans);
			candidate = NULL;
			break;
		}

		old = NULL;
	}

	spin_unlock(&bundle->channel_lock);

	if (candidate) {
		_debug("discard C=%x", candidate->debug_id);
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
	}

	rxrpc_put_connection(old);
	_leave("");
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
	struct rxrpc_call *call;
	int i, usable;

	_enter("");

	spin_lock(&bundle->channel_lock);

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;

	if (!usable && !list_empty(&bundle->waiting_calls)) {
		call = list_first_entry(&bundle->waiting_calls,
					struct rxrpc_call, chan_wait_link);
		if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
			bundle->try_upgrade = true;
	}

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    !list_empty(&bundle->waiting_calls) &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;

alloc_conn:
	return rxrpc_add_conn_to_bundle(bundle, gfp);
}
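
/* Put another way: rxrpc_maybe_add_conn() grows the bundle in two cases -
 * either no usable connection exists at all, or every channel is occupied,
 * no service-upgrade probe is in flight, calls are still queued and the
 * bundle has a free connection slot left.
 */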

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->conn	= rxrpc_get_connection(conn);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;
	call->security	= conn->security;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	/* Paired with the read barrier in rxrpc_connect_call().  This orders
	 * cid and epoch in the connection wrt to call_id without the need to
	 * take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();

	chan->call_id		= call_id;
	chan->call_debug_id	= call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	bool drop_ref;

	if (!list_empty(&conn->cache_link)) {
		drop_ref = false;
		spin_lock(&rxnet->client_conn_cache_lock);
		if (!list_empty(&conn->cache_link)) {
			list_del_init(&conn->cache_link);
			drop_ref = true;
		}
		spin_unlock(&rxnet->client_conn_cache_lock);
		if (drop_ref)
			rxrpc_put_connection(conn);
	}
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);

		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(bundle, conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans	|= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}
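
/* Example of the avail_chans bitmap walked above (assuming the usual four
 * slots in bundle->conns, each with RXRPC_MAXCALLS == 4 channels, so bits
 * 0-15 cover the bundle): __ffs() picks the lowest free bit; bit 6, say,
 * selects connection slot 1 (6 / 4) and channel 2 (6 & 3) on that conn.
 */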

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	_enter("B=%x", bundle->debug_id);

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (!bundle->avail_chans)
		return;

	spin_lock(&bundle->channel_lock);
	rxrpc_activate_channels_locked(bundle);
	spin_unlock(&bundle->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
				  struct rxrpc_call *call, gfp_t gfp)
{
	DECLARE_WAITQUEUE(myself, current);
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!gfpflags_allow_blocking(gfp)) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error ?: -EAGAIN;
		goto out;
	}

	add_wait_queue_exclusive(&call->waitq, &myself);
	for (;;) {
		rxrpc_maybe_add_conn(bundle, gfp);
		rxrpc_activate_channels(bundle);
		ret = bundle->alloc_error;
		if (ret < 0)
			break;

		switch (call->interruptibility) {
		case RXRPC_INTERRUPTIBLE:
		case RXRPC_PREINTERRUPTIBLE:
			set_current_state(TASK_INTERRUPTIBLE);
			break;
		case RXRPC_UNINTERRUPTIBLE:
		default:
			set_current_state(TASK_UNINTERRUPTIBLE);
			break;
		}
		if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
			break;
		if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
		     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
		    signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	remove_wait_queue(&call->waitq, &myself);
	__set_current_state(TASK_RUNNING);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret = 0;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);

	bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
	if (IS_ERR(bundle)) {
		ret = PTR_ERR(bundle);
		goto out;
	}

	if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = rxrpc_wait_for_channel(bundle, call, gfp);
		if (ret < 0)
			goto wait_failed;
	}

granted_channel:
	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out_put_bundle:
	rxrpc_deactivate_bundle(bundle);
	rxrpc_put_bundle(bundle);
out:
	_leave(" = %d", ret);
	return ret;

wait_failed:
	spin_lock(&bundle->channel_lock);
	list_del_init(&call->chan_wait_link);
	spin_unlock(&bundle->channel_lock);

	if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
		ret = 0;
		goto granted_channel;
	}

	trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
	rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
	rxrpc_disconnect_client_call(bundle, call);
	goto out_put_bundle;
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	if (!rxnet->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

		if (rxnet->live)
			timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
	}
}
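
/* Note that timer_reduce() only ever brings the timer's expiry forward, so
 * concurrent callers cannot push a pending reap further into the future -
 * the earliest requested expiry wins.
 */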

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = bundle->params.local->rxnet;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	spin_lock(&bundle->channel_lock);
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);
		goto out;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&bundle->channel_lock);
		BUG();
	}

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels_locked(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	rcu_assign_pointer(chan->call, NULL);
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans	&= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while.  Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn);
		spin_lock(&rxnet->client_conn_cache_lock);
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		spin_unlock(&rxnet->client_conn_cache_lock);

		rxrpc_set_client_reap_timer(rxnet);
	}

out:
	spin_unlock(&bundle->channel_lock);
	_leave("");
	return;
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	bool need_drop = false;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	spin_lock(&bundle->channel_lock);
	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		need_drop = true;
	}
	spin_unlock(&bundle->channel_lock);

	if (need_drop) {
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn);
	}
}

/*
 * Drop the active count on a bundle.
 */
static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local = bundle->params.local;
	bool need_put = false;

	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->params.exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle);
	}
}
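
/* atomic_dec_and_lock() takes client_bundles_lock only when the active count
 * reaches zero, so erasing the bundle from the rb-tree here is serialised
 * against rxrpc_look_up_bundle() taking a new active reference under the
 * same lock.
 */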

/*
 * Clean up a dead client connection.
 */
static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(conn);
	rxrpc_kill_connection(conn);
}

/*
 * Drop a ref on a client connection, cleaning it up when the last ref is gone.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	bool dead;
	int r;

	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, r - 1, here);
	if (dead)
		rxrpc_kill_client_conn(conn);
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&rxnet->nr_client_conns);

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	rxrpc_unbundle_conn(conn);
	rxrpc_put_connection(conn); /* Drop the ->cache_link ref */

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			list_move(&conn->cache_link, &graveyard);
		}
	}

	spin_unlock(&rxnet->client_conn_cache_lock);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}