// SPDX-License-Identifier: GPL-2.0-or-later
/* Service connection management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include "ar-internal.h"

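/* A static placeholder bundle for service connections.  Each preallocated
 * service conn takes a ref on this rather than getting a client bundle of
 * its own.
 */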
static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
	.ref		= REFCOUNT_INIT(1),
	.debug_id	= UINT_MAX,
	.channel_lock	= __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
};

/*
 * Find a service connection under RCU conditions.
 *
 * We could use a hash table, but that is subject to bucket stuffing by an
 * attacker as the client gets to pick the epoch and cid values and would know
 * the hash function. So, instead, we use a hash table for the peer and from
 * that an rbtree to find the service connection. Under ordinary circumstances
 * it might be slower than a large hash table, but it is at least limited in
 * depth.
 */
struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
						      struct sk_buff *skb)
{
	struct rxrpc_connection *conn = NULL;
	struct rxrpc_conn_proto k;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rb_node *p;
	unsigned int seq = 0;

	k.epoch = sp->hdr.epoch;
	k.cid = sp->hdr.cid & RXRPC_CIDMASK;
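	/* epoch and cid are unioned with index_key in struct rxrpc_conn_proto,
	 * so the tree can be searched on a single key value.
	 */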

	do {
		/* Unfortunately, rbtree walking doesn't give reliable results
		 * under just the RCU read lock, so we have to check for
		 * changes.
		 */
		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);

		p = rcu_dereference_raw(peer->service_conns.rb_node);
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, service_node);

			if (conn->proto.index_key < k.index_key)
				p = rcu_dereference_raw(p->rb_left);
			else if (conn->proto.index_key > k.index_key)
				p = rcu_dereference_raw(p->rb_right);
			else
				break;
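			/* Not a match at this node: clear conn before
			 * descending so that a failed search returns NULL.
			 */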
			conn = NULL;
		}
	} while (need_seqretry(&peer->service_conn_lock, seq));

	done_seqretry(&peer->service_conn_lock, seq);
	_leave(" = %d", conn ? conn->debug_id : -1);
	return conn;
}

/*
 * Insert a service connection into a peer's tree, thereby making it a target
 * for incoming packets.
 */
static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
				       struct rxrpc_connection *conn)
{
	struct rxrpc_connection *cursor = NULL;
	struct rxrpc_conn_proto k = conn->proto;
	struct rb_node **pp, *parent;

	write_seqlock_bh(&peer->service_conn_lock);

	pp = &peer->service_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		cursor = rb_entry(parent,
				  struct rxrpc_connection, service_node);

		if (cursor->proto.index_key < k.index_key)
			pp = &(*pp)->rb_left;
		else if (cursor->proto.index_key > k.index_key)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_conn;
	}

	rb_link_node_rcu(&conn->service_node, parent, pp);
	rb_insert_color(&conn->service_node, &peer->service_conns);
conn_published:
	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
	write_sequnlock_bh(&peer->service_conn_lock);
	_leave(" = %d [new]", conn->debug_id);
	return;

found_extant_conn:
	if (refcount_read(&cursor->ref) == 0)
		goto replace_old_connection;
	write_sequnlock_bh(&peer->service_conn_lock);
	/* We should not be able to get here. rxrpc_incoming_connection() is
	 * called in a non-reentrant context, so there can't be a race to
	 * insert a new connection.
	 */
	BUG();

replace_old_connection:
	/* The old connection is from an outdated epoch. */
	_debug("replace conn");
	rb_replace_node_rcu(&cursor->service_node,
			    &conn->service_node,
			    &peer->service_conns);
	clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &cursor->flags);
	goto conn_published;
}

/*
 * Preallocate a service connection. The connection is placed on the proc and
 * reap lists so that we don't have to get the lock from BH context.
 */
struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxnet,
							    gfp_t gfp)
{
	struct rxrpc_connection *conn = rxrpc_alloc_connection(gfp);

	if (conn) {
		/* We maintain an extra ref on the connection whilst it is on
		 * the rxrpc_connections list.
		 */
		conn->state = RXRPC_CONN_SERVICE_PREALLOC;
		refcount_set(&conn->ref, 2);
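		/* Service connections don't use client bundles; pin the
		 * shared dummy bundle instead.
		 */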
		conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);

		atomic_inc(&rxnet->nr_conns);
		write_lock(&rxnet->conn_lock);
		list_add_tail(&conn->link, &rxnet->service_conns);
		list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
		write_unlock(&rxnet->conn_lock);

		trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
				 refcount_read(&conn->ref),
				 __builtin_return_address(0));
	}

	return conn;
}

/*
 * Set up an incoming connection. This is called in BH context with the RCU
 * read lock held.
 */
void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
				   struct rxrpc_connection *conn,
				   const struct rxrpc_security *sec,
				   struct key *key,
				   struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_enter("");

	conn->proto.epoch = sp->hdr.epoch;
	conn->proto.cid = sp->hdr.cid & RXRPC_CIDMASK;
	conn->params.service_id = sp->hdr.serviceId;
	conn->service_id = sp->hdr.serviceId;
	conn->security_ix = sp->hdr.securityIndex;
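	/* This is the server side, so outgoing packets don't carry the
	 * client-initiated flag.
	 */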
	conn->out_clientflag = 0;
	conn->security = sec;
	conn->server_key = key_get(key);
	if (conn->security_ix)
		conn->state = RXRPC_CONN_SERVICE_UNSECURED;
	else
		conn->state = RXRPC_CONN_SERVICE;

	/* See if we should upgrade the service. This can only happen on the
	 * first packet on a new connection. Once done, it applies to all
	 * subsequent calls on that connection.
	 */
	if (sp->hdr.userStatus == RXRPC_USERSTATUS_SERVICE_UPGRADE &&
	    conn->service_id == rx->service_upgrade.from)
		conn->service_id = rx->service_upgrade.to;

	/* Make the connection a target for incoming packets. */
	rxrpc_publish_service_conn(conn->params.peer, conn);

	_net("CONNECTION new %d {%x}", conn->debug_id, conn->proto.cid);
}

/*
 * Remove the service connection from the peer's tree, thereby removing it as a
 * target for incoming packets.
 */
void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer = conn->params.peer;

	write_seqlock_bh(&peer->service_conn_lock);
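	/* Only erase the node if this conn is still published; a conn that
	 * was replaced in the tree has already had its bit cleared.
	 */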
	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
		rb_erase(&conn->service_node, &peer->service_conns);
	write_sequnlock_bh(&peer->service_conn_lock);
}