// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket, and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
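			/* The IPv4 address is taken from the last four bytes
			 * of the reported address, i.e. it is assumed to be
			 * an IPv4-mapped IPv6 address (::ffff:a.b.c.d).
			 */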
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
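		/* A local endpoint only ever binds an IPv4 or IPv6 transport
		 * address, so any other family here indicates corruption.
		 */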
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%u)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
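		/* Halve a large MTU down towards the common 1500-byte
		 * Ethernet MTU; back a small one off by 100 bytes, but never
		 * go below the header size plus a little room for data.
		 */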
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	rcu_read_lock();
	local = rcu_dereference_sk_user_data(sk);
	if (unlikely(!local)) {
		rcu_read_unlock();
		return;
	}
	_enter("%p{%d}", sk, local->debug_id);

	/* Clear the outstanding error value on the socket so that it doesn't
	 * cause kernel_sendmsg() to return it later.
	 */
	sock_error(sk);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		rcu_read_unlock();
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_received);
	serr = SKB_EXT_ERR(skb);
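	/* A zero-length timestamping report carries no error to process. */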
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
	}

	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
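	/* Only take a reference if the peer's usage count hasn't already
	 * dropped to zero; a peer that is being destroyed must be ignored.
	 */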
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
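		/* EACCES from ICMP6 (e.g. an administratively-prohibited
		 * report) is presented to callers as an unreachable host.
		 */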
		if (err == EACCES)
			err = EHOSTUNREACH;
		fallthrough;
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
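		/* The positive errno from the report is negated here to
		 * match the kernel's negative-errno convention.
		 */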
		rxrpc_set_call_completion(call, compl, 0, -error);
	}
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		if (__rxrpc_use_local(peer->local)) {
			spin_unlock_bh(&rxnet->peer_hash_lock);

			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

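			/* If the keepalive deadline has already passed, or if
			 * it somehow lies beyond one full period, ping the
			 * peer now and reschedule it a whole period ahead.
			 */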
			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since last we
			 * examined it so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			rxrpc_unuse_local(peer->local);
		}
		rxrpc_put_peer_locked(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
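	/* The (s8) cast of the difference makes the comparison safe against
	 * the 8-bit cursor wrapping as it advances round the wheel.
	 */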
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}