// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2020  B.A.T.M.A.N. contributors:
 *
 * Edo Monticelli, Antonio Quartulli
 */

#include "tp_meter.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/param.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "hard-interface.h"
#include "log.h"
#include "netlink.h"
#include "originator.h"
#include "send.h"

/**
 * BATADV_TP_DEF_TEST_LENGTH - Default test length if not specified by the user
 *  in milliseconds
 */
#define BATADV_TP_DEF_TEST_LENGTH 10000

/**
 * BATADV_TP_AWND - Advertised window by the receiver (in bytes)
 */
#define BATADV_TP_AWND 0x20000000

/**
 * BATADV_TP_RECV_TIMEOUT - Receiver activity timeout. If the receiver does not
 *  receive anything for this amount of milliseconds, the connection is killed
 */
#define BATADV_TP_RECV_TIMEOUT 1000

/**
 * BATADV_TP_MAX_RTO - Maximum sender timeout. If the sender RTO exceeds this
 * amount of milliseconds, the receiver is considered unreachable and the
 * connection is killed
 */
#define BATADV_TP_MAX_RTO 30000

/**
 * BATADV_TP_FIRST_SEQ - First seqno of each session. The number is rather high
 *  in order to immediately trigger a wrap around (test purposes)
 */
#define BATADV_TP_FIRST_SEQ ((u32)-1 - 2000)

/**
 * BATADV_TP_PLEN - length of the payload (data after the batadv_unicast header)
 *  to simulate
 */
#define BATADV_TP_PLEN (BATADV_TP_PACKET_LEN - ETH_HLEN - \
			sizeof(struct batadv_unicast_packet))

static u8 batadv_tp_prerandom[4096] __read_mostly;

/**
 * batadv_tp_session_cookie() - generate session cookie based on session ids
 * @session: TP session identifier
 * @icmp_uid: icmp pseudo uid of the tp session
 *
 * Return: 32 bit tp_meter session cookie
 */
static u32 batadv_tp_session_cookie(const u8 session[2], u8 icmp_uid)
{
	u32 cookie;

	cookie = icmp_uid << 16;
	cookie |= session[0] << 8;
	cookie |= session[1];

	return cookie;
}

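/* For illustration: the cookie packs icmp_uid into bits 23..16, session[0]
 * into bits 15..8 and session[1] into bits 7..0. E.g. icmp_uid = 0x12 and
 * session = {0x34, 0x56} yield the cookie 0x00123456.
 */
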
/**
 * batadv_tp_cwnd() - compute the new cwnd size
 * @base: base cwnd size value
 * @increment: the value to add to base to get the new size
 * @min: minimum cwnd value (usually MSS)
 *
 * Return the new cwnd size and ensure it does not exceed the Advertised
 * Receiver Window size. It is wrapped around safely.
 * For details refer to Section 3.1 of RFC5681
 *
 * Return: new congestion window size in bytes
 */
static u32 batadv_tp_cwnd(u32 base, u32 increment, u32 min)
{
	u32 new_size = base + increment;

	/* check for wrap-around */
	if (new_size < base)
		new_size = (u32)ULONG_MAX;

	new_size = min_t(u32, new_size, BATADV_TP_AWND);

	return max_t(u32, new_size, min);
}

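/* Worked example (illustrative only): with base = 0xfffffff0 and
 * increment = 0x20 the u32 sum wraps, so new_size saturates to
 * (u32)ULONG_MAX before being clamped to BATADV_TP_AWND (0x20000000);
 * the result is never allowed below @min, which callers pass as the MSS.
 */
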
/**
 * batadv_tp_update_cwnd() - update the Congestion Window
 * @tp_vars: the private data of the current TP meter session
 * @mss: maximum segment size of transmission
 *
 * 1) if the session is in Slow Start, the CWND has to be increased by 1
 * MSS every unique received ACK
 * 2) if the session is in Congestion Avoidance, the CWND has to be
 * increased by MSS * MSS / CWND for every unique received ACK
 */
static void batadv_tp_update_cwnd(struct batadv_tp_vars *tp_vars, u32 mss)
{
	spin_lock_bh(&tp_vars->cwnd_lock);

	/* slow start... */
	if (tp_vars->cwnd <= tp_vars->ss_threshold) {
		tp_vars->dec_cwnd = 0;
		tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
		spin_unlock_bh(&tp_vars->cwnd_lock);
		return;
	}

	/* increment CWND at least of 1 (section 3.1 of RFC5681) */
	tp_vars->dec_cwnd += max_t(u32, 1U << 3,
				   ((mss * mss) << 6) / (tp_vars->cwnd << 3));
	if (tp_vars->dec_cwnd < (mss << 3)) {
		spin_unlock_bh(&tp_vars->cwnd_lock);
		return;
	}

	tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);
	tp_vars->dec_cwnd = 0;

	spin_unlock_bh(&tp_vars->cwnd_lock);
}

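/* Illustrative note: in congestion avoidance dec_cwnd accumulates the per-ACK
 * growth mss * mss / cwnd in 1/8 byte units (hence the << 3 scaling); cwnd is
 * only grown by one MSS once a full MSS worth of credit (mss << 3) has been
 * accumulated, following section 3.1 of RFC5681.
 */
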
/**
 * batadv_tp_update_rto() - calculate new retransmission timeout
 * @tp_vars: the private data of the current TP meter session
 * @new_rtt: new roundtrip time in msec
 */
static void batadv_tp_update_rto(struct batadv_tp_vars *tp_vars,
				 u32 new_rtt)
{
	long m = new_rtt;

	/* RTT update
	 * Details in Section 2.2 and 2.3 of RFC6298
	 *
	 * It's tricky to understand. Don't lose hair please.
	 * Inspired by tcp_rtt_estimator() tcp_input.c
	 */
	if (tp_vars->srtt != 0) {
		m -= (tp_vars->srtt >> 3); /* m is now error in rtt est */
		tp_vars->srtt += m; /* rtt = 7/8 srtt + 1/8 new */
		if (m < 0)
			m = -m;

		m -= (tp_vars->rttvar >> 2);
		tp_vars->rttvar += m; /* mdev ~= 3/4 rttvar + 1/4 new */
	} else {
		/* first measure getting in */
		tp_vars->srtt = m << 3;	/* take the measured time to be srtt */
		tp_vars->rttvar = m << 1; /* new_rtt / 2 */
	}

	/* rto = srtt + 4 * rttvar.
	 * rttvar is scaled by 4, therefore doesn't need to be multiplied
	 */
	tp_vars->rto = (tp_vars->srtt >> 3) + tp_vars->rttvar;
}

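/* Worked example (illustrative only): srtt is kept scaled by 8 and rttvar by
 * 4, as in tcp_rtt_estimator(). A first sample of 40ms sets srtt = 320 and
 * rttvar = 80, hence rto = (320 >> 3) + 80 = 120ms, i.e. SRTT + 4 * RTTVAR
 * from RFC6298 with the scaling already folded in.
 */
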
/**
 * batadv_tp_batctl_notify() - send client status result to client
 * @reason: reason for tp meter session stop
 * @dst: destination of tp_meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @start_time: start of transmission in jiffies
 * @total_sent: bytes acked to the receiver
 * @cookie: cookie of tp_meter session
 */
static void batadv_tp_batctl_notify(enum batadv_tp_meter_reason reason,
				    const u8 *dst, struct batadv_priv *bat_priv,
				    unsigned long start_time, u64 total_sent,
				    u32 cookie)
{
	u32 test_time;
	u8 result;
	u32 total_bytes;

	if (!batadv_tp_is_error(reason)) {
		result = BATADV_TP_REASON_COMPLETE;
		test_time = jiffies_to_msecs(jiffies - start_time);
		total_bytes = total_sent;
	} else {
		result = reason;
		test_time = 0;
		total_bytes = 0;
	}

	batadv_netlink_tpmeter_notify(bat_priv, dst, result, test_time,
				      total_bytes, cookie);
}

/**
 * batadv_tp_batctl_error_notify() - send client error result to client
 * @reason: reason for tp meter session stop
 * @dst: destination of tp_meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @cookie: cookie of tp_meter session
 */
static void batadv_tp_batctl_error_notify(enum batadv_tp_meter_reason reason,
					  const u8 *dst,
					  struct batadv_priv *bat_priv,
					  u32 cookie)
{
	batadv_tp_batctl_notify(reason, dst, bat_priv, 0, 0, cookie);
}

/**
 * batadv_tp_list_find() - find a tp_vars object in the global list
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the other endpoint MAC address to look for
 *
 * Look for a tp_vars object matching dst as end_point and return it after
 * having incremented the refcounter. Return NULL if not found
 *
 * Return: matching tp_vars or NULL when no tp_vars with @dst was found
 */
static struct batadv_tp_vars *batadv_tp_list_find(struct batadv_priv *bat_priv,
						  const u8 *dst)
{
	struct batadv_tp_vars *pos, *tp_vars = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
		if (!batadv_compare_eth(pos->other_end, dst))
			continue;

		/* most of the time this function is invoked during the normal
		 * process..it makes sense to pay more when the session is
		 * finished and to speed the process up during the measurement
		 */
		if (unlikely(!kref_get_unless_zero(&pos->refcount)))
			continue;

		tp_vars = pos;
		break;
	}
	rcu_read_unlock();

	return tp_vars;
}

/**
 * batadv_tp_list_find_session() - find tp_vars session object in the global
 *  list
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the other endpoint MAC address to look for
 * @session: session identifier
 *
 * Look for a tp_vars object matching dst as end_point, session as tp meter
 * session and return it after having incremented the refcounter. Return NULL
 * if not found
 *
 * Return: matching tp_vars or NULL when no tp_vars was found
 */
static struct batadv_tp_vars *
batadv_tp_list_find_session(struct batadv_priv *bat_priv, const u8 *dst,
			    const u8 *session)
{
	struct batadv_tp_vars *pos, *tp_vars = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(pos, &bat_priv->tp_list, list) {
		if (!batadv_compare_eth(pos->other_end, dst))
			continue;

		if (memcmp(pos->session, session, sizeof(pos->session)) != 0)
			continue;

		/* most of the time this function is invoked during the normal
		 * process..it makes sense to pay more when the session is
		 * finished and to speed the process up during the measurement
		 */
		if (unlikely(!kref_get_unless_zero(&pos->refcount)))
			continue;

		tp_vars = pos;
		break;
	}
	rcu_read_unlock();

	return tp_vars;
}

/**
 * batadv_tp_vars_release() - release batadv_tp_vars from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the batadv_tp_vars
 */
static void batadv_tp_vars_release(struct kref *ref)
{
	struct batadv_tp_vars *tp_vars;
	struct batadv_tp_unacked *un, *safe;

	tp_vars = container_of(ref, struct batadv_tp_vars, refcount);

	/* lock should not be needed because this object is now out of any
	 * context!
	 */
	spin_lock_bh(&tp_vars->unacked_lock);
	list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
		list_del(&un->list);
		kfree(un);
	}
	spin_unlock_bh(&tp_vars->unacked_lock);

	kfree_rcu(tp_vars, rcu);
}

/**
 * batadv_tp_vars_put() - decrement the batadv_tp_vars refcounter and possibly
 *  release it
 * @tp_vars: the private data of the current TP meter session to be free'd
 */
static void batadv_tp_vars_put(struct batadv_tp_vars *tp_vars)
{
	if (!tp_vars)
		return;

	kref_put(&tp_vars->refcount, batadv_tp_vars_release);
}

/**
 * batadv_tp_sender_cleanup() - clean up sender data and drop the timer
 * @bat_priv: the bat priv with all the soft interface information
 * @tp_vars: the private data of the current TP meter session to cleanup
 */
static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
				     struct batadv_tp_vars *tp_vars)
{
	cancel_delayed_work(&tp_vars->finish_work);

	spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
	hlist_del_rcu(&tp_vars->list);
	spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);

	/* drop list reference */
	batadv_tp_vars_put(tp_vars);

	atomic_dec(&tp_vars->bat_priv->tp_num);

	/* kill the timer and remove its reference */
	del_timer_sync(&tp_vars->timer);
	/* the worker might have rearmed itself therefore we kill it again. Note
	 * that if the worker should run again before invoking the following
	 * del_timer(), it would not re-arm itself once again because the status
	 * is OFF now
	 */
	del_timer(&tp_vars->timer);
	batadv_tp_vars_put(tp_vars);
}

/**
 * batadv_tp_sender_end() - print info about ended session and inform client
 * @bat_priv: the bat priv with all the soft interface information
 * @tp_vars: the private data of the current TP meter session
 */
static void batadv_tp_sender_end(struct batadv_priv *bat_priv,
				 struct batadv_tp_vars *tp_vars)
{
	u32 session_cookie;

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "Test towards %pM finished..shutting down (reason=%d)\n",
		   tp_vars->other_end, tp_vars->reason);

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "Last timing stats: SRTT=%ums RTTVAR=%ums RTO=%ums\n",
		   tp_vars->srtt >> 3, tp_vars->rttvar >> 2, tp_vars->rto);

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "Final values: cwnd=%u ss_threshold=%u\n",
		   tp_vars->cwnd, tp_vars->ss_threshold);

	session_cookie = batadv_tp_session_cookie(tp_vars->session,
						  tp_vars->icmp_uid);

	batadv_tp_batctl_notify(tp_vars->reason,
				tp_vars->other_end,
				bat_priv,
				tp_vars->start_time,
				atomic64_read(&tp_vars->tot_sent),
				session_cookie);
}

/**
 * batadv_tp_sender_shutdown() - let sender thread/timer stop gracefully
 * @tp_vars: the private data of the current TP meter session
 * @reason: reason for tp meter session stop
 */
static void batadv_tp_sender_shutdown(struct batadv_tp_vars *tp_vars,
				      enum batadv_tp_meter_reason reason)
{
	if (!atomic_dec_and_test(&tp_vars->sending))
		return;

	tp_vars->reason = reason;
}

/**
 * batadv_tp_sender_finish() - stop sender session after test_length was reached
 * @work: delayed work reference of the related tp_vars
 */
static void batadv_tp_sender_finish(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_tp_vars *tp_vars;

	delayed_work = to_delayed_work(work);
	tp_vars = container_of(delayed_work, struct batadv_tp_vars,
			       finish_work);

	batadv_tp_sender_shutdown(tp_vars, BATADV_TP_REASON_COMPLETE);
}

/**
 * batadv_tp_reset_sender_timer() - reschedule the sender timer
 * @tp_vars: the private TP meter data for this session
 *
 * Reschedule the timer using tp_vars->rto as delay
 */
static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars)
{
	/* most of the time this function is invoked during normal packet
	 * reception...
	 */
	if (unlikely(atomic_read(&tp_vars->sending) == 0))
		/* timer ref will be dropped in batadv_tp_sender_cleanup */
		return;

	mod_timer(&tp_vars->timer, jiffies + msecs_to_jiffies(tp_vars->rto));
}

/**
 * batadv_tp_sender_timeout() - timer that fires in case of packet loss
 * @t: address to timer_list inside tp_vars
 *
 * If fired it means that there was packet loss.
 * Switch to Slow Start, set the ss_threshold to half of the current cwnd and
 * reset the cwnd to 3*MSS
 */
static void batadv_tp_sender_timeout(struct timer_list *t)
{
	struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
	struct batadv_priv *bat_priv = tp_vars->bat_priv;

	if (atomic_read(&tp_vars->sending) == 0)
		return;

	/* if the user waited long enough...shutdown the test */
	if (unlikely(tp_vars->rto >= BATADV_TP_MAX_RTO)) {
		batadv_tp_sender_shutdown(tp_vars,
					  BATADV_TP_REASON_DST_UNREACHABLE);
		return;
	}

	/* RTO exponential backoff
	 * Details in Section 5.5 of RFC6298
	 */
	tp_vars->rto <<= 1;

	spin_lock_bh(&tp_vars->cwnd_lock);

	tp_vars->ss_threshold = tp_vars->cwnd >> 1;
	if (tp_vars->ss_threshold < BATADV_TP_PLEN * 2)
		tp_vars->ss_threshold = BATADV_TP_PLEN * 2;

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "Meter: RTO fired during test towards %pM! cwnd=%u new ss_thr=%u, resetting last_sent to %u\n",
		   tp_vars->other_end, tp_vars->cwnd, tp_vars->ss_threshold,
		   atomic_read(&tp_vars->last_acked));

	tp_vars->cwnd = BATADV_TP_PLEN * 3;

	spin_unlock_bh(&tp_vars->cwnd_lock);

	/* resend the non-ACKed packets.. */
	tp_vars->last_sent = atomic_read(&tp_vars->last_acked);
	wake_up(&tp_vars->more_bytes);

	batadv_tp_reset_sender_timer(tp_vars);
}

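/* Example of the timeout path: with cwnd = 40000 bytes when the timer fires,
 * ss_threshold becomes 20000 (but never less than 2 * BATADV_TP_PLEN), cwnd
 * is reset to 3 * BATADV_TP_PLEN, rto doubles (RFC6298 section 5.5) and the
 * sender thread resumes from the last acked sequence number.
 */
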
/**
 * batadv_tp_fill_prerandom() - Fill buffer with prefetched random bytes
 * @tp_vars: the private TP meter data for this session
 * @buf: Buffer to fill with bytes
 * @nbytes: amount of pseudorandom bytes
 */
static void batadv_tp_fill_prerandom(struct batadv_tp_vars *tp_vars,
				     u8 *buf, size_t nbytes)
{
	u32 local_offset;
	size_t bytes_inbuf;
	size_t to_copy;
	size_t pos = 0;

	spin_lock_bh(&tp_vars->prerandom_lock);
	local_offset = tp_vars->prerandom_offset;
	tp_vars->prerandom_offset += nbytes;
	tp_vars->prerandom_offset %= sizeof(batadv_tp_prerandom);
	spin_unlock_bh(&tp_vars->prerandom_lock);

	while (nbytes) {
		local_offset %= sizeof(batadv_tp_prerandom);
		bytes_inbuf = sizeof(batadv_tp_prerandom) - local_offset;
		to_copy = min(nbytes, bytes_inbuf);

		memcpy(&buf[pos], &batadv_tp_prerandom[local_offset], to_copy);
		pos += to_copy;
		nbytes -= to_copy;
		local_offset = 0;
	}
}

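/* Illustrative note: the copy wraps around the 4096 byte batadv_tp_prerandom
 * buffer, so a request crossing the end (e.g. offset 4000, nbytes 1400) is
 * served as two chunks: 96 bytes from the tail followed by the remaining
 * 1304 bytes from the beginning.
 */
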
/**
 * batadv_tp_send_msg() - send a single message
 * @tp_vars: the private TP meter data for this session
 * @src: source mac address
 * @orig_node: the originator of the destination
 * @seqno: sequence number of this packet
 * @len: length of the entire packet
 * @session: session identifier
 * @uid: local ICMP "socket" index
 * @timestamp: timestamp in jiffies which is replied in ack
 *
 * Create and send a single TP Meter message.
 *
 * Return: 0 on success, BATADV_TP_REASON_MEMORY_ERROR if the packet couldn't
 * be allocated, or BATADV_TP_REASON_CANT_SEND if it couldn't be sent
 */
static int batadv_tp_send_msg(struct batadv_tp_vars *tp_vars, const u8 *src,
			      struct batadv_orig_node *orig_node,
			      u32 seqno, size_t len, const u8 *session,
			      int uid, u32 timestamp)
{
	struct batadv_icmp_tp_packet *icmp;
	struct sk_buff *skb;
	int r;
	u8 *data;
	size_t data_len;

	skb = netdev_alloc_skb_ip_align(NULL, len + ETH_HLEN);
	if (unlikely(!skb))
		return BATADV_TP_REASON_MEMORY_ERROR;

	skb_reserve(skb, ETH_HLEN);
	icmp = skb_put(skb, sizeof(*icmp));

	/* fill the icmp header */
	ether_addr_copy(icmp->dst, orig_node->orig);
	ether_addr_copy(icmp->orig, src);
	icmp->version = BATADV_COMPAT_VERSION;
	icmp->packet_type = BATADV_ICMP;
	icmp->ttl = BATADV_TTL;
	icmp->msg_type = BATADV_TP;
	icmp->uid = uid;

	icmp->subtype = BATADV_TP_MSG;
	memcpy(icmp->session, session, sizeof(icmp->session));
	icmp->seqno = htonl(seqno);
	icmp->timestamp = htonl(timestamp);

	data_len = len - sizeof(*icmp);
	data = skb_put(skb, data_len);
	batadv_tp_fill_prerandom(tp_vars, data, data_len);

	r = batadv_send_skb_to_orig(skb, orig_node, NULL);
	if (r == NET_XMIT_SUCCESS)
		return 0;

	return BATADV_TP_REASON_CANT_SEND;
}

/**
 * batadv_tp_recv_ack() - ACK receiving function
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the buffer containing the received packet
 *
 * Process a received TP ACK packet
 */
static void batadv_tp_recv_ack(struct batadv_priv *bat_priv,
			       const struct sk_buff *skb)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	const struct batadv_icmp_tp_packet *icmp;
	struct batadv_tp_vars *tp_vars;
	size_t packet_len, mss;
	u32 rtt, recv_ack, cwnd;
	unsigned char *dev_addr;

	packet_len = BATADV_TP_PLEN;
	mss = BATADV_TP_PLEN;
	packet_len += sizeof(struct batadv_unicast_packet);

	icmp = (struct batadv_icmp_tp_packet *)skb->data;

	/* find the tp_vars */
	tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
					      icmp->session);
	if (unlikely(!tp_vars))
		return;

	if (unlikely(atomic_read(&tp_vars->sending) == 0))
		goto out;

	/* old ACK? silently drop it.. */
	if (batadv_seq_before(ntohl(icmp->seqno),
			      (u32)atomic_read(&tp_vars->last_acked)))
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (unlikely(!primary_if))
		goto out;

	orig_node = batadv_orig_hash_find(bat_priv, icmp->orig);
	if (unlikely(!orig_node))
		goto out;

	/* update RTO with the new sampled RTT, if any */
	rtt = jiffies_to_msecs(jiffies) - ntohl(icmp->timestamp);
	if (icmp->timestamp && rtt)
		batadv_tp_update_rto(tp_vars, rtt);

	/* ACK for new data... reset the timer */
	batadv_tp_reset_sender_timer(tp_vars);

	recv_ack = ntohl(icmp->seqno);

	/* check if this ACK is a duplicate */
	if (atomic_read(&tp_vars->last_acked) == recv_ack) {
		atomic_inc(&tp_vars->dup_acks);
		if (atomic_read(&tp_vars->dup_acks) != 3)
			goto out;

		if (recv_ack >= tp_vars->recover)
			goto out;

		/* if this is the third duplicate ACK do Fast Retransmit */
		batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
				   orig_node, recv_ack, packet_len,
				   icmp->session, icmp->uid,
				   jiffies_to_msecs(jiffies));

		spin_lock_bh(&tp_vars->cwnd_lock);

		/* Fast Recovery */
		tp_vars->fast_recovery = true;
		/* Set recover to the last outstanding seqno when Fast Recovery
		 * is entered. RFC6582, Section 3.2, step 1
		 */
		tp_vars->recover = tp_vars->last_sent;
		tp_vars->ss_threshold = tp_vars->cwnd >> 1;
		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
			   "Meter: Fast Recovery, (cur cwnd=%u) ss_thr=%u last_sent=%u recv_ack=%u\n",
			   tp_vars->cwnd, tp_vars->ss_threshold,
			   tp_vars->last_sent, recv_ack);
		tp_vars->cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 3 * mss,
					       mss);
		tp_vars->dec_cwnd = 0;
		tp_vars->last_sent = recv_ack;

		spin_unlock_bh(&tp_vars->cwnd_lock);
	} else {
		/* count the acked data */
		atomic64_add(recv_ack - atomic_read(&tp_vars->last_acked),
			     &tp_vars->tot_sent);
		/* reset the duplicate ACKs counter */
		atomic_set(&tp_vars->dup_acks, 0);

		if (tp_vars->fast_recovery) {
			/* partial ACK */
			if (batadv_seq_before(recv_ack, tp_vars->recover)) {
				/* this is another hole in the window. React
				 * immediately as specified by NewReno (see
				 * Section 3.2 of RFC6582 for details)
				 */
				dev_addr = primary_if->net_dev->dev_addr;
				batadv_tp_send_msg(tp_vars, dev_addr,
						   orig_node, recv_ack,
						   packet_len, icmp->session,
						   icmp->uid,
						   jiffies_to_msecs(jiffies));
				tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd,
							       mss, mss);
			} else {
				tp_vars->fast_recovery = false;
				/* set cwnd to the value of ss_threshold at the
				 * moment that Fast Recovery was entered.
				 * RFC6582, Section 3.2, step 3
				 */
				cwnd = batadv_tp_cwnd(tp_vars->ss_threshold, 0,
						      mss);
				tp_vars->cwnd = cwnd;
			}
			goto move_twnd;
		}

		if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)
			batadv_tp_update_cwnd(tp_vars, mss);
move_twnd:
		/* move the Transmit Window */
		atomic_set(&tp_vars->last_acked, recv_ack);
	}

	wake_up(&tp_vars->more_bytes);
out:
	if (likely(primary_if))
		batadv_hardif_put(primary_if);
	if (likely(orig_node))
		batadv_orig_node_put(orig_node);
	if (likely(tp_vars))
		batadv_tp_vars_put(tp_vars);
}

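/* Summary of the fast recovery handling above: the third duplicate ACK (when
 * allowed by the recover check, RFC6582 section 3.2 step 1) triggers a fast
 * retransmit, recover is set to last_sent and fast recovery is entered; while
 * in fast recovery a partial ACK below recover retransmits the missing
 * segment immediately, and an ACK at or beyond recover leaves fast recovery
 * and deflates cwnd to the saved ss_threshold (step 3).
 */
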
/**
 * batadv_tp_avail() - check if congestion window is not full
 * @tp_vars: the private data of the current TP meter session
 * @payload_len: size of the payload of a single message
 *
 * Return: true when congestion window is not full, false otherwise
 */
static bool batadv_tp_avail(struct batadv_tp_vars *tp_vars,
			    size_t payload_len)
{
	u32 win_left, win_limit;

	win_limit = atomic_read(&tp_vars->last_acked) + tp_vars->cwnd;
	win_left = win_limit - tp_vars->last_sent;

	return win_left >= payload_len;
}

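/* Illustrative note: win_limit and win_left rely on unsigned wrap-around, so
 * the check stays correct when the sequence space wraps (which the high
 * BATADV_TP_FIRST_SEQ start value triggers on purpose). E.g. with
 * last_acked = 0xfffffff0, cwnd = 0x100 and last_sent = 0x40,
 * win_left = 0xfffffff0 + 0x100 - 0x40 = 0xb0 bytes are still available.
 */
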
/**
 * batadv_tp_wait_available() - wait until congestion window becomes free or
 *  timeout is reached
 * @tp_vars: the private data of the current TP meter session
 * @plen: size of the payload of a single message
 *
 * Return: 0 if the condition evaluated to false after the timeout elapsed,
 *  1 if the condition evaluated to true after the timeout elapsed, the
 *  remaining jiffies (at least 1) if the condition evaluated to true before
 *  the timeout elapsed, or -ERESTARTSYS if it was interrupted by a signal.
 */
static int batadv_tp_wait_available(struct batadv_tp_vars *tp_vars, size_t plen)
{
	int ret;

	ret = wait_event_interruptible_timeout(tp_vars->more_bytes,
					       batadv_tp_avail(tp_vars, plen),
					       HZ / 10);

	return ret;
}

/**
 * batadv_tp_send() - main sending thread of a tp meter session
 * @arg: address of the related tp_vars
 *
 * Return: nothing, this function never returns
 */
static int batadv_tp_send(void *arg)
{
	struct batadv_tp_vars *tp_vars = arg;
	struct batadv_priv *bat_priv = tp_vars->bat_priv;
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_orig_node *orig_node = NULL;
	size_t payload_len, packet_len;
	int err = 0;

	if (unlikely(tp_vars->role != BATADV_TP_SENDER)) {
		err = BATADV_TP_REASON_DST_UNREACHABLE;
		tp_vars->reason = err;
		goto out;
	}

	orig_node = batadv_orig_hash_find(bat_priv, tp_vars->other_end);
	if (unlikely(!orig_node)) {
		err = BATADV_TP_REASON_DST_UNREACHABLE;
		tp_vars->reason = err;
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (unlikely(!primary_if)) {
		err = BATADV_TP_REASON_DST_UNREACHABLE;
		tp_vars->reason = err;
		goto out;
	}

	/* assume that all the hard_interfaces have a correctly
	 * configured MTU, so use the soft_iface MTU as MSS.
	 * This might not be true and in that case the fragmentation
	 * should be used.
	 * Now, try to send the packet as it is
	 */
	payload_len = BATADV_TP_PLEN;
	BUILD_BUG_ON(sizeof(struct batadv_icmp_tp_packet) > BATADV_TP_PLEN);

	batadv_tp_reset_sender_timer(tp_vars);

	/* queue the worker in charge of terminating the test */
	queue_delayed_work(batadv_event_workqueue, &tp_vars->finish_work,
			   msecs_to_jiffies(tp_vars->test_length));

	while (atomic_read(&tp_vars->sending) != 0) {
		if (unlikely(!batadv_tp_avail(tp_vars, payload_len))) {
			batadv_tp_wait_available(tp_vars, payload_len);
			continue;
		}

		/* to emulate normal unicast traffic, add to the payload len
		 * the size of the unicast header
		 */
		packet_len = payload_len + sizeof(struct batadv_unicast_packet);

		err = batadv_tp_send_msg(tp_vars, primary_if->net_dev->dev_addr,
					 orig_node, tp_vars->last_sent,
					 packet_len,
					 tp_vars->session, tp_vars->icmp_uid,
					 jiffies_to_msecs(jiffies));

		/* something went wrong during the preparation/transmission */
		if (unlikely(err && err != BATADV_TP_REASON_CANT_SEND)) {
			batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
				   "Meter: %s() cannot send packets (%d)\n",
				   __func__, err);
			/* ensure nobody else tries to stop the thread now */
			if (atomic_dec_and_test(&tp_vars->sending))
				tp_vars->reason = err;
			break;
		}

		/* right-shift the TWND */
		if (!err)
			tp_vars->last_sent += payload_len;

		cond_resched();
	}

out:
	if (likely(primary_if))
		batadv_hardif_put(primary_if);
	if (likely(orig_node))
		batadv_orig_node_put(orig_node);

	batadv_tp_sender_end(bat_priv, tp_vars);
	batadv_tp_sender_cleanup(bat_priv, tp_vars);

	batadv_tp_vars_put(tp_vars);

	do_exit(0);
}

/**
 * batadv_tp_start_kthread() - start new thread which manages the tp meter
 *  sender
 * @tp_vars: the private data of the current TP meter session
 */
static void batadv_tp_start_kthread(struct batadv_tp_vars *tp_vars)
{
	struct task_struct *kthread;
	struct batadv_priv *bat_priv = tp_vars->bat_priv;
	u32 session_cookie;

	kref_get(&tp_vars->refcount);
	kthread = kthread_create(batadv_tp_send, tp_vars, "kbatadv_tp_meter");
	if (IS_ERR(kthread)) {
		session_cookie = batadv_tp_session_cookie(tp_vars->session,
							  tp_vars->icmp_uid);
		pr_err("batadv: cannot create tp meter kthread\n");
		batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
					      tp_vars->other_end,
					      bat_priv, session_cookie);

		/* drop reserved reference for kthread */
		batadv_tp_vars_put(tp_vars);

		/* cleanup of failed tp meter variables */
		batadv_tp_sender_cleanup(bat_priv, tp_vars);
		return;
	}

	wake_up_process(kthread);
}

/**
 * batadv_tp_start() - start a new tp meter session
 * @bat_priv: the bat priv with all the soft interface information
 * @dst: the receiver MAC address
 * @test_length: test length in milliseconds
 * @cookie: session cookie
 */
void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
		     u32 test_length, u32 *cookie)
{
	struct batadv_tp_vars *tp_vars;
	u8 session_id[2];
	u8 icmp_uid;
	u32 session_cookie;

	get_random_bytes(session_id, sizeof(session_id));
	get_random_bytes(&icmp_uid, 1);
	session_cookie = batadv_tp_session_cookie(session_id, icmp_uid);
	*cookie = session_cookie;

	/* look for an already existing test towards this node */
	spin_lock_bh(&bat_priv->tp_list_lock);
	tp_vars = batadv_tp_list_find(bat_priv, dst);
	if (tp_vars) {
		spin_unlock_bh(&bat_priv->tp_list_lock);
		batadv_tp_vars_put(tp_vars);
		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
			   "Meter: test to or from the same node already ongoing, aborting\n");
		batadv_tp_batctl_error_notify(BATADV_TP_REASON_ALREADY_ONGOING,
					      dst, bat_priv, session_cookie);
		return;
	}

	if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
		spin_unlock_bh(&bat_priv->tp_list_lock);
		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
			   "Meter: too many ongoing sessions, aborting (SEND)\n");
		batadv_tp_batctl_error_notify(BATADV_TP_REASON_TOO_MANY, dst,
					      bat_priv, session_cookie);
		return;
	}

	tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
	if (!tp_vars) {
		spin_unlock_bh(&bat_priv->tp_list_lock);
		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
			   "Meter: %s cannot allocate list elements\n",
			   __func__);
		batadv_tp_batctl_error_notify(BATADV_TP_REASON_MEMORY_ERROR,
					      dst, bat_priv, session_cookie);
		return;
	}

	/* initialize tp_vars */
	ether_addr_copy(tp_vars->other_end, dst);
	kref_init(&tp_vars->refcount);
	tp_vars->role = BATADV_TP_SENDER;
	atomic_set(&tp_vars->sending, 1);
	memcpy(tp_vars->session, session_id, sizeof(session_id));
	tp_vars->icmp_uid = icmp_uid;

	tp_vars->last_sent = BATADV_TP_FIRST_SEQ;
	atomic_set(&tp_vars->last_acked, BATADV_TP_FIRST_SEQ);
	tp_vars->fast_recovery = false;
	tp_vars->recover = BATADV_TP_FIRST_SEQ;

	/* initialise the CWND to 3*MSS (Section 3.1 in RFC5681).
	 * For batman-adv the MSS is the size of the payload received by the
	 * soft_interface, hence its MTU
	 */
	tp_vars->cwnd = BATADV_TP_PLEN * 3;
	/* at the beginning initialise the SS threshold to the biggest possible
	 * window size, hence the AWND size
	 */
	tp_vars->ss_threshold = BATADV_TP_AWND;

	/* the initial RTO is set to 1 second here; RFC6298 Section 2.1
	 * suggests 3 seconds, but tp_meter starts more aggressively
	 */
	tp_vars->rto = 1000;
	tp_vars->srtt = 0;
	tp_vars->rttvar = 0;

	atomic64_set(&tp_vars->tot_sent, 0);

	kref_get(&tp_vars->refcount);
	timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);

	tp_vars->bat_priv = bat_priv;
	tp_vars->start_time = jiffies;

	init_waitqueue_head(&tp_vars->more_bytes);

	spin_lock_init(&tp_vars->unacked_lock);
	INIT_LIST_HEAD(&tp_vars->unacked_list);

	spin_lock_init(&tp_vars->cwnd_lock);

	tp_vars->prerandom_offset = 0;
	spin_lock_init(&tp_vars->prerandom_lock);

	kref_get(&tp_vars->refcount);
	hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
	spin_unlock_bh(&bat_priv->tp_list_lock);

	tp_vars->test_length = test_length;
	if (!tp_vars->test_length)
		tp_vars->test_length = BATADV_TP_DEF_TEST_LENGTH;

	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
		   "Meter: starting throughput meter towards %pM (length=%ums)\n",
		   dst, test_length);

	/* init work item for finished tp tests */
	INIT_DELAYED_WORK(&tp_vars->finish_work, batadv_tp_sender_finish);

	/* start tp kthread. This way the write() call issued from userspace can
	 * return quickly and avoid blocking
	 */
	batadv_tp_start_kthread(tp_vars);

	/* don't return reference to new tp_vars */
	batadv_tp_vars_put(tp_vars);
}

1057*4882a593Smuzhiyun /**
1058*4882a593Smuzhiyun  * batadv_tp_stop() - stop currently running tp meter session
1059*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
1060*4882a593Smuzhiyun  * @dst: the receiver MAC address
1061*4882a593Smuzhiyun  * @return_value: reason for tp meter session stop
1062*4882a593Smuzhiyun  */
batadv_tp_stop(struct batadv_priv * bat_priv,const u8 * dst,u8 return_value)1063*4882a593Smuzhiyun void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
1064*4882a593Smuzhiyun 		    u8 return_value)
1065*4882a593Smuzhiyun {
1066*4882a593Smuzhiyun 	struct batadv_orig_node *orig_node;
1067*4882a593Smuzhiyun 	struct batadv_tp_vars *tp_vars;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1070*4882a593Smuzhiyun 		   "Meter: stopping test towards %pM\n", dst);
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	orig_node = batadv_orig_hash_find(bat_priv, dst);
1073*4882a593Smuzhiyun 	if (!orig_node)
1074*4882a593Smuzhiyun 		return;
1075*4882a593Smuzhiyun 
1076*4882a593Smuzhiyun 	tp_vars = batadv_tp_list_find(bat_priv, orig_node->orig);
1077*4882a593Smuzhiyun 	if (!tp_vars) {
1078*4882a593Smuzhiyun 		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1079*4882a593Smuzhiyun 			   "Meter: trying to interrupt an already over connection\n");
1080*4882a593Smuzhiyun 		goto out;
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	batadv_tp_sender_shutdown(tp_vars, return_value);
1084*4882a593Smuzhiyun 	batadv_tp_vars_put(tp_vars);
1085*4882a593Smuzhiyun out:
1086*4882a593Smuzhiyun 	batadv_orig_node_put(orig_node);
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun 
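/* Illustrative sketch only, not part of the driver: a typical caller, e.g. a
 * netlink "cancel" handler, tearing down a running session. The example
 * assumes BATADV_TP_REASON_CANCEL is among the tp meter reason codes defined
 * in <uapi/linux/batadv_packet.h>.
 */
#if 0
static void batadv_tp_cancel_example(struct batadv_priv *bat_priv,
				     const u8 *dst_mac)
{
	/* stop the sender towards dst_mac and report "cancelled" */
	batadv_tp_stop(bat_priv, dst_mac, BATADV_TP_REASON_CANCEL);
}
#endif
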
1089*4882a593Smuzhiyun /**
1090*4882a593Smuzhiyun  * batadv_tp_reset_receiver_timer() - reset the receiver shutdown timer
1091*4882a593Smuzhiyun  * @tp_vars: the private data of the current TP meter session
1092*4882a593Smuzhiyun  *
1093*4882a593Smuzhiyun  * start the receiver shutdown timer or reset it if already started
1094*4882a593Smuzhiyun  */
1095*4882a593Smuzhiyun static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	mod_timer(&tp_vars->timer,
1098*4882a593Smuzhiyun 		  jiffies + msecs_to_jiffies(BATADV_TP_RECV_TIMEOUT));
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun /**
1102*4882a593Smuzhiyun  * batadv_tp_receiver_shutdown() - stop a tp meter receiver when the timeout
1103*4882a593Smuzhiyun  *  is reached without receiving any new data
1104*4882a593Smuzhiyun  * @t: address to timer_list inside tp_vars
1105*4882a593Smuzhiyun  */
1106*4882a593Smuzhiyun static void batadv_tp_receiver_shutdown(struct timer_list *t)
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun 	struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
1109*4882a593Smuzhiyun 	struct batadv_tp_unacked *un, *safe;
1110*4882a593Smuzhiyun 	struct batadv_priv *bat_priv;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	bat_priv = tp_vars->bat_priv;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	/* if there is recent activity rearm the timer */
1115*4882a593Smuzhiyun 	if (!batadv_has_timed_out(tp_vars->last_recv_time,
1116*4882a593Smuzhiyun 				  BATADV_TP_RECV_TIMEOUT)) {
1117*4882a593Smuzhiyun 		/* reset the receiver shutdown timer */
1118*4882a593Smuzhiyun 		batadv_tp_reset_receiver_timer(tp_vars);
1119*4882a593Smuzhiyun 		return;
1120*4882a593Smuzhiyun 	}
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1123*4882a593Smuzhiyun 		   "Shutting down for inactivity (more than %dms) from %pM\n",
1124*4882a593Smuzhiyun 		   BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
1127*4882a593Smuzhiyun 	hlist_del_rcu(&tp_vars->list);
1128*4882a593Smuzhiyun 	spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	/* drop list reference */
1131*4882a593Smuzhiyun 	batadv_tp_vars_put(tp_vars);
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	atomic_dec(&bat_priv->tp_num);
1134*4882a593Smuzhiyun 
1135*4882a593Smuzhiyun 	spin_lock_bh(&tp_vars->unacked_lock);
1136*4882a593Smuzhiyun 	list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
1137*4882a593Smuzhiyun 		list_del(&un->list);
1138*4882a593Smuzhiyun 		kfree(un);
1139*4882a593Smuzhiyun 	}
1140*4882a593Smuzhiyun 	spin_unlock_bh(&tp_vars->unacked_lock);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	/* drop reference of timer */
1143*4882a593Smuzhiyun 	batadv_tp_vars_put(tp_vars);
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun /**
1147*4882a593Smuzhiyun  * batadv_tp_send_ack() - send an ACK packet
1148*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
1149*4882a593Smuzhiyun  * @dst: the mac address of the destination originator
1150*4882a593Smuzhiyun  * @seq: the sequence number to ACK
1151*4882a593Smuzhiyun  * @timestamp: the timestamp to echo back in the ACK
1152*4882a593Smuzhiyun  * @session: session identifier
1153*4882a593Smuzhiyun  * @socket_index: local ICMP socket identifier
1154*4882a593Smuzhiyun  *
1155*4882a593Smuzhiyun  * Return: 0 on success, a positive integer representing the reason for the
1156*4882a593Smuzhiyun  * failure otherwise
1157*4882a593Smuzhiyun  */
1158*4882a593Smuzhiyun static int batadv_tp_send_ack(struct batadv_priv *bat_priv, const u8 *dst,
1159*4882a593Smuzhiyun 			      u32 seq, __be32 timestamp, const u8 *session,
1160*4882a593Smuzhiyun 			      int socket_index)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun 	struct batadv_hard_iface *primary_if = NULL;
1163*4882a593Smuzhiyun 	struct batadv_orig_node *orig_node;
1164*4882a593Smuzhiyun 	struct batadv_icmp_tp_packet *icmp;
1165*4882a593Smuzhiyun 	struct sk_buff *skb;
1166*4882a593Smuzhiyun 	int r, ret;
1167*4882a593Smuzhiyun 
1168*4882a593Smuzhiyun 	orig_node = batadv_orig_hash_find(bat_priv, dst);
1169*4882a593Smuzhiyun 	if (unlikely(!orig_node)) {
1170*4882a593Smuzhiyun 		ret = BATADV_TP_REASON_DST_UNREACHABLE;
1171*4882a593Smuzhiyun 		goto out;
1172*4882a593Smuzhiyun 	}
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	primary_if = batadv_primary_if_get_selected(bat_priv);
1175*4882a593Smuzhiyun 	if (unlikely(!primary_if)) {
1176*4882a593Smuzhiyun 		ret = BATADV_TP_REASON_DST_UNREACHABLE;
1177*4882a593Smuzhiyun 		goto out;
1178*4882a593Smuzhiyun 	}
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	skb = netdev_alloc_skb_ip_align(NULL, sizeof(*icmp) + ETH_HLEN);
1181*4882a593Smuzhiyun 	if (unlikely(!skb)) {
1182*4882a593Smuzhiyun 		ret = BATADV_TP_REASON_MEMORY_ERROR;
1183*4882a593Smuzhiyun 		goto out;
1184*4882a593Smuzhiyun 	}
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 	skb_reserve(skb, ETH_HLEN);
1187*4882a593Smuzhiyun 	icmp = skb_put(skb, sizeof(*icmp));
1188*4882a593Smuzhiyun 	icmp->packet_type = BATADV_ICMP;
1189*4882a593Smuzhiyun 	icmp->version = BATADV_COMPAT_VERSION;
1190*4882a593Smuzhiyun 	icmp->ttl = BATADV_TTL;
1191*4882a593Smuzhiyun 	icmp->msg_type = BATADV_TP;
1192*4882a593Smuzhiyun 	ether_addr_copy(icmp->dst, orig_node->orig);
1193*4882a593Smuzhiyun 	ether_addr_copy(icmp->orig, primary_if->net_dev->dev_addr);
1194*4882a593Smuzhiyun 	icmp->uid = socket_index;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	icmp->subtype = BATADV_TP_ACK;
1197*4882a593Smuzhiyun 	memcpy(icmp->session, session, sizeof(icmp->session));
1198*4882a593Smuzhiyun 	icmp->seqno = htonl(seq);
1199*4882a593Smuzhiyun 	icmp->timestamp = timestamp;
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	/* send the ack */
1202*4882a593Smuzhiyun 	r = batadv_send_skb_to_orig(skb, orig_node, NULL);
1203*4882a593Smuzhiyun 	if (unlikely(r < 0) || r == NET_XMIT_DROP) {
1204*4882a593Smuzhiyun 		ret = BATADV_TP_REASON_DST_UNREACHABLE;
1205*4882a593Smuzhiyun 		goto out;
1206*4882a593Smuzhiyun 	}
1207*4882a593Smuzhiyun 	ret = 0;
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun out:
1210*4882a593Smuzhiyun 	if (likely(orig_node))
1211*4882a593Smuzhiyun 		batadv_orig_node_put(orig_node);
1212*4882a593Smuzhiyun 	if (likely(primary_if))
1213*4882a593Smuzhiyun 		batadv_hardif_put(primary_if);
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	return ret;
1216*4882a593Smuzhiyun }
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun /**
1219*4882a593Smuzhiyun  * batadv_tp_handle_out_of_order() - store an out of order packet
1220*4882a593Smuzhiyun  * @tp_vars: the private data of the current TP meter session
1221*4882a593Smuzhiyun  * @skb: the buffer containing the received packet
1222*4882a593Smuzhiyun  *
1223*4882a593Smuzhiyun  * Store the out-of-order packet in the unacked list for later processing.
1224*4882a593Smuzhiyun  * These packets are kept in this list so that they can be ACKed at once as
1225*4882a593Smuzhiyun  * soon as all the previous packets have been received
1226*4882a593Smuzhiyun  *
1227*4882a593Smuzhiyun  * Return: true if the packet has been successfully processed, false otherwise
1228*4882a593Smuzhiyun  */
1229*4882a593Smuzhiyun static bool batadv_tp_handle_out_of_order(struct batadv_tp_vars *tp_vars,
1230*4882a593Smuzhiyun 					  const struct sk_buff *skb)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	const struct batadv_icmp_tp_packet *icmp;
1233*4882a593Smuzhiyun 	struct batadv_tp_unacked *un, *new;
1234*4882a593Smuzhiyun 	u32 payload_len;
1235*4882a593Smuzhiyun 	bool added = false;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	new = kmalloc(sizeof(*new), GFP_ATOMIC);
1238*4882a593Smuzhiyun 	if (unlikely(!new))
1239*4882a593Smuzhiyun 		return false;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun 	new->seqno = ntohl(icmp->seqno);
1244*4882a593Smuzhiyun 	payload_len = skb->len - sizeof(struct batadv_unicast_packet);
1245*4882a593Smuzhiyun 	new->len = payload_len;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	spin_lock_bh(&tp_vars->unacked_lock);
1248*4882a593Smuzhiyun 	/* if the list is empty immediately attach this new object */
1249*4882a593Smuzhiyun 	if (list_empty(&tp_vars->unacked_list)) {
1250*4882a593Smuzhiyun 		list_add(&new->list, &tp_vars->unacked_list);
1251*4882a593Smuzhiyun 		goto out;
1252*4882a593Smuzhiyun 	}
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	/* otherwise loop over the list and either drop the packet because this
1255*4882a593Smuzhiyun 	 * is a duplicate or store it at the right position.
1256*4882a593Smuzhiyun 	 *
1257*4882a593Smuzhiyun 	 * The iteration is done in the reverse way because it is likely that
1258*4882a593Smuzhiyun 	 * the last received packet (the one being processed now) has a bigger
1259*4882a593Smuzhiyun 	 * seqno than all the others already stored.
1260*4882a593Smuzhiyun 	 */
1261*4882a593Smuzhiyun 	list_for_each_entry_reverse(un, &tp_vars->unacked_list, list) {
1262*4882a593Smuzhiyun 		/* check for duplicates */
1263*4882a593Smuzhiyun 		if (new->seqno == un->seqno) {
1264*4882a593Smuzhiyun 			if (new->len > un->len)
1265*4882a593Smuzhiyun 				un->len = new->len;
1266*4882a593Smuzhiyun 			kfree(new);
1267*4882a593Smuzhiyun 			added = true;
1268*4882a593Smuzhiyun 			break;
1269*4882a593Smuzhiyun 		}
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 		/* look for the right position */
1272*4882a593Smuzhiyun 		if (batadv_seq_before(new->seqno, un->seqno))
1273*4882a593Smuzhiyun 			continue;
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 		/* as soon as an entry having a smaller seqno is found, the
1276*4882a593Smuzhiyun 		 * new one is attached right _after_ it. In this way the list
1277*4882a593Smuzhiyun 		 * is kept in ascending order
1278*4882a593Smuzhiyun 		 */
1279*4882a593Smuzhiyun 		list_add(&new->list, &un->list);
1280*4882a593Smuzhiyun 		added = true;
1281*4882a593Smuzhiyun 		break;
1282*4882a593Smuzhiyun 	}
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 	/* received packet with smallest seqno out of order; add it to front */
1285*4882a593Smuzhiyun 	if (!added)
1286*4882a593Smuzhiyun 		list_add(&new->list, &tp_vars->unacked_list);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun out:
1289*4882a593Smuzhiyun 	spin_unlock_bh(&tp_vars->unacked_lock);
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	return true;
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun 
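/* Worked example for the insertion above (illustrative values): with
 * unacked_list holding seqnos {10, 30, 40} and a packet with seqno 20
 * arriving, the reverse walk skips 40 and 30, stops at the first entry with
 * a smaller seqno (10) and links 20 right after it, keeping the list in
 * ascending order: {10, 20, 30, 40}. A second copy of seqno 20 would only
 * update the stored length (if larger) and then be freed as a duplicate.
 */
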
1294*4882a593Smuzhiyun /**
1295*4882a593Smuzhiyun  * batadv_tp_ack_unordered() - update the number of received bytes in the
1296*4882a593Smuzhiyun  *  current stream without gaps
1297*4882a593Smuzhiyun  * @tp_vars: the private data of the current TP meter session
1298*4882a593Smuzhiyun  */
1299*4882a593Smuzhiyun static void batadv_tp_ack_unordered(struct batadv_tp_vars *tp_vars)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct batadv_tp_unacked *un, *safe;
1302*4882a593Smuzhiyun 	u32 to_ack;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	/* go through the unacked packet list and possibly ACK them as
1305*4882a593Smuzhiyun 	 * well
1306*4882a593Smuzhiyun 	 */
1307*4882a593Smuzhiyun 	spin_lock_bh(&tp_vars->unacked_lock);
1308*4882a593Smuzhiyun 	list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
1309*4882a593Smuzhiyun 		/* the list is ordered, therefore it is possible to stop as soon
1310*4882a593Smuzhiyun 		 * as there is a gap between the last acked seqno and the seqno of
1311*4882a593Smuzhiyun 		 * the packet under inspection
1312*4882a593Smuzhiyun 		 */
1313*4882a593Smuzhiyun 		if (batadv_seq_before(tp_vars->last_recv, un->seqno))
1314*4882a593Smuzhiyun 			break;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 		to_ack = un->seqno + un->len - tp_vars->last_recv;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 		if (batadv_seq_before(tp_vars->last_recv, un->seqno + un->len))
1319*4882a593Smuzhiyun 			tp_vars->last_recv += to_ack;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 		list_del(&un->list);
1322*4882a593Smuzhiyun 		kfree(un);
1323*4882a593Smuzhiyun 	}
1324*4882a593Smuzhiyun 	spin_unlock_bh(&tp_vars->unacked_lock);
1325*4882a593Smuzhiyun }
1326*4882a593Smuzhiyun 
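/* Worked example for the drain above (illustrative values): with
 * last_recv == 100 and unacked_list == {seqno 100/len 50, seqno 180/len 20},
 * the first entry advances last_recv to 150 and is freed; the walk then
 * stops because 150 is still before 180, leaving the range 150..179 un-ACKed
 * until the missing data arrives.
 */
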
1327*4882a593Smuzhiyun /**
1328*4882a593Smuzhiyun  * batadv_tp_init_recv() - return matching or create new receiver tp_vars
1329*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
1330*4882a593Smuzhiyun  * @icmp: received icmp tp msg
1331*4882a593Smuzhiyun  *
1332*4882a593Smuzhiyun  * Return: corresponding tp_vars or NULL on errors
1333*4882a593Smuzhiyun  */
1334*4882a593Smuzhiyun static struct batadv_tp_vars *
1335*4882a593Smuzhiyun batadv_tp_init_recv(struct batadv_priv *bat_priv,
1336*4882a593Smuzhiyun 		    const struct batadv_icmp_tp_packet *icmp)
1337*4882a593Smuzhiyun {
1338*4882a593Smuzhiyun 	struct batadv_tp_vars *tp_vars;
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 	spin_lock_bh(&bat_priv->tp_list_lock);
1341*4882a593Smuzhiyun 	tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
1342*4882a593Smuzhiyun 					      icmp->session);
1343*4882a593Smuzhiyun 	if (tp_vars)
1344*4882a593Smuzhiyun 		goto out_unlock;
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	if (!atomic_add_unless(&bat_priv->tp_num, 1, BATADV_TP_MAX_NUM)) {
1347*4882a593Smuzhiyun 		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1348*4882a593Smuzhiyun 			   "Meter: too many ongoing sessions, aborting (RECV)\n");
1349*4882a593Smuzhiyun 		goto out_unlock;
1350*4882a593Smuzhiyun 	}
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
1353*4882a593Smuzhiyun 	if (!tp_vars)
1354*4882a593Smuzhiyun 		goto out_unlock;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	ether_addr_copy(tp_vars->other_end, icmp->orig);
1357*4882a593Smuzhiyun 	tp_vars->role = BATADV_TP_RECEIVER;
1358*4882a593Smuzhiyun 	memcpy(tp_vars->session, icmp->session, sizeof(tp_vars->session));
1359*4882a593Smuzhiyun 	tp_vars->last_recv = BATADV_TP_FIRST_SEQ;
1360*4882a593Smuzhiyun 	tp_vars->bat_priv = bat_priv;
1361*4882a593Smuzhiyun 	kref_init(&tp_vars->refcount);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	spin_lock_init(&tp_vars->unacked_lock);
1364*4882a593Smuzhiyun 	INIT_LIST_HEAD(&tp_vars->unacked_list);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	kref_get(&tp_vars->refcount);
1367*4882a593Smuzhiyun 	hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	kref_get(&tp_vars->refcount);
1370*4882a593Smuzhiyun 	timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	batadv_tp_reset_receiver_timer(tp_vars);
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun out_unlock:
1375*4882a593Smuzhiyun 	spin_unlock_bh(&bat_priv->tp_list_lock);
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	return tp_vars;
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun /**
1381*4882a593Smuzhiyun  * batadv_tp_recv_msg() - process a single data message
1382*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
1383*4882a593Smuzhiyun  * @skb: the buffer containing the received packet
1384*4882a593Smuzhiyun  *
1385*4882a593Smuzhiyun  * Process a received TP MSG packet
1386*4882a593Smuzhiyun  */
1387*4882a593Smuzhiyun static void batadv_tp_recv_msg(struct batadv_priv *bat_priv,
1388*4882a593Smuzhiyun 			       const struct sk_buff *skb)
1389*4882a593Smuzhiyun {
1390*4882a593Smuzhiyun 	const struct batadv_icmp_tp_packet *icmp;
1391*4882a593Smuzhiyun 	struct batadv_tp_vars *tp_vars;
1392*4882a593Smuzhiyun 	size_t packet_size;
1393*4882a593Smuzhiyun 	u32 seqno;
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 	seqno = ntohl(icmp->seqno);
1398*4882a593Smuzhiyun 	/* check if this is the first seqno. This means that if the
1399*4882a593Smuzhiyun 	 * first packet is lost, the tp meter does not work anymore!
1400*4882a593Smuzhiyun 	 */
1401*4882a593Smuzhiyun 	if (seqno == BATADV_TP_FIRST_SEQ) {
1402*4882a593Smuzhiyun 		tp_vars = batadv_tp_init_recv(bat_priv, icmp);
1403*4882a593Smuzhiyun 		if (!tp_vars) {
1404*4882a593Smuzhiyun 			batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1405*4882a593Smuzhiyun 				   "Meter: cannot initiate connection (receiver init failed)\n");
1406*4882a593Smuzhiyun 			goto out;
1407*4882a593Smuzhiyun 		}
1408*4882a593Smuzhiyun 	} else {
1409*4882a593Smuzhiyun 		tp_vars = batadv_tp_list_find_session(bat_priv, icmp->orig,
1410*4882a593Smuzhiyun 						      icmp->session);
1411*4882a593Smuzhiyun 		if (!tp_vars) {
1412*4882a593Smuzhiyun 			batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1413*4882a593Smuzhiyun 				   "Unexpected packet from %pM!\n",
1414*4882a593Smuzhiyun 				   icmp->orig);
1415*4882a593Smuzhiyun 			goto out;
1416*4882a593Smuzhiyun 		}
1417*4882a593Smuzhiyun 	}
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun 	if (unlikely(tp_vars->role != BATADV_TP_RECEIVER)) {
1420*4882a593Smuzhiyun 		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1421*4882a593Smuzhiyun 			   "Meter: dropping packet: not expected (role=%u)\n",
1422*4882a593Smuzhiyun 			   tp_vars->role);
1423*4882a593Smuzhiyun 		goto out;
1424*4882a593Smuzhiyun 	}
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	tp_vars->last_recv_time = jiffies;
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	/* if the packet is a duplicate, it may be the case that an ACK has been
1429*4882a593Smuzhiyun 	 * lost. Resend the ACK
1430*4882a593Smuzhiyun 	 */
1431*4882a593Smuzhiyun 	if (batadv_seq_before(seqno, tp_vars->last_recv))
1432*4882a593Smuzhiyun 		goto send_ack;
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 	/* if the packet is out of order enqueue it */
1435*4882a593Smuzhiyun 	if (ntohl(icmp->seqno) != tp_vars->last_recv) {
1436*4882a593Smuzhiyun 		/* exit immediately (and do not send any ACK) if the packet has
1437*4882a593Smuzhiyun 		 * not been enqueued correctly
1438*4882a593Smuzhiyun 		 */
1439*4882a593Smuzhiyun 		if (!batadv_tp_handle_out_of_order(tp_vars, skb))
1440*4882a593Smuzhiyun 			goto out;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 		/* send a duplicate ACK */
1443*4882a593Smuzhiyun 		goto send_ack;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	/* if everything was fine count the ACKed bytes */
1447*4882a593Smuzhiyun 	packet_size = skb->len - sizeof(struct batadv_unicast_packet);
1448*4882a593Smuzhiyun 	tp_vars->last_recv += packet_size;
1449*4882a593Smuzhiyun 
1450*4882a593Smuzhiyun 	/* check if this ordered message filled a gap.... */
1451*4882a593Smuzhiyun 	batadv_tp_ack_unordered(tp_vars);
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun send_ack:
1454*4882a593Smuzhiyun 	/* send the ACK. If the received packet was out of order, the ACK that
1455*4882a593Smuzhiyun 	 * is going to be sent is a duplicate (the sender will count them and
1456*4882a593Smuzhiyun 	 * possibly enter Fast Retransmit as soon as it has reached 3)
1457*4882a593Smuzhiyun 	 */
1458*4882a593Smuzhiyun 	batadv_tp_send_ack(bat_priv, icmp->orig, tp_vars->last_recv,
1459*4882a593Smuzhiyun 			   icmp->timestamp, icmp->session, icmp->uid);
1460*4882a593Smuzhiyun out:
1461*4882a593Smuzhiyun 	if (likely(tp_vars))
1462*4882a593Smuzhiyun 		batadv_tp_vars_put(tp_vars);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun 
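/* Illustrative receive trace for the function above: with last_recv == 1000
 * and 1000-byte payloads, an in-order packet with seqno 1000 advances
 * last_recv to 2000 and is ACKed with 2000. If seqno 2000 is then lost, each
 * later packet (3000, 4000, ...) is queued out of order and answered with a
 * duplicate ACK for 2000; once the retransmitted 2000 arrives,
 * batadv_tp_ack_unordered() drains the queue and a single cumulative ACK
 * (e.g. 5000) is sent.
 */
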
1465*4882a593Smuzhiyun /**
1466*4882a593Smuzhiyun  * batadv_tp_meter_recv() - main TP Meter receiving function
1467*4882a593Smuzhiyun  * @bat_priv: the bat priv with all the soft interface information
1468*4882a593Smuzhiyun  * @skb: the buffer containing the received packet
1469*4882a593Smuzhiyun  */
1470*4882a593Smuzhiyun void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb)
1471*4882a593Smuzhiyun {
1472*4882a593Smuzhiyun 	struct batadv_icmp_tp_packet *icmp;
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun 	icmp = (struct batadv_icmp_tp_packet *)skb->data;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	switch (icmp->subtype) {
1477*4882a593Smuzhiyun 	case BATADV_TP_MSG:
1478*4882a593Smuzhiyun 		batadv_tp_recv_msg(bat_priv, skb);
1479*4882a593Smuzhiyun 		break;
1480*4882a593Smuzhiyun 	case BATADV_TP_ACK:
1481*4882a593Smuzhiyun 		batadv_tp_recv_ack(bat_priv, skb);
1482*4882a593Smuzhiyun 		break;
1483*4882a593Smuzhiyun 	default:
1484*4882a593Smuzhiyun 		batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
1485*4882a593Smuzhiyun 			   "Received unknown TP Metric packet type %u\n",
1486*4882a593Smuzhiyun 			   icmp->subtype);
1487*4882a593Smuzhiyun 	}
1488*4882a593Smuzhiyun 	consume_skb(skb);
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun /**
1492*4882a593Smuzhiyun  * batadv_tp_meter_init() - initialize global tp_meter structures
1493*4882a593Smuzhiyun  */
1494*4882a593Smuzhiyun void __init batadv_tp_meter_init(void)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun 	get_random_bytes(batadv_tp_prerandom, sizeof(batadv_tp_prerandom));
1497*4882a593Smuzhiyun }
1498