// OK3568_Linux_fs/kernel/net/ipv4/udp_tunnel_nic.c
// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020 Facebook Inc.

#include <linux/ethtool_netlink.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>

enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD	= BIT(0),
	UDP_TUNNEL_NIC_ENTRY_DEL	= BIT(1),
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL	= BIT(2),
	UDP_TUNNEL_NIC_ENTRY_FROZEN	= BIT(3),
};

struct udp_tunnel_nic_table_entry {
	__be16 port;
	u8 type;
	u8 flags;
	u16 use_cnt;
#define UDP_TUNNEL_NIC_USE_CNT_MAX	U16_MAX
	u8 hw_priv;
};

/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port state changed
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which have overflowed
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};

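/* For context: drivers opt into this infrastructure by pointing
 * netdev->udp_tunnel_nic_info at a struct udp_tunnel_nic_info (declared in
 * include/net/udp_tunnel.h) before register_netdev().  A minimal sketch of
 * what such a driver-side table description could look like; the mynic_*
 * names below are purely illustrative, not a real driver:
 *
 *	static const struct udp_tunnel_nic_info mynic_udp_tunnels = {
 *		.set_port	= mynic_udp_tunnel_set_port,
 *		.unset_port	= mynic_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
 *		.tables		= {
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
 *			{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
 *		},
 *	};
 *
 * The NETDEV_REGISTER handler at the bottom of this file then allocates the
 * struct udp_tunnel_nic above to shadow those hardware tables.
 */
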
/* We ensure all work structs are done using driver state, but not the code.
 * We need a workqueue we can flush before module gets removed.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;

static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	case UDP_TUNNEL_TYPE_VXLAN_GPE:
		return "vxlan-gpe";
	default:
		return "unknown";
	}
}

static bool
udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt == 0 && !entry->flags;
}

static bool
udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
}

static bool
udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
{
	if (!udp_tunnel_nic_entry_is_free(entry))
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}

static bool
udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
{
	return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
			       UDP_TUNNEL_NIC_ENTRY_DEL);
}

static void
udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
			   struct udp_tunnel_nic_table_entry *entry,
			   unsigned int flag)
{
	entry->flags |= flag;
	utn->need_sync = 1;
}

static void
udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
			     struct udp_tunnel_info *ti)
{
	memset(ti, 0, sizeof(*ti));
	ti->port = entry->port;
	ti->type = entry->type;
	ti->hw_priv = entry->hw_priv;
}

static bool
udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return false;
	return true;
}

static bool
udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	if (!utn->missed)
		return false;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!test_bit(i, &utn->missed))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
				return true;
	}

	return false;
}

static void
__udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			  unsigned int idx, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	entry = &utn->entries[table][idx];

	if (entry->use_cnt)
		udp_tunnel_nic_ti_from_entry(entry, ti);
}

static void
__udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			       unsigned int idx, u8 priv)
{
	dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
}

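/* A note on the OP_FAIL ("dodgy") handling below: an entry whose last
 * hardware op failed stays queued and is retried on the next sync.  Because
 * the device may in fact have applied the op before reporting the failure,
 * a retried add that comes back -EEXIST (or a retried delete that comes back
 * -ENOENT) is accepted as completion of the queued op; OP_FAIL itself is
 * only cleared once an op returns 0.
 */
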
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}

static void
udp_tunnel_nic_device_sync_one(struct net_device *dev,
			       struct udp_tunnel_nic *utn,
			       unsigned int table, unsigned int idx)
{
	struct udp_tunnel_nic_table_entry *entry;
	struct udp_tunnel_info ti;
	int err;

	entry = &utn->entries[table][idx];
	if (!udp_tunnel_nic_entry_is_queued(entry))
		return;

	udp_tunnel_nic_ti_from_entry(entry, &ti);
	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
		err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
	else
		err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
							   &ti);
	udp_tunnel_nic_entry_update_done(entry, err);

	if (err)
		netdev_warn(dev,
			    "UDP tunnel port sync failed port %d type %s: %d\n",
			    be16_to_cpu(entry->port),
			    udp_tunnel_nic_tunnel_type_name(entry->type),
			    err);
}

static void
udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
				   struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_device_sync_one(dev, utn, i, j);
}

static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}

static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel driver's
	 * notification - trying to replay may deadlock inside tunnel driver.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

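/* A note on the two sync paths: udp_tunnel_nic_device_sync() below calls the
 * driver directly when the callbacks are safe in atomic context, but for
 * drivers flagged UDP_TUNNEL_NIC_INFO_MAY_SLEEP (and whenever a replay is
 * pending) the actual work is pushed to udp_tunnel_nic_workqueue and redone
 * under rtnl_lock() in udp_tunnel_nic_device_sync_work().
 */
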
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in the callback need to update from
	 * the workqueue, if we come from the tunnel driver's notification.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}

static bool
udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
				struct udp_tunnel_info *ti)
{
	return table->tunnel_types & ti->type;
}

static bool
udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
			  struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i;

	/* Special case IPv4-only NICs */
	if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
	    ti->sa_family != AF_INET)
		return false;

	for (i = 0; i < utn->n_tables; i++)
		if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
			return true;
	return false;
}

static int
udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
			     struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_table_entry *entry;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			entry = &utn->entries[i][j];

			if (!udp_tunnel_nic_entry_is_free(entry) &&
			    entry->port == ti->port &&
			    entry->type != ti->type) {
				__set_bit(i, &utn->missed);
				return true;
			}
		}
	return false;
}

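/* A worked example for the use count logic below: bumping an entry from
 * use_cnt 1 to 2 changes nothing in hardware, so udp_tunnel_nic_entry_adj()
 * returns early; dropping 1 -> 0 queues a DEL, unless an ADD was still
 * sitting unsent in which case the two simply cancel out (and vice versa
 * for 0 -> 1).  Entries marked OP_FAIL are re-queued on any adjustment so a
 * failed device op gets another attempt.
 */
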
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}

static bool
udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
			     unsigned int table, unsigned int idx,
			     struct udp_tunnel_info *ti, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];

	if (udp_tunnel_nic_entry_is_free(entry) ||
	    entry->port != ti->port ||
	    entry->type != ti->type)
		return false;

	if (udp_tunnel_nic_entry_is_frozen(entry))
		return true;

	udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
	return true;
}

/* Try to find existing matching entry and adjust its use count, instead of
 * adding a new one. Returns true if entry was found. In case of delete the
 * entry may have gotten removed in the process, in which case it will be
 * queued for removal.
 */
static bool
udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti, int use_cnt_adj)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++)
			if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
							 use_cnt_adj))
				return true;
	}

	return false;
}

static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}

static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}

static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* The different table may still fit this port in, but there
		 * are no devices currently which have multiple tables accepting
		 * the same tunnel type, and false positives are okay.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}

static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
	    ti->port == htons(IANA_VXLAN_UDP_PORT)) {
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
		return;
	}

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and different
	 * tunnel type tries to reuse its port before the device was informed.
	 * Rely on utn->missed to re-add this port later.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void
__udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	udp_tunnel_nic_del_existing(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}

static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* We don't release rtnl across ops */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}

static size_t
__udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int j;
	size_t size;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	size = 0;
	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}

static int
__udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			    struct sk_buff *skb)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	struct nlattr *nest;
	unsigned int j;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return 0;

	for (j = 0; j < info->tables[table].n_entries; j++) {
		if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
			continue;

		nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 utn->entries[table][j].port) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(utn->entries[table][j].type)))
			goto err_cancel;

		nla_nest_end(skb, nest);
	}

	return 0;

err_cancel:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port	= __udp_tunnel_nic_get_port,
	.set_port_priv	= __udp_tunnel_nic_set_port_priv,
	.add_port	= __udp_tunnel_nic_add_port,
	.del_port	= __udp_tunnel_nic_del_port,
	.reset_ntf	= __udp_tunnel_nic_reset_ntf,
	.dump_size	= __udp_tunnel_nic_dump_size,
	.dump_write	= __udp_tunnel_nic_dump_write,
};

static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}

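/* A note on replay: udp_tunnel_get_rx_info() makes the tunnel drivers
 * (vxlan, geneve, ...) push all their active ports at this device again.
 * udp_tunnel_nic_replay() freezes entries that are already offloaded first,
 * so the re-announcement cannot inflate their use counts; only ports that
 * previously could not be offloaded end up being added on replay.
 */
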
static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the refcount.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	if (!info->shared) {
		udp_tunnel_get_rx_info(dev);
	} else {
		list_for_each_entry(node, &info->shared->devices, list)
			udp_tunnel_get_rx_info(node->dev);
	}

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}

static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);

	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}

static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
		     unsigned int n_tables)
{
	struct udp_tunnel_nic *utn;
	unsigned int i;

	utn = kzalloc(sizeof(*utn), GFP_KERNEL);
	if (!utn)
		return NULL;
	utn->n_tables = n_tables;
	INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);

	utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
	if (!utn->entries)
		goto err_free_utn;

	for (i = 0; i < n_tables; i++) {
		utn->entries[i] = kcalloc(info->tables[i].n_entries,
					  sizeof(*utn->entries[i]), GFP_KERNEL);
		if (!utn->entries[i])
			goto err_free_prev_entries;
	}

	return utn;

err_free_prev_entries:
	while (i--)
		kfree(utn->entries[i]);
	kfree(utn->entries);
err_free_utn:
	kfree(utn);
	return NULL;
}

static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
{
	unsigned int i;

	for (i = 0; i < utn->n_tables; i++)
		kfree(utn->entries[i]);
	kfree(utn->entries);
	kfree(utn);
}

static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node = NULL;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);
	/* Expect use count of at most 2 (IPv4, IPv6) per device */
	BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
		     UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);

	/* Check that the driver info is sane */
	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	if (WARN_ON(info->shared &&
		    info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return -EINVAL;

	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	/* Create UDP tunnel state structures */
	if (info->shared) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->dev = dev;
	}

	if (info->shared && info->shared->udp_tunnel_nic_info) {
		utn = info->shared->udp_tunnel_nic_info;
	} else {
		utn = udp_tunnel_nic_alloc(info, n_tables);
		if (!utn) {
			kfree(node);
			return -ENOMEM;
		}
	}

	if (info->shared) {
		if (!info->shared->udp_tunnel_nic_info) {
			INIT_LIST_HEAD(&info->shared->devices);
			info->shared->udp_tunnel_nic_info = utn;
		}

		list_add_tail(&node->list, &info->shared->devices);
	}

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}

static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* For a shared table remove this dev from the list of sharing devices
	 * and if there are other devices just detach.
	 */
	if (info->shared) {
		struct udp_tunnel_nic_shared_node *node, *first;

		list_for_each_entry(node, &info->shared->devices, list)
			if (node->dev == dev)
				break;
		if (list_entry_is_head(node, &info->shared->devices, list))
			return;

		list_del(&node->list);
		kfree(node);

		first = list_first_entry_or_null(&info->shared->devices,
						 typeof(*first), list);
		if (first) {
			udp_tunnel_drop_rx_info(dev);
			utn->dev = first->dev;
			goto release_dev;
		}

		info->shared->udp_tunnel_nic_info = NULL;
	}

	/* Flush before we check work, so we don't waste time adding entries
	 * from the work which we will boot immediately.
	 */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	udp_tunnel_nic_free(utn);
release_dev:
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}

static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events will need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* All other events only matter if NIC has to be programmed open */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};

static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
late_initcall(udp_tunnel_nic_init_module);

static void __exit udp_tunnel_nic_cleanup_module(void)
{
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
module_exit(udp_tunnel_nic_cleanup_module);

MODULE_LICENSE("GPL");