xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * RMNET Data virtual network driver
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/etherdevice.h>
8*4882a593Smuzhiyun #include <linux/if_arp.h>
9*4882a593Smuzhiyun #include <net/pkt_sched.h>
10*4882a593Smuzhiyun #include "rmnet_config.h"
11*4882a593Smuzhiyun #include "rmnet_handlers.h"
12*4882a593Smuzhiyun #include "rmnet_private.h"
13*4882a593Smuzhiyun #include "rmnet_map.h"
14*4882a593Smuzhiyun #include "rmnet_vnd.h"
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun /* RX/TX Fixup */
17*4882a593Smuzhiyun 
rmnet_vnd_rx_fixup(struct sk_buff * skb,struct net_device * dev)18*4882a593Smuzhiyun void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
19*4882a593Smuzhiyun {
20*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(dev);
21*4882a593Smuzhiyun 	struct rmnet_pcpu_stats *pcpu_ptr;
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun 	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun 	u64_stats_update_begin(&pcpu_ptr->syncp);
26*4882a593Smuzhiyun 	pcpu_ptr->stats.rx_pkts++;
27*4882a593Smuzhiyun 	pcpu_ptr->stats.rx_bytes += skb->len;
28*4882a593Smuzhiyun 	u64_stats_update_end(&pcpu_ptr->syncp);
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun 
rmnet_vnd_tx_fixup(struct sk_buff * skb,struct net_device * dev)31*4882a593Smuzhiyun void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
32*4882a593Smuzhiyun {
33*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(dev);
34*4882a593Smuzhiyun 	struct rmnet_pcpu_stats *pcpu_ptr;
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 	u64_stats_update_begin(&pcpu_ptr->syncp);
39*4882a593Smuzhiyun 	pcpu_ptr->stats.tx_pkts++;
40*4882a593Smuzhiyun 	pcpu_ptr->stats.tx_bytes += skb->len;
41*4882a593Smuzhiyun 	u64_stats_update_end(&pcpu_ptr->syncp);
42*4882a593Smuzhiyun }
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /* Network Device Operations */
45*4882a593Smuzhiyun 
rmnet_vnd_start_xmit(struct sk_buff * skb,struct net_device * dev)46*4882a593Smuzhiyun static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
47*4882a593Smuzhiyun 					struct net_device *dev)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	struct rmnet_priv *priv;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	priv = netdev_priv(dev);
52*4882a593Smuzhiyun 	if (priv->real_dev) {
53*4882a593Smuzhiyun 		rmnet_egress_handler(skb);
54*4882a593Smuzhiyun 	} else {
55*4882a593Smuzhiyun 		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
56*4882a593Smuzhiyun 		kfree_skb(skb);
57*4882a593Smuzhiyun 	}
58*4882a593Smuzhiyun 	return NETDEV_TX_OK;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun 
rmnet_vnd_headroom(struct rmnet_port * port)61*4882a593Smuzhiyun static int rmnet_vnd_headroom(struct rmnet_port *port)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun 	u32 headroom;
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 	headroom = sizeof(struct rmnet_map_header);
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
68*4882a593Smuzhiyun 		headroom += sizeof(struct rmnet_map_ul_csum_header);
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	return headroom;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
rmnet_vnd_change_mtu(struct net_device * rmnet_dev,int new_mtu)73*4882a593Smuzhiyun static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
76*4882a593Smuzhiyun 	struct rmnet_port *port;
77*4882a593Smuzhiyun 	u32 headroom;
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 	port = rmnet_get_port_rtnl(priv->real_dev);
80*4882a593Smuzhiyun 
81*4882a593Smuzhiyun 	headroom = rmnet_vnd_headroom(port);
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun 	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
84*4882a593Smuzhiyun 	    new_mtu > (priv->real_dev->mtu - headroom))
85*4882a593Smuzhiyun 		return -EINVAL;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	rmnet_dev->mtu = new_mtu;
88*4882a593Smuzhiyun 	return 0;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun 
rmnet_vnd_get_iflink(const struct net_device * dev)91*4882a593Smuzhiyun static int rmnet_vnd_get_iflink(const struct net_device *dev)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(dev);
94*4882a593Smuzhiyun 
95*4882a593Smuzhiyun 	return priv->real_dev->ifindex;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun 
rmnet_vnd_init(struct net_device * dev)98*4882a593Smuzhiyun static int rmnet_vnd_init(struct net_device *dev)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(dev);
101*4882a593Smuzhiyun 	int err;
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun 	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
104*4882a593Smuzhiyun 	if (!priv->pcpu_stats)
105*4882a593Smuzhiyun 		return -ENOMEM;
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun 	err = gro_cells_init(&priv->gro_cells, dev);
108*4882a593Smuzhiyun 	if (err) {
109*4882a593Smuzhiyun 		free_percpu(priv->pcpu_stats);
110*4882a593Smuzhiyun 		return err;
111*4882a593Smuzhiyun 	}
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	return 0;
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun 
rmnet_vnd_uninit(struct net_device * dev)116*4882a593Smuzhiyun static void rmnet_vnd_uninit(struct net_device *dev)
117*4882a593Smuzhiyun {
118*4882a593Smuzhiyun 	struct rmnet_priv *priv = netdev_priv(dev);
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun 	gro_cells_destroy(&priv->gro_cells);
121*4882a593Smuzhiyun 	free_percpu(priv->pcpu_stats);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun 
/* ndo_get_stats64: aggregate per-CPU RX/TX counters into the rtnl link
 * stats structure. Left byte-identical except for comments: the
 * u64_stats fetch/retry protocol below is order-sensitive.
 */
static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats = { };
	struct rmnet_pcpu_stats *pcpu_ptr;
	struct rmnet_vnd_stats snapshot;
	unsigned int cpu, start;

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		/* Read each CPU's counters under its seqcount so 64-bit
		 * values are never observed torn on 32-bit hosts; retry
		 * if a writer raced with the snapshot.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			snapshot = pcpu_ptr->stats;	/* struct assignment */
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.rx_pkts += snapshot.rx_pkts;
		total_stats.rx_bytes += snapshot.rx_bytes;
		total_stats.tx_pkts += snapshot.tx_pkts;
		total_stats.tx_bytes += snapshot.tx_bytes;
		total_stats.tx_drops += snapshot.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}
154*4882a593Smuzhiyun 
/* Netdev ops for rmnet virtual devices: TX goes through the rmnet egress
 * handler, MTU changes are validated against the real device's headroom,
 * and add/del slave implement rmnet bridge mode.
 */
static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave  = rmnet_add_bridge,
	.ndo_del_slave  = rmnet_del_bridge,
	.ndo_init       = rmnet_vnd_init,
	.ndo_uninit     = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};
165*4882a593Smuzhiyun 
/* ethtool stat names for ETH_SS_STATS.
 * NOTE(review): rmnet_get_ethtool_stats() memcpy()s struct rmnet_priv_stats
 * directly into the output array, so the order here presumably must match
 * that struct's field layout — confirm against rmnet_config.h before
 * adding or reordering entries.
 */
static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
	"Checksum ok",
	"Checksum valid bit not set",
	"Checksum validation failed",
	"Checksum error bad buffer",
	"Checksum error bad ip version",
	"Checksum error bad transport",
	"Checksum skipped on ip fragment",
	"Checksum skipped",
	"Checksum computed in software",
};
177*4882a593Smuzhiyun 
/* ethtool get_strings: copy the stat names for the ETH_SS_STATS set;
 * other string sets are ignored.
 */
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	if (stringset != ETH_SS_STATS)
		return;

	memcpy(buf, rmnet_gstrings_stats, sizeof(rmnet_gstrings_stats));
}
187*4882a593Smuzhiyun 
rmnet_get_sset_count(struct net_device * dev,int sset)188*4882a593Smuzhiyun static int rmnet_get_sset_count(struct net_device *dev, int sset)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	switch (sset) {
191*4882a593Smuzhiyun 	case ETH_SS_STATS:
192*4882a593Smuzhiyun 		return ARRAY_SIZE(rmnet_gstrings_stats);
193*4882a593Smuzhiyun 	default:
194*4882a593Smuzhiyun 		return -EOPNOTSUPP;
195*4882a593Smuzhiyun 	}
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun 
/* ethtool get_ethtool_stats: export the checksum counters, one u64 per
 * entry of rmnet_gstrings_stats, copied straight from priv->stats.
 */
static void rmnet_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	if (!data)
		return;

	memcpy(data, &priv->stats,
	       ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
}
209*4882a593Smuzhiyun 
/* ethtool ops: only the stats-string interface is implemented. */
static const struct ethtool_ops rmnet_ethtool_ops = {
	.get_ethtool_stats = rmnet_get_ethtool_stats,
	.get_strings = rmnet_get_strings,
	.get_sset_count = rmnet_get_sset_count,
};
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
217*4882a593Smuzhiyun  * flags, ARP type, needed headroom, etc...
218*4882a593Smuzhiyun  */
rmnet_vnd_setup(struct net_device * rmnet_dev)219*4882a593Smuzhiyun void rmnet_vnd_setup(struct net_device *rmnet_dev)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun 	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
222*4882a593Smuzhiyun 	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
223*4882a593Smuzhiyun 	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
224*4882a593Smuzhiyun 	eth_random_addr(rmnet_dev->dev_addr);
225*4882a593Smuzhiyun 	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	/* Raw IP mode */
228*4882a593Smuzhiyun 	rmnet_dev->header_ops = NULL;  /* No header */
229*4882a593Smuzhiyun 	rmnet_dev->type = ARPHRD_RAWIP;
230*4882a593Smuzhiyun 	rmnet_dev->hard_header_len = 0;
231*4882a593Smuzhiyun 	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	rmnet_dev->needs_free_netdev = true;
234*4882a593Smuzhiyun 	rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	rmnet_dev->features |= NETIF_F_LLTX;
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	/* This perm addr will be used as interface identifier by IPv6 */
239*4882a593Smuzhiyun 	rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
240*4882a593Smuzhiyun 	eth_random_addr(rmnet_dev->perm_addr);
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun /* Exposed API */
244*4882a593Smuzhiyun 
/* Create and register a new rmnet virtual device bound to @real_dev and
 * MUX id @id. Fails with -EBUSY if the MUX id is already in use, -EINVAL
 * if the real device's MTU cannot accommodate the MAP headroom, or the
 * register_netdevice() error code.
 */
int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep,
		      struct netlink_ext_ack *extack)

{
	struct rmnet_priv *priv = netdev_priv(rmnet_dev);
	u32 headroom;
	int rc;

	if (rmnet_get_endpoint(port, id)) {
		NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
		return -EBUSY;
	}

	rmnet_dev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM | NETIF_F_SG;

	priv->real_dev = real_dev;

	/* Leave room for the MAP encapsulation on the lower device */
	headroom = rmnet_vnd_headroom(port);
	if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
		return -EINVAL;
	}

	rc = register_netdevice(rmnet_dev);
	if (rc)
		return rc;

	ep->egress_dev = rmnet_dev;
	ep->mux_id = id;
	port->nr_rmnet_devs++;

	rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
	priv->mux_id = id;

	netdev_dbg(rmnet_dev, "rmnet dev created\n");
	return 0;
}
289*4882a593Smuzhiyun 
/* Detach endpoint @ep (MUX id @id) from @port. Returns -EINVAL for an
 * out-of-range id or an endpoint with no egress device.
 */
int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP)
		return -EINVAL;
	if (!ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;
	return 0;
}
300*4882a593Smuzhiyun 
/* Start or stop the vnd's TX queue in response to modem flow control. */
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect similar number of enable/disable
	 * commands, optimize for the disable. That is more
	 * latency sensitive than enable
	 */
	if (likely(!enable))
		netif_stop_queue(rmnet_dev);
	else
		netif_wake_queue(rmnet_dev);

	return 0;
}
315*4882a593Smuzhiyun 
rmnet_vnd_validate_real_dev_mtu(struct net_device * real_dev)316*4882a593Smuzhiyun int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	struct hlist_node *tmp_ep;
319*4882a593Smuzhiyun 	struct rmnet_endpoint *ep;
320*4882a593Smuzhiyun 	struct rmnet_port *port;
321*4882a593Smuzhiyun 	unsigned long bkt_ep;
322*4882a593Smuzhiyun 	u32 headroom;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	port = rmnet_get_port_rtnl(real_dev);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	headroom = rmnet_vnd_headroom(port);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
329*4882a593Smuzhiyun 		if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
330*4882a593Smuzhiyun 			return -1;
331*4882a593Smuzhiyun 	}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	return 0;
334*4882a593Smuzhiyun }
335*4882a593Smuzhiyun 
rmnet_vnd_update_dev_mtu(struct rmnet_port * port,struct net_device * real_dev)336*4882a593Smuzhiyun int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
337*4882a593Smuzhiyun 			     struct net_device *real_dev)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun 	struct hlist_node *tmp_ep;
340*4882a593Smuzhiyun 	struct rmnet_endpoint *ep;
341*4882a593Smuzhiyun 	unsigned long bkt_ep;
342*4882a593Smuzhiyun 	u32 headroom;
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 	headroom = rmnet_vnd_headroom(port);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
347*4882a593Smuzhiyun 		if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
348*4882a593Smuzhiyun 			continue;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 		if (rmnet_vnd_change_mtu(ep->egress_dev,
351*4882a593Smuzhiyun 					 real_dev->mtu - headroom))
352*4882a593Smuzhiyun 			return -1;
353*4882a593Smuzhiyun 	}
354*4882a593Smuzhiyun 
355*4882a593Smuzhiyun 	return 0;
356*4882a593Smuzhiyun }
357