// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>

#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

static struct vport_ops ovs_netdev_vport_ops;
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	/* Map the ingress device back to its datapath port; the device may
	 * have been detached from the datapath concurrently, in which case
	 * the frame is dropped.
	 */
	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;		/* original skb already freed on failure */

	if (skb->dev->type == ARPHRD_ETHER) {
		/* Re-expose the Ethernet header that the stack pulled before
		 * handing us the frame, and fold it back into the checksum.
		 */
		skb_push(skb, ETH_HLEN);
		skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	}
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /* Called with rcu_read_lock and bottom-halves disabled. */
netdev_frame_hook(struct sk_buff ** pskb)58*4882a593Smuzhiyun static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun struct sk_buff *skb = *pskb;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
63*4882a593Smuzhiyun return RX_HANDLER_PASS;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun netdev_port_receive(skb);
66*4882a593Smuzhiyun return RX_HANDLER_CONSUMED;
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
get_dpdev(const struct datapath * dp)69*4882a593Smuzhiyun static struct net_device *get_dpdev(const struct datapath *dp)
70*4882a593Smuzhiyun {
71*4882a593Smuzhiyun struct vport *local;
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun local = ovs_vport_ovsl(dp, OVSP_LOCAL);
74*4882a593Smuzhiyun return local->dev;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
/*
 * ovs_netdev_link - attach an existing net_device to a vport.
 * @vport: freshly allocated vport; ownership is taken by this function.
 * @name: name of the net_device to attach, resolved in the vport's netns.
 *
 * On success returns @vport with a reference held on vport->dev, the
 * rx_handler registered and the device marked as an OVS datapath port.
 * On failure the vport is freed and an ERR_PTR is returned; the caller
 * must not touch @vport afterwards.
 */
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
	int err;

	vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
	if (!vport->dev) {
		err = -ENODEV;
		goto error_free_vport;
	}

	/* Reject loopback devices, link types other than Ethernet or
	 * ARPHRD_NONE, and OVS internal devices: none can back a netdev
	 * vport.
	 */
	if (vport->dev->flags & IFF_LOOPBACK ||
	    (vport->dev->type != ARPHRD_ETHER &&
	     vport->dev->type != ARPHRD_NONE) ||
	    ovs_is_internal_dev(vport->dev)) {
		err = -EINVAL;
		goto error_put;
	}

	rtnl_lock();
	/* Make the datapath's local device the master of this port. */
	err = netdev_master_upper_dev_link(vport->dev,
					   get_dpdev(vport->dp),
					   NULL, NULL, NULL);
	if (err)
		goto error_unlock;

	/* Divert incoming frames to the datapath via netdev_frame_hook. */
	err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
					 vport);
	if (err)
		goto error_master_upper_dev_unlink;

	dev_disable_lro(vport->dev);
	dev_set_promiscuity(vport->dev, 1);
	vport->dev->priv_flags |= IFF_OVS_DATAPATH;
	rtnl_unlock();

	return vport;

error_master_upper_dev_unlink:
	netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
	rtnl_unlock();
error_put:
	dev_put(vport->dev);
error_free_vport:
	ovs_vport_free(vport);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);
125*4882a593Smuzhiyun
netdev_create(const struct vport_parms * parms)126*4882a593Smuzhiyun static struct vport *netdev_create(const struct vport_parms *parms)
127*4882a593Smuzhiyun {
128*4882a593Smuzhiyun struct vport *vport;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
131*4882a593Smuzhiyun if (IS_ERR(vport))
132*4882a593Smuzhiyun return vport;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun return ovs_netdev_link(vport, parms->name);
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
vport_netdev_free(struct rcu_head * rcu)137*4882a593Smuzhiyun static void vport_netdev_free(struct rcu_head *rcu)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun struct vport *vport = container_of(rcu, struct vport, rcu);
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun if (vport->dev)
142*4882a593Smuzhiyun dev_put(vport->dev);
143*4882a593Smuzhiyun ovs_vport_free(vport);
144*4882a593Smuzhiyun }
145*4882a593Smuzhiyun
/*
 * Detach @vport's net_device from the datapath: clear the OVS port flag,
 * unregister the rx_handler, unlink the device from its master, and drop
 * the promiscuity count taken in ovs_netdev_link().  Caller holds RTNL.
 */
void ovs_netdev_detach_dev(struct vport *vport)
{
	ASSERT_RTNL();
	vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
	netdev_rx_handler_unregister(vport->dev);
	netdev_upper_dev_unlink(vport->dev,
				netdev_master_upper_dev_get(vport->dev));
	dev_set_promiscuity(vport->dev, -1);
}
155*4882a593Smuzhiyun
/* vport_ops->destroy callback for plain netdev vports. */
static void netdev_destroy(struct vport *vport)
{
	rtnl_lock();
	/* The device may already have been detached, e.g. when the
	 * underlying netdev was unregistered out from under us.
	 */
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);
	rtnl_unlock();

	/* Defer the actual free until RCU readers in the receive path
	 * (netdev_frame_hook) have drained.
	 */
	call_rcu(&vport->rcu, vport_netdev_free);
}
165*4882a593Smuzhiyun
/*
 * Destroy a tunnel vport: detach it from the datapath, delete the
 * underlying link when safe, and free the vport via RCU.
 */
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
	rtnl_lock();
	if (netif_is_ovs_port(vport->dev))
		ovs_netdev_detach_dev(vport);

	/* We can be invoked by both explicit vport deletion and
	 * underlying netdev deregistration; delete the link only
	 * if it's not already shutting down.
	 */
	if (vport->dev->reg_state == NETREG_REGISTERED)
		rtnl_delete_link(vport->dev);
	dev_put(vport->dev);
	vport->dev = NULL;	/* so vport_netdev_free won't dev_put again */
	rtnl_unlock();

	call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun /* Returns null if this device is not attached to a datapath. */
ovs_netdev_get_vport(struct net_device * dev)187*4882a593Smuzhiyun struct vport *ovs_netdev_get_vport(struct net_device *dev)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun if (likely(netif_is_ovs_port(dev)))
190*4882a593Smuzhiyun return (struct vport *)
191*4882a593Smuzhiyun rcu_dereference_rtnl(dev->rx_handler_data);
192*4882a593Smuzhiyun else
193*4882a593Smuzhiyun return NULL;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun
/* Ops for vports backed by an ordinary (non-internal) net_device;
 * transmit goes straight out the device via dev_queue_xmit.
 */
static struct vport_ops ovs_netdev_vport_ops = {
	.type = OVS_VPORT_TYPE_NETDEV,
	.create = netdev_create,
	.destroy = netdev_destroy,
	.send = dev_queue_xmit,
};
202*4882a593Smuzhiyun
/* Register the netdev vport type; called once at module init. */
int __init ovs_netdev_init(void)
{
	return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}
207*4882a593Smuzhiyun
/* Unregister the netdev vport type; called at module exit. */
void ovs_netdev_exit(void)
{
	ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}
212