// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/netdevice.h>
15*4882a593Smuzhiyun #include <linux/if_arp.h>
16*4882a593Smuzhiyun #include <linux/if_ether.h>
17*4882a593Smuzhiyun #include <linux/can.h>
18*4882a593Smuzhiyun #include <linux/can/dev.h>
19*4882a593Smuzhiyun #include <linux/can/skb.h>
20*4882a593Smuzhiyun #include <linux/can/vxcan.h>
21*4882a593Smuzhiyun #include <linux/can/can-ml.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <net/rtnetlink.h>
24*4882a593Smuzhiyun
#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
/* alias for the rtnl link kind so "ip link add ... type vxcan" can
 * auto-load this module
 */
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
31*4882a593Smuzhiyun
/* Per-device private data: RCU-protected pointer to the other end of
 * the tunnel. Set in vxcan_newlink(), cleared (NULL) in vxcan_dellink()
 * while the pair is being dismantled.
 */
struct vxcan_priv {
	struct net_device __rcu	*peer;
};
35*4882a593Smuzhiyun
/* ndo_start_xmit: forward a CAN(FD) frame to the peer device.
 *
 * The frame is never queued; it is injected directly into the peer's
 * receive path via netif_rx_ni(). The peer pointer is read under RCU
 * because vxcan_dellink() may clear it concurrently.
 *
 * Always returns NETDEV_TX_OK; a missing peer counts as tx_dropped.
 */
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	u8 len;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		/* pair is (being) dismantled - no receiver */
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	/* get a writable private copy; the original skb may be shared.
	 * On failure can_create_echo_skb() has already freed the skb.
	 */
	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type = PACKET_BROADCAST;
	skb->dev = peer;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* cache the data length before handing the skb over - after
	 * netif_rx_ni() the skb (and cfd) must no longer be touched
	 */
	len = cfd->len;
	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun
vxcan_open(struct net_device * dev)80*4882a593Smuzhiyun static int vxcan_open(struct net_device *dev)
81*4882a593Smuzhiyun {
82*4882a593Smuzhiyun struct vxcan_priv *priv = netdev_priv(dev);
83*4882a593Smuzhiyun struct net_device *peer = rtnl_dereference(priv->peer);
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun if (!peer)
86*4882a593Smuzhiyun return -ENOTCONN;
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun if (peer->flags & IFF_UP) {
89*4882a593Smuzhiyun netif_carrier_on(dev);
90*4882a593Smuzhiyun netif_carrier_on(peer);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun return 0;
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
vxcan_close(struct net_device * dev)95*4882a593Smuzhiyun static int vxcan_close(struct net_device *dev)
96*4882a593Smuzhiyun {
97*4882a593Smuzhiyun struct vxcan_priv *priv = netdev_priv(dev);
98*4882a593Smuzhiyun struct net_device *peer = rtnl_dereference(priv->peer);
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun netif_carrier_off(dev);
101*4882a593Smuzhiyun if (peer)
102*4882a593Smuzhiyun netif_carrier_off(peer);
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun return 0;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
vxcan_get_iflink(const struct net_device * dev)107*4882a593Smuzhiyun static int vxcan_get_iflink(const struct net_device *dev)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun struct vxcan_priv *priv = netdev_priv(dev);
110*4882a593Smuzhiyun struct net_device *peer;
111*4882a593Smuzhiyun int iflink;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun rcu_read_lock();
114*4882a593Smuzhiyun peer = rcu_dereference(priv->peer);
115*4882a593Smuzhiyun iflink = peer ? peer->ifindex : 0;
116*4882a593Smuzhiyun rcu_read_unlock();
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun return iflink;
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
vxcan_change_mtu(struct net_device * dev,int new_mtu)121*4882a593Smuzhiyun static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun /* Do not allow changing the MTU while running */
124*4882a593Smuzhiyun if (dev->flags & IFF_UP)
125*4882a593Smuzhiyun return -EBUSY;
126*4882a593Smuzhiyun
127*4882a593Smuzhiyun if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
128*4882a593Smuzhiyun return -EINVAL;
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun dev->mtu = new_mtu;
131*4882a593Smuzhiyun return 0;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
/* net_device callbacks for a vxcan interface */
static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu	= vxcan_change_mtu,
};
141*4882a593Smuzhiyun
/* rtnl ->setup callback: initialize a vxcan net_device as a virtual,
 * queue-less CAN FD capable interface.
 */
static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type = ARPHRD_CAN;
	dev->mtu = CANFD_MTU;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;	/* no qdisc: frames go straight to the peer */
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &vxcan_netdev_ops;
	dev->needs_free_netdev = true;

	/* The CAN midlayer private area lives directly behind the aligned
	 * vxcan_priv inside netdev_priv(); this layout must match the
	 * priv_size computation in vxcan_link_ops.
	 */
	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /* forward declaration for rtnl_create_link() */
160*4882a593Smuzhiyun static struct rtnl_link_ops vxcan_link_ops;
161*4882a593Smuzhiyun
vxcan_newlink(struct net * net,struct net_device * dev,struct nlattr * tb[],struct nlattr * data[],struct netlink_ext_ack * extack)162*4882a593Smuzhiyun static int vxcan_newlink(struct net *net, struct net_device *dev,
163*4882a593Smuzhiyun struct nlattr *tb[], struct nlattr *data[],
164*4882a593Smuzhiyun struct netlink_ext_ack *extack)
165*4882a593Smuzhiyun {
166*4882a593Smuzhiyun struct vxcan_priv *priv;
167*4882a593Smuzhiyun struct net_device *peer;
168*4882a593Smuzhiyun struct net *peer_net;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
171*4882a593Smuzhiyun char ifname[IFNAMSIZ];
172*4882a593Smuzhiyun unsigned char name_assign_type;
173*4882a593Smuzhiyun struct ifinfomsg *ifmp = NULL;
174*4882a593Smuzhiyun int err;
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /* register peer device */
177*4882a593Smuzhiyun if (data && data[VXCAN_INFO_PEER]) {
178*4882a593Smuzhiyun struct nlattr *nla_peer;
179*4882a593Smuzhiyun
180*4882a593Smuzhiyun nla_peer = data[VXCAN_INFO_PEER];
181*4882a593Smuzhiyun ifmp = nla_data(nla_peer);
182*4882a593Smuzhiyun err = rtnl_nla_parse_ifla(peer_tb,
183*4882a593Smuzhiyun nla_data(nla_peer) +
184*4882a593Smuzhiyun sizeof(struct ifinfomsg),
185*4882a593Smuzhiyun nla_len(nla_peer) -
186*4882a593Smuzhiyun sizeof(struct ifinfomsg),
187*4882a593Smuzhiyun NULL);
188*4882a593Smuzhiyun if (err < 0)
189*4882a593Smuzhiyun return err;
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun tbp = peer_tb;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun if (ifmp && tbp[IFLA_IFNAME]) {
195*4882a593Smuzhiyun nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
196*4882a593Smuzhiyun name_assign_type = NET_NAME_USER;
197*4882a593Smuzhiyun } else {
198*4882a593Smuzhiyun snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
199*4882a593Smuzhiyun name_assign_type = NET_NAME_ENUM;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun peer_net = rtnl_link_get_net(net, tbp);
203*4882a593Smuzhiyun if (IS_ERR(peer_net))
204*4882a593Smuzhiyun return PTR_ERR(peer_net);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun peer = rtnl_create_link(peer_net, ifname, name_assign_type,
207*4882a593Smuzhiyun &vxcan_link_ops, tbp, extack);
208*4882a593Smuzhiyun if (IS_ERR(peer)) {
209*4882a593Smuzhiyun put_net(peer_net);
210*4882a593Smuzhiyun return PTR_ERR(peer);
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun if (ifmp && dev->ifindex)
214*4882a593Smuzhiyun peer->ifindex = ifmp->ifi_index;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun err = register_netdevice(peer);
217*4882a593Smuzhiyun put_net(peer_net);
218*4882a593Smuzhiyun peer_net = NULL;
219*4882a593Smuzhiyun if (err < 0) {
220*4882a593Smuzhiyun free_netdev(peer);
221*4882a593Smuzhiyun return err;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun netif_carrier_off(peer);
225*4882a593Smuzhiyun
226*4882a593Smuzhiyun err = rtnl_configure_link(peer, ifmp);
227*4882a593Smuzhiyun if (err < 0)
228*4882a593Smuzhiyun goto unregister_network_device;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /* register first device */
231*4882a593Smuzhiyun if (tb[IFLA_IFNAME])
232*4882a593Smuzhiyun nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
233*4882a593Smuzhiyun else
234*4882a593Smuzhiyun snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
235*4882a593Smuzhiyun
236*4882a593Smuzhiyun err = register_netdevice(dev);
237*4882a593Smuzhiyun if (err < 0)
238*4882a593Smuzhiyun goto unregister_network_device;
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun netif_carrier_off(dev);
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun /* cross link the device pair */
243*4882a593Smuzhiyun priv = netdev_priv(dev);
244*4882a593Smuzhiyun rcu_assign_pointer(priv->peer, peer);
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun priv = netdev_priv(peer);
247*4882a593Smuzhiyun rcu_assign_pointer(priv->peer, dev);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun return 0;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun unregister_network_device:
252*4882a593Smuzhiyun unregister_netdevice(peer);
253*4882a593Smuzhiyun return err;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
/* rtnl ->dellink handler: queue both ends of the pair for unregistration
 * and break their cross links so vxcan_xmit() sees a NULL peer.
 */
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}
277*4882a593Smuzhiyun
/* netlink attribute policy: VXCAN_INFO_PEER must at least carry an
 * ifinfomsg header (nested IFLA_* attributes may follow it)
 */
static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};
281*4882a593Smuzhiyun
vxcan_get_link_net(const struct net_device * dev)282*4882a593Smuzhiyun static struct net *vxcan_get_link_net(const struct net_device *dev)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun struct vxcan_priv *priv = netdev_priv(dev);
285*4882a593Smuzhiyun struct net_device *peer = rtnl_dereference(priv->peer);
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun return peer ? dev_net(peer) : dev_net(dev);
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun
/* rtnl link type registration for "vxcan".
 * priv_size reserves room for vxcan_priv plus the CAN midlayer private
 * data placed behind it - must match the layout set up in vxcan_setup().
 */
static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};
300*4882a593Smuzhiyun
vxcan_init(void)301*4882a593Smuzhiyun static __init int vxcan_init(void)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun pr_info("vxcan: Virtual CAN Tunnel driver\n");
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun return rtnl_link_register(&vxcan_link_ops);
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
/* Module unload: unregister the "vxcan" rtnl link type */
static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}
312*4882a593Smuzhiyun
/* module entry/exit points */
module_init(vxcan_init);
module_exit(vxcan_exit);
315