xref: /OK3568_Linux_fs/kernel/net/can/raw.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2*4882a593Smuzhiyun /* raw.c - Raw sockets for protocol family CAN
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5*4882a593Smuzhiyun  * All rights reserved.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Redistribution and use in source and binary forms, with or without
8*4882a593Smuzhiyun  * modification, are permitted provided that the following conditions
9*4882a593Smuzhiyun  * are met:
10*4882a593Smuzhiyun  * 1. Redistributions of source code must retain the above copyright
11*4882a593Smuzhiyun  *    notice, this list of conditions and the following disclaimer.
12*4882a593Smuzhiyun  * 2. Redistributions in binary form must reproduce the above copyright
13*4882a593Smuzhiyun  *    notice, this list of conditions and the following disclaimer in the
14*4882a593Smuzhiyun  *    documentation and/or other materials provided with the distribution.
15*4882a593Smuzhiyun  * 3. Neither the name of Volkswagen nor the names of its contributors
16*4882a593Smuzhiyun  *    may be used to endorse or promote products derived from this software
17*4882a593Smuzhiyun  *    without specific prior written permission.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * Alternatively, provided that this notice is retained in full, this
20*4882a593Smuzhiyun  * software may be distributed under the terms of the GNU General
21*4882a593Smuzhiyun  * Public License ("GPL") version 2, in which case the provisions of the
22*4882a593Smuzhiyun  * GPL apply INSTEAD OF those given above.
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * The provided data structures and external interfaces from this code
25*4882a593Smuzhiyun  * are not restricted to be used by modules with a GPL compatible license.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28*4882a593Smuzhiyun  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29*4882a593Smuzhiyun  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30*4882a593Smuzhiyun  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31*4882a593Smuzhiyun  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32*4882a593Smuzhiyun  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33*4882a593Smuzhiyun  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34*4882a593Smuzhiyun  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35*4882a593Smuzhiyun  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36*4882a593Smuzhiyun  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37*4882a593Smuzhiyun  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38*4882a593Smuzhiyun  * DAMAGE.
39*4882a593Smuzhiyun  *
40*4882a593Smuzhiyun  */
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include <linux/module.h>
43*4882a593Smuzhiyun #include <linux/init.h>
44*4882a593Smuzhiyun #include <linux/uio.h>
45*4882a593Smuzhiyun #include <linux/net.h>
46*4882a593Smuzhiyun #include <linux/slab.h>
47*4882a593Smuzhiyun #include <linux/netdevice.h>
48*4882a593Smuzhiyun #include <linux/socket.h>
49*4882a593Smuzhiyun #include <linux/if_arp.h>
50*4882a593Smuzhiyun #include <linux/skbuff.h>
51*4882a593Smuzhiyun #include <linux/can.h>
52*4882a593Smuzhiyun #include <linux/can/core.h>
53*4882a593Smuzhiyun #include <linux/can/skb.h>
54*4882a593Smuzhiyun #include <linux/can/raw.h>
55*4882a593Smuzhiyun #include <net/sock.h>
56*4882a593Smuzhiyun #include <net/net_namespace.h>
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun MODULE_DESCRIPTION("PF_CAN raw protocol");
59*4882a593Smuzhiyun MODULE_LICENSE("Dual BSD/GPL");
60*4882a593Smuzhiyun MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
61*4882a593Smuzhiyun MODULE_ALIAS("can-proto-1");
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun #define RAW_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_ifindex)
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun #define MASK_ALL 0
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun /* A raw socket has a list of can_filters attached to it, each receiving
68*4882a593Smuzhiyun  * the CAN frames matching that filter.  If the filter list is empty,
69*4882a593Smuzhiyun  * no CAN frames will be received by the socket.  The default after
70*4882a593Smuzhiyun  * opening the socket, is to have one filter which receives all frames.
71*4882a593Smuzhiyun  * The filter list is allocated dynamically with the exception of the
72*4882a593Smuzhiyun  * list containing only one item.  This common case is optimized by
73*4882a593Smuzhiyun  * storing the single filter in dfilter, to avoid using dynamic memory.
74*4882a593Smuzhiyun  */
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun struct uniqframe {
77*4882a593Smuzhiyun 	int skbcnt;
78*4882a593Smuzhiyun 	const struct sk_buff *skb;
79*4882a593Smuzhiyun 	unsigned int join_rx_count;
80*4882a593Smuzhiyun };
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun struct raw_sock {
83*4882a593Smuzhiyun 	struct sock sk;
84*4882a593Smuzhiyun 	int bound;
85*4882a593Smuzhiyun 	int ifindex;
86*4882a593Smuzhiyun 	struct list_head notifier;
87*4882a593Smuzhiyun 	int loopback;
88*4882a593Smuzhiyun 	int recv_own_msgs;
89*4882a593Smuzhiyun 	int fd_frames;
90*4882a593Smuzhiyun 	int join_filters;
91*4882a593Smuzhiyun 	int count;                 /* number of active filters */
92*4882a593Smuzhiyun 	struct can_filter dfilter; /* default/single filter */
93*4882a593Smuzhiyun 	struct can_filter *filter; /* pointer to filter(s) */
94*4882a593Smuzhiyun 	can_err_mask_t err_mask;
95*4882a593Smuzhiyun 	struct uniqframe __percpu *uniq;
96*4882a593Smuzhiyun };
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun static LIST_HEAD(raw_notifier_list);
99*4882a593Smuzhiyun static DEFINE_SPINLOCK(raw_notifier_lock);
100*4882a593Smuzhiyun static struct raw_sock *raw_busy_notifier;
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun /* Return pointer to store the extra msg flags for raw_recvmsg().
103*4882a593Smuzhiyun  * We use the space of one unsigned int beyond the 'struct sockaddr_can'
104*4882a593Smuzhiyun  * in skb->cb.
105*4882a593Smuzhiyun  */
raw_flags(struct sk_buff * skb)106*4882a593Smuzhiyun static inline unsigned int *raw_flags(struct sk_buff *skb)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	sock_skb_cb_check_size(sizeof(struct sockaddr_can) +
109*4882a593Smuzhiyun 			       sizeof(unsigned int));
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	/* return pointer after struct sockaddr_can */
112*4882a593Smuzhiyun 	return (unsigned int *)(&((struct sockaddr_can *)skb->cb)[1]);
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun 
/* Cast a struct sock to its containing struct raw_sock.
 * Valid because struct sock is the first member of struct raw_sock.
 */
static inline struct raw_sock *raw_sk(const struct sock *sk)
{
	return (struct raw_sock *)sk;
}
119*4882a593Smuzhiyun 
/* raw_rcv - deliver a CAN frame that matched one of the socket's filters
 * @oskb: the matched skb (shared with other receivers, treated read-only)
 * @data: the receiving struct sock, as registered via can_rx_register()
 *
 * Runs in the CAN receive path. Clones the skb and queues the clone on
 * the socket receive queue so raw_recvmsg() can pick it up.
 */
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass non-CAN2.0 frames to a legacy socket */
	if (!ro->fd_frames && oskb->len != CAN_MTU)
		return;

	/* eliminate multiple filter matches for the same skb */
	if (this_cpu_ptr(ro->uniq)->skb == oskb &&
	    this_cpu_ptr(ro->uniq)->skbcnt == can_skb_prv(oskb)->skbcnt) {
		if (ro->join_filters) {
			this_cpu_inc(ro->uniq->join_rx_count);
			/* drop frame until all enabled filters matched */
			if (this_cpu_ptr(ro->uniq)->join_rx_count < ro->count)
				return;
		} else {
			return;
		}
	} else {
		/* new skb on this cpu: remember it and start counting matches */
		this_cpu_ptr(ro->uniq)->skb = oskb;
		this_cpu_ptr(ro->uniq)->skbcnt = can_skb_prv(oskb)->skbcnt;
		this_cpu_ptr(ro->uniq)->join_rx_count = 1;
		/* drop first frame to check all enabled filters? */
		if (ro->join_filters && ro->count > 1)
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/* Put the datagram to the queue so that raw_recvmsg() can get
	 * it from there. We need to pass the interface index to
	 * raw_recvmsg(). We pass a whole struct sockaddr_can in
	 * skb->cb containing the interface index.
	 */

	sock_skb_cb_check_size(sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;	/* frame originated on a local socket */
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;		/* frame was sent by this very socket */

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
184*4882a593Smuzhiyun 
raw_enable_filters(struct net * net,struct net_device * dev,struct sock * sk,struct can_filter * filter,int count)185*4882a593Smuzhiyun static int raw_enable_filters(struct net *net, struct net_device *dev,
186*4882a593Smuzhiyun 			      struct sock *sk, struct can_filter *filter,
187*4882a593Smuzhiyun 			      int count)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun 	int err = 0;
190*4882a593Smuzhiyun 	int i;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	for (i = 0; i < count; i++) {
193*4882a593Smuzhiyun 		err = can_rx_register(net, dev, filter[i].can_id,
194*4882a593Smuzhiyun 				      filter[i].can_mask,
195*4882a593Smuzhiyun 				      raw_rcv, sk, "raw", sk);
196*4882a593Smuzhiyun 		if (err) {
197*4882a593Smuzhiyun 			/* clean up successfully registered filters */
198*4882a593Smuzhiyun 			while (--i >= 0)
199*4882a593Smuzhiyun 				can_rx_unregister(net, dev, filter[i].can_id,
200*4882a593Smuzhiyun 						  filter[i].can_mask,
201*4882a593Smuzhiyun 						  raw_rcv, sk);
202*4882a593Smuzhiyun 			break;
203*4882a593Smuzhiyun 		}
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	return err;
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
/* Register the error frame receiver; an empty mask needs no receiver. */
static int raw_enable_errfilter(struct net *net, struct net_device *dev,
				struct sock *sk, can_err_mask_t err_mask)
{
	if (!err_mask)
		return 0;

	return can_rx_register(net, dev, 0, err_mask | CAN_ERR_FLAG,
			       raw_rcv, sk, "raw", sk);
}
220*4882a593Smuzhiyun 
raw_disable_filters(struct net * net,struct net_device * dev,struct sock * sk,struct can_filter * filter,int count)221*4882a593Smuzhiyun static void raw_disable_filters(struct net *net, struct net_device *dev,
222*4882a593Smuzhiyun 				struct sock *sk, struct can_filter *filter,
223*4882a593Smuzhiyun 				int count)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun 	int i;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	for (i = 0; i < count; i++)
228*4882a593Smuzhiyun 		can_rx_unregister(net, dev, filter[i].can_id,
229*4882a593Smuzhiyun 				  filter[i].can_mask, raw_rcv, sk);
230*4882a593Smuzhiyun }
231*4882a593Smuzhiyun 
/* Drop the error frame receiver; nothing was registered for an empty mask. */
static inline void raw_disable_errfilter(struct net *net,
					 struct net_device *dev,
					 struct sock *sk,
					 can_err_mask_t err_mask)

{
	if (!err_mask)
		return;

	can_rx_unregister(net, dev, 0, err_mask | CAN_ERR_FLAG,
			  raw_rcv, sk);
}
242*4882a593Smuzhiyun 
raw_disable_allfilters(struct net * net,struct net_device * dev,struct sock * sk)243*4882a593Smuzhiyun static inline void raw_disable_allfilters(struct net *net,
244*4882a593Smuzhiyun 					  struct net_device *dev,
245*4882a593Smuzhiyun 					  struct sock *sk)
246*4882a593Smuzhiyun {
247*4882a593Smuzhiyun 	struct raw_sock *ro = raw_sk(sk);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	raw_disable_filters(net, dev, sk, ro->filter, ro->count);
250*4882a593Smuzhiyun 	raw_disable_errfilter(net, dev, sk, ro->err_mask);
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun 
raw_enable_allfilters(struct net * net,struct net_device * dev,struct sock * sk)253*4882a593Smuzhiyun static int raw_enable_allfilters(struct net *net, struct net_device *dev,
254*4882a593Smuzhiyun 				 struct sock *sk)
255*4882a593Smuzhiyun {
256*4882a593Smuzhiyun 	struct raw_sock *ro = raw_sk(sk);
257*4882a593Smuzhiyun 	int err;
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	err = raw_enable_filters(net, dev, sk, ro->filter, ro->count);
260*4882a593Smuzhiyun 	if (!err) {
261*4882a593Smuzhiyun 		err = raw_enable_errfilter(net, dev, sk, ro->err_mask);
262*4882a593Smuzhiyun 		if (err)
263*4882a593Smuzhiyun 			raw_disable_filters(net, dev, sk, ro->filter,
264*4882a593Smuzhiyun 					    ro->count);
265*4882a593Smuzhiyun 	}
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	return err;
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun 
/* raw_notify - handle a netdevice event for one raw socket
 * @ro: the socket to check
 * @msg: notifier event (NETDEV_UNREGISTER or NETDEV_DOWN, see raw_notifier())
 * @dev: the CAN netdevice the event refers to
 *
 * Called from raw_notifier() without the notifier spinlock held, so it
 * may take the socket lock.
 */
static void raw_notify(struct raw_sock *ro, unsigned long msg,
		       struct net_device *dev)
{
	struct sock *sk = &ro->sk;

	/* only handle devices in the socket's own network namespace */
	if (!net_eq(dev_net(dev), sock_net(sk)))
		return;

	/* only handle the device this socket is bound to */
	if (ro->ifindex != dev->ifindex)
		return;

	switch (msg) {
	case NETDEV_UNREGISTER:
		lock_sock(sk);
		/* remove current filters & unregister */
		if (ro->bound)
			raw_disable_allfilters(dev_net(dev), dev, sk);

		if (ro->count > 1)
			kfree(ro->filter);

		/* the socket is now effectively unbound */
		ro->ifindex = 0;
		ro->bound = 0;
		ro->count = 0;
		release_sock(sk);

		/* report the vanished device to the user */
		sk->sk_err = ENODEV;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;

	case NETDEV_DOWN:
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
		break;
	}
}
308*4882a593Smuzhiyun 
/* raw_notifier - module-wide netdevice notifier callback
 *
 * Walks raw_notifier_list and forwards the event to each raw socket via
 * raw_notify().  The spinlock is dropped around each raw_notify() call
 * because it takes the socket lock; raw_busy_notifier marks the entry
 * currently being processed so raw_release() can wait for it, and also
 * guards against reentrant invocation.
 */
static int raw_notifier(struct notifier_block *nb, unsigned long msg,
			void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;
	if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
		return NOTIFY_DONE;
	if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
		return NOTIFY_DONE;

	spin_lock(&raw_notifier_lock);
	list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
		/* drop the lock while notifying; the current entry cannot be
		 * removed because raw_release() spins on raw_busy_notifier
		 */
		spin_unlock(&raw_notifier_lock);
		raw_notify(raw_busy_notifier, msg, dev);
		spin_lock(&raw_notifier_lock);
	}
	raw_busy_notifier = NULL;
	spin_unlock(&raw_notifier_lock);
	return NOTIFY_DONE;
}
331*4882a593Smuzhiyun 
/* raw_init - initialize a freshly created CAN_RAW socket
 *
 * Sets up the default single "receive everything" filter, the default
 * socket option values, the per-cpu uniqframe state and finally adds the
 * socket to the module's notifier list (which publishes it to
 * raw_notifier(), so this must happen last).
 */
static int raw_init(struct sock *sk)
{
	struct raw_sock *ro = raw_sk(sk);

	ro->bound            = 0;
	ro->ifindex          = 0;

	/* set default filter to single entry dfilter */
	ro->dfilter.can_id   = 0;
	ro->dfilter.can_mask = MASK_ALL;
	ro->filter           = &ro->dfilter;
	ro->count            = 1;

	/* set default loopback behaviour */
	ro->loopback         = 1;
	ro->recv_own_msgs    = 0;
	ro->fd_frames        = 0;
	ro->join_filters     = 0;

	/* alloc_percpu provides zero'ed memory */
	ro->uniq = alloc_percpu(struct uniqframe);
	if (unlikely(!ro->uniq))
		return -ENOMEM;

	/* set notifier */
	spin_lock(&raw_notifier_lock);
	list_add_tail(&ro->notifier, &raw_notifier_list);
	spin_unlock(&raw_notifier_lock);

	return 0;
}
363*4882a593Smuzhiyun 
/* raw_release - tear down a CAN_RAW socket
 *
 * Waits until a concurrent raw_notifier() run for this socket has
 * finished, unlinks it from the notifier list, unregisters all filters
 * and frees the dynamic filter list and the per-cpu state.
 */
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	/* wait until raw_notifier() is done with this socket before
	 * removing it from the list (see raw_busy_notifier handshake)
	 */
	spin_lock(&raw_notifier_lock);
	while (raw_busy_notifier == ro) {
		spin_unlock(&raw_notifier_lock);
		schedule_timeout_uninterruptible(1);
		spin_lock(&raw_notifier_lock);
	}
	list_del(&ro->notifier);
	spin_unlock(&raw_notifier_lock);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev_net(dev), dev, sk);
				dev_put(dev);
			}
		} else {
			/* socket was bound to "any" device (ifindex 0) */
			raw_disable_allfilters(sock_net(sk), NULL, sk);
		}
	}

	/* free a dynamically allocated filter list */
	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound = 0;
	ro->count = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
416*4882a593Smuzhiyun 
/* raw_bind - bind the socket to a CAN interface (or to all interfaces)
 *
 * An ifindex of 0 binds to all CAN devices.  Rebinding is allowed; the
 * filters are registered on the new device before the old registrations
 * are removed, so the socket never ends up without any registration.
 */
static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	int ifindex;
	int err = 0;
	int notify_enetdown = 0;

	if (len < RAW_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;

	lock_sock(sk);

	/* rebinding to the same interface is a no-op */
	if (ro->bound && addr->can_ifindex == ro->ifindex)
		goto out;

	if (addr->can_ifindex) {
		struct net_device *dev;

		dev = dev_get_by_index(sock_net(sk), addr->can_ifindex);
		if (!dev) {
			err = -ENODEV;
			goto out;
		}
		/* refuse to bind to non-CAN devices */
		if (dev->type != ARPHRD_CAN) {
			dev_put(dev);
			err = -ENODEV;
			goto out;
		}
		/* device is down: report ENETDOWN after dropping the lock */
		if (!(dev->flags & IFF_UP))
			notify_enetdown = 1;

		ifindex = dev->ifindex;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), dev, sk);
		dev_put(dev);
	} else {
		ifindex = 0;

		/* filters set by default/setsockopt */
		err = raw_enable_allfilters(sock_net(sk), NULL, sk);
	}

	if (!err) {
		if (ro->bound) {
			/* unregister old filters */
			if (ro->ifindex) {
				struct net_device *dev;

				dev = dev_get_by_index(sock_net(sk),
						       ro->ifindex);
				if (dev) {
					raw_disable_allfilters(dev_net(dev),
							       dev, sk);
					dev_put(dev);
				}
			} else {
				raw_disable_allfilters(sock_net(sk), NULL, sk);
			}
		}
		ro->ifindex = ifindex;
		ro->bound = 1;
	}

 out:
	release_sock(sk);

	if (notify_enetdown) {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

	return err;
}
496*4882a593Smuzhiyun 
raw_getname(struct socket * sock,struct sockaddr * uaddr,int peer)497*4882a593Smuzhiyun static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
498*4882a593Smuzhiyun 		       int peer)
499*4882a593Smuzhiyun {
500*4882a593Smuzhiyun 	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
501*4882a593Smuzhiyun 	struct sock *sk = sock->sk;
502*4882a593Smuzhiyun 	struct raw_sock *ro = raw_sk(sk);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	if (peer)
505*4882a593Smuzhiyun 		return -EOPNOTSUPP;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	memset(addr, 0, RAW_MIN_NAMELEN);
508*4882a593Smuzhiyun 	addr->can_family  = AF_CAN;
509*4882a593Smuzhiyun 	addr->can_ifindex = ro->ifindex;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	return RAW_MIN_NAMELEN;
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun 
/* raw_setsockopt - handle the SOL_CAN_RAW socket options
 *
 * CAN_RAW_FILTER and CAN_RAW_ERR_FILTER have to swap the receiver
 * registrations of a bound socket: the new filters are registered
 * first and only then are the old ones removed, so a matching frame is
 * never lost during the switch.  The remaining options are plain
 * integer flags copied from user space.
 */
static int raw_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct can_filter *filter = NULL;  /* dyn. alloc'ed filters */
	struct can_filter sfilter;         /* single filter */
	struct net_device *dev = NULL;
	can_err_mask_t err_mask = 0;
	int count = 0;
	int err = 0;

	if (level != SOL_CAN_RAW)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		/* accept only whole multiples of struct can_filter */
		if (optlen % sizeof(struct can_filter) != 0)
			return -EINVAL;

		/* bound the number of filters a user may install */
		if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter))
			return -EINVAL;

		count = optlen / sizeof(struct can_filter);

		if (count > 1) {
			/* filter does not fit into dfilter => alloc space */
			filter = memdup_sockptr(optval, optlen);
			if (IS_ERR(filter))
				return PTR_ERR(filter);
		} else if (count == 1) {
			if (copy_from_sockptr(&sfilter, optval, sizeof(sfilter)))
				return -EFAULT;
		}

		rtnl_lock();
		lock_sock(sk);

		/* socket is bound to a specific device: look it up */
		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				if (count > 1)
					kfree(filter);
				err = -ENODEV;
				goto out_fil;
			}
		}

		if (ro->bound) {
			/* (try to) register the new filters */
			if (count == 1)
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 &sfilter, 1);
			else
				err = raw_enable_filters(sock_net(sk), dev, sk,
							 filter, count);
			if (err) {
				if (count > 1)
					kfree(filter);
				goto out_fil;
			}

			/* remove old filter registrations */
			raw_disable_filters(sock_net(sk), dev, sk, ro->filter,
					    ro->count);
		}

		/* remove old filter space */
		if (ro->count > 1)
			kfree(ro->filter);

		/* link new filters to the socket */
		if (count == 1) {
			/* copy filter data for single filter */
			ro->dfilter = sfilter;
			filter = &ro->dfilter;
		}
		ro->filter = filter;
		ro->count  = count;

 out_fil:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_ERR_FILTER:
		if (optlen != sizeof(err_mask))
			return -EINVAL;

		if (copy_from_sockptr(&err_mask, optval, optlen))
			return -EFAULT;

		/* only keep bits inside the valid error mask */
		err_mask &= CAN_ERR_MASK;

		rtnl_lock();
		lock_sock(sk);

		if (ro->bound && ro->ifindex) {
			dev = dev_get_by_index(sock_net(sk), ro->ifindex);
			if (!dev) {
				err = -ENODEV;
				goto out_err;
			}
		}

		/* remove current error mask */
		if (ro->bound) {
			/* (try to) register the new err_mask */
			err = raw_enable_errfilter(sock_net(sk), dev, sk,
						   err_mask);

			if (err)
				goto out_err;

			/* remove old err_mask registration */
			raw_disable_errfilter(sock_net(sk), dev, sk,
					      ro->err_mask);
		}

		/* link new err_mask to the socket */
		ro->err_mask = err_mask;

 out_err:
		if (dev)
			dev_put(dev);

		release_sock(sk);
		rtnl_unlock();

		break;

	case CAN_RAW_LOOPBACK:
		if (optlen != sizeof(ro->loopback))
			return -EINVAL;

		if (copy_from_sockptr(&ro->loopback, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_RECV_OWN_MSGS:
		if (optlen != sizeof(ro->recv_own_msgs))
			return -EINVAL;

		if (copy_from_sockptr(&ro->recv_own_msgs, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_FD_FRAMES:
		if (optlen != sizeof(ro->fd_frames))
			return -EINVAL;

		if (copy_from_sockptr(&ro->fd_frames, optval, optlen))
			return -EFAULT;

		break;

	case CAN_RAW_JOIN_FILTERS:
		if (optlen != sizeof(ro->join_filters))
			return -EINVAL;

		if (copy_from_sockptr(&ro->join_filters, optval, optlen))
			return -EFAULT;

		break;

	default:
		return -ENOPROTOOPT;
	}
	return err;
}
690*4882a593Smuzhiyun 
/* raw_getsockopt - read back a CAN_RAW socket option
 *
 * CAN_RAW_FILTER copies out up to the caller-provided length of the
 * currently installed filter list under the socket lock; all other
 * options are plain scalars copied from the raw_sock.
 * Returns 0 on success or a negative errno.
 */
static int raw_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	void *src;
	int ret = 0;
	int len;

	if (level != SOL_CAN_RAW)
		return -EINVAL;
	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case CAN_RAW_FILTER:
		/* the filter list is protected by the socket lock */
		lock_sock(sk);
		if (ro->count > 0) {
			int fsize = ro->count * sizeof(struct can_filter);

			if (len > fsize)
				len = fsize;
			if (copy_to_user(optval, ro->filter, len))
				ret = -EFAULT;
		} else {
			len = 0;
		}
		release_sock(sk);

		return ret ? ret : put_user(len, optlen);

	case CAN_RAW_ERR_FILTER:
		src = &ro->err_mask;
		if (len > sizeof(can_err_mask_t))
			len = sizeof(can_err_mask_t);
		break;

	case CAN_RAW_LOOPBACK:
		src = &ro->loopback;
		if (len > sizeof(int))
			len = sizeof(int);
		break;

	case CAN_RAW_RECV_OWN_MSGS:
		src = &ro->recv_own_msgs;
		if (len > sizeof(int))
			len = sizeof(int);
		break;

	case CAN_RAW_FD_FRAMES:
		src = &ro->fd_frames;
		if (len > sizeof(int))
			len = sizeof(int);
		break;

	case CAN_RAW_JOIN_FILTERS:
		src = &ro->join_filters;
		if (len > sizeof(int))
			len = sizeof(int);
		break;

	default:
		return -ENOPROTOOPT;
	}

	/* scalar options: report the (possibly clamped) length and value */
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, src, len))
		return -EFAULT;
	return 0;
}
766*4882a593Smuzhiyun 
/* raw_sendmsg - transmit one CAN (or CAN FD) frame from userspace
 *
 * The outgoing interface is taken from msg_name (a sockaddr_can) when
 * supplied, otherwise from the interface the socket is bound to.
 * Returns the number of bytes sent (== size) or a negative errno.
 */
static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro = raw_sk(sk);
	struct sk_buff *skb;
	struct net_device *dev;
	int ifindex;
	int err;

	if (msg->msg_name) {
		DECLARE_SOCKADDR(struct sockaddr_can *, addr, msg->msg_name);

		if (msg->msg_namelen < RAW_MIN_NAMELEN)
			return -EINVAL;

		if (addr->can_family != AF_CAN)
			return -EINVAL;

		ifindex = addr->can_ifindex;
	} else {
		ifindex = ro->ifindex;
	}

	/* takes a reference on the device; dropped on every exit path */
	dev = dev_get_by_index(sock_net(sk), ifindex);
	if (!dev)
		return -ENXIO;

	/* the payload size must exactly match a frame type the device
	 * accepts: CAN FD frames only when the socket enabled
	 * CAN_RAW_FD_FRAMES and the device is FD capable (MTU == CANFD_MTU)
	 */
	err = -EINVAL;
	if (ro->fd_frames && dev->mtu == CANFD_MTU) {
		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
			goto put_dev;
	} else {
		if (unlikely(size != CAN_MTU))
			goto put_dev;
	}

	skb = sock_alloc_send_skb(sk, size + sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto put_dev;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = dev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;

	err = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (err < 0)
		goto free_skb;

	skb_setup_tx_timestamp(skb, sk->sk_tsflags);

	skb->dev = dev;
	skb->sk = sk;
	skb->priority = sk->sk_priority;

	/* can_send() consumes the skb on success and on failure, so the
	 * error paths below must not free it again
	 */
	err = can_send(skb, ro->loopback);

	dev_put(dev);

	if (err)
		goto send_failed;

	return size;

free_skb:
	kfree_skb(skb);
put_dev:
	dev_put(dev);
send_failed:
	return err;
}
838*4882a593Smuzhiyun 
/* raw_recvmsg - deliver one queued CAN frame to userspace
 *
 * Copies the next frame from the receive queue into the message,
 * fills in timestamps, the sender address (from skb->cb, stored by
 * raw_rcv()) and the flags recorded at receive time.
 * Returns the number of bytes copied or a negative errno.
 */
static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int noblock = flags & MSG_DONTWAIT;
	int ret = 0;

	flags &= ~MSG_DONTWAIT;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sk, msg, size, SOL_CAN_RAW,
					  SCM_CAN_RAW_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, noblock, &ret);
	if (!skb)
		return ret;

	/* clamp to the frame length; flag truncation when the caller's
	 * buffer is too small
	 */
	if (size >= skb->len)
		size = skb->len;
	else
		msg->msg_flags |= MSG_TRUNC;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name) {
		__sockaddr_check_size(RAW_MIN_NAMELEN);
		msg->msg_namelen = RAW_MIN_NAMELEN;
		memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
	}

	/* assign the flags that have been recorded in raw_rcv() */
	msg->msg_flags |= *(raw_flags(skb));

	ret = size;

out_free:
	skb_free_datagram(sk, skb);
	return ret;
}
884*4882a593Smuzhiyun 
/* raw_sock_no_ioctlcmd - reject socket-level ioctls
 *
 * Returning -ENOIOCTLCMD lets the generic socket code fall back to
 * the network device's own ioctl handling.
 */
static int raw_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
891*4882a593Smuzhiyun 
/* proto_ops for AF_CAN/SOCK_RAW sockets; operations that make no
 * sense for a connectionless raw CAN socket map to the sock_no_*()
 * stubs
 */
static const struct proto_ops raw_ops = {
	.family        = PF_CAN,
	.release       = raw_release,
	.bind          = raw_bind,
	.connect       = sock_no_connect,
	.socketpair    = sock_no_socketpair,
	.accept        = sock_no_accept,
	.getname       = raw_getname,
	.poll          = datagram_poll,
	.ioctl         = raw_sock_no_ioctlcmd,
	.gettstamp     = sock_gettstamp,
	.listen        = sock_no_listen,
	.shutdown      = sock_no_shutdown,
	.setsockopt    = raw_setsockopt,
	.getsockopt    = raw_getsockopt,
	.sendmsg       = raw_sendmsg,
	.recvmsg       = raw_recvmsg,
	.mmap          = sock_no_mmap,
	.sendpage      = sock_no_sendpage,
};
912*4882a593Smuzhiyun 
/* protocol definition: per-socket state is a struct raw_sock,
 * initialized by raw_init() when the socket is created
 */
static struct proto raw_proto __read_mostly = {
	.name       = "CAN_RAW",
	.owner      = THIS_MODULE,
	.obj_size   = sizeof(struct raw_sock),
	.init       = raw_init,
};
919*4882a593Smuzhiyun 
/* registration record binding protocol number CAN_RAW (SOCK_RAW)
 * to the ops and proto above
 */
static const struct can_proto raw_can_proto = {
	.type       = SOCK_RAW,
	.protocol   = CAN_RAW,
	.ops        = &raw_ops,
	.prot       = &raw_proto,
};
926*4882a593Smuzhiyun 
/* netdevice notifier: raw_notifier() reacts to interface changes
 * (e.g. a bound CAN device going away)
 */
static struct notifier_block canraw_notifier = {
	.notifier_call = raw_notifier
};
930*4882a593Smuzhiyun 
raw_module_init(void)931*4882a593Smuzhiyun static __init int raw_module_init(void)
932*4882a593Smuzhiyun {
933*4882a593Smuzhiyun 	int err;
934*4882a593Smuzhiyun 
935*4882a593Smuzhiyun 	pr_info("can: raw protocol\n");
936*4882a593Smuzhiyun 
937*4882a593Smuzhiyun 	err = can_proto_register(&raw_can_proto);
938*4882a593Smuzhiyun 	if (err < 0)
939*4882a593Smuzhiyun 		pr_err("can: registration of raw protocol failed\n");
940*4882a593Smuzhiyun 	else
941*4882a593Smuzhiyun 		register_netdevice_notifier(&canraw_notifier);
942*4882a593Smuzhiyun 
943*4882a593Smuzhiyun 	return err;
944*4882a593Smuzhiyun }
945*4882a593Smuzhiyun 
/* raw_module_exit - tear down the protocol and netdevice notifier
 * registered in raw_module_init()
 */
static __exit void raw_module_exit(void)
{
	can_proto_unregister(&raw_can_proto);
	unregister_netdevice_notifier(&canraw_notifier);
}
951*4882a593Smuzhiyun 
/* module entry/exit hooks */
module_init(raw_module_init);
module_exit(raw_module_exit);
954