xref: /OK3568_Linux_fs/kernel/drivers/net/can/mscan/mscan.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the standalone, as generic as possible, MSCAN controller.
 *
 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
 *                         Varma Electronics Oy
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};

struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};

static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};

static int mscan_set_mode(struct net_device *dev, u8 mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int ret = 0;
	int i;
	u8 canctl1;

	if (mode != MSCAN_NORMAL_MODE) {
		if (priv->tx_active) {
			/* Abort transfers before going to sleep */
			out_8(&regs->cantarq, priv->tx_active);
			/* Suppress TX done interrupts */
			out_8(&regs->cantier, 0);
		}

		canctl1 = in_8(&regs->canctl1);
		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
			setbits8(&regs->canctl0, MSCAN_SLPRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
					break;
				udelay(100);
			}
			/*
			 * The MSCAN controller will fail to enter sleep mode
			 * while there is irregular activity on the bus, e.g.
			 * a node that keeps retransmitting. This behaviour is
			 * undocumented and seems to differ between the MSCAN
			 * built into the MPC5200B and the MPC5200. We proceed
			 * in that case, since otherwise SLPRQ would be kept
			 * set and the controller would get stuck. NOTE: INITRQ
			 * or CSWAI will immediately abort any transmissions
			 * still in progress.
			 */
			if (i >= MSCAN_SET_MODE_RETRIES)
				netdev_dbg(dev,
					   "device failed to enter sleep mode. "
					   "We proceed anyhow.\n");
			else
				priv->can.state = CAN_STATE_SLEEPING;
		}

		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
			setbits8(&regs->canctl0, MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_INITAK)
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
		}
		if (!ret)
			priv->can.state = CAN_STATE_STOPPED;

		if (mode & MSCAN_CSWAI)
			setbits8(&regs->canctl0, MSCAN_CSWAI);

	} else {
		canctl1 = in_8(&regs->canctl1);
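		/*
		 * Return to normal mode: if the controller is currently in
		 * sleep or init mode, clear both request bits and wait for
		 * the acknowledge bits to drop.
		 */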
		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				canctl1 = in_8(&regs->canctl1);
				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
			else
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
	return ret;
}

static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
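	/* Derive the initial CAN state from the worse of the RX/TX status fields */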
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
				    MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}

static int mscan_restart(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (priv->type == MSCAN_TYPE_MPC5121) {
		struct mscan_regs __iomem *regs = priv->reg_base;

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
		     "bus-off state expected\n");
		out_8(&regs->canmisc, MSCAN_BOHOLD);
		/* Re-enable receive interrupts. */
		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
	} else {
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		return mscan_start(dev);
	}

	return 0;
}

static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

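	/*
	 * Determine the set of currently empty TX buffers and pick the
	 * lowest-numbered free one; hweight8() tells how many are left.
	 */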
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * If buf_id < 3, the current frame will be sent out of order,
		 * since buffers with a lower id have a higher priority.
		 */
		netif_stop_queue(dev);
		fallthrough;
	case 2:
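		/*
		 * A lower-numbered buffer would be transmitted ahead of the
		 * one queued previously, so bump the local priority counter
		 * to preserve ordering; once it is about to wrap, wait for
		 * all pending frames to go out first.
		 */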
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* RTR is always the lowest bit of interest, then IDs follow */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK)
			 << (MSCAN_EFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		/* EFF_FLAGS are between the IDs :( */
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
			 | MSCAN_EFF_FLAGS;
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK)
			 << (MSCAN_SFF_RTR_SHIFT + 1);
		if (rtr)
			can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->can_dlc / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* write remaining byte if necessary */
		if (frame->can_dlc & 1)
			out_8(data, frame->data[frame->can_dlc - 1]);
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		netif_trans_update(dev);

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}

static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (unlikely(canrflg & MSCAN_CSCIF))
		return state_map[max(MSCAN_STATE_RX(canrflg),
				 MSCAN_STATE_TX(canrflg))];

	return priv->can.state;
}

static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
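	/* Bit 3 of IDR1 is the IDE flag: set for extended (29-bit) frames */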
	if (can_id & (1 << 3)) {
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		can_id >>= 4;
		frame->can_id = 0;
	}

	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;

	frame->can_dlc = get_can_dlc(in_8(&regs->rx.dlr) & 0xf);

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *)frame->data;

		for (i = 0; i < frame->can_dlc / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
		/* read remaining byte if necessary */
		if (frame->can_dlc & 1)
			frame->data[frame->can_dlc - 1] = in_8(data);
	}

	out_8(&regs->canrflg, MSCAN_RXF);
}

static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state new_state;

	netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else {
		frame->data[1] = 0;
	}

	new_state = get_new_state(dev, canrflg);
	if (new_state != priv->can.state) {
		can_change_state(dev, frame,
				 state_map[MSCAN_STATE_TX(canrflg)],
				 state_map[MSCAN_STATE_RX(canrflg)]);

		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/*
			 * The MSCAN on the MPC5200 recovers from bus-off
			 * automatically. To avoid that, we stop the chip
			 * here with a light-weight stop (we are in IRQ
			 * context).
			 */
			if (priv->type != MSCAN_TYPE_MPC5121) {
				out_8(&regs->cantier, 0);
				out_8(&regs->canrier, 0);
				setbits8(&regs->canctl0,
					 MSCAN_SLPRQ | MSCAN_INITRQ);
			}
			can_bus_off(dev);
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}

static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int work_done = 0;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (work_done < quota) {
		canrflg = in_8(&regs->canrflg);
		if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
			break;

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				netdev_notice(dev, "packet dropped\n");
			stats->rx_dropped++;
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		stats->rx_packets++;
		stats->rx_bytes += frame->can_dlc;
		work_done++;
		netif_receive_skb(skb);
	}

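	/*
	 * All pending work handled within the quota: leave polling mode and
	 * restore the interrupt mask the ISR saved before scheduling NAPI.
	 */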
	if (work_done < quota) {
		if (likely(napi_complete_done(&priv->napi, work_done))) {
			clear_bit(F_RX_PROGRESS, &priv->flags);
			if (priv->can.state < CAN_STATE_BUS_OFF)
				out_8(&regs->canrier, priv->shadow_canrier);
		}
	}
	return work_done;
}

static irqreturn_t mscan_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	u8 cantier, cantflg, canrflg;
	irqreturn_t ret = IRQ_NONE;

	cantier = in_8(&regs->cantier) & MSCAN_TXE;
	cantflg = in_8(&regs->cantflg) & cantier;

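	/* TX done: at least one enabled transmit buffer is reported empty */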
	if (cantier && cantflg) {
		struct list_head *tmp, *pos;

		list_for_each_safe(pos, tmp, &priv->tx_head) {
			struct tx_queue_entry *entry =
			    list_entry(pos, struct tx_queue_entry, list);
			u8 mask = entry->mask;

			if (!(cantflg & mask))
				continue;

			out_8(&regs->cantbsel, mask);
			stats->tx_bytes += in_8(&regs->tx.dlr);
			stats->tx_packets++;
			can_get_echo_skb(dev, entry->id);
			priv->tx_active &= ~mask;
			list_del(pos);
		}

		if (list_empty(&priv->tx_head)) {
			clear_bit(F_TX_WAIT_ALL, &priv->flags);
			clear_bit(F_TX_PROGRESS, &priv->flags);
			priv->cur_pri = 0;
		} else {
			netif_trans_update(dev);
		}

		if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
			netif_wake_queue(dev);

		out_8(&regs->cantier, priv->tx_active);
		ret = IRQ_HANDLED;
	}

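	/*
	 * RX or error interrupt pending: save and mask the receiver
	 * interrupts, then let the NAPI poll routine do the actual work.
	 */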
	canrflg = in_8(&regs->canrflg);
	if ((canrflg & ~MSCAN_STAT_MSK) &&
	    !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
		if (canrflg & ~MSCAN_STAT_MSK) {
			priv->shadow_canrier = in_8(&regs->canrier);
			out_8(&regs->canrier, 0);
			napi_schedule(&priv->napi);
			ret = IRQ_HANDLED;
		} else {
			clear_bit(F_RX_PROGRESS, &priv->flags);
		}
	}
	return ret;
}

static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		ret = mscan_restart(dev);
		if (ret)
			break;
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}
	return ret;
}

static int mscan_do_set_bittiming(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	struct can_bittiming *bt = &priv->can.bittiming;
	u8 btr0, btr1;

	btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
	btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
		BTR1_SET_TSEG2(bt->phase_seg2) |
		BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

	netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

	out_8(&regs->canbtr0, btr0);
	out_8(&regs->canbtr1, btr1);

	return 0;
}

static int mscan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	bec->txerr = in_8(&regs->cantxerr);
	bec->rxerr = in_8(&regs->canrxerr);

	return 0;
}

static int mscan_open(struct net_device *dev)
{
	int ret;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	ret = clk_prepare_enable(priv->clk_ipg);
	if (ret)
		goto exit_retcode;
	ret = clk_prepare_enable(priv->clk_can);
	if (ret)
		goto exit_dis_ipg_clock;

	/* common open */
	ret = open_candev(dev);
	if (ret)
		goto exit_dis_can_clock;

	napi_enable(&priv->napi);

	ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
	if (ret < 0) {
		netdev_err(dev, "failed to attach interrupt\n");
		goto exit_napi_disable;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		setbits8(&regs->canctl1, MSCAN_LISTEN);
	else
		clrbits8(&regs->canctl1, MSCAN_LISTEN);

	ret = mscan_start(dev);
	if (ret)
		goto exit_free_irq;

	netif_start_queue(dev);

	return 0;

exit_free_irq:
	free_irq(dev->irq, dev);
exit_napi_disable:
	napi_disable(&priv->napi);
	close_candev(dev);
exit_dis_can_clock:
	clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
	clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
	return ret;
}

static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	out_8(&regs->cantier, 0);
	out_8(&regs->canrier, 0);
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);

	clk_disable_unprepare(priv->clk_can);
	clk_disable_unprepare(priv->clk_ipg);

	return 0;
}

static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};

int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (mscan_clksrc)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		priv->can.do_get_berr_counter = mscan_get_berr_counter;
		ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
	}

	ctl1 |= MSCAN_CANE;
	out_8(&regs->canctl1, ctl1);
	udelay(100);

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}

void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;

	mscan_set_mode(dev, MSCAN_INIT_MODE);
	clrbits8(&regs->canctl1, MSCAN_CANE);
	unregister_candev(dev);
}

struct net_device *alloc_mscandev(void)
{
	struct net_device *dev;
	struct mscan_priv *priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_LISTENONLY;

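	/* One queue entry per hardware TX buffer; mask selects it in CANTBSEL/CANTFLG */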
	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}

MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");