// SPDX-License-Identifier: GPL-2.0

/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
 *
 * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
 *
 *
 * General Description:
 *
 * The USB Device uses three Endpoints:
 *
 *   CONTROL Endpoint: Is used to set up the device (start, stop,
 *   info, configure).
 *
 *   IN Endpoint: The device sends CAN Frame Messages and Device
 *   Information using the IN endpoint.
 *
 *   OUT Endpoint: The driver sends configuration requests and CAN
 *   Frames on the OUT endpoint.
 *
 * Error Handling:
 *
 *   If error reporting is turned on, the device encodes errors into CAN
 *   error frames (see uapi/linux/can/error.h) and sends them using the
 *   IN Endpoint. The driver updates the statistics and forwards them.
 */

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define UCAN_DRIVER_NAME "ucan"
#define UCAN_MAX_RX_URBS 8
/* the CAN controller needs a while to enable/disable the bus */
#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
/* this driver currently supports protocol version 3 only */
#define UCAN_PROTOCOL_VERSION_MIN 3
#define UCAN_PROTOCOL_VERSION_MAX 3

/* UCAN Message Definitions
 * ------------------------
 *
 *  ucan_message_out_t and ucan_message_in_t define the messages
 *  transmitted on the OUT and IN endpoint.
 *
 *  Multibyte fields are transmitted in little-endian byte order.
 *
 *  INTR Endpoint: a single uint32_t storing the current space in the fifo
 *
 *  OUT Endpoint: single message of type ucan_message_out_t is
 *    transmitted on the out endpoint
 *
 *  IN Endpoint: multiple messages ucan_message_in_t concatenated in
 *    the following way:
 *
 *	m[n].len <=> the length of message n (including the header) in bytes
 *	m[n] is aligned to a 4 byte boundary, hence
 *	  offset(m[0])	 := 0;
 *	  offset(m[n+1]) := offset(m[n]) + ((m[n].len + 3) & ~3)
 *
 *	this implies that
 *	  offset(m[n]) % 4 <=> 0
 */
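
/* Worked example (editorial illustration, not part of the protocol
 * definition): assuming the first IN message carries m[0].len == 10,
 * it occupies (10 + 3) & ~3 == 12 bytes of the URB buffer, so
 *   offset(m[0]) == 0 and offset(m[1]) == 12;
 * if m[1].len == 8, then offset(m[2]) == 12 + 8 == 20.
 * Every offset therefore stays a multiple of 4, which is what
 * ucan_read_bulk_callback() relies on when it advances the parse
 * position with round_up(pos, 4).
 */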

/* Device Global Commands */
enum {
	UCAN_DEVICE_GET_FW_STRING = 0,
};

/* UCAN Commands */
enum {
	/* start the can transceiver - val defines the operation mode */
	UCAN_COMMAND_START = 0,
	/* cancel pending transmissions and stop the can transceiver */
	UCAN_COMMAND_STOP = 1,
	/* send can transceiver into low-power sleep mode */
	UCAN_COMMAND_SLEEP = 2,
	/* wake up can transceiver from low-power sleep mode */
	UCAN_COMMAND_WAKEUP = 3,
	/* reset the can transceiver */
	UCAN_COMMAND_RESET = 4,
	/* get piece of info from the can transceiver - subcmd defines what
	 * piece
	 */
	UCAN_COMMAND_GET = 5,
	/* clear or disable hardware filter - subcmd defines which of the two */
	UCAN_COMMAND_FILTER = 6,
	/* Setup bittiming */
	UCAN_COMMAND_SET_BITTIMING = 7,
	/* recover from bus-off state */
	UCAN_COMMAND_RESTART = 8,
};

/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
 * Undefined bits must be set to 0.
 */
enum {
	UCAN_MODE_LOOPBACK = BIT(0),
	UCAN_MODE_SILENT = BIT(1),
	UCAN_MODE_3_SAMPLES = BIT(2),
	UCAN_MODE_ONE_SHOT = BIT(3),
	UCAN_MODE_BERR_REPORT = BIT(4),
};

/* UCAN_COMMAND_GET subcommands */
enum {
	UCAN_COMMAND_GET_INFO = 0,
	UCAN_COMMAND_GET_PROTOCOL_VERSION = 1,
};

/* UCAN_COMMAND_FILTER subcommands */
enum {
	UCAN_FILTER_CLEAR = 0,
	UCAN_FILTER_DISABLE = 1,
	UCAN_FILTER_ENABLE = 2,
};

/* OUT endpoint message types */
enum {
	UCAN_OUT_TX = 2,     /* transmit a CAN frame */
};

/* IN endpoint message types */
enum {
	UCAN_IN_TX_COMPLETE = 1,  /* CAN frame transmission completed */
	UCAN_IN_RX = 2,           /* CAN frame received */
};

struct ucan_ctl_cmd_start {
	__le16 mode;         /* OR-ing any of UCAN_MODE_* */
} __packed;

struct ucan_ctl_cmd_set_bittiming {
	__le32 tq;           /* Time quanta (TQ) in nanoseconds */
	__le16 brp;          /* TQ Prescaler */
	__le16 sample_point; /* Sample point in tenths of a percent */
	u8 prop_seg;         /* Propagation segment in TQs */
	u8 phase_seg1;       /* Phase buffer segment 1 in TQs */
	u8 phase_seg2;       /* Phase buffer segment 2 in TQs */
	u8 sjw;              /* Synchronisation jump width in TQs */
} __packed;

struct ucan_ctl_cmd_device_info {
	__le32 freq;         /* Clock Frequency for tq generation */
	u8 tx_fifo;          /* Size of the transmission fifo */
	u8 sjw_max;          /* can_bittiming fields... */
	u8 tseg1_min;
	u8 tseg1_max;
	u8 tseg2_min;
	u8 tseg2_max;
	__le16 brp_inc;
	__le32 brp_min;
	__le32 brp_max;      /* ...can_bittiming fields */
	__le16 ctrlmodes;    /* supported control modes */
	__le16 hwfilter;     /* Number of HW filter banks */
	__le16 rxmboxes;     /* Number of receive Mailboxes */
} __packed;

struct ucan_ctl_cmd_get_protocol_version {
	__le32 version;
} __packed;

union ucan_ctl_payload {
	/* Start the CAN controller
	 * bmRequest == UCAN_COMMAND_START
	 */
	struct ucan_ctl_cmd_start cmd_start;
	/* Setup Bittiming
	 * bmRequest == UCAN_COMMAND_SET_BITTIMING
	 */
	struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming;
	/* Get Device Information
	 * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO
	 */
	struct ucan_ctl_cmd_device_info cmd_get_device_info;
	/* Get Protocol Version
	 * bmRequest == UCAN_COMMAND_GET;
	 * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION
	 */
	struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version;

	u8 raw[128];
} __packed;

enum {
	UCAN_TX_COMPLETE_SUCCESS = BIT(0),
};

/* Transmission Complete within ucan_message_in */
struct ucan_tx_complete_entry_t {
	u8 echo_index;
	u8 flags;
} __packed __aligned(0x2);

/* CAN Data message format within ucan_message_in/out */
struct ucan_can_msg {
	/* note DLC is computed by
	 *    msg.len - sizeof (msg.len)
	 *            - sizeof (msg.type)
	 *            - sizeof (msg.subtype)
	 *            - sizeof (msg.can_msg.id)
	 */
	__le32 id;

	union {
		u8 data[CAN_MAX_DLEN];  /* Data of CAN frames */
		u8 dlc;                 /* RTR dlc */
	};
} __packed;
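
/* Editorial note: for a data frame the device does not transmit an
 * explicit DLC field; ucan_get_can_dlc() below recovers it from the
 * message length. E.g. a frame with 5 data bytes arrives with
 * msg.len == 4 (header) + 4 (id) + 5 (data) == 13, so
 * DLC == 13 - UCAN_IN_HDR_SIZE - sizeof(id) == 5. Only RTR frames
 * carry an explicit dlc byte in place of the data.
 */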

/* OUT Endpoint, outbound messages */
struct ucan_message_out {
	__le16 len; /* Length of the content including the header */
	u8 type;    /* UCAN_OUT_TX and friends */
	u8 subtype; /* command sub type */

	union {
		/* Transmit CAN frame
		 * (type == UCAN_OUT_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
		 * subtype stores the echo id
		 */
		struct ucan_can_msg can_msg;
	} msg;
} __packed __aligned(0x4);

/* IN Endpoint, inbound messages */
struct ucan_message_in {
	__le16 len; /* Length of the content including the header */
	u8 type;    /* UCAN_IN_RX and friends */
	u8 subtype; /* command sub type */

	union {
		/* CAN Frame received
		 * (type == UCAN_IN_RX)
		 * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0)
		 */
		struct ucan_can_msg can_msg;

		/* CAN transmission complete
		 * (type == UCAN_IN_TX_COMPLETE)
		 */
		struct ucan_tx_complete_entry_t can_tx_complete_msg[0];
	} __aligned(0x4) msg;
} __packed;

/* Macros to calculate message lengths */
#define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg)

#define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg)
#define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member))
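
/* Editorial note: UCAN_IN_LEN() is built on UCAN_OUT_HDR_SIZE. With the
 * layouts above both headers are 4 bytes (__le16 len + u8 type +
 * u8 subtype), so the two constants are interchangeable here; using
 * UCAN_IN_HDR_SIZE would arguably read more clearly for IN-side lengths.
 */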

struct ucan_priv;

/* Context Information for transmission URBs */
struct ucan_urb_context {
	struct ucan_priv *up;
	u8 dlc;
	bool allocated;
};

/* Information reported by the USB device */
struct ucan_device_info {
	struct can_bittiming_const bittiming_const;
	u8 tx_fifo;
};

/* Driver private data */
struct ucan_priv {
	/* must be the first member */
	struct can_priv can;

	/* linux USB device structures */
	struct usb_device *udev;
	struct usb_interface *intf;
	struct net_device *netdev;

	/* lock for can->echo_skb (used around
	 * can_put/get/free_echo_skb)
	 */
	spinlock_t echo_skb_lock;

	/* usb device information */
	u8 intf_index;
	u8 in_ep_addr;
	u8 out_ep_addr;
	u16 in_ep_size;

	/* transmission and reception buffers */
	struct usb_anchor rx_urbs;
	struct usb_anchor tx_urbs;

	union ucan_ctl_payload *ctl_msg_buffer;
	struct ucan_device_info device_info;

	/* transmission control information and locks */
	spinlock_t context_lock;
	unsigned int available_tx_urbs;
	struct ucan_urb_context *context_array;
};

static u8 ucan_get_can_dlc(struct ucan_can_msg *msg, u16 len)
{
	if (le32_to_cpu(msg->id) & CAN_RTR_FLAG)
		return get_can_dlc(msg->dlc);
	else
		return get_can_dlc(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id)));
}

static void ucan_release_context_array(struct ucan_priv *up)
{
	if (!up->context_array)
		return;

	/* lock is not needed because the driver is currently opening or closing */
	up->available_tx_urbs = 0;

	kfree(up->context_array);
	up->context_array = NULL;
}

static int ucan_alloc_context_array(struct ucan_priv *up)
{
	int i;

	/* release contexts if any */
	ucan_release_context_array(up);

	up->context_array = kcalloc(up->device_info.tx_fifo,
				    sizeof(*up->context_array),
				    GFP_KERNEL);
	if (!up->context_array) {
		netdev_err(up->netdev,
			   "Not enough memory to allocate tx contexts\n");
		return -ENOMEM;
	}

	for (i = 0; i < up->device_info.tx_fifo; i++) {
		up->context_array[i].allocated = false;
		up->context_array[i].up = up;
	}

	/* lock is not needed because the driver is currently opening */
	up->available_tx_urbs = up->device_info.tx_fifo;

	return 0;
}

static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up)
{
	int i;
	unsigned long flags;
	struct ucan_urb_context *ret = NULL;

	if (WARN_ON_ONCE(!up->context_array))
		return NULL;

	/* execute context operation atomically */
	spin_lock_irqsave(&up->context_lock, flags);

	for (i = 0; i < up->device_info.tx_fifo; i++) {
		if (!up->context_array[i].allocated) {
			/* update context */
			ret = &up->context_array[i];
			up->context_array[i].allocated = true;

			/* stop queue if necessary */
			up->available_tx_urbs--;
			if (!up->available_tx_urbs)
				netif_stop_queue(up->netdev);

			break;
		}
	}

	spin_unlock_irqrestore(&up->context_lock, flags);
	return ret;
}

static bool ucan_release_context(struct ucan_priv *up,
				 struct ucan_urb_context *ctx)
{
	unsigned long flags;
	bool ret = false;

	if (WARN_ON_ONCE(!up->context_array))
		return false;

	/* execute context operation atomically */
	spin_lock_irqsave(&up->context_lock, flags);

	/* context was not allocated, maybe the device sent garbage */
	if (ctx->allocated) {
		ctx->allocated = false;

		/* check if the queue needs to be woken */
		if (!up->available_tx_urbs)
			netif_wake_queue(up->netdev);
		up->available_tx_urbs++;

		ret = true;
	}

	spin_unlock_irqrestore(&up->context_lock, flags);
	return ret;
}

static int ucan_ctrl_command_out(struct ucan_priv *up,
				 u8 cmd, u16 subcmd, u16 datalen)
{
	return usb_control_msg(up->udev,
			       usb_sndctrlpipe(up->udev, 0),
			       cmd,
			       USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_INTERFACE,
			       subcmd,
			       up->intf_index,
			       up->ctl_msg_buffer,
			       datalen,
			       UCAN_USB_CTL_PIPE_TIMEOUT);
}

static int ucan_device_request_in(struct ucan_priv *up,
				  u8 cmd, u16 subcmd, u16 datalen)
{
	return usb_control_msg(up->udev,
			       usb_rcvctrlpipe(up->udev, 0),
			       cmd,
			       USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			       subcmd,
			       0,
			       up->ctl_msg_buffer,
			       datalen,
			       UCAN_USB_CTL_PIPE_TIMEOUT);
}

/* Parse the device information structure reported by the device and
 * setup private variables accordingly
 */
static void ucan_parse_device_info(struct ucan_priv *up,
				   struct ucan_ctl_cmd_device_info *device_info)
{
	struct can_bittiming_const *bittiming =
		&up->device_info.bittiming_const;
	u16 ctrlmodes;

	/* store the data */
	up->can.clock.freq = le32_to_cpu(device_info->freq);
	up->device_info.tx_fifo = device_info->tx_fifo;
	strcpy(bittiming->name, "ucan");
	bittiming->tseg1_min = device_info->tseg1_min;
	bittiming->tseg1_max = device_info->tseg1_max;
	bittiming->tseg2_min = device_info->tseg2_min;
	bittiming->tseg2_max = device_info->tseg2_max;
	bittiming->sjw_max = device_info->sjw_max;
	bittiming->brp_min = le32_to_cpu(device_info->brp_min);
	bittiming->brp_max = le32_to_cpu(device_info->brp_max);
	bittiming->brp_inc = le16_to_cpu(device_info->brp_inc);

	ctrlmodes = le16_to_cpu(device_info->ctrlmodes);

	up->can.ctrlmode_supported = 0;

	if (ctrlmodes & UCAN_MODE_LOOPBACK)
		up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
	if (ctrlmodes & UCAN_MODE_SILENT)
		up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
	if (ctrlmodes & UCAN_MODE_3_SAMPLES)
		up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
	if (ctrlmodes & UCAN_MODE_ONE_SHOT)
		up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
	if (ctrlmodes & UCAN_MODE_BERR_REPORT)
		up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;
}

/* Handle a CAN error frame that we have received from the device.
 * Returns true if the can state has changed.
 */
static bool ucan_handle_error_frame(struct ucan_priv *up,
				    struct ucan_message_in *m,
				    canid_t canid)
{
	enum can_state new_state = up->can.state;
	struct net_device_stats *net_stats = &up->netdev->stats;
	struct can_device_stats *can_stats = &up->can.can_stats;

	if (canid & CAN_ERR_LOSTARB)
		can_stats->arbitration_lost++;

	if (canid & CAN_ERR_BUSERROR)
		can_stats->bus_error++;

	if (canid & CAN_ERR_ACK)
		net_stats->tx_errors++;

	if (canid & CAN_ERR_BUSOFF)
		new_state = CAN_STATE_BUS_OFF;

	/* controller problems, details in data[1] */
	if (canid & CAN_ERR_CRTL) {
		u8 d1 = m->msg.can_msg.data[1];

		if (d1 & CAN_ERR_CRTL_RX_OVERFLOW)
			net_stats->rx_over_errors++;

		/* controller state bits: if multiple are set the worst wins */
		if (d1 & CAN_ERR_CRTL_ACTIVE)
			new_state = CAN_STATE_ERROR_ACTIVE;

		if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING))
			new_state = CAN_STATE_ERROR_WARNING;

		if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE))
			new_state = CAN_STATE_ERROR_PASSIVE;
	}

	/* protocol error, details in data[2] */
	if (canid & CAN_ERR_PROT) {
		u8 d2 = m->msg.can_msg.data[2];

		if (d2 & CAN_ERR_PROT_TX)
			net_stats->tx_errors++;
		else
			net_stats->rx_errors++;
	}

	/* no state change - we are done */
	if (up->can.state == new_state)
		return false;

	/* we switched into a better state */
	if (up->can.state > new_state) {
		up->can.state = new_state;
		return true;
	}

	/* we switched into a worse state */
	up->can.state = new_state;
	switch (new_state) {
	case CAN_STATE_BUS_OFF:
		can_stats->bus_off++;
		can_bus_off(up->netdev);
		break;
	case CAN_STATE_ERROR_PASSIVE:
		can_stats->error_passive++;
		break;
	case CAN_STATE_ERROR_WARNING:
		can_stats->error_warning++;
		break;
	default:
		break;
	}
	return true;
}

/* Callback on reception of a can frame via the IN endpoint
 *
 * This function allocates an skb and transfers it to the Linux
 * network stack
 */
static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m)
{
	int len;
	canid_t canid;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &up->netdev->stats;

	/* get the contents of the length field */
	len = le16_to_cpu(m->len);

	/* check sanity */
	if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) {
		netdev_warn(up->netdev, "invalid input message len: %d\n", len);
		return;
	}

	/* handle error frames */
	canid = le32_to_cpu(m->msg.can_msg.id);
	if (canid & CAN_ERR_FLAG) {
		bool busstate_changed = ucan_handle_error_frame(up, m, canid);

		/* if berr-reporting is off only state changes get through */
		if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
		    !busstate_changed)
			return;
	} else {
		canid_t canid_mask;
		/* compute the mask for canid */
		canid_mask = CAN_RTR_FLAG;
		if (canid & CAN_EFF_FLAG)
			canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG;
		else
			canid_mask |= CAN_SFF_MASK;

		if (canid & ~canid_mask)
			netdev_warn(up->netdev,
				    "unexpected bits set (canid %x, mask %x)",
				    canid, canid_mask);

		canid &= canid_mask;
	}

	/* allocate skb */
	skb = alloc_can_skb(up->netdev, &cf);
	if (!skb)
		return;

	/* fill the can frame */
	cf->can_id = canid;

	/* compute DLC taking RTR_FLAG into account */
	cf->can_dlc = ucan_get_can_dlc(&m->msg.can_msg, len);

	/* copy the payload of non RTR frames */
	if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG))
		memcpy(cf->data, m->msg.can_msg.data, cf->can_dlc);

	/* don't count error frames as real packets */
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	/* pass it to Linux */
	netif_rx(skb);
}

/* callback indicating completed transmission */
static void ucan_tx_complete_msg(struct ucan_priv *up,
				 struct ucan_message_in *m)
{
	unsigned long flags;
	u16 count, i;
	u8 echo_index, dlc;
	u16 len = le16_to_cpu(m->len);

	struct ucan_urb_context *context;

	if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) {
		netdev_err(up->netdev, "invalid tx complete length\n");
		return;
	}

	count = (len - UCAN_IN_HDR_SIZE) / 2;
	for (i = 0; i < count; i++) {
		/* we did not submit such echo ids */
		echo_index = m->msg.can_tx_complete_msg[i].echo_index;
		if (echo_index >= up->device_info.tx_fifo) {
			up->netdev->stats.tx_errors++;
			netdev_err(up->netdev,
				   "invalid echo_index %d received\n",
				   echo_index);
			continue;
		}

		/* gather information from the context */
		context = &up->context_array[echo_index];
		dlc = READ_ONCE(context->dlc);

		/* Release context and restart queue if necessary.
		 * Also check if the context was allocated
		 */
		if (!ucan_release_context(up, context))
			continue;

		spin_lock_irqsave(&up->echo_skb_lock, flags);
		if (m->msg.can_tx_complete_msg[i].flags &
		    UCAN_TX_COMPLETE_SUCCESS) {
			/* update statistics */
			up->netdev->stats.tx_packets++;
			up->netdev->stats.tx_bytes += dlc;
			can_get_echo_skb(up->netdev, echo_index);
		} else {
			up->netdev->stats.tx_dropped++;
			can_free_echo_skb(up->netdev, echo_index);
		}
		spin_unlock_irqrestore(&up->echo_skb_lock, flags);
	}
}

/* callback on reception of a USB message */
static void ucan_read_bulk_callback(struct urb *urb)
{
	int ret;
	int pos;
	struct ucan_priv *up = urb->context;
	struct net_device *netdev = up->netdev;
	struct ucan_message_in *m;

	/* the device is not up and the driver should not receive any
	 * data on the bulk in pipe
	 */
	if (WARN_ON(!up->context_array)) {
		usb_free_coherent(up->udev,
				  up->in_ep_size,
				  urb->transfer_buffer,
				  urb->transfer_dma);
		return;
	}

	/* check URB status */
	switch (urb->status) {
	case 0:
		break;
	case -ENOENT:
	case -EPIPE:
	case -EPROTO:
	case -ESHUTDOWN:
	case -ETIME:
		/* urb is not resubmitted -> free dma data */
		usb_free_coherent(up->udev,
				  up->in_ep_size,
				  urb->transfer_buffer,
				  urb->transfer_dma);
		netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n",
			   urb->status);
		return;
	default:
		goto resubmit;
	}

	/* sanity check */
	if (!netif_device_present(netdev))
		return;

	/* iterate over input */
	pos = 0;
	while (pos < urb->actual_length) {
		int len;

		/* check sanity (length of header) */
		if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) {
			netdev_warn(up->netdev,
				    "invalid message (short; no hdr; l:%d)\n",
				    urb->actual_length);
			goto resubmit;
		}

		/* setup the message address */
		m = (struct ucan_message_in *)
			((u8 *)urb->transfer_buffer + pos);
		len = le16_to_cpu(m->len);

		/* check sanity (length of content) */
		if (urb->actual_length - pos < len) {
			netdev_warn(up->netdev,
				    "invalid message (short; no data; l:%d)\n",
				    urb->actual_length);
			print_hex_dump(KERN_WARNING,
				       "raw data: ",
				       DUMP_PREFIX_ADDRESS,
				       16,
				       1,
				       urb->transfer_buffer,
				       urb->actual_length,
				       true);

			goto resubmit;
		}

		switch (m->type) {
		case UCAN_IN_RX:
			ucan_rx_can_msg(up, m);
			break;
		case UCAN_IN_TX_COMPLETE:
			ucan_tx_complete_msg(up, m);
			break;
		default:
			netdev_warn(up->netdev,
				    "invalid message (type; t:%d)\n",
				    m->type);
			break;
		}

		/* proceed to next message */
		pos += len;
		/* align to 4 byte boundary */
		pos = round_up(pos, 4);
	}

resubmit:
	/* resubmit urb when done */
	usb_fill_bulk_urb(urb, up->udev,
			  usb_rcvbulkpipe(up->udev,
					  up->in_ep_addr),
			  urb->transfer_buffer,
			  up->in_ep_size,
			  ucan_read_bulk_callback,
			  up);

	usb_anchor_urb(urb, &up->rx_urbs);
	ret = usb_submit_urb(urb, GFP_ATOMIC);

	if (ret < 0) {
		netdev_err(up->netdev,
			   "failed resubmitting read bulk urb: %d\n",
			   ret);

		usb_unanchor_urb(urb);
		usb_free_coherent(up->udev,
				  up->in_ep_size,
				  urb->transfer_buffer,
				  urb->transfer_dma);

		if (ret == -ENODEV)
			netif_device_detach(netdev);
	}
}

/* callback after transmission of a USB message */
static void ucan_write_bulk_callback(struct urb *urb)
{
	unsigned long flags;
	struct ucan_priv *up;
	struct ucan_urb_context *context = urb->context;

	/* get the urb context */
	if (WARN_ON_ONCE(!context))
		return;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev,
			  sizeof(struct ucan_message_out),
			  urb->transfer_buffer,
			  urb->transfer_dma);

	up = context->up;
	if (WARN_ON_ONCE(!up))
		return;

	/* sanity check */
	if (!netif_device_present(up->netdev))
		return;

	/* transmission failed (USB - the device will not send a TX complete) */
	if (urb->status) {
		netdev_warn(up->netdev,
			    "failed to transmit USB message to device: %d\n",
			     urb->status);

		/* update counters and clean up */
		spin_lock_irqsave(&up->echo_skb_lock, flags);
		can_free_echo_skb(up->netdev, context - up->context_array);
		spin_unlock_irqrestore(&up->echo_skb_lock, flags);

		up->netdev->stats.tx_dropped++;

		/* release context and restart the queue if necessary */
		if (!ucan_release_context(up, context))
			netdev_err(up->netdev,
				   "urb failed, failed to release context\n");
	}
}

static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs)
{
	int i;

	for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
		if (urbs[i]) {
			usb_unanchor_urb(urbs[i]);
			usb_free_coherent(up->udev,
					  up->in_ep_size,
					  urbs[i]->transfer_buffer,
					  urbs[i]->transfer_dma);
			usb_free_urb(urbs[i]);
		}
	}

	memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);
}

static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up,
					   struct urb **urbs)
{
	int i;

	memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS);

	for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
		void *buf;

		urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!urbs[i])
			goto err;

		buf = usb_alloc_coherent(up->udev,
					 up->in_ep_size,
					 GFP_KERNEL, &urbs[i]->transfer_dma);
		if (!buf) {
			/* cleanup this urb */
			usb_free_urb(urbs[i]);
			urbs[i] = NULL;
			goto err;
		}

		usb_fill_bulk_urb(urbs[i], up->udev,
				  usb_rcvbulkpipe(up->udev,
						  up->in_ep_addr),
				  buf,
				  up->in_ep_size,
				  ucan_read_bulk_callback,
				  up);

		urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		usb_anchor_urb(urbs[i], &up->rx_urbs);
	}
	return 0;

err:
	/* cleanup other unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);
	return -ENOMEM;
}

/* Submits rx urbs with the semantic: Either submit all, or cleanup
 * everything. In case of errors submitted urbs are killed and all urbs in
 * the array are freed. In case of no errors every entry in the urb
 * array is set to NULL.
 */
static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs)
{
	int i, ret;

	/* Iterate over all urbs to submit. On success remove the urb
	 * from the list.
	 */
	for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
		ret = usb_submit_urb(urbs[i], GFP_KERNEL);
		if (ret) {
			netdev_err(up->netdev,
				   "could not submit urb; code: %d\n",
				   ret);
			goto err;
		}

		/* Anchor URB and drop reference, USB core will take
		 * care of freeing it
		 */
		usb_free_urb(urbs[i]);
		urbs[i] = NULL;
	}
	return 0;

err:
	/* Cleanup unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);

	/* Kill urbs that are already submitted */
	usb_kill_anchored_urbs(&up->rx_urbs);

	return ret;
}

/* Open the network device */
static int ucan_open(struct net_device *netdev)
{
	int ret, ret_cleanup;
	u16 ctrlmode;
	struct urb *urbs[UCAN_MAX_RX_URBS];
	struct ucan_priv *up = netdev_priv(netdev);

	ret = ucan_alloc_context_array(up);
	if (ret)
		return ret;

	/* Allocate and prepare IN URBS - allocated and anchored
	 * urbs are stored in urbs[] for cleanup
	 */
	ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
	if (ret)
		goto err_contexts;

	/* Check the control mode */
	ctrlmode = 0;
	if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		ctrlmode |= UCAN_MODE_LOOPBACK;
	if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		ctrlmode |= UCAN_MODE_SILENT;
	if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		ctrlmode |= UCAN_MODE_3_SAMPLES;
	if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		ctrlmode |= UCAN_MODE_ONE_SHOT;

	/* Enable this in any case - filtering is done within the
	 * receive path
	 */
	ctrlmode |= UCAN_MODE_BERR_REPORT;
	up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);

	/* Driver is ready to receive data - start the USB device */
	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
	if (ret < 0) {
		netdev_err(up->netdev,
			   "could not start device, code: %d\n",
			   ret);
		goto err_reset;
	}

	/* Call CAN layer open */
	ret = open_candev(netdev);
	if (ret)
		goto err_stop;

	/* Driver is ready to receive data. Submit RX URBS */
	ret = ucan_submit_rx_urbs(up, urbs);
	if (ret)
		goto err_stop;

	up->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Start the network queue */
	netif_start_queue(netdev);

	return 0;

err_stop:
	/* The device may have been started already, stop it */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not stop device, code: %d\n",
			   ret_cleanup);

err_reset:
	/* The device might have received data, reset it for
	 * consistent state
	 */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not reset device, code: %d\n",
			   ret_cleanup);

	/* clean up unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);

err_contexts:
	ucan_release_context_array(up);
	return ret;
}

static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
				       struct ucan_urb_context *context,
				       struct can_frame *cf,
				       u8 echo_index)
{
	int mlen;
	struct urb *urb;
	struct ucan_message_out *m;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(up->netdev, "no memory left for URBs\n");
		return NULL;
	}

	m = usb_alloc_coherent(up->udev,
			       sizeof(struct ucan_message_out),
			       GFP_ATOMIC,
			       &urb->transfer_dma);
	if (!m) {
		netdev_err(up->netdev, "no memory left for USB buffer\n");
		usb_free_urb(urb);
		return NULL;
	}

	/* build the USB message */
	m->type = UCAN_OUT_TX;
	m->msg.can_msg.id = cpu_to_le32(cf->can_id);

	if (cf->can_id & CAN_RTR_FLAG) {
		mlen = UCAN_OUT_HDR_SIZE +
			offsetof(struct ucan_can_msg, dlc) +
			sizeof(m->msg.can_msg.dlc);
		m->msg.can_msg.dlc = cf->can_dlc;
	} else {
		mlen = UCAN_OUT_HDR_SIZE +
			sizeof(m->msg.can_msg.id) + cf->can_dlc;
		memcpy(m->msg.can_msg.data, cf->data, cf->can_dlc);
	}
	m->len = cpu_to_le16(mlen);

	context->dlc = cf->can_dlc;

	m->subtype = echo_index;

	/* build the urb */
	usb_fill_bulk_urb(urb, up->udev,
			  usb_sndbulkpipe(up->udev,
					  up->out_ep_addr),
			  m, mlen, ucan_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	return urb;
}

static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb)
{
	usb_free_coherent(up->udev, sizeof(struct ucan_message_out),
			  urb->transfer_buffer, urb->transfer_dma);
	usb_free_urb(urb);
}

1110*4882a593Smuzhiyun /* callback when Linux needs to send a can frame */
ucan_start_xmit(struct sk_buff * skb,struct net_device * netdev)1111*4882a593Smuzhiyun static netdev_tx_t ucan_start_xmit(struct sk_buff *skb,
1112*4882a593Smuzhiyun 				   struct net_device *netdev)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun 	unsigned long flags;
1115*4882a593Smuzhiyun 	int ret;
1116*4882a593Smuzhiyun 	u8 echo_index;
1117*4882a593Smuzhiyun 	struct urb *urb;
1118*4882a593Smuzhiyun 	struct ucan_urb_context *context;
1119*4882a593Smuzhiyun 	struct ucan_priv *up = netdev_priv(netdev);
1120*4882a593Smuzhiyun 	struct can_frame *cf = (struct can_frame *)skb->data;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	/* check skb */
1123*4882a593Smuzhiyun 	if (can_dropped_invalid_skb(netdev, skb))
1124*4882a593Smuzhiyun 		return NETDEV_TX_OK;
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	/* allocate a context and slow down tx path, if fifo state is low */
1127*4882a593Smuzhiyun 	context = ucan_alloc_context(up);
1128*4882a593Smuzhiyun 	echo_index = context - up->context_array;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!context))
1131*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	/* prepare urb for transmission */
1134*4882a593Smuzhiyun 	urb = ucan_prepare_tx_urb(up, context, cf, echo_index);
1135*4882a593Smuzhiyun 	if (!urb)
1136*4882a593Smuzhiyun 		goto drop;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	/* put the skb on can loopback stack */
1139*4882a593Smuzhiyun 	spin_lock_irqsave(&up->echo_skb_lock, flags);
1140*4882a593Smuzhiyun 	can_put_echo_skb(skb, up->netdev, echo_index);
1141*4882a593Smuzhiyun 	spin_unlock_irqrestore(&up->echo_skb_lock, flags);
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 	/* transmit it */
1144*4882a593Smuzhiyun 	usb_anchor_urb(urb, &up->tx_urbs);
1145*4882a593Smuzhiyun 	ret = usb_submit_urb(urb, GFP_ATOMIC);
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	/* cleanup urb */
1148*4882a593Smuzhiyun 	if (ret) {
1149*4882a593Smuzhiyun 		/* on error, clean up */
1150*4882a593Smuzhiyun 		usb_unanchor_urb(urb);
1151*4882a593Smuzhiyun 		ucan_clean_up_tx_urb(up, urb);
1152*4882a593Smuzhiyun 		if (!ucan_release_context(up, context))
1153*4882a593Smuzhiyun 			netdev_err(up->netdev,
1154*4882a593Smuzhiyun 				   "xmit err: failed to release context\n");
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 		/* remove the skb from the echo stack - this also
1157*4882a593Smuzhiyun 		 * frees the skb
1158*4882a593Smuzhiyun 		 */
1159*4882a593Smuzhiyun 		spin_lock_irqsave(&up->echo_skb_lock, flags);
1160*4882a593Smuzhiyun 		can_free_echo_skb(up->netdev, echo_index);
1161*4882a593Smuzhiyun 		spin_unlock_irqrestore(&up->echo_skb_lock, flags);
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 		if (ret == -ENODEV) {
1164*4882a593Smuzhiyun 			netif_device_detach(up->netdev);
1165*4882a593Smuzhiyun 		} else {
1166*4882a593Smuzhiyun 			netdev_warn(up->netdev,
1167*4882a593Smuzhiyun 				    "xmit err: failed to submit urb %d\n",
1168*4882a593Smuzhiyun 				    ret);
1169*4882a593Smuzhiyun 			up->netdev->stats.tx_dropped++;
1170*4882a593Smuzhiyun 		}
1171*4882a593Smuzhiyun 		return NETDEV_TX_OK;
1172*4882a593Smuzhiyun 	}
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 	netif_trans_update(netdev);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	/* release ref, as we do not need the urb anymore */
1177*4882a593Smuzhiyun 	usb_free_urb(urb);
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun drop:
1182*4882a593Smuzhiyun 	if (!ucan_release_context(up, context))
1183*4882a593Smuzhiyun 		netdev_err(up->netdev,
1184*4882a593Smuzhiyun 			   "xmit drop: failed to release context\n");
1185*4882a593Smuzhiyun 	dev_kfree_skb(skb);
1186*4882a593Smuzhiyun 	up->netdev->stats.tx_dropped++;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1189*4882a593Smuzhiyun }
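
/* Editor's note - illustrative sketch, not part of the original driver:
 * ucan_start_xmit() above is reached through the generic SocketCAN
 * PF_CAN/CAN_RAW transmit path. A minimal user-space sender could look
 * like the snippet below; the interface name "can0" is an assumption
 * and all error handling is omitted for brevity.
 *
 *	#include <linux/can.h>
 *	#include <linux/can/raw.h>
 *	#include <net/if.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_can addr = { 0 };
 *		struct can_frame frame = { 0 };
 *		int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
 *
 *		addr.can_family = AF_CAN;
 *		addr.can_ifindex = if_nametoindex("can0");
 *		bind(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *		frame.can_id = 0x123;
 *		frame.can_dlc = 2;
 *		frame.data[0] = 0xde;
 *		frame.data[1] = 0xad;
 *
 *		write(s, &frame, sizeof(frame));    // one skb per write()
 *		close(s);
 *		return 0;
 *	}
 */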
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun /* Device goes down
1192*4882a593Smuzhiyun  *
1193*4882a593Smuzhiyun  * Clean up used resources
1194*4882a593Smuzhiyun  */
1195*4882a593Smuzhiyun static int ucan_close(struct net_device *netdev)
1196*4882a593Smuzhiyun {
1197*4882a593Smuzhiyun 	int ret;
1198*4882a593Smuzhiyun 	struct ucan_priv *up = netdev_priv(netdev);
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	up->can.state = CAN_STATE_STOPPED;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* stop sending data */
1203*4882a593Smuzhiyun 	usb_kill_anchored_urbs(&up->tx_urbs);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	/* stop receiving data */
1206*4882a593Smuzhiyun 	usb_kill_anchored_urbs(&up->rx_urbs);
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	/* stop and reset can device */
1209*4882a593Smuzhiyun 	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
1210*4882a593Smuzhiyun 	if (ret < 0)
1211*4882a593Smuzhiyun 		netdev_err(up->netdev,
1212*4882a593Smuzhiyun 			   "could not stop device, code: %d\n",
1213*4882a593Smuzhiyun 			   ret);
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
1216*4882a593Smuzhiyun 	if (ret < 0)
1217*4882a593Smuzhiyun 		netdev_err(up->netdev,
1218*4882a593Smuzhiyun 			   "could not reset device, code: %d\n",
1219*4882a593Smuzhiyun 			   ret);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	netif_stop_queue(netdev);
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	ucan_release_context_array(up);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	close_candev(up->netdev);
1226*4882a593Smuzhiyun 	return 0;
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun /* CAN driver callbacks */
1230*4882a593Smuzhiyun static const struct net_device_ops ucan_netdev_ops = {
1231*4882a593Smuzhiyun 	.ndo_open = ucan_open,
1232*4882a593Smuzhiyun 	.ndo_stop = ucan_close,
1233*4882a593Smuzhiyun 	.ndo_start_xmit = ucan_start_xmit,
1234*4882a593Smuzhiyun 	.ndo_change_mtu = can_change_mtu,
1235*4882a593Smuzhiyun };
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun /* Request to set bittiming
1238*4882a593Smuzhiyun  *
1239*4882a593Smuzhiyun  * This function generates a USB set bittiming message and transmits
1240*4882a593Smuzhiyun  * it to the device
1241*4882a593Smuzhiyun  */
1242*4882a593Smuzhiyun static int ucan_set_bittiming(struct net_device *netdev)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun 	int ret;
1245*4882a593Smuzhiyun 	struct ucan_priv *up = netdev_priv(netdev);
1246*4882a593Smuzhiyun 	struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming;
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming;
1249*4882a593Smuzhiyun 	cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq);
1250*4882a593Smuzhiyun 	cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp);
1251*4882a593Smuzhiyun 	cmd_set_bittiming->sample_point =
1252*4882a593Smuzhiyun 	    cpu_to_le16(up->can.bittiming.sample_point);
1253*4882a593Smuzhiyun 	cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg;
1254*4882a593Smuzhiyun 	cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1;
1255*4882a593Smuzhiyun 	cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2;
1256*4882a593Smuzhiyun 	cmd_set_bittiming->sjw = up->can.bittiming.sjw;
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0,
1259*4882a593Smuzhiyun 				    sizeof(*cmd_set_bittiming));
1260*4882a593Smuzhiyun 	return (ret < 0) ? ret : 0;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun /* Restart the device to get it out of BUS-OFF state.
1264*4882a593Smuzhiyun  * Called when the user runs "ip link set can1 type can restart".
1265*4882a593Smuzhiyun  */
1266*4882a593Smuzhiyun static int ucan_set_mode(struct net_device *netdev, enum can_mode mode)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun 	int ret;
1269*4882a593Smuzhiyun 	unsigned long flags;
1270*4882a593Smuzhiyun 	struct ucan_priv *up = netdev_priv(netdev);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	switch (mode) {
1273*4882a593Smuzhiyun 	case CAN_MODE_START:
1274*4882a593Smuzhiyun 		netdev_dbg(up->netdev, "restarting device\n");
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 		ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0);
1277*4882a593Smuzhiyun 		up->can.state = CAN_STATE_ERROR_ACTIVE;
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		/* check if queue can be restarted,
1280*4882a593Smuzhiyun 		 * up->available_tx_urbs must be protected by the
1281*4882a593Smuzhiyun 		 * lock
1282*4882a593Smuzhiyun 		 */
1283*4882a593Smuzhiyun 		spin_lock_irqsave(&up->context_lock, flags);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 		if (up->available_tx_urbs > 0)
1286*4882a593Smuzhiyun 			netif_wake_queue(up->netdev);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 		spin_unlock_irqrestore(&up->context_lock, flags);
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 		return ret;
1291*4882a593Smuzhiyun 	default:
1292*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1293*4882a593Smuzhiyun 	}
1294*4882a593Smuzhiyun }
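
/* Editor's note - illustrative, not part of the original driver: the
 * callbacks above are driven from user space via the SocketCAN netlink
 * interface. Assuming the registered interface is named can0, a typical
 * iproute2 sequence is:
 *
 *	ip link set can0 type can bitrate 500000   (-> ucan_set_bittiming)
 *	ip link set can0 up                        (-> ucan_open)
 *	ip link set can0 type can restart          (-> ucan_set_mode, only
 *	                                               valid in BUS-OFF)
 *	ip link set can0 down                      (-> ucan_close)
 */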
1295*4882a593Smuzhiyun 
1296*4882a593Smuzhiyun /* Probe the device, reset it and gather general device information */
1297*4882a593Smuzhiyun static int ucan_probe(struct usb_interface *intf,
1298*4882a593Smuzhiyun 		      const struct usb_device_id *id)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun 	int ret;
1301*4882a593Smuzhiyun 	int i;
1302*4882a593Smuzhiyun 	u32 protocol_version;
1303*4882a593Smuzhiyun 	struct usb_device *udev;
1304*4882a593Smuzhiyun 	struct net_device *netdev;
1305*4882a593Smuzhiyun 	struct usb_host_interface *iface_desc;
1306*4882a593Smuzhiyun 	struct ucan_priv *up;
1307*4882a593Smuzhiyun 	struct usb_endpoint_descriptor *ep;
1308*4882a593Smuzhiyun 	u16 in_ep_size;
1309*4882a593Smuzhiyun 	u16 out_ep_size;
1310*4882a593Smuzhiyun 	u8 in_ep_addr;
1311*4882a593Smuzhiyun 	u8 out_ep_addr;
1312*4882a593Smuzhiyun 	union ucan_ctl_payload *ctl_msg_buffer;
1313*4882a593Smuzhiyun 	char firmware_str[sizeof(union ucan_ctl_payload) + 1];
1314*4882a593Smuzhiyun 
1315*4882a593Smuzhiyun 	udev = interface_to_usbdev(intf);
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	/* Stage 1 - Interface Parsing
1318*4882a593Smuzhiyun 	 * ---------------------------
1319*4882a593Smuzhiyun 	 *
1320*4882a593Smuzhiyun 	 * Identify the device's USB interface descriptor and its
1321*4882a593Smuzhiyun 	 * endpoints. Probing is aborted on errors.
1322*4882a593Smuzhiyun 	 */
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	/* check if the interface is sane */
1325*4882a593Smuzhiyun 	iface_desc = intf->cur_altsetting;
1326*4882a593Smuzhiyun 	if (!iface_desc)
1327*4882a593Smuzhiyun 		return -ENODEV;
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 	dev_info(&udev->dev,
1330*4882a593Smuzhiyun 		 "%s: probing device on interface #%d\n",
1331*4882a593Smuzhiyun 		 UCAN_DRIVER_NAME,
1332*4882a593Smuzhiyun 		 iface_desc->desc.bInterfaceNumber);
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	/* interface sanity check */
1335*4882a593Smuzhiyun 	if (iface_desc->desc.bNumEndpoints != 2) {
1336*4882a593Smuzhiyun 		dev_err(&udev->dev,
1337*4882a593Smuzhiyun 			"%s: invalid EP count (%d)\n",
1338*4882a593Smuzhiyun 			UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints);
1339*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1340*4882a593Smuzhiyun 	}
1341*4882a593Smuzhiyun 
1342*4882a593Smuzhiyun 	/* check interface endpoints */
1343*4882a593Smuzhiyun 	in_ep_addr = 0;
1344*4882a593Smuzhiyun 	out_ep_addr = 0;
1345*4882a593Smuzhiyun 	in_ep_size = 0;
1346*4882a593Smuzhiyun 	out_ep_size = 0;
1347*4882a593Smuzhiyun 	for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
1348*4882a593Smuzhiyun 		ep = &iface_desc->endpoint[i].desc;
1349*4882a593Smuzhiyun 
1350*4882a593Smuzhiyun 		if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) &&
1351*4882a593Smuzhiyun 		    ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
1352*4882a593Smuzhiyun 		     USB_ENDPOINT_XFER_BULK)) {
1353*4882a593Smuzhiyun 			/* In Endpoint */
1354*4882a593Smuzhiyun 			in_ep_addr = ep->bEndpointAddress;
1355*4882a593Smuzhiyun 			in_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
1356*4882a593Smuzhiyun 			in_ep_size = le16_to_cpu(ep->wMaxPacketSize);
1357*4882a593Smuzhiyun 		} else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ==
1358*4882a593Smuzhiyun 			    0) &&
1359*4882a593Smuzhiyun 			   ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
1360*4882a593Smuzhiyun 			    USB_ENDPOINT_XFER_BULK)) {
1361*4882a593Smuzhiyun 			/* Out Endpoint */
1362*4882a593Smuzhiyun 			out_ep_addr = ep->bEndpointAddress;
1363*4882a593Smuzhiyun 			out_ep_addr &= USB_ENDPOINT_NUMBER_MASK;
1364*4882a593Smuzhiyun 			out_ep_size = le16_to_cpu(ep->wMaxPacketSize);
1365*4882a593Smuzhiyun 		}
1366*4882a593Smuzhiyun 	}
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	/* check if interface is sane */
1369*4882a593Smuzhiyun 	if (!in_ep_addr || !out_ep_addr) {
1370*4882a593Smuzhiyun 		dev_err(&udev->dev, "%s: invalid endpoint configuration\n",
1371*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1372*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1373*4882a593Smuzhiyun 	}
1374*4882a593Smuzhiyun 	if (in_ep_size < sizeof(struct ucan_message_in)) {
1375*4882a593Smuzhiyun 		dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n",
1376*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1377*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1378*4882a593Smuzhiyun 	}
1379*4882a593Smuzhiyun 	if (out_ep_size < sizeof(struct ucan_message_out)) {
1380*4882a593Smuzhiyun 		dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n",
1381*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1382*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1383*4882a593Smuzhiyun 	}
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun 	/* Stage 2 - Device Identification
1386*4882a593Smuzhiyun 	 * -------------------------------
1387*4882a593Smuzhiyun 	 *
1388*4882a593Smuzhiyun 	 * The USB interface appears to belong to a UCAN device. Do further
1389*4882a593Smuzhiyun 	 * compatibility checks. On error, probing is aborted; on
1390*4882a593Smuzhiyun 	 * success this stage leaves the ctl_msg_buffer with the
1391*4882a593Smuzhiyun 	 * reported contents of a GET_INFO command (supported
1392*4882a593Smuzhiyun 	 * bittimings, tx_fifo depth). This information is used in
1393*4882a593Smuzhiyun 	 * Stage 3 for the final driver initialisation.
1394*4882a593Smuzhiyun 	 */
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 	/* Prepare memory for control transfers */
1397*4882a593Smuzhiyun 	ctl_msg_buffer = devm_kzalloc(&udev->dev,
1398*4882a593Smuzhiyun 				      sizeof(union ucan_ctl_payload),
1399*4882a593Smuzhiyun 				      GFP_KERNEL);
1400*4882a593Smuzhiyun 	if (!ctl_msg_buffer) {
1401*4882a593Smuzhiyun 		dev_err(&udev->dev,
1402*4882a593Smuzhiyun 			"%s: failed to allocate control pipe memory\n",
1403*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1404*4882a593Smuzhiyun 		return -ENOMEM;
1405*4882a593Smuzhiyun 	}
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	/* get protocol version
1408*4882a593Smuzhiyun 	 *
1409*4882a593Smuzhiyun 	 * note: ucan_ctrl_command_* wrappers cannot be used yet
1410*4882a593Smuzhiyun 	 * because `up` is initialised in Stage 3
1411*4882a593Smuzhiyun 	 */
1412*4882a593Smuzhiyun 	ret = usb_control_msg(udev,
1413*4882a593Smuzhiyun 			      usb_rcvctrlpipe(udev, 0),
1414*4882a593Smuzhiyun 			      UCAN_COMMAND_GET,
1415*4882a593Smuzhiyun 			      USB_DIR_IN | USB_TYPE_VENDOR |
1416*4882a593Smuzhiyun 					USB_RECIP_INTERFACE,
1417*4882a593Smuzhiyun 			      UCAN_COMMAND_GET_PROTOCOL_VERSION,
1418*4882a593Smuzhiyun 			      iface_desc->desc.bInterfaceNumber,
1419*4882a593Smuzhiyun 			      ctl_msg_buffer,
1420*4882a593Smuzhiyun 			      sizeof(union ucan_ctl_payload),
1421*4882a593Smuzhiyun 			      UCAN_USB_CTL_PIPE_TIMEOUT);
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	/* older firmware versions do not support this command - those
1424*4882a593Smuzhiyun 	 * are not supported by this driver
1425*4882a593Smuzhiyun 	 */
1426*4882a593Smuzhiyun 	if (ret != 4) {
1427*4882a593Smuzhiyun 		dev_err(&udev->dev,
1428*4882a593Smuzhiyun 			"%s: could not read protocol version, ret=%d\n",
1429*4882a593Smuzhiyun 			UCAN_DRIVER_NAME, ret);
1430*4882a593Smuzhiyun 		if (ret >= 0)
1431*4882a593Smuzhiyun 			ret = -EINVAL;
1432*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	/* this driver currently supports protocol version 3 only */
1436*4882a593Smuzhiyun 	protocol_version =
1437*4882a593Smuzhiyun 		le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version);
1438*4882a593Smuzhiyun 	if (protocol_version < UCAN_PROTOCOL_VERSION_MIN ||
1439*4882a593Smuzhiyun 	    protocol_version > UCAN_PROTOCOL_VERSION_MAX) {
1440*4882a593Smuzhiyun 		dev_err(&udev->dev,
1441*4882a593Smuzhiyun 			"%s: device protocol version %d is not supported\n",
1442*4882a593Smuzhiyun 			UCAN_DRIVER_NAME, protocol_version);
1443*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1444*4882a593Smuzhiyun 	}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	/* request the device information and store it in ctl_msg_buffer
1447*4882a593Smuzhiyun 	 *
1448*4882a593Smuzhiyun 	 * note: ucan_ctrl_command_* wrappers cannot be used yet
1449*4882a593Smuzhiyun 	 * because `up` is initialised in Stage 3
1450*4882a593Smuzhiyun 	 */
1451*4882a593Smuzhiyun 	ret = usb_control_msg(udev,
1452*4882a593Smuzhiyun 			      usb_rcvctrlpipe(udev, 0),
1453*4882a593Smuzhiyun 			      UCAN_COMMAND_GET,
1454*4882a593Smuzhiyun 			      USB_DIR_IN | USB_TYPE_VENDOR |
1455*4882a593Smuzhiyun 					USB_RECIP_INTERFACE,
1456*4882a593Smuzhiyun 			      UCAN_COMMAND_GET_INFO,
1457*4882a593Smuzhiyun 			      iface_desc->desc.bInterfaceNumber,
1458*4882a593Smuzhiyun 			      ctl_msg_buffer,
1459*4882a593Smuzhiyun 			      sizeof(ctl_msg_buffer->cmd_get_device_info),
1460*4882a593Smuzhiyun 			      UCAN_USB_CTL_PIPE_TIMEOUT);
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun 	if (ret < 0) {
1463*4882a593Smuzhiyun 		dev_err(&udev->dev, "%s: failed to retrieve device info\n",
1464*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1465*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1466*4882a593Smuzhiyun 	}
1467*4882a593Smuzhiyun 	if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) {
1468*4882a593Smuzhiyun 		dev_err(&udev->dev, "%s: device reported invalid device info\n",
1469*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1470*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1471*4882a593Smuzhiyun 	}
1472*4882a593Smuzhiyun 	if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) {
1473*4882a593Smuzhiyun 		dev_err(&udev->dev,
1474*4882a593Smuzhiyun 			"%s: device reported invalid tx-fifo size\n",
1475*4882a593Smuzhiyun 			UCAN_DRIVER_NAME);
1476*4882a593Smuzhiyun 		goto err_firmware_needs_update;
1477*4882a593Smuzhiyun 	}
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	/* Stage 3 - Driver Initialisation
1480*4882a593Smuzhiyun 	 * -------------------------------
1481*4882a593Smuzhiyun 	 *
1482*4882a593Smuzhiyun 	 * Register device to Linux, prepare private structures and
1483*4882a593Smuzhiyun 	 * reset the device.
1484*4882a593Smuzhiyun 	 */
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	/* allocate driver resources */
1487*4882a593Smuzhiyun 	netdev = alloc_candev(sizeof(struct ucan_priv),
1488*4882a593Smuzhiyun 			      ctl_msg_buffer->cmd_get_device_info.tx_fifo);
1489*4882a593Smuzhiyun 	if (!netdev) {
1490*4882a593Smuzhiyun 		dev_err(&udev->dev,
1491*4882a593Smuzhiyun 			"%s: cannot allocate candev\n", UCAN_DRIVER_NAME);
1492*4882a593Smuzhiyun 		return -ENOMEM;
1493*4882a593Smuzhiyun 	}
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun 	up = netdev_priv(netdev);
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	/* initialize data */
1498*4882a593Smuzhiyun 	up->udev = udev;
1499*4882a593Smuzhiyun 	up->intf = intf;
1500*4882a593Smuzhiyun 	up->netdev = netdev;
1501*4882a593Smuzhiyun 	up->intf_index = iface_desc->desc.bInterfaceNumber;
1502*4882a593Smuzhiyun 	up->in_ep_addr = in_ep_addr;
1503*4882a593Smuzhiyun 	up->out_ep_addr = out_ep_addr;
1504*4882a593Smuzhiyun 	up->in_ep_size = in_ep_size;
1505*4882a593Smuzhiyun 	up->ctl_msg_buffer = ctl_msg_buffer;
1506*4882a593Smuzhiyun 	up->context_array = NULL;
1507*4882a593Smuzhiyun 	up->available_tx_urbs = 0;
1508*4882a593Smuzhiyun 
1509*4882a593Smuzhiyun 	up->can.state = CAN_STATE_STOPPED;
1510*4882a593Smuzhiyun 	up->can.bittiming_const = &up->device_info.bittiming_const;
1511*4882a593Smuzhiyun 	up->can.do_set_bittiming = ucan_set_bittiming;
1512*4882a593Smuzhiyun 	up->can.do_set_mode = &ucan_set_mode;
1513*4882a593Smuzhiyun 	spin_lock_init(&up->context_lock);
1514*4882a593Smuzhiyun 	spin_lock_init(&up->echo_skb_lock);
1515*4882a593Smuzhiyun 	netdev->netdev_ops = &ucan_netdev_ops;
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	usb_set_intfdata(intf, up);
1518*4882a593Smuzhiyun 	SET_NETDEV_DEV(netdev, &intf->dev);
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	/* parse device information
1521*4882a593Smuzhiyun 	 * the data retrieved in Stage 2 is still available in
1522*4882a593Smuzhiyun 	 * up->ctl_msg_buffer
1523*4882a593Smuzhiyun 	 */
1524*4882a593Smuzhiyun 	ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info);
1525*4882a593Smuzhiyun 
1526*4882a593Smuzhiyun 	/* just print some device information - if available */
1527*4882a593Smuzhiyun 	ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0,
1528*4882a593Smuzhiyun 				     sizeof(union ucan_ctl_payload));
1529*4882a593Smuzhiyun 	if (ret > 0) {
1530*4882a593Smuzhiyun 		/* copy the string while ensuring zero termination */
1531*4882a593Smuzhiyun 		strncpy(firmware_str, up->ctl_msg_buffer->raw,
1532*4882a593Smuzhiyun 			sizeof(union ucan_ctl_payload));
1533*4882a593Smuzhiyun 		firmware_str[sizeof(union ucan_ctl_payload)] = '\0';
1534*4882a593Smuzhiyun 	} else {
1535*4882a593Smuzhiyun 		strcpy(firmware_str, "unknown");
1536*4882a593Smuzhiyun 	}
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	/* device is compatible, reset it */
1539*4882a593Smuzhiyun 	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
1540*4882a593Smuzhiyun 	if (ret < 0)
1541*4882a593Smuzhiyun 		goto err_free_candev;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	init_usb_anchor(&up->rx_urbs);
1544*4882a593Smuzhiyun 	init_usb_anchor(&up->tx_urbs);
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 	up->can.state = CAN_STATE_STOPPED;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	/* register the device */
1549*4882a593Smuzhiyun 	ret = register_candev(netdev);
1550*4882a593Smuzhiyun 	if (ret)
1551*4882a593Smuzhiyun 		goto err_free_candev;
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/* initialisation complete, log device info */
1554*4882a593Smuzhiyun 	netdev_info(up->netdev, "registered device\n");
1555*4882a593Smuzhiyun 	netdev_info(up->netdev, "firmware string: %s\n", firmware_str);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	/* success */
1558*4882a593Smuzhiyun 	return 0;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun err_free_candev:
1561*4882a593Smuzhiyun 	free_candev(netdev);
1562*4882a593Smuzhiyun 	return ret;
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun err_firmware_needs_update:
1565*4882a593Smuzhiyun 	dev_err(&udev->dev,
1566*4882a593Smuzhiyun 		"%s: probe failed; try to update the device firmware\n",
1567*4882a593Smuzhiyun 		UCAN_DRIVER_NAME);
1568*4882a593Smuzhiyun 	return -ENODEV;
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun /* disconnect the device */
1572*4882a593Smuzhiyun static void ucan_disconnect(struct usb_interface *intf)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	struct ucan_priv *up = usb_get_intfdata(intf);
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	usb_set_intfdata(intf, NULL);
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 	if (up) {
1579*4882a593Smuzhiyun 		unregister_netdev(up->netdev);
1580*4882a593Smuzhiyun 		free_candev(up->netdev);
1581*4882a593Smuzhiyun 	}
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun static struct usb_device_id ucan_table[] = {
1585*4882a593Smuzhiyun 	/* Mule (soldered onto compute modules) */
1586*4882a593Smuzhiyun 	{USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)},
1587*4882a593Smuzhiyun 	/* Seal (standalone USB stick) */
1588*4882a593Smuzhiyun 	{USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)},
1589*4882a593Smuzhiyun 	{} /* Terminating entry */
1590*4882a593Smuzhiyun };
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun MODULE_DEVICE_TABLE(usb, ucan_table);
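
/* Editor's note - illustrative, not part of the original driver: the id
 * table above binds this driver to interface 0 of USB devices
 * 2294:425a (Mule) and 2294:425b (Seal). Once a device is bound, the
 * driver instance is visible in sysfs, e.g.:
 *
 *	ls /sys/bus/usb/drivers/ucan/
 */
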
1593*4882a593Smuzhiyun /* driver callbacks */
1594*4882a593Smuzhiyun static struct usb_driver ucan_driver = {
1595*4882a593Smuzhiyun 	.name = UCAN_DRIVER_NAME,
1596*4882a593Smuzhiyun 	.probe = ucan_probe,
1597*4882a593Smuzhiyun 	.disconnect = ucan_disconnect,
1598*4882a593Smuzhiyun 	.id_table = ucan_table,
1599*4882a593Smuzhiyun };
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun module_usb_driver(ucan_driver);
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1604*4882a593Smuzhiyun MODULE_AUTHOR("Martin Elshuber <martin.elshuber@theobroma-systems.com>");
1605*4882a593Smuzhiyun MODULE_AUTHOR("Jakob Unterwurzacher <jakob.unterwurzacher@theobroma-systems.com>");
1606*4882a593Smuzhiyun MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
1607