xref: /OK3568_Linux_fs/kernel/drivers/net/can/xilinx_can.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /* Xilinx CAN device driver
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2012 - 2014 Xilinx, Inc.
5*4882a593Smuzhiyun  * Copyright (C) 2009 PetaLogix. All rights reserved.
6*4882a593Smuzhiyun  * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Description:
9*4882a593Smuzhiyun  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include <linux/clk.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/init.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/kernel.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/netdevice.h>
20*4882a593Smuzhiyun #include <linux/of.h>
21*4882a593Smuzhiyun #include <linux/of_device.h>
22*4882a593Smuzhiyun #include <linux/platform_device.h>
23*4882a593Smuzhiyun #include <linux/skbuff.h>
24*4882a593Smuzhiyun #include <linux/spinlock.h>
25*4882a593Smuzhiyun #include <linux/string.h>
26*4882a593Smuzhiyun #include <linux/types.h>
27*4882a593Smuzhiyun #include <linux/can/dev.h>
28*4882a593Smuzhiyun #include <linux/can/error.h>
29*4882a593Smuzhiyun #include <linux/can/led.h>
30*4882a593Smuzhiyun #include <linux/pm_runtime.h>
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define DRIVER_NAME	"xilinx_can"
33*4882a593Smuzhiyun 
/* CAN registers set
 *
 * Register offsets into the memory-mapped core. The low block (0x00-0x24)
 * is common to all IP flavours; the 0x30-0x60 block exists only on the
 * plain CAN cores, while the 0x088+ block exists only on the CAN FD cores.
 */
enum xcan_reg {
	XCAN_SRR_OFFSET		= 0x00, /* Software reset */
	XCAN_MSR_OFFSET		= 0x04, /* Mode select */
	XCAN_BRPR_OFFSET	= 0x08, /* Baud rate prescaler */
	XCAN_BTR_OFFSET		= 0x0C, /* Bit timing */
	XCAN_ECR_OFFSET		= 0x10, /* Error counter */
	XCAN_ESR_OFFSET		= 0x14, /* Error status */
	XCAN_SR_OFFSET		= 0x18, /* Status */
	XCAN_ISR_OFFSET		= 0x1C, /* Interrupt status */
	XCAN_IER_OFFSET		= 0x20, /* Interrupt enable */
	XCAN_ICR_OFFSET		= 0x24, /* Interrupt clear */

	/* not on CAN FD cores */
	XCAN_TXFIFO_OFFSET	= 0x30, /* TX FIFO base */
	XCAN_RXFIFO_OFFSET	= 0x50, /* RX FIFO base */
	XCAN_AFR_OFFSET		= 0x60, /* Acceptance Filter */

	/* only on CAN FD cores */
	XCAN_F_BRPR_OFFSET	= 0x088, /* Data Phase Baud Rate
					  * Prescalar
					  */
	XCAN_F_BTR_OFFSET	= 0x08C, /* Data Phase Bit Timing */
	XCAN_TRR_OFFSET		= 0x0090, /* TX Buffer Ready Request */
	XCAN_AFR_EXT_OFFSET	= 0x00E0, /* Acceptance Filter */
	XCAN_FSR_OFFSET		= 0x00E8, /* RX FIFO Status */
	XCAN_TXMSG_BASE_OFFSET	= 0x0100, /* TX Message Space */
	XCAN_RXMSG_BASE_OFFSET	= 0x1100, /* RX Message Space */
	/* RX Message Space (second RX FIFO on CAN FD 2.0 cores) */
	XCAN_RXMSG_2_BASE_OFFSET	= 0x2100,
	XCAN_AFR_2_MASK_OFFSET	= 0x0A00, /* Acceptance Filter MASK */
	XCAN_AFR_2_ID_OFFSET	= 0x0A04, /* Acceptance Filter ID */
};
66*4882a593Smuzhiyun 
/* Word offsets within one frame slot; frame_base is either a FIFO base
 * register (classic cores) or a per-buffer message-space address (FD cores).
 */
#define XCAN_FRAME_ID_OFFSET(frame_base)	((frame_base) + 0x00)
#define XCAN_FRAME_DLC_OFFSET(frame_base)	((frame_base) + 0x04)
#define XCAN_FRAME_DW1_OFFSET(frame_base)	((frame_base) + 0x08)
#define XCAN_FRAME_DW2_OFFSET(frame_base)	((frame_base) + 0x0C)
#define XCANFD_FRAME_DW_OFFSET(frame_base)	((frame_base) + 0x08)

/* Size of one CAN FD message-space slot and the address of slot n within
 * each of the TX/RX message spaces.
 */
#define XCAN_CANFD_FRAME_SIZE		0x48
#define XCAN_TXMSG_FRAME_OFFSET(n)	(XCAN_TXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_FRAME_OFFSET(n)	(XCAN_RXMSG_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))
#define XCAN_RXMSG_2_FRAME_OFFSET(n)	(XCAN_RXMSG_2_BASE_OFFSET + \
					 XCAN_CANFD_FRAME_SIZE * (n))

/* the single TX mailbox used by this driver on CAN FD HW */
#define XCAN_TX_MAILBOX_IDX		0
83*4882a593Smuzhiyun 
/* CAN register bit masks - XCAN_<REG>_<BIT>_MASK
 *
 * XCAN_IXR_* bits are shared between the ISR, IER and ICR registers.
 */
#define XCAN_SRR_CEN_MASK		0x00000002 /* CAN enable */
#define XCAN_SRR_RESET_MASK		0x00000001 /* Soft Reset the CAN core */
#define XCAN_MSR_LBACK_MASK		0x00000002 /* Loop back mode select */
#define XCAN_MSR_SLEEP_MASK		0x00000001 /* Sleep mode select */
#define XCAN_BRPR_BRP_MASK		0x000000FF /* Baud rate prescaler */
#define XCAN_BTR_SJW_MASK		0x00000180 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK		0x00000070 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK		0x0000000F /* Time segment 1 */
#define XCAN_BTR_SJW_MASK_CANFD		0x000F0000 /* Synchronous jump width */
#define XCAN_BTR_TS2_MASK_CANFD		0x00000F00 /* Time segment 2 */
#define XCAN_BTR_TS1_MASK_CANFD		0x0000003F /* Time segment 1 */
#define XCAN_ECR_REC_MASK		0x0000FF00 /* Receive error counter */
#define XCAN_ECR_TEC_MASK		0x000000FF /* Transmit error counter */
#define XCAN_ESR_ACKER_MASK		0x00000010 /* ACK error */
#define XCAN_ESR_BERR_MASK		0x00000008 /* Bit error */
#define XCAN_ESR_STER_MASK		0x00000004 /* Stuff error */
#define XCAN_ESR_FMER_MASK		0x00000002 /* Form error */
#define XCAN_ESR_CRCER_MASK		0x00000001 /* CRC error */
#define XCAN_SR_TXFLL_MASK		0x00000400 /* TX FIFO is full */
#define XCAN_SR_ESTAT_MASK		0x00000180 /* Error status */
#define XCAN_SR_ERRWRN_MASK		0x00000040 /* Error warning */
#define XCAN_SR_NORMAL_MASK		0x00000008 /* Normal mode */
#define XCAN_SR_LBACK_MASK		0x00000002 /* Loop back mode */
#define XCAN_SR_CONFIG_MASK		0x00000001 /* Configuration mode */
#define XCAN_IXR_RXMNF_MASK		0x00020000 /* RX match not finished */
#define XCAN_IXR_TXFEMP_MASK		0x00004000 /* TX FIFO Empty */
#define XCAN_IXR_WKUP_MASK		0x00000800 /* Wake up interrupt */
#define XCAN_IXR_SLP_MASK		0x00000400 /* Sleep interrupt */
#define XCAN_IXR_BSOFF_MASK		0x00000200 /* Bus off interrupt */
#define XCAN_IXR_ERROR_MASK		0x00000100 /* Error interrupt */
#define XCAN_IXR_RXNEMP_MASK		0x00000080 /* RX FIFO NotEmpty intr */
#define XCAN_IXR_RXOFLW_MASK		0x00000040 /* RX FIFO Overflow intr */
#define XCAN_IXR_RXOK_MASK		0x00000010 /* Message received intr */
#define XCAN_IXR_TXFLL_MASK		0x00000004 /* Tx FIFO Full intr */
#define XCAN_IXR_TXOK_MASK		0x00000002 /* TX successful intr */
#define XCAN_IXR_ARBLST_MASK		0x00000001 /* Arbitration lost intr */
#define XCAN_IDR_ID1_MASK		0xFFE00000 /* Standard msg identifier */
#define XCAN_IDR_SRR_MASK		0x00100000 /* Substitute remote TXreq */
#define XCAN_IDR_IDE_MASK		0x00080000 /* Identifier extension */
#define XCAN_IDR_ID2_MASK		0x0007FFFE /* Extended message ident */
#define XCAN_IDR_RTR_MASK		0x00000001 /* Remote TX request */
#define XCAN_DLCR_DLC_MASK		0xF0000000 /* Data length code */
#define XCAN_FSR_FL_MASK		0x00003F00 /* RX Fill Level */
#define XCAN_2_FSR_FL_MASK		0x00007F00 /* RX Fill Level */
#define XCAN_FSR_IRI_MASK		0x00000080 /* RX Increment Read Index */
#define XCAN_FSR_RI_MASK		0x0000001F /* RX Read Index */
#define XCAN_2_FSR_RI_MASK		0x0000003F /* RX Read Index */
#define XCAN_DLCR_EDL_MASK		0x08000000 /* EDL Mask in DLC */
#define XCAN_DLCR_BRS_MASK		0x04000000 /* BRS Mask in DLC */
134*4882a593Smuzhiyun 
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT		4  /* Time segment 2 */
#define XCAN_BTR_SJW_SHIFT_CANFD	16 /* Synchronous jump width */
#define XCAN_BTR_TS2_SHIFT_CANFD	8  /* Time segment 2 */
#define XCAN_IDR_ID1_SHIFT		21 /* Standard Messg Identifier */
#define XCAN_IDR_ID2_SHIFT		1  /* Extended Message Identifier */
#define XCAN_DLCR_DLC_SHIFT		28 /* Data length code */
#define XCAN_ESR_REC_SHIFT		8  /* Rx Error Count */

/* CAN frame length constants */
#define XCAN_FRAME_MAX_DATA_LEN		8	/* classic CAN payload bytes */
#define XCANFD_DW_BYTES			4	/* bytes per FD data word reg */
#define XCAN_TIMEOUT			(1 * HZ)	/* mode-change timeout */
149*4882a593Smuzhiyun 
/* Per-core feature bits stored in xcan_devtype_data.flags and tested via
 * priv->devtype.flags at runtime.
 */
/* TX-FIFO-empty interrupt available */
#define XCAN_FLAG_TXFEMP	0x0001
/* RX Match Not Finished interrupt available */
#define XCAN_FLAG_RXMNF		0x0002
/* Extended acceptance filters with control at 0xE0 */
#define XCAN_FLAG_EXT_FILTERS	0x0004
/* TX mailboxes instead of TX FIFO */
#define XCAN_FLAG_TX_MAILBOXES	0x0008
/* RX FIFO with each buffer in separate registers at 0x1100
 * instead of the regular FIFO at 0x50
 */
#define XCAN_FLAG_RX_FIFO_MULTI	0x0010
/* CAN FD 2.0 core (wider FSR fill-level/read-index fields) */
#define XCAN_FLAG_CANFD_2	0x0020
163*4882a593Smuzhiyun 
/* Supported IP core flavours */
enum xcan_ip_type {
	XAXI_CAN = 0,	/* AXI CAN */
	XZYNQ_CANPS,	/* Zynq CANPS controller */
	XAXI_CANFD,	/* AXI CAN FD 1.0 */
	XAXI_CANFD_2_0,	/* AXI CAN FD 2.0 */
};
170*4882a593Smuzhiyun 
/**
 * struct xcan_devtype_data - Device type specific constants
 * @cantype:		IP core flavour, see enum xcan_ip_type
 * @flags:		XCAN_FLAG_* feature bits for this core
 * @bittiming_const:	Arbitration-phase bit timing limits
 * @bus_clk_name:	Name of the bus clock
 * @btr_ts2_shift:	Shift of the time segment 2 field in BTR
 * @btr_sjw_shift:	Shift of the sync jump width field in BTR
 */
struct xcan_devtype_data {
	enum xcan_ip_type cantype;
	unsigned int flags;
	const struct can_bittiming_const *bittiming_const;
	const char *bus_clk_name;
	unsigned int btr_ts2_shift;
	unsigned int btr_sjw_shift;
};
179*4882a593Smuzhiyun 
/**
 * struct xcan_priv - This definition defines a CAN driver instance
 * @can:			CAN private data structure.
 * @tx_lock:			Lock for synchronizing TX interrupt handling
 * @tx_head:			Tx CAN packets ready to send on the queue
 * @tx_tail:			Tx CAN packets successfully sent on the queue
 * @tx_max:			Maximum number packets the driver can send
 * @napi:			NAPI structure
 * @read_reg:			For reading data from CAN registers
 * @write_reg:			For writing data to CAN registers
 * @dev:			Network device data structure
 * @reg_base:			Ioremapped address to registers
 * @irq_flags:			For request_irq()
 * @bus_clk:			Pointer to struct clk
 * @can_clk:			Pointer to struct clk
 * @devtype:			Device type specific constants
 */
struct xcan_priv {
	struct can_priv can;
	spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */
	unsigned int tx_head;
	unsigned int tx_tail;
	unsigned int tx_max;
	struct napi_struct napi;
	/* read_reg/write_reg abstract little vs big endian register access */
	u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg);
	void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg,
			  u32 val);
	struct device *dev;
	void __iomem *reg_base;
	unsigned long irq_flags;
	struct clk *bus_clk;
	struct clk *can_clk;
	struct xcan_devtype_data devtype;
};
214*4882a593Smuzhiyun 
/* CAN Bittiming constants as per Xilinx CAN specs
 * (tseg1 = prop_seg + phase_seg1; tseg2 = phase_seg2)
 */
static const struct can_bittiming_const xcan_bittiming_const = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 64,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 8,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 256,
	.tseg2_min = 1,
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};

/* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */
static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
	.name = DRIVER_NAME,
	.tseg1_min = 1,
	.tseg1_max = 32,
	.tseg2_min = 1,
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun /**
281*4882a593Smuzhiyun  * xcan_write_reg_le - Write a value to the device register little endian
282*4882a593Smuzhiyun  * @priv:	Driver private data structure
283*4882a593Smuzhiyun  * @reg:	Register offset
284*4882a593Smuzhiyun  * @val:	Value to write at the Register offset
285*4882a593Smuzhiyun  *
286*4882a593Smuzhiyun  * Write data to the paricular CAN register
287*4882a593Smuzhiyun  */
xcan_write_reg_le(const struct xcan_priv * priv,enum xcan_reg reg,u32 val)288*4882a593Smuzhiyun static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg,
289*4882a593Smuzhiyun 			      u32 val)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun 	iowrite32(val, priv->reg_base + reg);
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /**
295*4882a593Smuzhiyun  * xcan_read_reg_le - Read a value from the device register little endian
296*4882a593Smuzhiyun  * @priv:	Driver private data structure
297*4882a593Smuzhiyun  * @reg:	Register offset
298*4882a593Smuzhiyun  *
299*4882a593Smuzhiyun  * Read data from the particular CAN register
300*4882a593Smuzhiyun  * Return: value read from the CAN register
301*4882a593Smuzhiyun  */
xcan_read_reg_le(const struct xcan_priv * priv,enum xcan_reg reg)302*4882a593Smuzhiyun static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg)
303*4882a593Smuzhiyun {
304*4882a593Smuzhiyun 	return ioread32(priv->reg_base + reg);
305*4882a593Smuzhiyun }
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun /**
308*4882a593Smuzhiyun  * xcan_write_reg_be - Write a value to the device register big endian
309*4882a593Smuzhiyun  * @priv:	Driver private data structure
310*4882a593Smuzhiyun  * @reg:	Register offset
311*4882a593Smuzhiyun  * @val:	Value to write at the Register offset
312*4882a593Smuzhiyun  *
313*4882a593Smuzhiyun  * Write data to the paricular CAN register
314*4882a593Smuzhiyun  */
xcan_write_reg_be(const struct xcan_priv * priv,enum xcan_reg reg,u32 val)315*4882a593Smuzhiyun static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg,
316*4882a593Smuzhiyun 			      u32 val)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	iowrite32be(val, priv->reg_base + reg);
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun /**
322*4882a593Smuzhiyun  * xcan_read_reg_be - Read a value from the device register big endian
323*4882a593Smuzhiyun  * @priv:	Driver private data structure
324*4882a593Smuzhiyun  * @reg:	Register offset
325*4882a593Smuzhiyun  *
326*4882a593Smuzhiyun  * Read data from the particular CAN register
327*4882a593Smuzhiyun  * Return: value read from the CAN register
328*4882a593Smuzhiyun  */
xcan_read_reg_be(const struct xcan_priv * priv,enum xcan_reg reg)329*4882a593Smuzhiyun static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun 	return ioread32be(priv->reg_base + reg);
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun /**
335*4882a593Smuzhiyun  * xcan_rx_int_mask - Get the mask for the receive interrupt
336*4882a593Smuzhiyun  * @priv:	Driver private data structure
337*4882a593Smuzhiyun  *
338*4882a593Smuzhiyun  * Return: The receive interrupt mask used by the driver on this HW
339*4882a593Smuzhiyun  */
xcan_rx_int_mask(const struct xcan_priv * priv)340*4882a593Smuzhiyun static u32 xcan_rx_int_mask(const struct xcan_priv *priv)
341*4882a593Smuzhiyun {
342*4882a593Smuzhiyun 	/* RXNEMP is better suited for our use case as it cannot be cleared
343*4882a593Smuzhiyun 	 * while the FIFO is non-empty, but CAN FD HW does not have it
344*4882a593Smuzhiyun 	 */
345*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
346*4882a593Smuzhiyun 		return XCAN_IXR_RXOK_MASK;
347*4882a593Smuzhiyun 	else
348*4882a593Smuzhiyun 		return XCAN_IXR_RXNEMP_MASK;
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun /**
352*4882a593Smuzhiyun  * set_reset_mode - Resets the CAN device mode
353*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
354*4882a593Smuzhiyun  *
355*4882a593Smuzhiyun  * This is the driver reset mode routine.The driver
356*4882a593Smuzhiyun  * enters into configuration mode.
357*4882a593Smuzhiyun  *
358*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
359*4882a593Smuzhiyun  */
set_reset_mode(struct net_device * ndev)360*4882a593Smuzhiyun static int set_reset_mode(struct net_device *ndev)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
363*4882a593Smuzhiyun 	unsigned long timeout;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	timeout = jiffies + XCAN_TIMEOUT;
368*4882a593Smuzhiyun 	while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) {
369*4882a593Smuzhiyun 		if (time_after(jiffies, timeout)) {
370*4882a593Smuzhiyun 			netdev_warn(ndev, "timed out for config mode\n");
371*4882a593Smuzhiyun 			return -ETIMEDOUT;
372*4882a593Smuzhiyun 		}
373*4882a593Smuzhiyun 		usleep_range(500, 10000);
374*4882a593Smuzhiyun 	}
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	/* reset clears FIFOs */
377*4882a593Smuzhiyun 	priv->tx_head = 0;
378*4882a593Smuzhiyun 	priv->tx_tail = 0;
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun 	return 0;
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun /**
384*4882a593Smuzhiyun  * xcan_set_bittiming - CAN set bit timing routine
385*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
386*4882a593Smuzhiyun  *
387*4882a593Smuzhiyun  * This is the driver set bittiming  routine.
388*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
389*4882a593Smuzhiyun  */
xcan_set_bittiming(struct net_device * ndev)390*4882a593Smuzhiyun static int xcan_set_bittiming(struct net_device *ndev)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
393*4882a593Smuzhiyun 	struct can_bittiming *bt = &priv->can.bittiming;
394*4882a593Smuzhiyun 	struct can_bittiming *dbt = &priv->can.data_bittiming;
395*4882a593Smuzhiyun 	u32 btr0, btr1;
396*4882a593Smuzhiyun 	u32 is_config_mode;
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	/* Check whether Xilinx CAN is in configuration mode.
399*4882a593Smuzhiyun 	 * It cannot set bit timing if Xilinx CAN is not in configuration mode.
400*4882a593Smuzhiyun 	 */
401*4882a593Smuzhiyun 	is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) &
402*4882a593Smuzhiyun 				XCAN_SR_CONFIG_MASK;
403*4882a593Smuzhiyun 	if (!is_config_mode) {
404*4882a593Smuzhiyun 		netdev_alert(ndev,
405*4882a593Smuzhiyun 			     "BUG! Cannot set bittiming - CAN is not in config mode\n");
406*4882a593Smuzhiyun 		return -EPERM;
407*4882a593Smuzhiyun 	}
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	/* Setting Baud Rate prescalar value in BRPR Register */
410*4882a593Smuzhiyun 	btr0 = (bt->brp - 1);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	/* Setting Time Segment 1 in BTR Register */
413*4882a593Smuzhiyun 	btr1 = (bt->prop_seg + bt->phase_seg1 - 1);
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	/* Setting Time Segment 2 in BTR Register */
416*4882a593Smuzhiyun 	btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	/* Setting Synchronous jump width in BTR Register */
419*4882a593Smuzhiyun 	btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0);
422*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_BTR_OFFSET, btr1);
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 	if (priv->devtype.cantype == XAXI_CANFD ||
425*4882a593Smuzhiyun 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
426*4882a593Smuzhiyun 		/* Setting Baud Rate prescalar value in F_BRPR Register */
427*4882a593Smuzhiyun 		btr0 = dbt->brp - 1;
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 		/* Setting Time Segment 1 in BTR Register */
430*4882a593Smuzhiyun 		btr1 = dbt->prop_seg + dbt->phase_seg1 - 1;
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 		/* Setting Time Segment 2 in BTR Register */
433*4882a593Smuzhiyun 		btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift;
434*4882a593Smuzhiyun 
435*4882a593Smuzhiyun 		/* Setting Synchronous jump width in BTR Register */
436*4882a593Smuzhiyun 		btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift;
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0);
439*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1);
440*4882a593Smuzhiyun 	}
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n",
443*4882a593Smuzhiyun 		   priv->read_reg(priv, XCAN_BRPR_OFFSET),
444*4882a593Smuzhiyun 		   priv->read_reg(priv, XCAN_BTR_OFFSET));
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	return 0;
447*4882a593Smuzhiyun }
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun /**
450*4882a593Smuzhiyun  * xcan_chip_start - This the drivers start routine
451*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
452*4882a593Smuzhiyun  *
453*4882a593Smuzhiyun  * This is the drivers start routine.
454*4882a593Smuzhiyun  * Based on the State of the CAN device it puts
455*4882a593Smuzhiyun  * the CAN device into a proper mode.
456*4882a593Smuzhiyun  *
457*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
458*4882a593Smuzhiyun  */
xcan_chip_start(struct net_device * ndev)459*4882a593Smuzhiyun static int xcan_chip_start(struct net_device *ndev)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
462*4882a593Smuzhiyun 	u32 reg_msr;
463*4882a593Smuzhiyun 	int err;
464*4882a593Smuzhiyun 	u32 ier;
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun 	/* Check if it is in reset mode */
467*4882a593Smuzhiyun 	err = set_reset_mode(ndev);
468*4882a593Smuzhiyun 	if (err < 0)
469*4882a593Smuzhiyun 		return err;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	err = xcan_set_bittiming(ndev);
472*4882a593Smuzhiyun 	if (err < 0)
473*4882a593Smuzhiyun 		return err;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	/* Enable interrupts
476*4882a593Smuzhiyun 	 *
477*4882a593Smuzhiyun 	 * We enable the ERROR interrupt even with
478*4882a593Smuzhiyun 	 * CAN_CTRLMODE_BERR_REPORTING disabled as there is no
479*4882a593Smuzhiyun 	 * dedicated interrupt for a state change to
480*4882a593Smuzhiyun 	 * ERROR_WARNING/ERROR_PASSIVE.
481*4882a593Smuzhiyun 	 */
482*4882a593Smuzhiyun 	ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |
483*4882a593Smuzhiyun 		XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK |
484*4882a593Smuzhiyun 		XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
485*4882a593Smuzhiyun 		XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv);
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_RXMNF)
488*4882a593Smuzhiyun 		ier |= XCAN_IXR_RXMNF_MASK;
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	/* Check whether it is loopback mode or normal mode  */
493*4882a593Smuzhiyun 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
494*4882a593Smuzhiyun 		reg_msr = XCAN_MSR_LBACK_MASK;
495*4882a593Smuzhiyun 	else
496*4882a593Smuzhiyun 		reg_msr = 0x0;
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun 	/* enable the first extended filter, if any, as cores with extended
499*4882a593Smuzhiyun 	 * filtering default to non-receipt if all filters are disabled
500*4882a593Smuzhiyun 	 */
501*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS)
502*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr);
505*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK);
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	netdev_dbg(ndev, "status:#x%08x\n",
508*4882a593Smuzhiyun 		   priv->read_reg(priv, XCAN_SR_OFFSET));
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
511*4882a593Smuzhiyun 	return 0;
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun /**
515*4882a593Smuzhiyun  * xcan_do_set_mode - This sets the mode of the driver
516*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
517*4882a593Smuzhiyun  * @mode:	Tells the mode of the driver
518*4882a593Smuzhiyun  *
519*4882a593Smuzhiyun  * This check the drivers state and calls the
520*4882a593Smuzhiyun  * the corresponding modes to set.
521*4882a593Smuzhiyun  *
522*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
523*4882a593Smuzhiyun  */
xcan_do_set_mode(struct net_device * ndev,enum can_mode mode)524*4882a593Smuzhiyun static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode)
525*4882a593Smuzhiyun {
526*4882a593Smuzhiyun 	int ret;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	switch (mode) {
529*4882a593Smuzhiyun 	case CAN_MODE_START:
530*4882a593Smuzhiyun 		ret = xcan_chip_start(ndev);
531*4882a593Smuzhiyun 		if (ret < 0) {
532*4882a593Smuzhiyun 			netdev_err(ndev, "xcan_chip_start failed!\n");
533*4882a593Smuzhiyun 			return ret;
534*4882a593Smuzhiyun 		}
535*4882a593Smuzhiyun 		netif_wake_queue(ndev);
536*4882a593Smuzhiyun 		break;
537*4882a593Smuzhiyun 	default:
538*4882a593Smuzhiyun 		ret = -EOPNOTSUPP;
539*4882a593Smuzhiyun 		break;
540*4882a593Smuzhiyun 	}
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	return ret;
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun /**
546*4882a593Smuzhiyun  * xcan_write_frame - Write a frame to HW
547*4882a593Smuzhiyun  * @ndev:		Pointer to net_device structure
548*4882a593Smuzhiyun  * @skb:		sk_buff pointer that contains data to be Txed
549*4882a593Smuzhiyun  * @frame_offset:	Register offset to write the frame to
550*4882a593Smuzhiyun  */
xcan_write_frame(struct net_device * ndev,struct sk_buff * skb,int frame_offset)551*4882a593Smuzhiyun static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb,
552*4882a593Smuzhiyun 			     int frame_offset)
553*4882a593Smuzhiyun {
554*4882a593Smuzhiyun 	u32 id, dlc, data[2] = {0, 0};
555*4882a593Smuzhiyun 	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
556*4882a593Smuzhiyun 	u32 ramoff, dwindex = 0, i;
557*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	/* Watch carefully on the bit sequence */
560*4882a593Smuzhiyun 	if (cf->can_id & CAN_EFF_FLAG) {
561*4882a593Smuzhiyun 		/* Extended CAN ID format */
562*4882a593Smuzhiyun 		id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) &
563*4882a593Smuzhiyun 			XCAN_IDR_ID2_MASK;
564*4882a593Smuzhiyun 		id |= (((cf->can_id & CAN_EFF_MASK) >>
565*4882a593Smuzhiyun 			(CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) <<
566*4882a593Smuzhiyun 			XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK;
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun 		/* The substibute remote TX request bit should be "1"
569*4882a593Smuzhiyun 		 * for extended frames as in the Xilinx CAN datasheet
570*4882a593Smuzhiyun 		 */
571*4882a593Smuzhiyun 		id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK;
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 		if (cf->can_id & CAN_RTR_FLAG)
574*4882a593Smuzhiyun 			/* Extended frames remote TX request */
575*4882a593Smuzhiyun 			id |= XCAN_IDR_RTR_MASK;
576*4882a593Smuzhiyun 	} else {
577*4882a593Smuzhiyun 		/* Standard CAN ID format */
578*4882a593Smuzhiyun 		id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) &
579*4882a593Smuzhiyun 			XCAN_IDR_ID1_MASK;
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 		if (cf->can_id & CAN_RTR_FLAG)
582*4882a593Smuzhiyun 			/* Standard frames remote TX request */
583*4882a593Smuzhiyun 			id |= XCAN_IDR_SRR_MASK;
584*4882a593Smuzhiyun 	}
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 	dlc = can_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT;
587*4882a593Smuzhiyun 	if (can_is_canfd_skb(skb)) {
588*4882a593Smuzhiyun 		if (cf->flags & CANFD_BRS)
589*4882a593Smuzhiyun 			dlc |= XCAN_DLCR_BRS_MASK;
590*4882a593Smuzhiyun 		dlc |= XCAN_DLCR_EDL_MASK;
591*4882a593Smuzhiyun 	}
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) &&
594*4882a593Smuzhiyun 	    (priv->devtype.flags & XCAN_FLAG_TXFEMP))
595*4882a593Smuzhiyun 		can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
596*4882a593Smuzhiyun 	else
597*4882a593Smuzhiyun 		can_put_echo_skb(skb, ndev, 0);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	priv->tx_head++;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id);
602*4882a593Smuzhiyun 	/* If the CAN frame is RTR frame this write triggers transmission
603*4882a593Smuzhiyun 	 * (not on CAN FD)
604*4882a593Smuzhiyun 	 */
605*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc);
606*4882a593Smuzhiyun 	if (priv->devtype.cantype == XAXI_CANFD ||
607*4882a593Smuzhiyun 	    priv->devtype.cantype == XAXI_CANFD_2_0) {
608*4882a593Smuzhiyun 		for (i = 0; i < cf->len; i += 4) {
609*4882a593Smuzhiyun 			ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) +
610*4882a593Smuzhiyun 					(dwindex * XCANFD_DW_BYTES);
611*4882a593Smuzhiyun 			priv->write_reg(priv, ramoff,
612*4882a593Smuzhiyun 					be32_to_cpup((__be32 *)(cf->data + i)));
613*4882a593Smuzhiyun 			dwindex++;
614*4882a593Smuzhiyun 		}
615*4882a593Smuzhiyun 	} else {
616*4882a593Smuzhiyun 		if (cf->len > 0)
617*4882a593Smuzhiyun 			data[0] = be32_to_cpup((__be32 *)(cf->data + 0));
618*4882a593Smuzhiyun 		if (cf->len > 4)
619*4882a593Smuzhiyun 			data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
620*4882a593Smuzhiyun 
621*4882a593Smuzhiyun 		if (!(cf->can_id & CAN_RTR_FLAG)) {
622*4882a593Smuzhiyun 			priv->write_reg(priv,
623*4882a593Smuzhiyun 					XCAN_FRAME_DW1_OFFSET(frame_offset),
624*4882a593Smuzhiyun 					data[0]);
625*4882a593Smuzhiyun 			/* If the CAN frame is Standard/Extended frame this
626*4882a593Smuzhiyun 			 * write triggers transmission (not on CAN FD)
627*4882a593Smuzhiyun 			 */
628*4882a593Smuzhiyun 			priv->write_reg(priv,
629*4882a593Smuzhiyun 					XCAN_FRAME_DW2_OFFSET(frame_offset),
630*4882a593Smuzhiyun 					data[1]);
631*4882a593Smuzhiyun 		}
632*4882a593Smuzhiyun 	}
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun /**
636*4882a593Smuzhiyun  * xcan_start_xmit_fifo - Starts the transmission (FIFO mode)
637*4882a593Smuzhiyun  * @skb:	sk_buff pointer that contains data to be Txed
638*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
639*4882a593Smuzhiyun  *
640*4882a593Smuzhiyun  * Return: 0 on success, -ENOSPC if FIFO is full.
641*4882a593Smuzhiyun  */
xcan_start_xmit_fifo(struct sk_buff * skb,struct net_device * ndev)642*4882a593Smuzhiyun static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
645*4882a593Smuzhiyun 	unsigned long flags;
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun 	/* Check if the TX buffer is full */
648*4882a593Smuzhiyun 	if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) &
649*4882a593Smuzhiyun 			XCAN_SR_TXFLL_MASK))
650*4882a593Smuzhiyun 		return -ENOSPC;
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	spin_lock_irqsave(&priv->tx_lock, flags);
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET);
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun 	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
657*4882a593Smuzhiyun 	if (priv->tx_max > 1)
658*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
659*4882a593Smuzhiyun 
660*4882a593Smuzhiyun 	/* Check if the TX buffer is full */
661*4882a593Smuzhiyun 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
662*4882a593Smuzhiyun 		netif_stop_queue(ndev);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	spin_unlock_irqrestore(&priv->tx_lock, flags);
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	return 0;
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun /**
670*4882a593Smuzhiyun  * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode)
671*4882a593Smuzhiyun  * @skb:	sk_buff pointer that contains data to be Txed
672*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
673*4882a593Smuzhiyun  *
674*4882a593Smuzhiyun  * Return: 0 on success, -ENOSPC if there is no space
675*4882a593Smuzhiyun  */
xcan_start_xmit_mailbox(struct sk_buff * skb,struct net_device * ndev)676*4882a593Smuzhiyun static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
679*4882a593Smuzhiyun 	unsigned long flags;
680*4882a593Smuzhiyun 
681*4882a593Smuzhiyun 	if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) &
682*4882a593Smuzhiyun 		     BIT(XCAN_TX_MAILBOX_IDX)))
683*4882a593Smuzhiyun 		return -ENOSPC;
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun 	spin_lock_irqsave(&priv->tx_lock, flags);
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	xcan_write_frame(ndev, skb,
688*4882a593Smuzhiyun 			 XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX));
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	/* Mark buffer as ready for transmit */
691*4882a593Smuzhiyun 	priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX));
692*4882a593Smuzhiyun 
693*4882a593Smuzhiyun 	netif_stop_queue(ndev);
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 	spin_unlock_irqrestore(&priv->tx_lock, flags);
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	return 0;
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun /**
701*4882a593Smuzhiyun  * xcan_start_xmit - Starts the transmission
702*4882a593Smuzhiyun  * @skb:	sk_buff pointer that contains data to be Txed
703*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
704*4882a593Smuzhiyun  *
705*4882a593Smuzhiyun  * This function is invoked from upper layers to initiate transmission.
706*4882a593Smuzhiyun  *
707*4882a593Smuzhiyun  * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full
708*4882a593Smuzhiyun  */
xcan_start_xmit(struct sk_buff * skb,struct net_device * ndev)709*4882a593Smuzhiyun static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
710*4882a593Smuzhiyun {
711*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
712*4882a593Smuzhiyun 	int ret;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	if (can_dropped_invalid_skb(ndev, skb))
715*4882a593Smuzhiyun 		return NETDEV_TX_OK;
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES)
718*4882a593Smuzhiyun 		ret = xcan_start_xmit_mailbox(skb, ndev);
719*4882a593Smuzhiyun 	else
720*4882a593Smuzhiyun 		ret = xcan_start_xmit_fifo(skb, ndev);
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	if (ret < 0) {
723*4882a593Smuzhiyun 		netdev_err(ndev, "BUG!, TX full when queue awake!\n");
724*4882a593Smuzhiyun 		netif_stop_queue(ndev);
725*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
726*4882a593Smuzhiyun 	}
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	return NETDEV_TX_OK;
729*4882a593Smuzhiyun }
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun /**
732*4882a593Smuzhiyun  * xcan_rx -  Is called from CAN isr to complete the received
733*4882a593Smuzhiyun  *		frame  processing
734*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
735*4882a593Smuzhiyun  * @frame_base:	Register offset to the frame to be read
736*4882a593Smuzhiyun  *
737*4882a593Smuzhiyun  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
738*4882a593Smuzhiyun  * does minimal processing and invokes "netif_receive_skb" to complete further
739*4882a593Smuzhiyun  * processing.
740*4882a593Smuzhiyun  * Return: 1 on success and 0 on failure.
741*4882a593Smuzhiyun  */
static int xcan_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0};

	skb = alloc_can_skb(ndev, &cf);
	if (unlikely(!skb)) {
		/* Out of memory: account the drop; the caller keeps polling.
		 * NOTE(review): returning here leaves the frame in the FIFO
		 * registers unread — the caller's index/ICR handling pops it.
		 */
		stats->rx_dropped++;
		return 0;
	}

	/* Read a frame from Xilinx zynq CANPS */
	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >>
				   XCAN_DLCR_DLC_SHIFT;

	/* Change Xilinx CAN data length format to socketCAN data format
	 * (clamped to the classic-CAN maximum of 8)
	 */
	cf->can_dlc = get_can_dlc(dlc);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame; for
		 * standard frames the SRR bit position carries RTR
		 */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (id_xcan & XCAN_IDR_SRR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* DW1/DW2 must always be read to remove message from RXFIFO */
	data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base));
	data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base));

	if (!(cf->can_id & CAN_RTR_FLAG)) {
		/* Change Xilinx CAN data format to socketCAN data format:
		 * payload byte 0 lives in the MSB of DW1
		 */
		if (cf->can_dlc > 0)
			*(__be32 *)(cf->data) = cpu_to_be32(data[0]);
		if (cf->can_dlc > 4)
			*(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]);
	}

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun /**
801*4882a593Smuzhiyun  * xcanfd_rx -  Is called from CAN isr to complete the received
802*4882a593Smuzhiyun  *		frame  processing
803*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
804*4882a593Smuzhiyun  * @frame_base:	Register offset to the frame to be read
805*4882a593Smuzhiyun  *
806*4882a593Smuzhiyun  * This function is invoked from the CAN isr(poll) to process the Rx frames. It
807*4882a593Smuzhiyun  * does minimal processing and invokes "netif_receive_skb" to complete further
808*4882a593Smuzhiyun  * processing.
809*4882a593Smuzhiyun  * Return: 1 on success and 0 on failure.
810*4882a593Smuzhiyun  */
static int xcanfd_rx(struct net_device *ndev, int frame_base)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset;

	id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base));
	dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base));
	/* EDL set in the DLC register marks a CAN FD frame; pick the
	 * matching skb type (struct canfd_frame is layout-compatible with
	 * struct can_frame for the fields used here)
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		/* Out of memory: account the drop; the caller keeps polling */
		stats->rx_dropped++;
		return 0;
	}

	/* Change Xilinx CANFD data length format to socketCAN data
	 * format (FD DLC expands up to 64 bytes, classic clamps at 8)
	 */
	if (dlc & XCAN_DLCR_EDL_MASK)
		cf->len = can_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >>
				  XCAN_DLCR_DLC_SHIFT);
	else
		cf->len = get_can_dlc((dlc & XCAN_DLCR_DLC_MASK) >>
					  XCAN_DLCR_DLC_SHIFT);

	/* Change Xilinx CAN ID format to socketCAN ID format */
	if (id_xcan & XCAN_IDR_IDE_MASK) {
		/* The received frame is an Extended format frame */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3;
		cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >>
				XCAN_IDR_ID2_SHIFT;
		cf->can_id |= CAN_EFF_FLAG;
		if (id_xcan & XCAN_IDR_RTR_MASK)
			cf->can_id |= CAN_RTR_FLAG;
	} else {
		/* The received frame is a standard format frame.
		 * CAN FD has no remote frames, so SRR is only mapped to
		 * RTR for classic (non-EDL) frames.
		 */
		cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >>
				XCAN_IDR_ID1_SHIFT;
		if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan &
					XCAN_IDR_SRR_MASK))
			cf->can_id |= CAN_RTR_FLAG;
	}

	/* Check if the frame received is FD or not */
	if (dlc & XCAN_DLCR_EDL_MASK) {
		/* FD layout: payload words at consecutive DW slots */
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) +
					(dwindex * XCANFD_DW_BYTES);
			data[0] = priv->read_reg(priv, dw_offset);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
			dwindex++;
		}
	} else {
		/* Classic layout: payload addressed by byte offset from DW0 */
		for (i = 0; i < cf->len; i += 4) {
			dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base);
			data[0] = priv->read_reg(priv, dw_offset + i);
			*(__be32 *)(cf->data + i) = cpu_to_be32(data[0]);
		}
	}
	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun /**
883*4882a593Smuzhiyun  * xcan_current_error_state - Get current error state from HW
884*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
885*4882a593Smuzhiyun  *
886*4882a593Smuzhiyun  * Checks the current CAN error state from the HW. Note that this
887*4882a593Smuzhiyun  * only checks for ERROR_PASSIVE and ERROR_WARNING.
888*4882a593Smuzhiyun  *
889*4882a593Smuzhiyun  * Return:
890*4882a593Smuzhiyun  * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
891*4882a593Smuzhiyun  * otherwise.
892*4882a593Smuzhiyun  */
xcan_current_error_state(struct net_device * ndev)893*4882a593Smuzhiyun static enum can_state xcan_current_error_state(struct net_device *ndev)
894*4882a593Smuzhiyun {
895*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
896*4882a593Smuzhiyun 	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
899*4882a593Smuzhiyun 		return CAN_STATE_ERROR_PASSIVE;
900*4882a593Smuzhiyun 	else if (status & XCAN_SR_ERRWRN_MASK)
901*4882a593Smuzhiyun 		return CAN_STATE_ERROR_WARNING;
902*4882a593Smuzhiyun 	else
903*4882a593Smuzhiyun 		return CAN_STATE_ERROR_ACTIVE;
904*4882a593Smuzhiyun }
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun /**
907*4882a593Smuzhiyun  * xcan_set_error_state - Set new CAN error state
908*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
909*4882a593Smuzhiyun  * @new_state:	The new CAN state to be set
910*4882a593Smuzhiyun  * @cf:		Error frame to be populated or NULL
911*4882a593Smuzhiyun  *
912*4882a593Smuzhiyun  * Set new CAN error state for the device, updating statistics and
913*4882a593Smuzhiyun  * populating the error frame if given.
914*4882a593Smuzhiyun  */
xcan_set_error_state(struct net_device * ndev,enum can_state new_state,struct can_frame * cf)915*4882a593Smuzhiyun static void xcan_set_error_state(struct net_device *ndev,
916*4882a593Smuzhiyun 				 enum can_state new_state,
917*4882a593Smuzhiyun 				 struct can_frame *cf)
918*4882a593Smuzhiyun {
919*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
920*4882a593Smuzhiyun 	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
921*4882a593Smuzhiyun 	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
922*4882a593Smuzhiyun 	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
923*4882a593Smuzhiyun 	enum can_state tx_state = txerr >= rxerr ? new_state : 0;
924*4882a593Smuzhiyun 	enum can_state rx_state = txerr <= rxerr ? new_state : 0;
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 	/* non-ERROR states are handled elsewhere */
927*4882a593Smuzhiyun 	if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE))
928*4882a593Smuzhiyun 		return;
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 	can_change_state(ndev, cf, tx_state, rx_state);
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	if (cf) {
933*4882a593Smuzhiyun 		cf->data[6] = txerr;
934*4882a593Smuzhiyun 		cf->data[7] = rxerr;
935*4882a593Smuzhiyun 	}
936*4882a593Smuzhiyun }
937*4882a593Smuzhiyun 
938*4882a593Smuzhiyun /**
939*4882a593Smuzhiyun  * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
940*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
941*4882a593Smuzhiyun  *
942*4882a593Smuzhiyun  * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
943*4882a593Smuzhiyun  * the performed RX/TX has caused it to drop to a lesser state and set
944*4882a593Smuzhiyun  * the interface state accordingly.
945*4882a593Smuzhiyun  */
xcan_update_error_state_after_rxtx(struct net_device * ndev)946*4882a593Smuzhiyun static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
947*4882a593Smuzhiyun {
948*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
949*4882a593Smuzhiyun 	enum can_state old_state = priv->can.state;
950*4882a593Smuzhiyun 	enum can_state new_state;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	/* changing error state due to successful frame RX/TX can only
953*4882a593Smuzhiyun 	 * occur from these states
954*4882a593Smuzhiyun 	 */
955*4882a593Smuzhiyun 	if (old_state != CAN_STATE_ERROR_WARNING &&
956*4882a593Smuzhiyun 	    old_state != CAN_STATE_ERROR_PASSIVE)
957*4882a593Smuzhiyun 		return;
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	new_state = xcan_current_error_state(ndev);
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 	if (new_state != old_state) {
962*4882a593Smuzhiyun 		struct sk_buff *skb;
963*4882a593Smuzhiyun 		struct can_frame *cf;
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 		skb = alloc_can_err_skb(ndev, &cf);
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 		if (skb) {
970*4882a593Smuzhiyun 			struct net_device_stats *stats = &ndev->stats;
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 			stats->rx_packets++;
973*4882a593Smuzhiyun 			stats->rx_bytes += cf->can_dlc;
974*4882a593Smuzhiyun 			netif_rx(skb);
975*4882a593Smuzhiyun 		}
976*4882a593Smuzhiyun 	}
977*4882a593Smuzhiyun }
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun /**
980*4882a593Smuzhiyun  * xcan_err_interrupt - error frame Isr
981*4882a593Smuzhiyun  * @ndev:	net_device pointer
982*4882a593Smuzhiyun  * @isr:	interrupt status register value
983*4882a593Smuzhiyun  *
984*4882a593Smuzhiyun  * This is the CAN error interrupt and it will
 * check the type of error and forward the error
986*4882a593Smuzhiyun  * frame to upper layers.
987*4882a593Smuzhiyun  */
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame cf = { };
	u32 err_status;

	/* Latch the error status and write the value back to acknowledge
	 * it. NOTE(review): presumably write-one-to-clear per the Xilinx
	 * datasheet — confirm against the TRM.
	 */
	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);

	if (isr & XCAN_IXR_BSOFF_MASK) {
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		/* Leave device in Config Mode in bus-off state */
		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
		can_bus_off(ndev);
		cf.can_id |= CAN_ERR_BUSOFF;
	} else {
		/* Not bus-off: re-derive the error state from the HW and
		 * report any transition
		 */
		enum can_state new_state = xcan_current_error_state(ndev);

		if (new_state != priv->can.state)
			xcan_set_error_state(ndev, new_state, &cf);
	}

	/* Check for Arbitration lost interrupt */
	if (isr & XCAN_IXR_ARBLST_MASK) {
		priv->can.can_stats.arbitration_lost++;
		cf.can_id |= CAN_ERR_LOSTARB;
		cf.data[0] = CAN_ERR_LOSTARB_UNSPEC;
	}

	/* Check for RX FIFO Overflow interrupt */
	if (isr & XCAN_IXR_RXOFLW_MASK) {
		stats->rx_over_errors++;
		stats->rx_errors++;
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* Check for RX Match Not Finished interrupt */
	if (isr & XCAN_IXR_RXMNF_MASK) {
		stats->rx_dropped++;
		stats->rx_errors++;
		netdev_err(ndev, "RX match not finished, frame discarded\n");
		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_UNSPEC;
	}

	/* Check for error interrupt: decode the individual protocol error
	 * causes from the latched error status register
	 */
	if (isr & XCAN_IXR_ERROR_MASK) {
		bool berr_reporting = false;

		/* Only populate protocol-error details in the error frame
		 * when bus-error reporting was requested by the user
		 */
		if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
			berr_reporting = true;
			cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		}

		/* Check for Ack error interrupt */
		if (err_status & XCAN_ESR_ACKER_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_ACK;
				cf.data[3] = CAN_ERR_PROT_LOC_ACK;
			}
		}

		/* Check for Bit error interrupt */
		if (err_status & XCAN_ESR_BERR_MASK) {
			stats->tx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_BIT;
			}
		}

		/* Check for Stuff error interrupt */
		if (err_status & XCAN_ESR_STER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_STUFF;
			}
		}

		/* Check for Form error interrupt */
		if (err_status & XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	/* If any error condition set bits in the local frame, duplicate it
	 * into a freshly allocated error skb and pass it up the stack
	 */
	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			stats->rx_packets++;
			stats->rx_bytes += CAN_ERR_DLC;
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun /**
1110*4882a593Smuzhiyun  * xcan_state_interrupt - It will check the state of the CAN device
1111*4882a593Smuzhiyun  * @ndev:	net_device pointer
1112*4882a593Smuzhiyun  * @isr:	interrupt status register value
1113*4882a593Smuzhiyun  *
1114*4882a593Smuzhiyun  * This will checks the state of the CAN device
1115*4882a593Smuzhiyun  * and puts the device into appropriate state.
1116*4882a593Smuzhiyun  */
/* Track sleep/wake-up transitions reported by the hardware.
 * A wake-up indication takes precedence over a simultaneous sleep
 * indication, matching the original flag-evaluation order.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
	else if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;
}
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun /**
1131*4882a593Smuzhiyun  * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
1132*4882a593Smuzhiyun  * @priv:	Driver private data structure
1133*4882a593Smuzhiyun  *
1134*4882a593Smuzhiyun  * Return: Register offset of the next frame in RX FIFO.
1135*4882a593Smuzhiyun  */
xcan_rx_fifo_get_next_frame(struct xcan_priv * priv)1136*4882a593Smuzhiyun static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
1137*4882a593Smuzhiyun {
1138*4882a593Smuzhiyun 	int offset;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
1141*4882a593Smuzhiyun 		u32 fsr, mask;
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun 		/* clear RXOK before the is-empty check so that any newly
1144*4882a593Smuzhiyun 		 * received frame will reassert it without a race
1145*4882a593Smuzhiyun 		 */
1146*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		/* check if RX FIFO is empty */
1151*4882a593Smuzhiyun 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1152*4882a593Smuzhiyun 			mask = XCAN_2_FSR_FL_MASK;
1153*4882a593Smuzhiyun 		else
1154*4882a593Smuzhiyun 			mask = XCAN_FSR_FL_MASK;
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 		if (!(fsr & mask))
1157*4882a593Smuzhiyun 			return -ENOENT;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
1160*4882a593Smuzhiyun 			offset =
1161*4882a593Smuzhiyun 			  XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
1162*4882a593Smuzhiyun 		else
1163*4882a593Smuzhiyun 			offset =
1164*4882a593Smuzhiyun 			  XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	} else {
1167*4882a593Smuzhiyun 		/* check if RX FIFO is empty */
1168*4882a593Smuzhiyun 		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
1169*4882a593Smuzhiyun 		      XCAN_IXR_RXNEMP_MASK))
1170*4882a593Smuzhiyun 			return -ENOENT;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 		/* frames are read from a static offset */
1173*4882a593Smuzhiyun 		offset = XCAN_RXFIFO_OFFSET;
1174*4882a593Smuzhiyun 	}
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	return offset;
1177*4882a593Smuzhiyun }
1178*4882a593Smuzhiyun 
/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi:	napi structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for rx part.
 * It will process at most @quota packets per invocation.
 *
 * Return: number of packets received
 */
static int xcan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 ier;
	int work_done = 0;
	int frame_offset;

	/* drain the RX FIFO one frame at a time until it is empty or the
	 * NAPI budget is exhausted
	 */
	while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 &&
	       (work_done < quota)) {
		/* RXOK in the RX interrupt mask indicates a CAN FD core */
		if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK)
			work_done += xcanfd_rx(ndev, frame_offset);
		else
			work_done += xcan_rx(ndev, frame_offset);

		if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI)
			/* increment read index */
			priv->write_reg(priv, XCAN_FSR_OFFSET,
					XCAN_FSR_IRI_MASK);
		else
			/* clear rx-not-empty (will actually clear only if
			 * empty)
			 */
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_RXNEMP_MASK);
	}

	if (work_done) {
		can_led_event(ndev, CAN_LED_EVENT_RX);
		xcan_update_error_state_after_rxtx(ndev);
	}

	if (work_done < quota) {
		/* FIFO drained: complete NAPI and re-enable the RX
		 * interrupts that xcan_interrupt() masked before
		 * scheduling us
		 */
		napi_complete_done(napi, work_done);
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier |= xcan_rx_int_mask(priv);
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
	}
	return work_done;
}
1229*4882a593Smuzhiyun 
/**
 * xcan_tx_interrupt - Tx Done Isr
 * @ndev:	net_device pointer
 * @isr:	Interrupt status register value
 *
 * Accounts for the frame(s) completed since the last TXOK interrupt,
 * releases the matching echo skbs in FIFO order and wakes the TX queue.
 */
static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned int frames_in_fifo;
	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
	unsigned long flags;
	int retries = 0;

	/* Synchronize with xmit as we need to know the exact number
	 * of frames in the FIFO to stay in sync due to the TXFEMP
	 * handling.
	 * This also prevents a race between netif_wake_queue() and
	 * netif_stop_queue().
	 */
	spin_lock_irqsave(&priv->tx_lock, flags);

	frames_in_fifo = priv->tx_head - priv->tx_tail;

	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
		/* clear TXOK anyway to avoid getting back here */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return;
	}

	/* Check if 2 frames were sent (TXOK only means that at least 1
	 * frame was sent).
	 */
	if (frames_in_fifo > 1) {
		WARN_ON(frames_in_fifo > priv->tx_max);

		/* Synchronize TXOK and isr so that after the loop:
		 * (1) isr variable is up-to-date at least up to TXOK clear
		 *     time. This avoids us clearing a TXOK of a second frame
		 *     but not noticing that the FIFO is now empty and thus
		 *     marking only a single frame as sent.
		 * (2) No TXOK is left. Having one could mean leaving a
		 *     stray TXOK as we might process the associated frame
		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
		 *     clear to satisfy (1).
		 */
		while ((isr & XCAN_IXR_TXOK_MASK) &&
		       !WARN_ON(++retries == 100)) {
			priv->write_reg(priv, XCAN_ICR_OFFSET,
					XCAN_IXR_TXOK_MASK);
			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
		}

		if (isr & XCAN_IXR_TXFEMP_MASK) {
			/* nothing in FIFO anymore */
			frames_sent = frames_in_fifo;
		}
	} else {
		/* single frame in fifo, just clear TXOK */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
	}

	/* release echo skbs in FIFO order and update stats */
	while (frames_sent--) {
		stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail %
						    priv->tx_max);
		priv->tx_tail++;
		stats->tx_packets++;
	}

	netif_wake_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	can_led_event(ndev, CAN_LED_EVENT_TX);
	xcan_update_error_state_after_rxtx(ndev);
}
1307*4882a593Smuzhiyun 
/**
 * xcan_interrupt - CAN Isr
 * @irq:	irq number
 * @dev_id:	device id pointer
 *
 * This is the xilinx CAN Isr. It checks for the type of interrupt
 * and invokes the corresponding ISR.
 *
 * Return:
 * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise
 */
static irqreturn_t xcan_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct xcan_priv *priv = netdev_priv(ndev);
	u32 isr, ier;
	u32 isr_errors;
	u32 rx_int_mask = xcan_rx_int_mask(priv);

	/* Get the interrupt status from Xilinx CAN */
	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
	if (!isr)
		return IRQ_NONE;

	/* Sleep/wakeup state change: ack and report the new state */
	if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK |
				XCAN_IXR_WKUP_MASK));
		xcan_state_interrupt(ndev, isr);
	}

	/* Check for Tx interrupt and Processing it */
	if (isr & XCAN_IXR_TXOK_MASK)
		xcan_tx_interrupt(ndev, isr);

	/* Ack all pending error conditions, then let the error handler
	 * inspect the original isr value
	 */
	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK |
			    XCAN_IXR_RXMNF_MASK);
	if (isr_errors) {
		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
		xcan_err_interrupt(ndev, isr);
	}

	/* RX work: mask RX interrupts until xcan_rx_poll() drains the
	 * FIFO and re-enables them, then hand off to NAPI
	 */
	if (isr & rx_int_mask) {
		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
		ier &= ~rx_int_mask;
		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
		napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun /**
1363*4882a593Smuzhiyun  * xcan_chip_stop - Driver stop routine
1364*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
1365*4882a593Smuzhiyun  *
1366*4882a593Smuzhiyun  * This is the drivers stop routine. It will disable the
1367*4882a593Smuzhiyun  * interrupts and put the device into configuration mode.
1368*4882a593Smuzhiyun  */
xcan_chip_stop(struct net_device * ndev)1369*4882a593Smuzhiyun static void xcan_chip_stop(struct net_device *ndev)
1370*4882a593Smuzhiyun {
1371*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
1372*4882a593Smuzhiyun 	int ret;
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun 	/* Disable interrupts and leave the can in configuration mode */
1375*4882a593Smuzhiyun 	ret = set_reset_mode(ndev);
1376*4882a593Smuzhiyun 	if (ret < 0)
1377*4882a593Smuzhiyun 		netdev_dbg(ndev, "set_reset_mode() Failed\n");
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	priv->can.state = CAN_STATE_STOPPED;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
/**
 * xcan_open - Driver open routine
 * @ndev:	Pointer to net_device structure
 *
 * This is the driver open routine: powers the device via runtime PM,
 * requests the IRQ, resets and starts the controller, then enables
 * NAPI and the TX queue.
 *
 * Return: 0 on success and failure value on error
 */
static int xcan_open(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		/* the err path still does pm_runtime_put() because
		 * pm_runtime_get_sync() bumps the usage counter even
		 * on failure
		 */
		goto err;
	}

	ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags,
			  ndev->name, ndev);
	if (ret < 0) {
		netdev_err(ndev, "irq allocation for CAN failed\n");
		goto err;
	}

	/* Set chip into reset mode */
	ret = set_reset_mode(ndev);
	if (ret < 0) {
		netdev_err(ndev, "mode resetting failed!\n");
		goto err_irq;
	}

	/* Common open */
	ret = open_candev(ndev);
	if (ret)
		goto err_irq;

	ret = xcan_chip_start(ndev);
	if (ret < 0) {
		netdev_err(ndev, "xcan_chip_start failed!\n");
		goto err_candev;
	}

	can_led_event(ndev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);

	return 0;

	/* unwind in reverse acquisition order */
err_candev:
	close_candev(ndev);
err_irq:
	free_irq(ndev->irq, ndev);
err:
	pm_runtime_put(priv->dev);

	return ret;
}
1441*4882a593Smuzhiyun 
/**
 * xcan_close - Driver close routine
 * @ndev:	Pointer to net_device structure
 *
 * Tears down in reverse order of xcan_open(): stop traffic, disable
 * NAPI, stop the controller, release the IRQ, then drop the runtime
 * PM reference taken at open.
 *
 * Return: 0 always
 */
static int xcan_close(struct net_device *ndev)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	xcan_chip_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);

	can_led_event(ndev, CAN_LED_EVENT_STOP);
	pm_runtime_put(priv->dev);

	return 0;
}
1463*4882a593Smuzhiyun 
1464*4882a593Smuzhiyun /**
1465*4882a593Smuzhiyun  * xcan_get_berr_counter - error counter routine
1466*4882a593Smuzhiyun  * @ndev:	Pointer to net_device structure
1467*4882a593Smuzhiyun  * @bec:	Pointer to can_berr_counter structure
1468*4882a593Smuzhiyun  *
1469*4882a593Smuzhiyun  * This is the driver error counter routine.
1470*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
1471*4882a593Smuzhiyun  */
xcan_get_berr_counter(const struct net_device * ndev,struct can_berr_counter * bec)1472*4882a593Smuzhiyun static int xcan_get_berr_counter(const struct net_device *ndev,
1473*4882a593Smuzhiyun 				 struct can_berr_counter *bec)
1474*4882a593Smuzhiyun {
1475*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
1476*4882a593Smuzhiyun 	int ret;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(priv->dev);
1479*4882a593Smuzhiyun 	if (ret < 0) {
1480*4882a593Smuzhiyun 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1481*4882a593Smuzhiyun 			   __func__, ret);
1482*4882a593Smuzhiyun 		pm_runtime_put(priv->dev);
1483*4882a593Smuzhiyun 		return ret;
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1487*4882a593Smuzhiyun 	bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1488*4882a593Smuzhiyun 			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	pm_runtime_put(priv->dev);
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	return 0;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun 
/* Standard net_device callbacks for the CAN network interface */
static const struct net_device_ops xcan_netdev_ops = {
	.ndo_open	= xcan_open,
	.ndo_stop	= xcan_close,
	.ndo_start_xmit	= xcan_start_xmit,
	.ndo_change_mtu	= can_change_mtu,
};
1501*4882a593Smuzhiyun 
/**
 * xcan_suspend - Suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Put the driver into low power mode.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (netif_running(ndev)) {
		/* stop traffic and put the controller back into
		 * configuration mode before power is removed
		 */
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		xcan_chip_stop(ndev);
	}

	/* gate the clocks via the runtime-suspend callback */
	return pm_runtime_force_suspend(dev);
}
1521*4882a593Smuzhiyun 
/**
 * xcan_resume - Resume from suspend
 * @dev:	Address of the device structure
 *
 * Resume operation after suspend.
 * Return: 0 on success and failure value on error
 */
static int __maybe_unused xcan_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int ret;

	/* re-enable the clocks via the runtime-resume callback */
	ret = pm_runtime_force_resume(dev);
	if (ret) {
		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
		return ret;
	}

	if (netif_running(ndev)) {
		/* the controller was stopped in xcan_suspend(); restart
		 * it and reattach before allowing traffic again
		 */
		ret = xcan_chip_start(ndev);
		if (ret) {
			dev_err(dev, "xcan_chip_start failed on resume\n");
			return ret;
		}

		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
1553*4882a593Smuzhiyun 
/**
 * xcan_runtime_suspend - Runtime suspend method for the driver
 * @dev:	Address of the device structure
 *
 * Gate both device clocks to put the controller into low power mode.
 * Return: 0 always
 */
static int __maybe_unused xcan_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct xcan_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->bus_clk);
	clk_disable_unprepare(priv->can_clk);

	return 0;
}
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun /**
1573*4882a593Smuzhiyun  * xcan_runtime_resume - Runtime resume from suspend
1574*4882a593Smuzhiyun  * @dev:	Address of the device structure
1575*4882a593Smuzhiyun  *
1576*4882a593Smuzhiyun  * Resume operation after suspend.
1577*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
1578*4882a593Smuzhiyun  */
xcan_runtime_resume(struct device * dev)1579*4882a593Smuzhiyun static int __maybe_unused xcan_runtime_resume(struct device *dev)
1580*4882a593Smuzhiyun {
1581*4882a593Smuzhiyun 	struct net_device *ndev = dev_get_drvdata(dev);
1582*4882a593Smuzhiyun 	struct xcan_priv *priv = netdev_priv(ndev);
1583*4882a593Smuzhiyun 	int ret;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	ret = clk_prepare_enable(priv->bus_clk);
1586*4882a593Smuzhiyun 	if (ret) {
1587*4882a593Smuzhiyun 		dev_err(dev, "Cannot enable clock.\n");
1588*4882a593Smuzhiyun 		return ret;
1589*4882a593Smuzhiyun 	}
1590*4882a593Smuzhiyun 	ret = clk_prepare_enable(priv->can_clk);
1591*4882a593Smuzhiyun 	if (ret) {
1592*4882a593Smuzhiyun 		dev_err(dev, "Cannot enable clock.\n");
1593*4882a593Smuzhiyun 		clk_disable_unprepare(priv->bus_clk);
1594*4882a593Smuzhiyun 		return ret;
1595*4882a593Smuzhiyun 	}
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	return 0;
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun 
/* System sleep PM stops/starts the controller; runtime PM only gates clocks */
static const struct dev_pm_ops xcan_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume)
	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
1604*4882a593Smuzhiyun 
/* Zynq CANPS: TX FIFO with usable TXFEMP, so xcan_probe() allows up to
 * two frames in flight (see the tx_max selection there).
 */
static const struct xcan_devtype_data xcan_zynq_data = {
	.cantype = XZYNQ_CANPS,
	.flags = XCAN_FLAG_TXFEMP,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "pclk",
};

/* AXI CAN: no TXFEMP flag, so only a single TX frame is kept in flight */
static const struct xcan_devtype_data xcan_axi_data = {
	.cantype = XAXI_CAN,
	.bittiming_const = &xcan_bittiming_const,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
	.bus_clk_name = "s_axi_aclk",
};

/* AXI CAN FD 1.0: TX mailboxes and multi-frame RX FIFO indexing */
static const struct xcan_devtype_data xcan_canfd_data = {
	.cantype = XAXI_CANFD,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* AXI CAN FD 2.0: as above, plus the CANFD_2 register layout */
static const struct xcan_devtype_data xcan_canfd2_data = {
	.cantype = XAXI_CANFD_2_0,
	.flags = XCAN_FLAG_EXT_FILTERS |
		 XCAN_FLAG_RXMNF |
		 XCAN_FLAG_TX_MAILBOXES |
		 XCAN_FLAG_CANFD_2 |
		 XCAN_FLAG_RX_FIFO_MULTI,
	.bittiming_const = &xcan_bittiming_const_canfd2,
	.btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD,
	.btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD,
	.bus_clk_name = "s_axi_aclk",
};

/* Match table for OF platform binding */
static const struct of_device_id xcan_of_match[] = {
	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
	{ .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data },
	{ .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data },
	{ .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xcan_of_match);
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun /**
1658*4882a593Smuzhiyun  * xcan_probe - Platform registration call
1659*4882a593Smuzhiyun  * @pdev:	Handle to the platform device structure
1660*4882a593Smuzhiyun  *
1661*4882a593Smuzhiyun  * This function does all the memory allocation and registration for the CAN
1662*4882a593Smuzhiyun  * device.
1663*4882a593Smuzhiyun  *
1664*4882a593Smuzhiyun  * Return: 0 on success and failure value on error
1665*4882a593Smuzhiyun  */
xcan_probe(struct platform_device * pdev)1666*4882a593Smuzhiyun static int xcan_probe(struct platform_device *pdev)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	struct net_device *ndev;
1669*4882a593Smuzhiyun 	struct xcan_priv *priv;
1670*4882a593Smuzhiyun 	const struct of_device_id *of_id;
1671*4882a593Smuzhiyun 	const struct xcan_devtype_data *devtype = &xcan_axi_data;
1672*4882a593Smuzhiyun 	void __iomem *addr;
1673*4882a593Smuzhiyun 	int ret;
1674*4882a593Smuzhiyun 	int rx_max, tx_max;
1675*4882a593Smuzhiyun 	u32 hw_tx_max = 0, hw_rx_max = 0;
1676*4882a593Smuzhiyun 	const char *hw_tx_max_property;
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	/* Get the virtual base address for the device */
1679*4882a593Smuzhiyun 	addr = devm_platform_ioremap_resource(pdev, 0);
1680*4882a593Smuzhiyun 	if (IS_ERR(addr)) {
1681*4882a593Smuzhiyun 		ret = PTR_ERR(addr);
1682*4882a593Smuzhiyun 		goto err;
1683*4882a593Smuzhiyun 	}
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	of_id = of_match_device(xcan_of_match, &pdev->dev);
1686*4882a593Smuzhiyun 	if (of_id && of_id->data)
1687*4882a593Smuzhiyun 		devtype = of_id->data;
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ?
1690*4882a593Smuzhiyun 			     "tx-mailbox-count" : "tx-fifo-depth";
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun 	ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property,
1693*4882a593Smuzhiyun 				   &hw_tx_max);
1694*4882a593Smuzhiyun 	if (ret < 0) {
1695*4882a593Smuzhiyun 		dev_err(&pdev->dev, "missing %s property\n",
1696*4882a593Smuzhiyun 			hw_tx_max_property);
1697*4882a593Smuzhiyun 		goto err;
1698*4882a593Smuzhiyun 	}
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth",
1701*4882a593Smuzhiyun 				   &hw_rx_max);
1702*4882a593Smuzhiyun 	if (ret < 0) {
1703*4882a593Smuzhiyun 		dev_err(&pdev->dev,
1704*4882a593Smuzhiyun 			"missing rx-fifo-depth property (mailbox mode is not supported)\n");
1705*4882a593Smuzhiyun 		goto err;
1706*4882a593Smuzhiyun 	}
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	/* With TX FIFO:
1709*4882a593Smuzhiyun 	 *
1710*4882a593Smuzhiyun 	 * There is no way to directly figure out how many frames have been
1711*4882a593Smuzhiyun 	 * sent when the TXOK interrupt is processed. If TXFEMP
1712*4882a593Smuzhiyun 	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1713*4882a593Smuzhiyun 	 * to determine if 1 or 2 frames have been sent.
1714*4882a593Smuzhiyun 	 * Theoretically we should be able to use TXFWMEMP to determine up
1715*4882a593Smuzhiyun 	 * to 3 frames, but it seems that after putting a second frame in the
1716*4882a593Smuzhiyun 	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1717*4882a593Smuzhiyun 	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1718*4882a593Smuzhiyun 	 * sent), which is not a sensible state - possibly TXFWMEMP is not
1719*4882a593Smuzhiyun 	 * completely synchronized with the rest of the bits?
1720*4882a593Smuzhiyun 	 *
1721*4882a593Smuzhiyun 	 * With TX mailboxes:
1722*4882a593Smuzhiyun 	 *
1723*4882a593Smuzhiyun 	 * HW sends frames in CAN ID priority order. To preserve FIFO ordering
1724*4882a593Smuzhiyun 	 * we submit frames one at a time.
1725*4882a593Smuzhiyun 	 */
1726*4882a593Smuzhiyun 	if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
1727*4882a593Smuzhiyun 	    (devtype->flags & XCAN_FLAG_TXFEMP))
1728*4882a593Smuzhiyun 		tx_max = min(hw_tx_max, 2U);
1729*4882a593Smuzhiyun 	else
1730*4882a593Smuzhiyun 		tx_max = 1;
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	rx_max = hw_rx_max;
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	/* Create a CAN device instance */
1735*4882a593Smuzhiyun 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1736*4882a593Smuzhiyun 	if (!ndev)
1737*4882a593Smuzhiyun 		return -ENOMEM;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	priv = netdev_priv(ndev);
1740*4882a593Smuzhiyun 	priv->dev = &pdev->dev;
1741*4882a593Smuzhiyun 	priv->can.bittiming_const = devtype->bittiming_const;
1742*4882a593Smuzhiyun 	priv->can.do_set_mode = xcan_do_set_mode;
1743*4882a593Smuzhiyun 	priv->can.do_get_berr_counter = xcan_get_berr_counter;
1744*4882a593Smuzhiyun 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1745*4882a593Smuzhiyun 					CAN_CTRLMODE_BERR_REPORTING;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	if (devtype->cantype == XAXI_CANFD)
1748*4882a593Smuzhiyun 		priv->can.data_bittiming_const =
1749*4882a593Smuzhiyun 			&xcan_data_bittiming_const_canfd;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	if (devtype->cantype == XAXI_CANFD_2_0)
1752*4882a593Smuzhiyun 		priv->can.data_bittiming_const =
1753*4882a593Smuzhiyun 			&xcan_data_bittiming_const_canfd2;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	if (devtype->cantype == XAXI_CANFD ||
1756*4882a593Smuzhiyun 	    devtype->cantype == XAXI_CANFD_2_0)
1757*4882a593Smuzhiyun 		priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	priv->reg_base = addr;
1760*4882a593Smuzhiyun 	priv->tx_max = tx_max;
1761*4882a593Smuzhiyun 	priv->devtype = *devtype;
1762*4882a593Smuzhiyun 	spin_lock_init(&priv->tx_lock);
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun 	/* Get IRQ for the device */
1765*4882a593Smuzhiyun 	ret = platform_get_irq(pdev, 0);
1766*4882a593Smuzhiyun 	if (ret < 0)
1767*4882a593Smuzhiyun 		goto err_free;
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	ndev->irq = ret;
1770*4882a593Smuzhiyun 
1771*4882a593Smuzhiyun 	ndev->flags |= IFF_ECHO;	/* We support local echo */
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	platform_set_drvdata(pdev, ndev);
1774*4882a593Smuzhiyun 	SET_NETDEV_DEV(ndev, &pdev->dev);
1775*4882a593Smuzhiyun 	ndev->netdev_ops = &xcan_netdev_ops;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	/* Getting the CAN can_clk info */
1778*4882a593Smuzhiyun 	priv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
1779*4882a593Smuzhiyun 	if (IS_ERR(priv->can_clk)) {
1780*4882a593Smuzhiyun 		if (PTR_ERR(priv->can_clk) != -EPROBE_DEFER)
1781*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Device clock not found.\n");
1782*4882a593Smuzhiyun 		ret = PTR_ERR(priv->can_clk);
1783*4882a593Smuzhiyun 		goto err_free;
1784*4882a593Smuzhiyun 	}
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name);
1787*4882a593Smuzhiyun 	if (IS_ERR(priv->bus_clk)) {
1788*4882a593Smuzhiyun 		if (PTR_ERR(priv->bus_clk) != -EPROBE_DEFER)
1789*4882a593Smuzhiyun 			dev_err(&pdev->dev, "bus clock not found\n");
1790*4882a593Smuzhiyun 		ret = PTR_ERR(priv->bus_clk);
1791*4882a593Smuzhiyun 		goto err_free;
1792*4882a593Smuzhiyun 	}
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	priv->write_reg = xcan_write_reg_le;
1795*4882a593Smuzhiyun 	priv->read_reg = xcan_read_reg_le;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	pm_runtime_enable(&pdev->dev);
1798*4882a593Smuzhiyun 	ret = pm_runtime_get_sync(&pdev->dev);
1799*4882a593Smuzhiyun 	if (ret < 0) {
1800*4882a593Smuzhiyun 		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
1801*4882a593Smuzhiyun 			   __func__, ret);
1802*4882a593Smuzhiyun 		goto err_disableclks;
1803*4882a593Smuzhiyun 	}
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
1806*4882a593Smuzhiyun 		priv->write_reg = xcan_write_reg_be;
1807*4882a593Smuzhiyun 		priv->read_reg = xcan_read_reg_be;
1808*4882a593Smuzhiyun 	}
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	priv->can.clock.freq = clk_get_rate(priv->can_clk);
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	ret = register_candev(ndev);
1815*4882a593Smuzhiyun 	if (ret) {
1816*4882a593Smuzhiyun 		dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret);
1817*4882a593Smuzhiyun 		goto err_disableclks;
1818*4882a593Smuzhiyun 	}
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	devm_can_led_init(ndev);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	pm_runtime_put(&pdev->dev);
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
1825*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
1826*4882a593Smuzhiyun 		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
1827*4882a593Smuzhiyun 	}
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
1830*4882a593Smuzhiyun 		   priv->reg_base, ndev->irq, priv->can.clock.freq,
1831*4882a593Smuzhiyun 		   hw_tx_max, priv->tx_max);
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	return 0;
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun err_disableclks:
1836*4882a593Smuzhiyun 	pm_runtime_put(priv->dev);
1837*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1838*4882a593Smuzhiyun err_free:
1839*4882a593Smuzhiyun 	free_candev(ndev);
1840*4882a593Smuzhiyun err:
1841*4882a593Smuzhiyun 	return ret;
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun 
/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 * Return: 0 always
 */
static int xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	/* reverse order of xcan_probe(); devm handles clocks and mapping */
	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);

	return 0;
}
1863*4882a593Smuzhiyun 
/* Platform driver glue: binds via the OF match table above */
static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove	= xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");
1879