// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved.
 * Parts of this driver are based on the following:
 * - Kvaser linux pciefd driver (version 5.25)
 * - PEAK linux canfd driver
 * - Altera Avalon EPCS flash controller driver
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/can/dev.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/iopoll.h>

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Kvaser AB <support@kvaser.com>");
MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");

#define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd"

#define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000)
#define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200))
#define KVASER_PCIEFD_MAX_ERR_REP 256
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 4
#define KVASER_PCIEFD_DMA_COUNT 2

#define KVASER_PCIEFD_DMA_SIZE (4 * 1024)
#define KVASER_PCIEFD_64BIT_DMA_BIT BIT(0)

#define KVASER_PCIEFD_VENDOR 0x1a07
#define KVASER_PCIEFD_4HS_ID 0x0d
#define KVASER_PCIEFD_2HS_ID 0x0e
#define KVASER_PCIEFD_HS_ID 0x0f
#define KVASER_PCIEFD_MINIPCIE_HS_ID 0x10
#define KVASER_PCIEFD_MINIPCIE_2HS_ID 0x11

/* PCIe IRQ registers */
#define KVASER_PCIEFD_IRQ_REG 0x40
#define KVASER_PCIEFD_IEN_REG 0x50
/* DMA map */
#define KVASER_PCIEFD_DMA_MAP_BASE 0x1000
/* Kvaser KCAN CAN controller registers */
#define KVASER_PCIEFD_KCAN0_BASE 0x10000
#define KVASER_PCIEFD_KCAN_BASE_OFFSET 0x1000
#define KVASER_PCIEFD_KCAN_FIFO_REG 0x100
#define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180
#define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0
#define KVASER_PCIEFD_KCAN_CMD_REG 0x400
#define KVASER_PCIEFD_KCAN_IEN_REG 0x408
#define KVASER_PCIEFD_KCAN_IRQ_REG 0x410
#define KVASER_PCIEFD_KCAN_TX_NPACKETS_REG 0x414
#define KVASER_PCIEFD_KCAN_STAT_REG 0x418
#define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
#define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
#define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
#define KVASER_PCIEFD_KCAN_PWM_REG 0x430
/* Loopback control register */
#define KVASER_PCIEFD_LOOP_REG 0x1f000
/* System identification and information registers */
#define KVASER_PCIEFD_SYSID_BASE 0x1f020
#define KVASER_PCIEFD_SYSID_VERSION_REG (KVASER_PCIEFD_SYSID_BASE + 0x8)
#define KVASER_PCIEFD_SYSID_CANFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0xc)
#define KVASER_PCIEFD_SYSID_BUSFREQ_REG (KVASER_PCIEFD_SYSID_BASE + 0x10)
#define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
/* Shared receive buffer registers */
#define KVASER_PCIEFD_SRB_BASE 0x1f200
#define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
#define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
#define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
#define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
#define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
/* EPCS flash controller registers */
#define KVASER_PCIEFD_SPI_BASE 0x1fc00
#define KVASER_PCIEFD_SPI_RX_REG KVASER_PCIEFD_SPI_BASE
#define KVASER_PCIEFD_SPI_TX_REG (KVASER_PCIEFD_SPI_BASE + 0x4)
#define KVASER_PCIEFD_SPI_STATUS_REG (KVASER_PCIEFD_SPI_BASE + 0x8)
#define KVASER_PCIEFD_SPI_CTRL_REG (KVASER_PCIEFD_SPI_BASE + 0xc)
#define KVASER_PCIEFD_SPI_SSEL_REG (KVASER_PCIEFD_SPI_BASE + 0x14)

#define KVASER_PCIEFD_IRQ_ALL_MSK 0x1f
#define KVASER_PCIEFD_IRQ_SRB BIT(4)

#define KVASER_PCIEFD_SYSID_NRCHAN_SHIFT 24
#define KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT 16
#define KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT 1

/* Reset DMA buffer 0, 1 and FIFO offset */
#define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4)
#define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5)
#define KVASER_PCIEFD_SRB_CMD_FOR BIT(0)

/* DMA packet done, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8)
#define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9)
/* DMA overflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10)
#define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11)
/* DMA underflow, buffer 0 and 1 */
#define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12)
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13)

/* DMA idle */
#define KVASER_PCIEFD_SRB_STAT_DI BIT(15)
/* DMA support */
#define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)

/* DMA Enable */
#define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)

/* EPCS flash controller definitions */
#define KVASER_PCIEFD_CFG_IMG_SZ (64 * 1024)
#define KVASER_PCIEFD_CFG_IMG_OFFSET (31 * 65536L)
#define KVASER_PCIEFD_CFG_MAX_PARAMS 256
#define KVASER_PCIEFD_CFG_MAGIC 0xcafef00d
#define KVASER_PCIEFD_CFG_PARAM_MAX_SZ 24
#define KVASER_PCIEFD_CFG_SYS_VER 1
#define KVASER_PCIEFD_CFG_PARAM_NR_CHAN 130
#define KVASER_PCIEFD_SPI_TMT BIT(5)
#define KVASER_PCIEFD_SPI_TRDY BIT(6)
#define KVASER_PCIEFD_SPI_RRDY BIT(7)
#define KVASER_PCIEFD_FLASH_ID_EPCS16 0x14
/* Commands for controlling the onboard flash */
#define KVASER_PCIEFD_FLASH_RES_CMD 0xab
#define KVASER_PCIEFD_FLASH_READ_CMD 0x3
#define KVASER_PCIEFD_FLASH_STATUS_CMD 0x5

/* Kvaser KCAN definitions */
#define KVASER_PCIEFD_KCAN_CTRL_EFLUSH (4 << 29)
#define KVASER_PCIEFD_KCAN_CTRL_EFRAME (5 << 29)

#define KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT 16
/* Request status packet */
#define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0)
/* Abort, flush and reset */
#define KVASER_PCIEFD_KCAN_CMD_AT BIT(1)

/* Tx FIFO unaligned read */
#define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0)
/* Tx FIFO unaligned end */
#define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1)
/* Bus parameter protection error */
#define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2)
/* FDF bit when controller is in classic mode */
#define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3)
/* Rx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5)
/* Abort done */
#define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13)
/* Tx buffer flush done */
#define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14)
/* Tx FIFO overflow */
#define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15)
/* Tx FIFO empty */
#define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16)
/* Transmitter unaligned */
#define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17)

#define KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT 16

#define KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT 24
/* Abort request */
#define KVASER_PCIEFD_KCAN_STAT_AR BIT(7)
/* Idle state. Controller in reset mode and no abort or flush pending */
#define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10)
/* Bus off */
#define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11)
/* Reset mode request */
#define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14)
/* Controller in reset mode */
#define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15)
/* Controller got one-shot capability */
#define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16)
/* Controller got CAN FD capability */
#define KVASER_PCIEFD_KCAN_STAT_FD BIT(19)
#define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK (KVASER_PCIEFD_KCAN_STAT_AR | \
	KVASER_PCIEFD_KCAN_STAT_BOFF | KVASER_PCIEFD_KCAN_STAT_RMR | \
	KVASER_PCIEFD_KCAN_STAT_IRM)

/* Reset mode */
#define KVASER_PCIEFD_KCAN_MODE_RM BIT(8)
/* Listen only mode */
#define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9)
/* Error packet enable */
#define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12)
/* CAN FD non-ISO */
#define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15)
/* Acknowledgment packet type */
#define KVASER_PCIEFD_KCAN_MODE_APT BIT(20)
/* Active error flag enable. Clear to force error passive */
#define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23)
/* Classic CAN mode */
#define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31)

#define KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT 13
#define KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT 17
#define KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT 26

#define KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT 16

/* Kvaser KCAN packet types */
#define KVASER_PCIEFD_PACK_TYPE_DATA 0
#define KVASER_PCIEFD_PACK_TYPE_ACK 1
#define KVASER_PCIEFD_PACK_TYPE_TXRQ 2
#define KVASER_PCIEFD_PACK_TYPE_ERROR 3
#define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 4
#define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 5
#define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 6
#define KVASER_PCIEFD_PACK_TYPE_STATUS 8
#define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 9

/* Kvaser KCAN packet common definitions */
#define KVASER_PCIEFD_PACKET_SEQ_MSK 0xff
#define KVASER_PCIEFD_PACKET_CHID_SHIFT 25
#define KVASER_PCIEFD_PACKET_TYPE_SHIFT 28

/* Kvaser KCAN TDATA and RDATA first word */
#define KVASER_PCIEFD_RPACKET_IDE BIT(30)
#define KVASER_PCIEFD_RPACKET_RTR BIT(29)
/* Kvaser KCAN TDATA and RDATA second word */
#define KVASER_PCIEFD_RPACKET_ESI BIT(13)
#define KVASER_PCIEFD_RPACKET_BRS BIT(14)
#define KVASER_PCIEFD_RPACKET_FDF BIT(15)
#define KVASER_PCIEFD_RPACKET_DLC_SHIFT 8
/* Kvaser KCAN TDATA second word */
#define KVASER_PCIEFD_TPACKET_SMS BIT(16)
#define KVASER_PCIEFD_TPACKET_AREQ BIT(31)

/* Kvaser KCAN APACKET */
#define KVASER_PCIEFD_APACKET_FLU BIT(8)
#define KVASER_PCIEFD_APACKET_CT BIT(9)
#define KVASER_PCIEFD_APACKET_ABL BIT(10)
#define KVASER_PCIEFD_APACKET_NACK BIT(11)

/* Kvaser KCAN SPACK first word */
#define KVASER_PCIEFD_SPACK_RXERR_SHIFT 8
#define KVASER_PCIEFD_SPACK_BOFF BIT(16)
#define KVASER_PCIEFD_SPACK_IDET BIT(20)
#define KVASER_PCIEFD_SPACK_IRM BIT(21)
#define KVASER_PCIEFD_SPACK_RMCD BIT(22)
/* Kvaser KCAN SPACK second word */
#define KVASER_PCIEFD_SPACK_AUTO BIT(21)
#define KVASER_PCIEFD_SPACK_EWLR BIT(23)
#define KVASER_PCIEFD_SPACK_EPLR BIT(24)

/* Kvaser KCAN_EPACK second word */
#define KVASER_PCIEFD_EPACK_DIR_TX BIT(0)

struct kvaser_pciefd;

struct kvaser_pciefd_can {
	struct can_priv can;
	struct kvaser_pciefd *kv_pcie;
	void __iomem *reg_base;
	struct can_berr_counter bec;
	u8 cmd_seq;
	int err_rep_cnt;
	int echo_idx;
	spinlock_t lock; /* Locks sensitive registers (e.g. MODE) */
	spinlock_t echo_lock; /* Locks the message echo buffer */
	struct timer_list bec_poll_timer;
	struct completion start_comp, flush_comp;
};

struct kvaser_pciefd {
	struct pci_dev *pci;
	void __iomem *reg_base;
	struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS];
	void *dma_data[KVASER_PCIEFD_DMA_COUNT];
	u8 nr_channels;
	u32 bus_freq;
	u32 freq;
	u32 freq_to_ticks_div;
};

struct kvaser_pciefd_rx_packet {
	u32 header[2];
	u64 timestamp;
};

struct kvaser_pciefd_tx_packet {
	u32 header[2];
	u8 data[64];
};

static const struct can_bittiming_const kvaser_pciefd_bittiming_const = {
	.name = KVASER_PCIEFD_DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 512,
	.tseg2_min = 1,
	.tseg2_max = 32,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 8192,
	.brp_inc = 1,
};

struct kvaser_pciefd_cfg_param {
	__le32 magic;
	__le32 nr;
	__le32 len;
	u8 data[KVASER_PCIEFD_CFG_PARAM_MAX_SZ];
};

struct kvaser_pciefd_cfg_img {
	__le32 version;
	__le32 magic;
	__le32 crc;
	struct kvaser_pciefd_cfg_param params[KVASER_PCIEFD_CFG_MAX_PARAMS];
};

static struct pci_device_id kvaser_pciefd_id_table[] = {
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_ID), },
	{ PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_ID), },
	{ 0,},
};
MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);

/* Onboard flash memory functions */
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
	u32 res;
	int ret;

	ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
				 res, res & msk, 0, 10);

	return ret;
}

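/* Run one flash command over the SPI core: clock out tx_len command bytes,
 * then clock out rx_len dummy bytes while collecting the response. The
 * SSEL/CTRL writes below are assumed to assert chip select and keep it
 * asserted for the whole transfer, following the Altera EPCS flash
 * controller driver this code is based on.
 */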
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
				 u32 tx_len, u8 *rx, u32 rx_len)
{
	int c;

	iowrite32(BIT(0), pcie->reg_base + KVASER_PCIEFD_SPI_SSEL_REG);
	iowrite32(BIT(10), pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);
	ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);

	c = tx_len;
	while (c--) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(*tx++, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	c = rx_len;
	while (c-- > 0) {
		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TRDY))
			return -EIO;

		iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_TX_REG);

		if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_RRDY))
			return -EIO;

		*rx++ = ioread32(pcie->reg_base + KVASER_PCIEFD_SPI_RX_REG);
	}

	if (kvaser_pciefd_spi_wait_loop(pcie, KVASER_PCIEFD_SPI_TMT))
		return -EIO;

	iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SPI_CTRL_REG);

	if (c != -1) {
		dev_err(&pcie->pci->dev, "Flash SPI transfer failed\n");
		return -EIO;
	}

	return 0;
}

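/* The configuration image lives in the last 64 KiB block of the EPCS16
 * flash (KVASER_PCIEFD_CFG_IMG_OFFSET). It starts with version, magic and
 * CRC words followed by the parameter table; the stored CRC covers the
 * parameter table only.
 */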
static int kvaser_pciefd_cfg_read_and_verify(struct kvaser_pciefd *pcie,
					     struct kvaser_pciefd_cfg_img *img)
{
	int offset = KVASER_PCIEFD_CFG_IMG_OFFSET;
	int res, crc;
	u8 *crc_buff;

	u8 cmd[] = {
		KVASER_PCIEFD_FLASH_READ_CMD,
		(u8)((offset >> 16) & 0xff),
		(u8)((offset >> 8) & 0xff),
		(u8)(offset & 0xff)
	};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), (u8 *)img,
				    KVASER_PCIEFD_CFG_IMG_SZ);
	if (res)
		return res;

	crc_buff = (u8 *)img->params;

	if (le32_to_cpu(img->version) != KVASER_PCIEFD_CFG_SYS_VER) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, version number is wrong\n");
		return -ENODEV;
	}

	if (le32_to_cpu(img->magic) != KVASER_PCIEFD_CFG_MAGIC) {
		dev_err(&pcie->pci->dev,
			"Config flash corrupted, magic number is wrong\n");
		return -ENODEV;
	}

	crc = ~crc32_be(0xffffffff, crc_buff, sizeof(img->params));
	if (le32_to_cpu(img->crc) != crc) {
		dev_err(&pcie->pci->dev,
			"Stored CRC does not match flash image contents\n");
		return -EIO;
	}

	return 0;
}

static void kvaser_pciefd_cfg_read_params(struct kvaser_pciefd *pcie,
					  struct kvaser_pciefd_cfg_img *img)
{
	struct kvaser_pciefd_cfg_param *param;

	param = &img->params[KVASER_PCIEFD_CFG_PARAM_NR_CHAN];
	memcpy(&pcie->nr_channels, param->data, le32_to_cpu(param->len));
}

static int kvaser_pciefd_read_cfg(struct kvaser_pciefd *pcie)
{
	int res;
	struct kvaser_pciefd_cfg_img *img;

	/* Read electronic signature */
	u8 cmd[] = {KVASER_PCIEFD_FLASH_RES_CMD, 0, 0, 0};

	res = kvaser_pciefd_spi_cmd(pcie, cmd, ARRAY_SIZE(cmd), cmd, 1);
	if (res)
		return -EIO;

	img = kmalloc(KVASER_PCIEFD_CFG_IMG_SZ, GFP_KERNEL);
	if (!img)
		return -ENOMEM;

	if (cmd[0] != KVASER_PCIEFD_FLASH_ID_EPCS16) {
		dev_err(&pcie->pci->dev,
			"Flash id is 0x%x instead of expected EPCS16 (0x%x)\n",
			cmd[0], KVASER_PCIEFD_FLASH_ID_EPCS16);

		res = -ENODEV;
		goto image_free;
	}

	cmd[0] = KVASER_PCIEFD_FLASH_STATUS_CMD;
	res = kvaser_pciefd_spi_cmd(pcie, cmd, 1, cmd, 1);
	if (res) {
		goto image_free;
	} else if (cmd[0] & 1) {
		res = -EIO;
		/* No write is ever done, the WIP should never be set */
		dev_err(&pcie->pci->dev, "Unexpected WIP bit set in flash\n");
		goto image_free;
	}

	res = kvaser_pciefd_cfg_read_and_verify(pcie, img);
	if (res) {
		res = -EIO;
		goto image_free;
	}

	kvaser_pciefd_cfg_read_params(pcie, img);

image_free:
	kfree(img);
	return res;
}

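/* KCAN commands carry an incrementing sequence number above
 * KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT; the hardware presumably echoes it back
 * so replies can be matched to the command that triggered them.
 */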
static void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can)
{
	u32 cmd;

	cmd = KVASER_PCIEFD_KCAN_CMD_SRQ;
	cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
	iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
}

static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) {
		mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
	u32 msk;

	msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF |
	      KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
	      KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
	      KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
	      KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;

	iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	return 0;
}

static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	if (can->can.ctrlmode & CAN_CTRLMODE_FD) {
		mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM;
		if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN;
		else
			mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	} else {
		mode |= KVASER_PCIEFD_KCAN_MODE_CCM;
		mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN;
	}

	if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		mode |= KVASER_PCIEFD_KCAN_MODE_LOM;

	mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
	mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
	/* Use ACK packet type */
	mode &= ~KVASER_PCIEFD_KCAN_MODE_APT;
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
{
	u32 status;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
	if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
		u32 cmd;

		/* If controller is already idle, run abort, flush and reset */
		cmd = KVASER_PCIEFD_KCAN_CMD_AT;
		cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
		iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
	} else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) {
		u32 mode;

		/* Put controller in reset mode */
		mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
		mode |= KVASER_PCIEFD_KCAN_MODE_RM;
		iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	}

	spin_unlock_irqrestore(&can->lock, irq);
}

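/* Bring the controller out of reset and onto the bus. The flush started
 * here (or by an earlier caller) is expected to complete flush_comp from
 * the interrupt path, and leaving reset mode is expected to complete
 * start_comp, before the respective timeouts below expire.
 */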
static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
{
	u32 mode;
	unsigned long irq;

	del_timer(&can->bec_poll_timer);

	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on flush\n");
		return -ETIMEDOUT;
	}

	spin_lock_irqsave(&can->lock, irq);
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
		  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	mode &= ~KVASER_PCIEFD_KCAN_MODE_RM;
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
	spin_unlock_irqrestore(&can->lock, irq);

	if (!wait_for_completion_timeout(&can->start_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during bus on reset\n");
		return -ETIMEDOUT;
	}
	/* Reset interrupt handling */
	iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
	iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);

	kvaser_pciefd_set_tx_irq(can);
	kvaser_pciefd_setup_controller(can);

	can->can.state = CAN_STATE_ERROR_ACTIVE;
	netif_wake_queue(can->can.dev);
	can->bec.txerr = 0;
	can->bec.rxerr = 0;
	can->err_rep_cnt = 0;

	return 0;
}

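/* KCAN PWM output helpers. The PWM register appears to hold the trigger
 * (duty) value in bits 7:0 and the counter top value in bits 23:16;
 * writing trigger == top gives a zero duty cycle, while
 * kvaser_pciefd_pwm_start() programs roughly 500 kHz at a 95% duty cycle.
 */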
static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can)
{
	u8 top;
	u32 pwm_ctrl;
	unsigned long irq;

	spin_lock_irqsave(&can->lock, irq);
	pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	top = (pwm_ctrl >> KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT) & 0xff;

	/* Set duty cycle to zero */
	pwm_ctrl |= top;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
{
	int top, trigger;
	u32 pwm_ctrl;
	unsigned long irq;

	kvaser_pciefd_pwm_stop(can);
	spin_lock_irqsave(&can->lock, irq);

	/* Set frequency to 500 KHz */
	top = can->kv_pcie->bus_freq / (2 * 500000) - 1;

	pwm_ctrl = top & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);

	/* Set duty cycle to 95 */
	trigger = (100 * top - 95 * (top + 1) + 50) / 100;
	pwm_ctrl = trigger & 0xff;
	pwm_ctrl |= (top & 0xff) << KVASER_PCIEFD_KCAN_PWM_TOP_SHIFT;
	iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG);
	spin_unlock_irqrestore(&can->lock, irq);
}

static int kvaser_pciefd_open(struct net_device *netdev)
{
	int err;
	struct kvaser_pciefd_can *can = netdev_priv(netdev);

	err = open_candev(netdev);
	if (err)
		return err;

	err = kvaser_pciefd_bus_on(can);
	if (err) {
		close_candev(netdev);
		return err;
	}

	return 0;
}

static int kvaser_pciefd_stop(struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	int ret = 0;

	/* Don't interrupt ongoing flush */
	if (!completion_done(&can->flush_comp))
		kvaser_pciefd_start_controller_flush(can);

	if (!wait_for_completion_timeout(&can->flush_comp,
					 KVASER_PCIEFD_WAIT_TIMEOUT)) {
		netdev_err(can->can.dev, "Timeout during stop\n");
		ret = -ETIMEDOUT;
	} else {
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
		del_timer(&can->bec_poll_timer);
	}
	close_candev(netdev);

	return ret;
}

static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p,
					   struct kvaser_pciefd_can *can,
					   struct sk_buff *skb)
{
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	int packet_size;
	int seq = can->echo_idx;

	memset(p, 0, sizeof(*p));

	if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		p->header[1] |= KVASER_PCIEFD_TPACKET_SMS;

	if (cf->can_id & CAN_RTR_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_RTR;

	if (cf->can_id & CAN_EFF_FLAG)
		p->header[0] |= KVASER_PCIEFD_RPACKET_IDE;

	p->header[0] |= cf->can_id & CAN_EFF_MASK;
	p->header[1] |= can_len2dlc(cf->len) << KVASER_PCIEFD_RPACKET_DLC_SHIFT;
	p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ;

	if (can_is_canfd_skb(skb)) {
		p->header[1] |= KVASER_PCIEFD_RPACKET_FDF;
		if (cf->flags & CANFD_BRS)
			p->header[1] |= KVASER_PCIEFD_RPACKET_BRS;
		if (cf->flags & CANFD_ESI)
			p->header[1] |= KVASER_PCIEFD_RPACKET_ESI;
	}

	p->header[1] |= seq & KVASER_PCIEFD_PACKET_SEQ_MSK;

	packet_size = cf->len;
	memcpy(p->data, cf->data, packet_size);

	return DIV_ROUND_UP(packet_size, 4);
}

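/* The echo index doubles as the transmit sequence number (see
 * kvaser_pciefd_prepare_tx_packet()), so the hardware acknowledge can
 * presumably be matched back to the right echo skb. Writing the final word
 * of a packet to KVASER_PCIEFD_KCAN_FIFO_LAST_REG commits the packet to
 * the Tx FIFO.
 */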
static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb,
					    struct net_device *netdev)
{
	struct kvaser_pciefd_can *can = netdev_priv(netdev);
	unsigned long irq_flags;
	struct kvaser_pciefd_tx_packet packet;
	int nwords;
	u8 count;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb);

	spin_lock_irqsave(&can->echo_lock, irq_flags);

	/* Prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, netdev, can->echo_idx);

	/* Move echo index to the next slot */
	can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max;

	/* Write header to fifo */
	iowrite32(packet.header[0],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);
	iowrite32(packet.header[1],
		  can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG);

	if (nwords) {
		u32 data_last = ((u32 *)packet.data)[nwords - 1];

		/* Write data to fifo, except last word */
		iowrite32_rep(can->reg_base +
			      KVASER_PCIEFD_KCAN_FIFO_REG, packet.data,
			      nwords - 1);
		/* Write last word to end of fifo */
		__raw_writel(data_last, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	} else {
		/* Complete write to fifo */
		__raw_writel(0, can->reg_base +
			     KVASER_PCIEFD_KCAN_FIFO_LAST_REG);
	}

	count = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
	/* No room for a new message, stop the queue until at least one
	 * successful transmit
	 */
	if (count >= KVASER_PCIEFD_CAN_TX_MAX_COUNT ||
	    can->can.echo_skb[can->echo_idx])
		netif_stop_queue(netdev);

	spin_unlock_irqrestore(&can->echo_lock, irq_flags);

	return NETDEV_TX_OK;
}

static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data)
{
	u32 mode, test, btrn;
	unsigned long irq_flags;
	int ret;
	struct can_bittiming *bt;

	if (data)
		bt = &can->can.data_bittiming;
	else
		bt = &can->can.bittiming;

	btrn = ((bt->phase_seg2 - 1) & 0x1f) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG2_SHIFT |
	       (((bt->prop_seg + bt->phase_seg1) - 1) & 0x1ff) <<
	       KVASER_PCIEFD_KCAN_BTRN_TSEG1_SHIFT |
	       ((bt->sjw - 1) & 0xf) << KVASER_PCIEFD_KCAN_BTRN_SJW_SHIFT |
	       ((bt->brp - 1) & 0x1fff);

	spin_lock_irqsave(&can->lock, irq_flags);
	mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Put the circuit in reset mode */
	iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM,
		  can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	/* Can only set bittiming if in reset mode */
	ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG,
				 test, test & KVASER_PCIEFD_KCAN_MODE_RM,
				 0, 10);

	if (ret) {
		spin_unlock_irqrestore(&can->lock, irq_flags);
		return -EBUSY;
	}

	if (data)
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG);
	else
		iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG);

	/* Restore previous reset mode status */
	iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);

	spin_unlock_irqrestore(&can->lock, irq_flags);
	return 0;
}

static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false);
}

static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev)
{
	return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true);
}

static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);
	int ret = 0;

	switch (mode) {
	case CAN_MODE_START:
		if (!can->can.restart_ms)
			ret = kvaser_pciefd_bus_on(can);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return ret;
}

static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev,
					  struct can_berr_counter *bec)
{
	struct kvaser_pciefd_can *can = netdev_priv(ndev);

	bec->rxerr = can->bec.rxerr;
	bec->txerr = can->bec.txerr;
	return 0;
}

static void kvaser_pciefd_bec_poll_timer(struct timer_list *data)
{
	struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer);

	kvaser_pciefd_enable_err_gen(can);
	kvaser_pciefd_request_status(can);
	can->err_rep_cnt = 0;
}

static const struct net_device_ops kvaser_pciefd_netdev_ops = {
	.ndo_open = kvaser_pciefd_open,
	.ndo_stop = kvaser_pciefd_stop,
	.ndo_start_xmit = kvaser_pciefd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		struct net_device *netdev;
		struct kvaser_pciefd_can *can;
		u32 status, tx_npackets;

		netdev = alloc_candev(sizeof(struct kvaser_pciefd_can),
				      KVASER_PCIEFD_CAN_TX_MAX_COUNT);
		if (!netdev)
			return -ENOMEM;

		can = netdev_priv(netdev);
		netdev->netdev_ops = &kvaser_pciefd_netdev_ops;
		can->reg_base = pcie->reg_base + KVASER_PCIEFD_KCAN0_BASE +
				i * KVASER_PCIEFD_KCAN_BASE_OFFSET;

		can->kv_pcie = pcie;
		can->cmd_seq = 0;
		can->err_rep_cnt = 0;
		can->bec.txerr = 0;
		can->bec.rxerr = 0;

		init_completion(&can->start_comp);
		init_completion(&can->flush_comp);
		timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
			    0);

		/* Disable Bus load reporting */
		iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);

		tx_npackets = ioread32(can->reg_base +
				       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
		if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
		     0xff) < KVASER_PCIEFD_CAN_TX_MAX_COUNT) {
			dev_err(&pcie->pci->dev,
				"Max Tx count is smaller than expected\n");

			free_candev(netdev);
			return -ENODEV;
		}

		can->can.clock.freq = pcie->freq;
		can->can.echo_skb_max = KVASER_PCIEFD_CAN_TX_MAX_COUNT;
		can->echo_idx = 0;
		spin_lock_init(&can->echo_lock);
		spin_lock_init(&can->lock);
		can->can.bittiming_const = &kvaser_pciefd_bittiming_const;
		can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const;

		can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming;
		can->can.do_set_data_bittiming =
			kvaser_pciefd_set_data_bittiming;

		can->can.do_set_mode = kvaser_pciefd_set_mode;
		can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter;

		can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
					      CAN_CTRLMODE_FD |
					      CAN_CTRLMODE_FD_NON_ISO;

		status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
		if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) {
			dev_err(&pcie->pci->dev,
				"CAN FD not supported as expected %d\n", i);

			free_candev(netdev);
			return -ENODEV;
		}

		if (status & KVASER_PCIEFD_KCAN_STAT_CAP)
			can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

		netdev->flags |= IFF_ECHO;

		SET_NETDEV_DEV(netdev, &pcie->pci->dev);

		iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
		iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
			  KVASER_PCIEFD_KCAN_IRQ_TFD,
			  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);

		pcie->can[i] = can;
		kvaser_pciefd_pwm_start(can);
	}

	return 0;
}

static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
{
	int i;

	for (i = 0; i < pcie->nr_channels; i++) {
		int err = register_candev(pcie->can[i]->can.dev);

		if (err) {
			int j;

			/* Unregister all successfully registered devices. */
			for (j = 0; j < i; j++)
				unregister_candev(pcie->can[j]->can.dev);
			return err;
		}
	}

	return 0;
}

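/* Each receive DMA buffer has an 8-byte map entry starting at
 * KVASER_PCIEFD_DMA_MAP_BASE: the low word holds the bus address, with
 * KVASER_PCIEFD_64BIT_DMA_BIT flagging a 64-bit address, and the high word
 * holds the upper 32 address bits.
 */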
kvaser_pciefd_write_dma_map(struct kvaser_pciefd * pcie,dma_addr_t addr,int offset)1037*4882a593Smuzhiyun static void kvaser_pciefd_write_dma_map(struct kvaser_pciefd *pcie,
1038*4882a593Smuzhiyun dma_addr_t addr, int offset)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun u32 word1, word2;
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
1043*4882a593Smuzhiyun word1 = addr | KVASER_PCIEFD_64BIT_DMA_BIT;
1044*4882a593Smuzhiyun word2 = addr >> 32;
1045*4882a593Smuzhiyun #else
1046*4882a593Smuzhiyun word1 = addr;
1047*4882a593Smuzhiyun word2 = 0;
1048*4882a593Smuzhiyun #endif
1049*4882a593Smuzhiyun iowrite32(word1, pcie->reg_base + offset);
1050*4882a593Smuzhiyun iowrite32(word2, pcie->reg_base + offset + 4);
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun
kvaser_pciefd_setup_dma(struct kvaser_pciefd * pcie)1053*4882a593Smuzhiyun static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun int i;
1056*4882a593Smuzhiyun u32 srb_status;
1057*4882a593Smuzhiyun dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun /* Disable the DMA */
1060*4882a593Smuzhiyun iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1061*4882a593Smuzhiyun for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) {
1062*4882a593Smuzhiyun unsigned int offset = KVASER_PCIEFD_DMA_MAP_BASE + 8 * i;
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun pcie->dma_data[i] =
1065*4882a593Smuzhiyun dmam_alloc_coherent(&pcie->pci->dev,
1066*4882a593Smuzhiyun KVASER_PCIEFD_DMA_SIZE,
1067*4882a593Smuzhiyun &dma_addr[i],
1068*4882a593Smuzhiyun GFP_KERNEL);
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun if (!pcie->dma_data[i] || !dma_addr[i]) {
1071*4882a593Smuzhiyun dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n",
1072*4882a593Smuzhiyun KVASER_PCIEFD_DMA_SIZE);
1073*4882a593Smuzhiyun return -ENOMEM;
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun kvaser_pciefd_write_dma_map(pcie, dma_addr[i], offset);
1077*4882a593Smuzhiyun }
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun /* Reset Rx FIFO, and both DMA buffers */
1080*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 |
1081*4882a593Smuzhiyun KVASER_PCIEFD_SRB_CMD_RDB1,
1082*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
1085*4882a593Smuzhiyun if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
1086*4882a593Smuzhiyun dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
1087*4882a593Smuzhiyun return -EIO;
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun /* Enable the DMA */
1091*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE,
1092*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun return 0;
1095*4882a593Smuzhiyun }
1096*4882a593Smuzhiyun
kvaser_pciefd_setup_board(struct kvaser_pciefd * pcie)1097*4882a593Smuzhiyun static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun u32 sysid, srb_status, build;
1100*4882a593Smuzhiyun u8 sysid_nr_chan;
1101*4882a593Smuzhiyun int ret;
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun ret = kvaser_pciefd_read_cfg(pcie);
1104*4882a593Smuzhiyun if (ret)
1105*4882a593Smuzhiyun return ret;
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun sysid = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_VERSION_REG);
1108*4882a593Smuzhiyun sysid_nr_chan = (sysid >> KVASER_PCIEFD_SYSID_NRCHAN_SHIFT) & 0xff;
1109*4882a593Smuzhiyun if (pcie->nr_channels != sysid_nr_chan) {
1110*4882a593Smuzhiyun dev_err(&pcie->pci->dev,
1111*4882a593Smuzhiyun "Number of channels does not match: %u vs %u\n",
1112*4882a593Smuzhiyun pcie->nr_channels,
1113*4882a593Smuzhiyun sysid_nr_chan);
1114*4882a593Smuzhiyun return -ENODEV;
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun if (pcie->nr_channels > KVASER_PCIEFD_MAX_CAN_CHANNELS)
1118*4882a593Smuzhiyun pcie->nr_channels = KVASER_PCIEFD_MAX_CAN_CHANNELS;
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun build = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_BUILD_REG);
1121*4882a593Smuzhiyun dev_dbg(&pcie->pci->dev, "Version %u.%u.%u\n",
1122*4882a593Smuzhiyun (sysid >> KVASER_PCIEFD_SYSID_MAJOR_VER_SHIFT) & 0xff,
1123*4882a593Smuzhiyun sysid & 0xff,
1124*4882a593Smuzhiyun (build >> KVASER_PCIEFD_SYSID_BUILD_VER_SHIFT) & 0x7fff);
1125*4882a593Smuzhiyun
1126*4882a593Smuzhiyun srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
1127*4882a593Smuzhiyun if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) {
1128*4882a593Smuzhiyun dev_err(&pcie->pci->dev,
1129*4882a593Smuzhiyun "Hardware without DMA is not supported\n");
1130*4882a593Smuzhiyun return -ENODEV;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun pcie->bus_freq = ioread32(pcie->reg_base +
1134*4882a593Smuzhiyun KVASER_PCIEFD_SYSID_BUSFREQ_REG);
1135*4882a593Smuzhiyun pcie->freq = ioread32(pcie->reg_base + KVASER_PCIEFD_SYSID_CANFREQ_REG);
1136*4882a593Smuzhiyun pcie->freq_to_ticks_div = pcie->freq / 1000000;
1137*4882a593Smuzhiyun if (pcie->freq_to_ticks_div == 0)
1138*4882a593Smuzhiyun pcie->freq_to_ticks_div = 1;
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun /* Turn off all loopback functionality */
1141*4882a593Smuzhiyun iowrite32(0, pcie->reg_base + KVASER_PCIEFD_LOOP_REG);
1142*4882a593Smuzhiyun return ret;
1143*4882a593Smuzhiyun }
1144*4882a593Smuzhiyun
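/* Rx path for received CAN and CAN FD frames. The hardware timestamp is
 * converted to nanoseconds with:
 *
 *   hwtstamp = ticks * 1000 / freq_to_ticks_div
 *
 * For illustration only: assuming an 80 MHz CAN core clock
 * (freq_to_ticks_div = 80), one tick corresponds to 12.5 ns. The real value
 * is read from KVASER_PCIEFD_SYSID_CANFREQ_REG in kvaser_pciefd_setup_board().
 */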
1145*4882a593Smuzhiyun static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie,
1146*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p,
1147*4882a593Smuzhiyun __le32 *data)
1148*4882a593Smuzhiyun {
1149*4882a593Smuzhiyun struct sk_buff *skb;
1150*4882a593Smuzhiyun struct canfd_frame *cf;
1151*4882a593Smuzhiyun struct can_priv *priv;
1152*4882a593Smuzhiyun struct net_device_stats *stats;
1153*4882a593Smuzhiyun struct skb_shared_hwtstamps *shhwtstamps;
1154*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1157*4882a593Smuzhiyun return -EIO;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun priv = &pcie->can[ch_id]->can;
1160*4882a593Smuzhiyun stats = &priv->dev->stats;
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) {
1163*4882a593Smuzhiyun skb = alloc_canfd_skb(priv->dev, &cf);
1164*4882a593Smuzhiyun if (!skb) {
1165*4882a593Smuzhiyun stats->rx_dropped++;
1166*4882a593Smuzhiyun return -ENOMEM;
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS)
1170*4882a593Smuzhiyun cf->flags |= CANFD_BRS;
1171*4882a593Smuzhiyun
1172*4882a593Smuzhiyun if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI)
1173*4882a593Smuzhiyun cf->flags |= CANFD_ESI;
1174*4882a593Smuzhiyun } else {
1175*4882a593Smuzhiyun skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf);
1176*4882a593Smuzhiyun if (!skb) {
1177*4882a593Smuzhiyun stats->rx_dropped++;
1178*4882a593Smuzhiyun return -ENOMEM;
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun cf->can_id = p->header[0] & CAN_EFF_MASK;
1183*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE)
1184*4882a593Smuzhiyun cf->can_id |= CAN_EFF_FLAG;
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun cf->len = can_dlc2len(p->header[1] >> KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1187*4882a593Smuzhiyun
1188*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR)
1189*4882a593Smuzhiyun cf->can_id |= CAN_RTR_FLAG;
1190*4882a593Smuzhiyun else
1191*4882a593Smuzhiyun memcpy(cf->data, data, cf->len);
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun shhwtstamps = skb_hwtstamps(skb);
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun shhwtstamps->hwtstamp =
1196*4882a593Smuzhiyun ns_to_ktime(div_u64(p->timestamp * 1000,
1197*4882a593Smuzhiyun pcie->freq_to_ticks_div));
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun stats->rx_bytes += cf->len;
1200*4882a593Smuzhiyun stats->rx_packets++;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun return netif_rx(skb);
1203*4882a593Smuzhiyun }
1204*4882a593Smuzhiyun
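/* Apply a CAN state change. On bus off the Tx queue is stopped; if automatic
 * restart is disabled (restart_ms == 0), a controller flush is started and
 * the stack is notified via can_bus_off().
 */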
1205*4882a593Smuzhiyun static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can,
1206*4882a593Smuzhiyun struct can_frame *cf,
1207*4882a593Smuzhiyun enum can_state new_state,
1208*4882a593Smuzhiyun enum can_state tx_state,
1209*4882a593Smuzhiyun enum can_state rx_state)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun can_change_state(can->can.dev, cf, tx_state, rx_state);
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun if (new_state == CAN_STATE_BUS_OFF) {
1214*4882a593Smuzhiyun struct net_device *ndev = can->can.dev;
1215*4882a593Smuzhiyun unsigned long irq_flags;
1216*4882a593Smuzhiyun
1217*4882a593Smuzhiyun spin_lock_irqsave(&can->lock, irq_flags);
1218*4882a593Smuzhiyun netif_stop_queue(can->can.dev);
1219*4882a593Smuzhiyun spin_unlock_irqrestore(&can->lock, irq_flags);
1220*4882a593Smuzhiyun
1221*4882a593Smuzhiyun /* Prevent the CAN controller from automatically recovering from bus off */
1222*4882a593Smuzhiyun if (!can->can.restart_ms) {
1223*4882a593Smuzhiyun kvaser_pciefd_start_controller_flush(can);
1224*4882a593Smuzhiyun can_bus_off(ndev);
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun
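/* Map a status packet plus error counters onto a CAN state. The thresholds
 * used below follow the usual CAN fault confinement rules:
 *
 *   txerr/rxerr >= 255 (or BOFF/IRM set)  ->  CAN_STATE_BUS_OFF
 *   txerr/rxerr >= 128 (or EPLR set)      ->  CAN_STATE_ERROR_PASSIVE
 *   txerr/rxerr >=  96 (or EWLR set)      ->  CAN_STATE_ERROR_WARNING
 *   otherwise                             ->  CAN_STATE_ERROR_ACTIVE
 *
 * The new state is attributed to whichever side has the higher counter
 * (both sides when the counters are equal).
 */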
1229*4882a593Smuzhiyun static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p,
1230*4882a593Smuzhiyun struct can_berr_counter *bec,
1231*4882a593Smuzhiyun enum can_state *new_state,
1232*4882a593Smuzhiyun enum can_state *tx_state,
1233*4882a593Smuzhiyun enum can_state *rx_state)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF ||
1236*4882a593Smuzhiyun p->header[0] & KVASER_PCIEFD_SPACK_IRM)
1237*4882a593Smuzhiyun *new_state = CAN_STATE_BUS_OFF;
1238*4882a593Smuzhiyun else if (bec->txerr >= 255 || bec->rxerr >= 255)
1239*4882a593Smuzhiyun *new_state = CAN_STATE_BUS_OFF;
1240*4882a593Smuzhiyun else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR)
1241*4882a593Smuzhiyun *new_state = CAN_STATE_ERROR_PASSIVE;
1242*4882a593Smuzhiyun else if (bec->txerr >= 128 || bec->rxerr >= 128)
1243*4882a593Smuzhiyun *new_state = CAN_STATE_ERROR_PASSIVE;
1244*4882a593Smuzhiyun else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR)
1245*4882a593Smuzhiyun *new_state = CAN_STATE_ERROR_WARNING;
1246*4882a593Smuzhiyun else if (bec->txerr >= 96 || bec->rxerr >= 96)
1247*4882a593Smuzhiyun *new_state = CAN_STATE_ERROR_WARNING;
1248*4882a593Smuzhiyun else
1249*4882a593Smuzhiyun *new_state = CAN_STATE_ERROR_ACTIVE;
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0;
1252*4882a593Smuzhiyun *rx_state = bec->txerr <= bec->rxerr ? *new_state : 0;
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun
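/* Handle an error packet: update the state machine, account the bus error in
 * the tx or rx direction, cache the error counters and deliver a
 * CAN_ERR_BUSERROR frame (counters in data[6]/data[7]) to user space.
 */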
1255*4882a593Smuzhiyun static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can,
1256*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun struct can_berr_counter bec;
1259*4882a593Smuzhiyun enum can_state old_state, new_state, tx_state, rx_state;
1260*4882a593Smuzhiyun struct net_device *ndev = can->can.dev;
1261*4882a593Smuzhiyun struct sk_buff *skb;
1262*4882a593Smuzhiyun struct can_frame *cf = NULL;
1263*4882a593Smuzhiyun struct skb_shared_hwtstamps *shhwtstamps;
1264*4882a593Smuzhiyun struct net_device_stats *stats = &ndev->stats;
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun old_state = can->can.state;
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun bec.txerr = p->header[0] & 0xff;
1269*4882a593Smuzhiyun bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
1272*4882a593Smuzhiyun &rx_state);
1273*4882a593Smuzhiyun
1274*4882a593Smuzhiyun skb = alloc_can_err_skb(ndev, &cf);
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun if (new_state != old_state) {
1277*4882a593Smuzhiyun kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1278*4882a593Smuzhiyun rx_state);
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun if (old_state == CAN_STATE_BUS_OFF &&
1281*4882a593Smuzhiyun new_state == CAN_STATE_ERROR_ACTIVE &&
1282*4882a593Smuzhiyun can->can.restart_ms) {
1283*4882a593Smuzhiyun can->can.can_stats.restarts++;
1284*4882a593Smuzhiyun if (skb)
1285*4882a593Smuzhiyun cf->can_id |= CAN_ERR_RESTARTED;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun can->err_rep_cnt++;
1290*4882a593Smuzhiyun can->can.can_stats.bus_error++;
1291*4882a593Smuzhiyun if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX)
1292*4882a593Smuzhiyun stats->tx_errors++;
1293*4882a593Smuzhiyun else
1294*4882a593Smuzhiyun stats->rx_errors++;
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun can->bec.txerr = bec.txerr;
1297*4882a593Smuzhiyun can->bec.rxerr = bec.rxerr;
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun if (!skb) {
1300*4882a593Smuzhiyun stats->rx_dropped++;
1301*4882a593Smuzhiyun return -ENOMEM;
1302*4882a593Smuzhiyun }
1303*4882a593Smuzhiyun
1304*4882a593Smuzhiyun shhwtstamps = skb_hwtstamps(skb);
1305*4882a593Smuzhiyun shhwtstamps->hwtstamp =
1306*4882a593Smuzhiyun ns_to_ktime(div_u64(p->timestamp * 1000,
1307*4882a593Smuzhiyun can->kv_pcie->freq_to_ticks_div));
1308*4882a593Smuzhiyun cf->can_id |= CAN_ERR_BUSERROR;
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun cf->data[6] = bec.txerr;
1311*4882a593Smuzhiyun cf->data[7] = bec.rxerr;
1312*4882a593Smuzhiyun
1313*4882a593Smuzhiyun stats->rx_packets++;
1314*4882a593Smuzhiyun stats->rx_bytes += cf->can_dlc;
1315*4882a593Smuzhiyun
1316*4882a593Smuzhiyun netif_rx(skb);
1317*4882a593Smuzhiyun return 0;
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun
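/* Error packets are rate limited: after KVASER_PCIEFD_MAX_ERR_REP reports,
 * further error generation is disabled and the bec_poll_timer (armed below)
 * takes over polling of the error counters until it expires.
 */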
1320*4882a593Smuzhiyun static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie,
1321*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1324*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1325*4882a593Smuzhiyun
1326*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1327*4882a593Smuzhiyun return -EIO;
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun can = pcie->can[ch_id];
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun kvaser_pciefd_rx_error_frame(can, p);
1332*4882a593Smuzhiyun if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP)
1333*4882a593Smuzhiyun /* Do not report more errors until the bec_poll_timer expires */
1334*4882a593Smuzhiyun kvaser_pciefd_disable_err_gen(can);
1335*4882a593Smuzhiyun /* Start polling the error counters */
1336*4882a593Smuzhiyun mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1337*4882a593Smuzhiyun return 0;
1338*4882a593Smuzhiyun }
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can,
1341*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun struct can_berr_counter bec;
1344*4882a593Smuzhiyun enum can_state old_state, new_state, tx_state, rx_state;
1345*4882a593Smuzhiyun
1346*4882a593Smuzhiyun old_state = can->can.state;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun bec.txerr = p->header[0] & 0xff;
1349*4882a593Smuzhiyun bec.rxerr = (p->header[0] >> KVASER_PCIEFD_SPACK_RXERR_SHIFT) & 0xff;
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state,
1352*4882a593Smuzhiyun &rx_state);
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun if (new_state != old_state) {
1355*4882a593Smuzhiyun struct net_device *ndev = can->can.dev;
1356*4882a593Smuzhiyun struct sk_buff *skb;
1357*4882a593Smuzhiyun struct can_frame *cf;
1358*4882a593Smuzhiyun struct skb_shared_hwtstamps *shhwtstamps;
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun skb = alloc_can_err_skb(ndev, &cf);
1361*4882a593Smuzhiyun if (!skb) {
1362*4882a593Smuzhiyun struct net_device_stats *stats = &ndev->stats;
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun stats->rx_dropped++;
1365*4882a593Smuzhiyun return -ENOMEM;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun
1368*4882a593Smuzhiyun kvaser_pciefd_change_state(can, cf, new_state, tx_state,
1369*4882a593Smuzhiyun rx_state);
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun if (old_state == CAN_STATE_BUS_OFF &&
1372*4882a593Smuzhiyun new_state == CAN_STATE_ERROR_ACTIVE &&
1373*4882a593Smuzhiyun can->can.restart_ms) {
1374*4882a593Smuzhiyun can->can.can_stats.restarts++;
1375*4882a593Smuzhiyun cf->can_id |= CAN_ERR_RESTARTED;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun shhwtstamps = skb_hwtstamps(skb);
1379*4882a593Smuzhiyun shhwtstamps->hwtstamp =
1380*4882a593Smuzhiyun ns_to_ktime(div_u64(p->timestamp * 1000,
1381*4882a593Smuzhiyun can->kv_pcie->freq_to_ticks_div));
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun cf->data[6] = bec.txerr;
1384*4882a593Smuzhiyun cf->data[7] = bec.rxerr;
1385*4882a593Smuzhiyun
1386*4882a593Smuzhiyun netif_rx(skb);
1387*4882a593Smuzhiyun }
1388*4882a593Smuzhiyun can->bec.txerr = bec.txerr;
1389*4882a593Smuzhiyun can->bec.rxerr = bec.rxerr;
1390*4882a593Smuzhiyun /* Check if we need to poll the error counters */
1391*4882a593Smuzhiyun if (bec.txerr || bec.rxerr)
1392*4882a593Smuzhiyun mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ);
1393*4882a593Smuzhiyun
1394*4882a593Smuzhiyun return 0;
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun
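/* Status packets serve several purposes, distinguished by the header flags
 * and by whether the sequence number matches the last command issued:
 *  - reset complete (IRM + RMCD + AUTO): ack the ABD interrupt, issue an
 *    abort/flush command and enable the TFD interrupt
 *  - reset detected while idle (IDET + IRM): send end of flush once the Tx
 *    FIFO is empty
 *  - reply to an explicit status request (!AUTO): update state and counters
 *  - bus on after reset (RMCD, not bus off): complete start_comp
 */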
1397*4882a593Smuzhiyun static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
1398*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1401*4882a593Smuzhiyun u8 cmdseq;
1402*4882a593Smuzhiyun u32 status;
1403*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1404*4882a593Smuzhiyun
1405*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1406*4882a593Smuzhiyun return -EIO;
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun can = pcie->can[ch_id];
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
1411*4882a593Smuzhiyun cmdseq = (status >> KVASER_PCIEFD_KCAN_STAT_SEQNO_SHIFT) & 0xff;
1412*4882a593Smuzhiyun
1413*4882a593Smuzhiyun /* Reset done, start abort and flush */
1414*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1415*4882a593Smuzhiyun p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1416*4882a593Smuzhiyun p->header[1] & KVASER_PCIEFD_SPACK_AUTO &&
1417*4882a593Smuzhiyun cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
1418*4882a593Smuzhiyun status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1419*4882a593Smuzhiyun u32 cmd;
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
1422*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1423*4882a593Smuzhiyun cmd = KVASER_PCIEFD_KCAN_CMD_AT;
1424*4882a593Smuzhiyun cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
1425*4882a593Smuzhiyun iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
1426*4882a593Smuzhiyun
1427*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
1428*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1429*4882a593Smuzhiyun } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
1430*4882a593Smuzhiyun p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
1431*4882a593Smuzhiyun cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
1432*4882a593Smuzhiyun status & KVASER_PCIEFD_KCAN_STAT_IDLE) {
1433*4882a593Smuzhiyun /* Reset detected, send end of flush if no packets are in FIFO */
1434*4882a593Smuzhiyun u8 count = ioread32(can->reg_base +
1435*4882a593Smuzhiyun KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun if (!count)
1438*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1439*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1440*4882a593Smuzhiyun } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) &&
1441*4882a593Smuzhiyun cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK)) {
1442*4882a593Smuzhiyun /* Response to status request received */
1443*4882a593Smuzhiyun kvaser_pciefd_handle_status_resp(can, p);
1444*4882a593Smuzhiyun if (can->can.state != CAN_STATE_BUS_OFF &&
1445*4882a593Smuzhiyun can->can.state != CAN_STATE_ERROR_ACTIVE) {
1446*4882a593Smuzhiyun mod_timer(&can->bec_poll_timer,
1447*4882a593Smuzhiyun KVASER_PCIEFD_BEC_POLL_FREQ);
1448*4882a593Smuzhiyun }
1449*4882a593Smuzhiyun } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD &&
1450*4882a593Smuzhiyun !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MSK)) {
1451*4882a593Smuzhiyun /* Transition from reset to bus on detected */
1452*4882a593Smuzhiyun if (!completion_done(&can->start_comp))
1453*4882a593Smuzhiyun complete(&can->start_comp);
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun return 0;
1457*4882a593Smuzhiyun }
1458*4882a593Smuzhiyun
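/* Extended ACK: either the last packet of a flush (FLU set), in which case
 * the end-of-flush command is sent once the Tx FIFO is empty, or a normal Tx
 * completion, in which case the echo skb is released, statistics are updated
 * and the queue is woken.
 */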
1459*4882a593Smuzhiyun static int kvaser_pciefd_handle_eack_packet(struct kvaser_pciefd *pcie,
1460*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1461*4882a593Smuzhiyun {
1462*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1463*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1464*4882a593Smuzhiyun
1465*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1466*4882a593Smuzhiyun return -EIO;
1467*4882a593Smuzhiyun
1468*4882a593Smuzhiyun can = pcie->can[ch_id];
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /* If this is the last flushed packet, send end of flush */
1471*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1472*4882a593Smuzhiyun u8 count = ioread32(can->reg_base +
1473*4882a593Smuzhiyun KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun if (count == 0)
1476*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1477*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1478*4882a593Smuzhiyun } else {
1479*4882a593Smuzhiyun int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
1480*4882a593Smuzhiyun int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1481*4882a593Smuzhiyun struct net_device_stats *stats = &can->can.dev->stats;
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun stats->tx_bytes += dlc;
1484*4882a593Smuzhiyun stats->tx_packets++;
1485*4882a593Smuzhiyun
1486*4882a593Smuzhiyun if (netif_queue_stopped(can->can.dev))
1487*4882a593Smuzhiyun netif_wake_queue(can->can.dev);
1488*4882a593Smuzhiyun }
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun return 0;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
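/* A NACKed transmission is reported to user space as an error frame:
 * arbitration loss (ABL) or missing ACK, always combined with
 * CAN_ERR_BUSERROR.
 */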
1493*4882a593Smuzhiyun static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can,
1494*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun struct sk_buff *skb;
1497*4882a593Smuzhiyun struct net_device_stats *stats = &can->can.dev->stats;
1498*4882a593Smuzhiyun struct can_frame *cf;
1499*4882a593Smuzhiyun
1500*4882a593Smuzhiyun skb = alloc_can_err_skb(can->can.dev, &cf);
1501*4882a593Smuzhiyun
1502*4882a593Smuzhiyun stats->tx_errors++;
1503*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) {
1504*4882a593Smuzhiyun if (skb)
1505*4882a593Smuzhiyun cf->can_id |= CAN_ERR_LOSTARB;
1506*4882a593Smuzhiyun can->can.can_stats.arbitration_lost++;
1507*4882a593Smuzhiyun } else if (skb) {
1508*4882a593Smuzhiyun cf->can_id |= CAN_ERR_ACK;
1509*4882a593Smuzhiyun }
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun if (skb) {
1512*4882a593Smuzhiyun cf->can_id |= CAN_ERR_BUSERROR;
1513*4882a593Smuzhiyun stats->rx_bytes += cf->can_dlc;
1514*4882a593Smuzhiyun stats->rx_packets++;
1515*4882a593Smuzhiyun netif_rx(skb);
1516*4882a593Smuzhiyun } else {
1517*4882a593Smuzhiyun stats->rx_dropped++;
1518*4882a593Smuzhiyun netdev_warn(can->can.dev, "No memory left for err_skb\n");
1519*4882a593Smuzhiyun }
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun
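/* Tx ACK handling: control-packet ACKs are ignored, NACKs become error frames
 * (one-shot failure), flushed packets are only logged, and successful
 * transmissions release the echo skb, update the Tx statistics and wake the
 * queue when there is room in the Tx FIFO again.
 */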
1522*4882a593Smuzhiyun static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie,
1523*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1524*4882a593Smuzhiyun {
1525*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1526*4882a593Smuzhiyun bool one_shot_fail = false;
1527*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1528*4882a593Smuzhiyun
1529*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1530*4882a593Smuzhiyun return -EIO;
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun can = pcie->can[ch_id];
1533*4882a593Smuzhiyun /* Ignore control packet ACK */
1534*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_APACKET_CT)
1535*4882a593Smuzhiyun return 0;
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) {
1538*4882a593Smuzhiyun kvaser_pciefd_handle_nack_packet(can, p);
1539*4882a593Smuzhiyun one_shot_fail = true;
1540*4882a593Smuzhiyun }
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) {
1543*4882a593Smuzhiyun netdev_dbg(can->can.dev, "Packet was flushed\n");
1544*4882a593Smuzhiyun } else {
1545*4882a593Smuzhiyun int echo_idx = p->header[0] & KVASER_PCIEFD_PACKET_SEQ_MSK;
1546*4882a593Smuzhiyun int dlc = can_get_echo_skb(can->can.dev, echo_idx);
1547*4882a593Smuzhiyun u8 count = ioread32(can->reg_base +
1548*4882a593Smuzhiyun KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1549*4882a593Smuzhiyun
1550*4882a593Smuzhiyun if (count < KVASER_PCIEFD_CAN_TX_MAX_COUNT &&
1551*4882a593Smuzhiyun netif_queue_stopped(can->can.dev))
1552*4882a593Smuzhiyun netif_wake_queue(can->can.dev);
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun if (!one_shot_fail) {
1555*4882a593Smuzhiyun struct net_device_stats *stats = &can->can.dev->stats;
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun stats->tx_bytes += dlc;
1558*4882a593Smuzhiyun stats->tx_packets++;
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun }
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun return 0;
1563*4882a593Smuzhiyun }
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie,
1566*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p)
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1569*4882a593Smuzhiyun u8 ch_id = (p->header[1] >> KVASER_PCIEFD_PACKET_CHID_SHIFT) & 0x7;
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyun if (ch_id >= pcie->nr_channels)
1572*4882a593Smuzhiyun return -EIO;
1573*4882a593Smuzhiyun
1574*4882a593Smuzhiyun can = pcie->can[ch_id];
1575*4882a593Smuzhiyun
1576*4882a593Smuzhiyun if (!completion_done(&can->flush_comp))
1577*4882a593Smuzhiyun complete(&can->flush_comp);
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun return 0;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun
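/* Parse one packet from a Rx DMA buffer. The layout, as consumed below, is a
 * sequence of little-endian 32-bit words:
 *
 *   word 0     packet size in words (0 terminates the buffer)
 *   word 1..2  header[0], header[1]
 *   word 3..4  64-bit timestamp
 *   word 5..   data payload (data packets only, DLC-dependent)
 *
 * The size word appears to count the whole packet, itself included, since the
 * parser checks *start_pos + size == pos before advancing *start_pos to the
 * next packet header.
 */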
1582*4882a593Smuzhiyun static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
1583*4882a593Smuzhiyun int dma_buf)
1584*4882a593Smuzhiyun {
1585*4882a593Smuzhiyun __le32 *buffer = pcie->dma_data[dma_buf];
1586*4882a593Smuzhiyun __le64 timestamp;
1587*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet packet;
1588*4882a593Smuzhiyun struct kvaser_pciefd_rx_packet *p = &packet;
1589*4882a593Smuzhiyun u8 type;
1590*4882a593Smuzhiyun int pos = *start_pos;
1591*4882a593Smuzhiyun int size;
1592*4882a593Smuzhiyun int ret = 0;
1593*4882a593Smuzhiyun
1594*4882a593Smuzhiyun size = le32_to_cpu(buffer[pos++]);
1595*4882a593Smuzhiyun if (!size) {
1596*4882a593Smuzhiyun *start_pos = 0;
1597*4882a593Smuzhiyun return 0;
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun
1600*4882a593Smuzhiyun p->header[0] = le32_to_cpu(buffer[pos++]);
1601*4882a593Smuzhiyun p->header[1] = le32_to_cpu(buffer[pos++]);
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun /* Read 64-bit timestamp */
1604*4882a593Smuzhiyun memcpy(&timestamp, &buffer[pos], sizeof(__le64));
1605*4882a593Smuzhiyun pos += 2;
1606*4882a593Smuzhiyun p->timestamp = le64_to_cpu(timestamp);
1607*4882a593Smuzhiyun
1608*4882a593Smuzhiyun type = (p->header[1] >> KVASER_PCIEFD_PACKET_TYPE_SHIFT) & 0xf;
1609*4882a593Smuzhiyun switch (type) {
1610*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_DATA:
1611*4882a593Smuzhiyun ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]);
1612*4882a593Smuzhiyun if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) {
1613*4882a593Smuzhiyun u8 data_len;
1614*4882a593Smuzhiyun
1615*4882a593Smuzhiyun data_len = can_dlc2len(p->header[1] >>
1616*4882a593Smuzhiyun KVASER_PCIEFD_RPACKET_DLC_SHIFT);
1617*4882a593Smuzhiyun pos += DIV_ROUND_UP(data_len, 4);
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun break;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_ACK:
1622*4882a593Smuzhiyun ret = kvaser_pciefd_handle_ack_packet(pcie, p);
1623*4882a593Smuzhiyun break;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_STATUS:
1626*4882a593Smuzhiyun ret = kvaser_pciefd_handle_status_packet(pcie, p);
1627*4882a593Smuzhiyun break;
1628*4882a593Smuzhiyun
1629*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_ERROR:
1630*4882a593Smuzhiyun ret = kvaser_pciefd_handle_error_packet(pcie, p);
1631*4882a593Smuzhiyun break;
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK:
1634*4882a593Smuzhiyun ret = kvaser_pciefd_handle_eack_packet(pcie, p);
1635*4882a593Smuzhiyun break;
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK:
1638*4882a593Smuzhiyun ret = kvaser_pciefd_handle_eflush_packet(pcie, p);
1639*4882a593Smuzhiyun break;
1640*4882a593Smuzhiyun
1641*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_ACK_DATA:
1642*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD:
1643*4882a593Smuzhiyun case KVASER_PCIEFD_PACK_TYPE_TXRQ:
1644*4882a593Smuzhiyun dev_info(&pcie->pci->dev,
1645*4882a593Smuzhiyun "Received unexpected packet type 0x%08X\n", type);
1646*4882a593Smuzhiyun break;
1647*4882a593Smuzhiyun
1648*4882a593Smuzhiyun default:
1649*4882a593Smuzhiyun dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type);
1650*4882a593Smuzhiyun ret = -EIO;
1651*4882a593Smuzhiyun break;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun if (ret)
1655*4882a593Smuzhiyun return ret;
1656*4882a593Smuzhiyun
1657*4882a593Smuzhiyun /* Position does not point to the end of the packet,
1658*4882a593Smuzhiyun * corrupted packet size?
1659*4882a593Smuzhiyun */
1660*4882a593Smuzhiyun if ((*start_pos + size) != pos)
1661*4882a593Smuzhiyun return -EIO;
1662*4882a593Smuzhiyun
1663*4882a593Smuzhiyun /* Point to the next packet header, if any */
1664*4882a593Smuzhiyun *start_pos = pos;
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun return ret;
1667*4882a593Smuzhiyun }
1668*4882a593Smuzhiyun
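/* Drain one DMA buffer: keep parsing packets until an error occurs, an empty
 * size word resets the position to 0, or the end of the buffer is reached.
 */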
1669*4882a593Smuzhiyun static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun int pos = 0;
1672*4882a593Smuzhiyun int res = 0;
1673*4882a593Smuzhiyun
1674*4882a593Smuzhiyun do {
1675*4882a593Smuzhiyun res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf);
1676*4882a593Smuzhiyun } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE);
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun return res;
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun
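/* Shared receive buffer interrupt: a DPD0/DPD1 bit signals that the
 * corresponding DMA buffer holds packets; it is parsed and then handed back
 * to the hardware with the matching RDB command. Overflow and underflow bits
 * are only logged. All asserted bits are acked by writing them back to the
 * IRQ register.
 */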
1681*4882a593Smuzhiyun static int kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
1682*4882a593Smuzhiyun {
1683*4882a593Smuzhiyun u32 irq;
1684*4882a593Smuzhiyun
1685*4882a593Smuzhiyun irq = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1686*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
1687*4882a593Smuzhiyun kvaser_pciefd_read_buffer(pcie, 0);
1688*4882a593Smuzhiyun /* Reset DMA buffer 0 */
1689*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1690*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun
1693*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
1694*4882a593Smuzhiyun kvaser_pciefd_read_buffer(pcie, 1);
1695*4882a593Smuzhiyun /* Reset DMA buffer 1 */
1696*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1697*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
1701*4882a593Smuzhiyun irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
1702*4882a593Smuzhiyun irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
1703*4882a593Smuzhiyun irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
1704*4882a593Smuzhiyun dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun iowrite32(irq, pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1707*4882a593Smuzhiyun return 0;
1708*4882a593Smuzhiyun }
1709*4882a593Smuzhiyun
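/* Per-channel KCAN interrupt: log FIFO overflows and configuration errors
 * and, on a TFD interrupt with an empty Tx FIFO, send the end-of-flush
 * command.
 */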
1710*4882a593Smuzhiyun static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
1711*4882a593Smuzhiyun {
1712*4882a593Smuzhiyun u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1713*4882a593Smuzhiyun
1714*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
1715*4882a593Smuzhiyun netdev_err(can->can.dev, "Tx FIFO overflow\n");
1716*4882a593Smuzhiyun
1717*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
1718*4882a593Smuzhiyun u8 count = ioread32(can->reg_base +
1719*4882a593Smuzhiyun KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
1720*4882a593Smuzhiyun
1721*4882a593Smuzhiyun if (count == 0)
1722*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
1723*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
1724*4882a593Smuzhiyun }
1725*4882a593Smuzhiyun
1726*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
1727*4882a593Smuzhiyun netdev_err(can->can.dev,
1728*4882a593Smuzhiyun "Failed to change bittiming while not in reset mode\n");
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC)
1731*4882a593Smuzhiyun netdev_err(can->can.dev, "CAN FD frame in CAN mode\n");
1732*4882a593Smuzhiyun
1733*4882a593Smuzhiyun if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF)
1734*4882a593Smuzhiyun netdev_err(can->can.dev, "Rx FIFO overflow\n");
1735*4882a593Smuzhiyun
1736*4882a593Smuzhiyun iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
1737*4882a593Smuzhiyun return 0;
1738*4882a593Smuzhiyun }
1739*4882a593Smuzhiyun
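/* Top-level PCI interrupt handler: one bit in the board IRQ register selects
 * the shared receive buffer, and one bit per channel selects the
 * corresponding KCAN controller. All asserted bits are acked at the end.
 */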
1740*4882a593Smuzhiyun static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
1741*4882a593Smuzhiyun {
1742*4882a593Smuzhiyun struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
1743*4882a593Smuzhiyun u32 board_irq;
1744*4882a593Smuzhiyun int i;
1745*4882a593Smuzhiyun
1746*4882a593Smuzhiyun board_irq = ioread32(pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun if (!(board_irq & KVASER_PCIEFD_IRQ_ALL_MSK))
1749*4882a593Smuzhiyun return IRQ_NONE;
1750*4882a593Smuzhiyun
1751*4882a593Smuzhiyun if (board_irq & KVASER_PCIEFD_IRQ_SRB)
1752*4882a593Smuzhiyun kvaser_pciefd_receive_irq(pcie);
1753*4882a593Smuzhiyun
1754*4882a593Smuzhiyun for (i = 0; i < pcie->nr_channels; i++) {
1755*4882a593Smuzhiyun if (!pcie->can[i]) {
1756*4882a593Smuzhiyun dev_err(&pcie->pci->dev,
1757*4882a593Smuzhiyun "IRQ mask points to unallocated controller\n");
1758*4882a593Smuzhiyun break;
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun
1761*4882a593Smuzhiyun /* Check that mask matches channel (i) IRQ mask */
1762*4882a593Smuzhiyun if (board_irq & (1 << i))
1763*4882a593Smuzhiyun kvaser_pciefd_transmit_irq(pcie->can[i]);
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun iowrite32(board_irq, pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1767*4882a593Smuzhiyun return IRQ_HANDLED;
1768*4882a593Smuzhiyun }
1769*4882a593Smuzhiyun
1770*4882a593Smuzhiyun static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
1771*4882a593Smuzhiyun {
1772*4882a593Smuzhiyun int i;
1773*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun for (i = 0; i < pcie->nr_channels; i++) {
1776*4882a593Smuzhiyun can = pcie->can[i];
1777*4882a593Smuzhiyun if (can) {
1778*4882a593Smuzhiyun iowrite32(0,
1779*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1780*4882a593Smuzhiyun kvaser_pciefd_pwm_stop(can);
1781*4882a593Smuzhiyun free_candev(can->can.dev);
1782*4882a593Smuzhiyun }
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun }
1785*4882a593Smuzhiyun
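/* Probe sequence: enable and map the PCI device, bring up the board and the
 * Rx DMA, create the per-channel CAN controllers, unmask the SRB and board
 * interrupts, hand both DMA buffers to the hardware, request the shared IRQ
 * and finally register the netdevices. The error path unwinds in reverse
 * order.
 */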
1786*4882a593Smuzhiyun static int kvaser_pciefd_probe(struct pci_dev *pdev,
1787*4882a593Smuzhiyun const struct pci_device_id *id)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun int err;
1790*4882a593Smuzhiyun struct kvaser_pciefd *pcie;
1791*4882a593Smuzhiyun
1792*4882a593Smuzhiyun pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1793*4882a593Smuzhiyun if (!pcie)
1794*4882a593Smuzhiyun return -ENOMEM;
1795*4882a593Smuzhiyun
1796*4882a593Smuzhiyun pci_set_drvdata(pdev, pcie);
1797*4882a593Smuzhiyun pcie->pci = pdev;
1798*4882a593Smuzhiyun
1799*4882a593Smuzhiyun err = pci_enable_device(pdev);
1800*4882a593Smuzhiyun if (err)
1801*4882a593Smuzhiyun return err;
1802*4882a593Smuzhiyun
1803*4882a593Smuzhiyun err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
1804*4882a593Smuzhiyun if (err)
1805*4882a593Smuzhiyun goto err_disable_pci;
1806*4882a593Smuzhiyun
1807*4882a593Smuzhiyun pcie->reg_base = pci_iomap(pdev, 0, 0);
1808*4882a593Smuzhiyun if (!pcie->reg_base) {
1809*4882a593Smuzhiyun err = -ENOMEM;
1810*4882a593Smuzhiyun goto err_release_regions;
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun
1813*4882a593Smuzhiyun err = kvaser_pciefd_setup_board(pcie);
1814*4882a593Smuzhiyun if (err)
1815*4882a593Smuzhiyun goto err_pci_iounmap;
1816*4882a593Smuzhiyun
1817*4882a593Smuzhiyun err = kvaser_pciefd_setup_dma(pcie);
1818*4882a593Smuzhiyun if (err)
1819*4882a593Smuzhiyun goto err_pci_iounmap;
1820*4882a593Smuzhiyun
1821*4882a593Smuzhiyun pci_set_master(pdev);
1822*4882a593Smuzhiyun
1823*4882a593Smuzhiyun err = kvaser_pciefd_setup_can_ctrls(pcie);
1824*4882a593Smuzhiyun if (err)
1825*4882a593Smuzhiyun goto err_teardown_can_ctrls;
1826*4882a593Smuzhiyun
1827*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
1828*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
1829*4882a593Smuzhiyun
1830*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 |
1831*4882a593Smuzhiyun KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 |
1832*4882a593Smuzhiyun KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1,
1833*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_IEN_REG);
1834*4882a593Smuzhiyun
1835*4882a593Smuzhiyun /* Reset IRQ handling, expected to be off before */
1836*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1837*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1838*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1839*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_IEN_REG);
1840*4882a593Smuzhiyun
1841*4882a593Smuzhiyun /* Ready the DMA buffers */
1842*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
1843*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1844*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
1845*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
1846*4882a593Smuzhiyun
1847*4882a593Smuzhiyun err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
1848*4882a593Smuzhiyun IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
1849*4882a593Smuzhiyun if (err)
1850*4882a593Smuzhiyun goto err_teardown_can_ctrls;
1851*4882a593Smuzhiyun
1852*4882a593Smuzhiyun err = kvaser_pciefd_reg_candev(pcie);
1853*4882a593Smuzhiyun if (err)
1854*4882a593Smuzhiyun goto err_free_irq;
1855*4882a593Smuzhiyun
1856*4882a593Smuzhiyun return 0;
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun err_free_irq:
1859*4882a593Smuzhiyun free_irq(pcie->pci->irq, pcie);
1860*4882a593Smuzhiyun
1861*4882a593Smuzhiyun err_teardown_can_ctrls:
1862*4882a593Smuzhiyun kvaser_pciefd_teardown_can_ctrls(pcie);
1863*4882a593Smuzhiyun iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1864*4882a593Smuzhiyun pci_clear_master(pdev);
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun err_pci_iounmap:
1867*4882a593Smuzhiyun pci_iounmap(pdev, pcie->reg_base);
1868*4882a593Smuzhiyun
1869*4882a593Smuzhiyun err_release_regions:
1870*4882a593Smuzhiyun pci_release_regions(pdev);
1871*4882a593Smuzhiyun
1872*4882a593Smuzhiyun err_disable_pci:
1873*4882a593Smuzhiyun pci_disable_device(pdev);
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun return err;
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun
1878*4882a593Smuzhiyun static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
1879*4882a593Smuzhiyun {
1880*4882a593Smuzhiyun struct kvaser_pciefd_can *can;
1881*4882a593Smuzhiyun int i;
1882*4882a593Smuzhiyun
1883*4882a593Smuzhiyun for (i = 0; i < pcie->nr_channels; i++) {
1884*4882a593Smuzhiyun can = pcie->can[i];
1885*4882a593Smuzhiyun if (can) {
1886*4882a593Smuzhiyun iowrite32(0,
1887*4882a593Smuzhiyun can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
1888*4882a593Smuzhiyun unregister_candev(can->can.dev);
1889*4882a593Smuzhiyun del_timer(&can->bec_poll_timer);
1890*4882a593Smuzhiyun kvaser_pciefd_pwm_stop(can);
1891*4882a593Smuzhiyun free_candev(can->can.dev);
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun }
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun
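/* Remove: unregister and free all channels first, then mask every interrupt
 * source before releasing the IRQ and the PCI resources.
 */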
1896*4882a593Smuzhiyun static void kvaser_pciefd_remove(struct pci_dev *pdev)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun struct kvaser_pciefd *pcie = pci_get_drvdata(pdev);
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun kvaser_pciefd_remove_all_ctrls(pcie);
1901*4882a593Smuzhiyun
1902*4882a593Smuzhiyun /* Turn off IRQ generation */
1903*4882a593Smuzhiyun iowrite32(0, pcie->reg_base + KVASER_PCIEFD_SRB_CTRL_REG);
1904*4882a593Smuzhiyun iowrite32(KVASER_PCIEFD_IRQ_ALL_MSK,
1905*4882a593Smuzhiyun pcie->reg_base + KVASER_PCIEFD_IRQ_REG);
1906*4882a593Smuzhiyun iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
1907*4882a593Smuzhiyun
1908*4882a593Smuzhiyun free_irq(pcie->pci->irq, pcie);
1909*4882a593Smuzhiyun
1910*4882a593Smuzhiyun pci_clear_master(pdev);
1911*4882a593Smuzhiyun pci_iounmap(pdev, pcie->reg_base);
1912*4882a593Smuzhiyun pci_release_regions(pdev);
1913*4882a593Smuzhiyun pci_disable_device(pdev);
1914*4882a593Smuzhiyun }
1915*4882a593Smuzhiyun
1916*4882a593Smuzhiyun static struct pci_driver kvaser_pciefd = {
1917*4882a593Smuzhiyun .name = KVASER_PCIEFD_DRV_NAME,
1918*4882a593Smuzhiyun .id_table = kvaser_pciefd_id_table,
1919*4882a593Smuzhiyun .probe = kvaser_pciefd_probe,
1920*4882a593Smuzhiyun .remove = kvaser_pciefd_remove,
1921*4882a593Smuzhiyun };
1922*4882a593Smuzhiyun
1923*4882a593Smuzhiyun module_pci_driver(kvaser_pciefd);