xref: /OK3568_Linux_fs/kernel/drivers/net/hamradio/dmascc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for high-speed SCC boards (those with DMA support)
4*4882a593Smuzhiyun  * Copyright (C) 1997-2000 Klaus Kudielka
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * S5SCC/DMA support by Janko Koleznik S52HI
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/bitops.h>
12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/if_arp.h>
15*4882a593Smuzhiyun #include <linux/in.h>
16*4882a593Smuzhiyun #include <linux/init.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/ioport.h>
19*4882a593Smuzhiyun #include <linux/kernel.h>
20*4882a593Smuzhiyun #include <linux/mm.h>
21*4882a593Smuzhiyun #include <linux/netdevice.h>
22*4882a593Smuzhiyun #include <linux/slab.h>
23*4882a593Smuzhiyun #include <linux/rtnetlink.h>
24*4882a593Smuzhiyun #include <linux/sockios.h>
25*4882a593Smuzhiyun #include <linux/workqueue.h>
26*4882a593Smuzhiyun #include <linux/atomic.h>
27*4882a593Smuzhiyun #include <asm/dma.h>
28*4882a593Smuzhiyun #include <asm/io.h>
29*4882a593Smuzhiyun #include <asm/irq.h>
30*4882a593Smuzhiyun #include <linux/uaccess.h>
31*4882a593Smuzhiyun #include <net/ax25.h>
32*4882a593Smuzhiyun #include "z8530.h"
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /* Number of buffers per channel */
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #define NUM_TX_BUF      2	/* NUM_TX_BUF >= 1 (min. 2 recommended) */
38*4882a593Smuzhiyun #define NUM_RX_BUF      6	/* NUM_RX_BUF >= 1 (min. 2 recommended) */
39*4882a593Smuzhiyun #define BUF_SIZE        1576	/* BUF_SIZE >= mtu + hard_header_len */
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /* Cards supported */
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun #define HW_PI           { "Ottawa PI", 0x300, 0x20, 0x10, 8, \
45*4882a593Smuzhiyun                             0, 8, 1843200, 3686400 }
46*4882a593Smuzhiyun #define HW_PI2          { "Ottawa PI2", 0x300, 0x20, 0x10, 8, \
47*4882a593Smuzhiyun 			    0, 8, 3686400, 7372800 }
48*4882a593Smuzhiyun #define HW_TWIN         { "Gracilis PackeTwin", 0x200, 0x10, 0x10, 32, \
49*4882a593Smuzhiyun 			    0, 4, 6144000, 6144000 }
50*4882a593Smuzhiyun #define HW_S5           { "S5SCC/DMA", 0x200, 0x10, 0x10, 32, \
51*4882a593Smuzhiyun                           0, 8, 4915200, 9830400 }
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun #define HARDWARE        { HW_PI, HW_PI2, HW_TWIN, HW_S5 }
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun #define TMR_0_HZ        25600	/* Frequency of timer 0 */
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun #define TYPE_PI         0
58*4882a593Smuzhiyun #define TYPE_PI2        1
59*4882a593Smuzhiyun #define TYPE_TWIN       2
60*4882a593Smuzhiyun #define TYPE_S5         3
61*4882a593Smuzhiyun #define NUM_TYPES       4
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun #define MAX_NUM_DEVS    32
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun /* SCC chips supported */
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun #define Z8530           0
69*4882a593Smuzhiyun #define Z85C30          1
70*4882a593Smuzhiyun #define Z85230          2
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun #define CHIPNAMES       { "Z8530", "Z85C30", "Z85230" }
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun /* I/O registers */
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun /* 8530 registers relative to card base */
78*4882a593Smuzhiyun #define SCCB_CMD        0x00
79*4882a593Smuzhiyun #define SCCB_DATA       0x01
80*4882a593Smuzhiyun #define SCCA_CMD        0x02
81*4882a593Smuzhiyun #define SCCA_DATA       0x03
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun /* 8253/8254 registers relative to card base */
84*4882a593Smuzhiyun #define TMR_CNT0        0x00
85*4882a593Smuzhiyun #define TMR_CNT1        0x01
86*4882a593Smuzhiyun #define TMR_CNT2        0x02
87*4882a593Smuzhiyun #define TMR_CTRL        0x03
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun /* Additional PI/PI2 registers relative to card base */
90*4882a593Smuzhiyun #define PI_DREQ_MASK    0x04
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /* Additional PackeTwin registers relative to card base */
93*4882a593Smuzhiyun #define TWIN_INT_REG    0x08
94*4882a593Smuzhiyun #define TWIN_CLR_TMR1   0x09
95*4882a593Smuzhiyun #define TWIN_CLR_TMR2   0x0a
96*4882a593Smuzhiyun #define TWIN_SPARE_1    0x0b
97*4882a593Smuzhiyun #define TWIN_DMA_CFG    0x08
98*4882a593Smuzhiyun #define TWIN_SERIAL_CFG 0x09
99*4882a593Smuzhiyun #define TWIN_DMA_CLR_FF 0x0a
100*4882a593Smuzhiyun #define TWIN_SPARE_2    0x0b
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /* PackeTwin I/O register values */
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun /* INT_REG */
106*4882a593Smuzhiyun #define TWIN_SCC_MSK       0x01
107*4882a593Smuzhiyun #define TWIN_TMR1_MSK      0x02
108*4882a593Smuzhiyun #define TWIN_TMR2_MSK      0x04
109*4882a593Smuzhiyun #define TWIN_INT_MSK       0x07
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun /* SERIAL_CFG */
112*4882a593Smuzhiyun #define TWIN_DTRA_ON       0x01
113*4882a593Smuzhiyun #define TWIN_DTRB_ON       0x02
114*4882a593Smuzhiyun #define TWIN_EXTCLKA       0x04
115*4882a593Smuzhiyun #define TWIN_EXTCLKB       0x08
116*4882a593Smuzhiyun #define TWIN_LOOPA_ON      0x10
117*4882a593Smuzhiyun #define TWIN_LOOPB_ON      0x20
118*4882a593Smuzhiyun #define TWIN_EI            0x80
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun /* DMA_CFG */
121*4882a593Smuzhiyun #define TWIN_DMA_HDX_T1    0x08
122*4882a593Smuzhiyun #define TWIN_DMA_HDX_R1    0x0a
123*4882a593Smuzhiyun #define TWIN_DMA_HDX_T3    0x14
124*4882a593Smuzhiyun #define TWIN_DMA_HDX_R3    0x16
125*4882a593Smuzhiyun #define TWIN_DMA_FDX_T3R1  0x1b
126*4882a593Smuzhiyun #define TWIN_DMA_FDX_T1R3  0x1d
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /* Status values */
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun #define IDLE      0
132*4882a593Smuzhiyun #define TX_HEAD   1
133*4882a593Smuzhiyun #define TX_DATA   2
134*4882a593Smuzhiyun #define TX_PAUSE  3
135*4882a593Smuzhiyun #define TX_TAIL   4
136*4882a593Smuzhiyun #define RTS_OFF   5
137*4882a593Smuzhiyun #define WAIT      6
138*4882a593Smuzhiyun #define DCD_ON    7
139*4882a593Smuzhiyun #define RX_ON     8
140*4882a593Smuzhiyun #define DCD_OFF   9
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun /* Ioctls */
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun #define SIOCGSCCPARAM SIOCDEVPRIVATE
146*4882a593Smuzhiyun #define SIOCSSCCPARAM (SIOCDEVPRIVATE+1)
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun /* Data types */
150*4882a593Smuzhiyun 
/* Per-channel link timing/clocking parameters (see SIOC[GS]SCCPARAM
 * ioctls above; presumably exchanged with user space via scc_ioctl()
 * — handler body not in this chunk). */
struct scc_param {
	int pclk_hz;		/* frequency of BRG input (don't change) */
	int brg_tc;		/* BRG terminal count; BRG disabled if < 0 */
	int nrzi;		/* 0 (nrz), 1 (nrzi) */
	int clocks;		/* see dmascc_cfg documentation */
	int txdelay;		/* [1/TMR_0_HZ] */
	int txtimeout;		/* [1/HZ] */
	int txtail;		/* [1/TMR_0_HZ] */
	int waittime;		/* [1/TMR_0_HZ] */
	int slottime;		/* [1/TMR_0_HZ] */
	int persist;		/* 1 ... 256 */
	int dma;		/* -1 (disable), 0, 1, 3 */
	int txpause;		/* [1/TMR_0_HZ] */
	int rtsoff;		/* [1/TMR_0_HZ] */
	int dcdon;		/* [1/TMR_0_HZ] */
	int dcdoff;		/* [1/TMR_0_HZ] */
};
168*4882a593Smuzhiyun 
/* Static description of one supported card family; instances come from
 * the HW_* initializers collected in the HARDWARE table. */
struct scc_hardware {
	char *name;		/* human-readable board name */
	int io_region;		/* lowest possible I/O base address */
	int io_delta;		/* spacing between consecutive base addresses */
	int io_size;		/* size of the I/O region to request */
	int num_devs;		/* number of candidate base addresses to probe */
	int scc_offset;		/* SCC registers, relative to card base */
	int tmr_offset;		/* 8253/8254 timer, relative to card base */
	int tmr_hz;		/* timer input clock frequency [Hz] */
	int pclk_hz;		/* SCC PCLK frequency [Hz] */
};
180*4882a593Smuzhiyun 
/* Per-channel driver state; two instances live inside each scc_info. */
struct scc_priv {
	int type;		/* board type: TYPE_PI ... TYPE_S5 */
	int chip;		/* SCC variant: Z8530, Z85C30 or Z85230 */
	struct net_device *dev;	/* back pointer to this channel's device */
	struct scc_info *info;	/* back pointer to the board state */

	int channel;		/* 0 = SCC channel A, 1 = channel B */
	int card_base, scc_cmd, scc_data;	/* I/O port addresses */
	int tmr_cnt, tmr_ctrl, tmr_mode;	/* this channel's timer ports + control word */
	struct scc_param param;	/* link timing/clocking parameters */
	char rx_buf[NUM_RX_BUF][BUF_SIZE];	/* receive frame buffers */
	int rx_len[NUM_RX_BUF];	/* length of each buffered rx frame */
	int rx_ptr;		/* NOTE(review): offset into current rx buffer — users (rx_isr) not in this chunk; confirm */
	struct work_struct rx_work;	/* bottom half (rx_bh) scheduled for rx delivery */
	int rx_head, rx_tail, rx_count;	/* rx buffer ring indices and fill level */
	int rx_over;		/* rx overrun flag — set in rx paths not shown here */
	char tx_buf[NUM_TX_BUF][BUF_SIZE];	/* transmit frame buffers */
	int tx_len[NUM_TX_BUF];	/* length of each queued tx frame */
	int tx_ptr;		/* NOTE(review): offset into current tx buffer — confirm against tx_isr */
	int tx_head, tx_tail, tx_count;	/* tx buffer ring indices and fill level */
	int state;		/* channel state, presumably one of the Status values (IDLE ... DCD_OFF) */
	unsigned long tx_start;	/* presumably jiffies timestamp of tx start (cf. txtimeout [1/HZ]) */
	int rr0;		/* cached SCC read-register 0 value — TODO confirm (users not in this chunk) */
	spinlock_t *register_lock;	/* Per scc_info */
	spinlock_t ring_lock;	/* protects the rx/tx buffer rings */
};
207*4882a593Smuzhiyun 
/* Per-board state: one SCC chip serves two channels/net devices.
 * Boards are chained on the driver-global 'first' list. */
struct scc_info {
	int irq_used;		/* NOTE(review): users not in this chunk — presumably tracks shared IRQ requests */
	int twin_serial_cfg;	/* shadow of the write-only TWIN_SERIAL_CFG register */
	struct net_device *dev[2];	/* channel A / channel B devices */
	struct scc_priv priv[2];	/* matching per-channel state */
	struct scc_info *next;	/* next board on the 'first' list */
	spinlock_t register_lock;	/* Per device register lock */
};
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun /* Function declarations */
219*4882a593Smuzhiyun static int setup_adapter(int card_base, int type, int n) __init;
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun static void write_scc(struct scc_priv *priv, int reg, int val);
222*4882a593Smuzhiyun static void write_scc_data(struct scc_priv *priv, int val, int fast);
223*4882a593Smuzhiyun static int read_scc(struct scc_priv *priv, int reg);
224*4882a593Smuzhiyun static int read_scc_data(struct scc_priv *priv);
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun static int scc_open(struct net_device *dev);
227*4882a593Smuzhiyun static int scc_close(struct net_device *dev);
228*4882a593Smuzhiyun static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
229*4882a593Smuzhiyun static int scc_send_packet(struct sk_buff *skb, struct net_device *dev);
230*4882a593Smuzhiyun static int scc_set_mac_address(struct net_device *dev, void *sa);
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun static inline void tx_on(struct scc_priv *priv);
233*4882a593Smuzhiyun static inline void rx_on(struct scc_priv *priv);
234*4882a593Smuzhiyun static inline void rx_off(struct scc_priv *priv);
235*4882a593Smuzhiyun static void start_timer(struct scc_priv *priv, int t, int r15);
236*4882a593Smuzhiyun static inline unsigned char random(void);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun static inline void z8530_isr(struct scc_info *info);
239*4882a593Smuzhiyun static irqreturn_t scc_isr(int irq, void *dev_id);
240*4882a593Smuzhiyun static void rx_isr(struct scc_priv *priv);
241*4882a593Smuzhiyun static void special_condition(struct scc_priv *priv, int rc);
242*4882a593Smuzhiyun static void rx_bh(struct work_struct *);
243*4882a593Smuzhiyun static void tx_isr(struct scc_priv *priv);
244*4882a593Smuzhiyun static void es_isr(struct scc_priv *priv);
245*4882a593Smuzhiyun static void tm_isr(struct scc_priv *priv);
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 
/* Initialization variables */

/* "io=" module parameter: user-supplied card base addresses (0-terminated) */
static int io[MAX_NUM_DEVS] __initdata = { 0, };

/* Beware! hw[] is also used in dmascc_exit(). */
static struct scc_hardware hw[NUM_TYPES] = HARDWARE;


/* Global variables */

/* Head of the list of successfully probed boards */
static struct scc_info *first;
/* State of the driver's simple PRNG (seeded with jiffies; see random()) */
static unsigned long rand;


MODULE_AUTHOR("Klaus Kudielka");
MODULE_DESCRIPTION("Driver for high-speed SCC boards");
module_param_hw_array(io, int, ioport, NULL, 0);
MODULE_LICENSE("GPL");
266*4882a593Smuzhiyun 
dmascc_exit(void)267*4882a593Smuzhiyun static void __exit dmascc_exit(void)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	int i;
270*4882a593Smuzhiyun 	struct scc_info *info;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	while (first) {
273*4882a593Smuzhiyun 		info = first;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 		/* Unregister devices */
276*4882a593Smuzhiyun 		for (i = 0; i < 2; i++)
277*4882a593Smuzhiyun 			unregister_netdev(info->dev[i]);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 		/* Reset board */
280*4882a593Smuzhiyun 		if (info->priv[0].type == TYPE_TWIN)
281*4882a593Smuzhiyun 			outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
282*4882a593Smuzhiyun 		write_scc(&info->priv[0], R9, FHWRES);
283*4882a593Smuzhiyun 		release_region(info->dev[0]->base_addr,
284*4882a593Smuzhiyun 			       hw[info->priv[0].type].io_size);
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 		for (i = 0; i < 2; i++)
287*4882a593Smuzhiyun 			free_netdev(info->dev[i]);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 		/* Free memory */
290*4882a593Smuzhiyun 		first = info->next;
291*4882a593Smuzhiyun 		kfree(info);
292*4882a593Smuzhiyun 	}
293*4882a593Smuzhiyun }
294*4882a593Smuzhiyun 
/*
 * Module init: probe for all supported card types and register every
 * adapter found.
 *
 * Detection strategy: for each candidate I/O region, program the
 * card's 8253/8254 timer 1 as a one-shot counting TMR_0_HZ/HZ*10
 * pulses of timer 0 (i.e. ~10 jiffies worth) and measure, in jiffies,
 * how long it takes to expire.  Only a region containing a real timer
 * measures close to 10 jiffies; those are handed to setup_adapter().
 *
 * Returns 0 if at least one adapter was set up, -EIO otherwise.
 */
static int __init dmascc_init(void)
{
	int h, i, j, n;
	int base[MAX_NUM_DEVS], tcmd[MAX_NUM_DEVS], t0[MAX_NUM_DEVS],
	    t1[MAX_NUM_DEVS];
	unsigned t_val;
	unsigned long time, start[MAX_NUM_DEVS], delay[MAX_NUM_DEVS],
	    counting[MAX_NUM_DEVS];

	/* Initialize random number generator */
	rand = jiffies;
	/* Cards found = 0 */
	n = 0;
	/* Warning message */
	if (!io[0])
		printk(KERN_INFO "dmascc: autoprobing (dangerous)\n");

	/* Run autodetection for each card type */
	for (h = 0; h < NUM_TYPES; h++) {

		if (io[0]) {
			/* User-specified I/O address regions: accept io[i]
			 * only if it equals io_region + j * io_delta
			 * exactly for this card type. */
			for (i = 0; i < hw[h].num_devs; i++)
				base[i] = 0;
			for (i = 0; i < MAX_NUM_DEVS && io[i]; i++) {
				j = (io[i] -
				     hw[h].io_region) / hw[h].io_delta;
				if (j >= 0 && j < hw[h].num_devs &&
				    hw[h].io_region +
				    j * hw[h].io_delta == io[i]) {
					base[j] = io[i];
				}
			}
		} else {
			/* Default I/O address regions */
			for (i = 0; i < hw[h].num_devs; i++) {
				base[i] =
				    hw[h].io_region + i * hw[h].io_delta;
			}
		}

		/* Check valid I/O address regions; on success precompute
		 * the timer control and counter port addresses. */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if (!request_region
				    (base[i], hw[h].io_size, "dmascc"))
					base[i] = 0;
				else {
					tcmd[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CTRL;
					t0[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT0;
					t1[i] =
					    base[i] + hw[h].tmr_offset +
					    TMR_CNT1;
				}
			}

		/* Start timers */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				/* Timer 0: LSB+MSB, Mode 3, TMR_0_HZ */
				outb(0x36, tcmd[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) & 0xFF,
				     t0[i]);
				outb((hw[h].tmr_hz / TMR_0_HZ) >> 8,
				     t0[i]);
				/* Timer 1: LSB+MSB, Mode 0, HZ/10 */
				outb(0x70, tcmd[i]);
				outb((TMR_0_HZ / HZ * 10) & 0xFF, t1[i]);
				outb((TMR_0_HZ / HZ * 10) >> 8, t1[i]);
				start[i] = jiffies;
				delay[i] = 0;
				counting[i] = 1;
				/* Timer 2: LSB+MSB, Mode 0 */
				outb(0xb0, tcmd[i]);
			}
		time = jiffies;
		/* Wait until counter registers are loaded */
		udelay(2000000 / TMR_0_HZ);

		/* Timing loop: poll each candidate's timer 1 for up to 13
		 * jiffies and record when it expires (or wraps). */
		while (jiffies - time < 13) {
			for (i = 0; i < hw[h].num_devs; i++)
				if (base[i] && counting[i]) {
					/* Read back Timer 1: latch; read LSB; read MSB */
					outb(0x40, tcmd[i]);
					t_val =
					    inb(t1[i]) + (inb(t1[i]) << 8);
					/* Also check whether counter did wrap */
					if (t_val == 0 ||
					    t_val > TMR_0_HZ / HZ * 10)
						counting[i] = 0;
					delay[i] = jiffies - start[i];
				}
		}

		/* Evaluate measurements: a real timer expires after about
		 * 10 jiffies (9..11 accepted).  setup_adapter() keeps the
		 * I/O region on success; otherwise we release it here. */
		for (i = 0; i < hw[h].num_devs; i++)
			if (base[i]) {
				if ((delay[i] >= 9 && delay[i] <= 11) &&
				    /* Ok, we have found an adapter */
				    (setup_adapter(base[i], h, n) == 0))
					n++;
				else
					release_region(base[i],
						       hw[h].io_size);
			}

	}			/* NUM_TYPES */

	/* If any adapter was successfully initialized, return ok */
	if (n)
		return 0;

	/* If no adapter found, return error */
	printk(KERN_INFO "dmascc: no adapters found\n");
	return -EIO;
}
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun module_init(dmascc_init);
418*4882a593Smuzhiyun module_exit(dmascc_exit);
419*4882a593Smuzhiyun 
/* alloc_netdev() setup callback: apply AX.25 link-level defaults to a
 * freshly allocated channel device. */
static void __init dev_setup(struct net_device *dev)
{
	dev->type = ARPHRD_AX25;
	dev->hard_header_len = AX25_MAX_HEADER_LEN;
	dev->mtu = 1500;
	dev->addr_len = AX25_ADDR_LEN;
	dev->tx_queue_len = 64;
	/* Use the AX.25 broadcast address and default station address. */
	memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
	memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
}
430*4882a593Smuzhiyun 
/* net_device callbacks shared by both channels of every board. */
static const struct net_device_ops scc_netdev_ops = {
	.ndo_open = scc_open,
	.ndo_stop = scc_close,
	.ndo_start_xmit = scc_send_packet,
	.ndo_do_ioctl = scc_ioctl,
	.ndo_set_mac_address = scc_set_mac_address,
};
438*4882a593Smuzhiyun 
/*
 * Initialize the board found at card_base: identify the SCC chip
 * variant, auto-detect the IRQ via the on-board timer, fill in both
 * channels' driver state and register the two network devices.
 *
 * Returns 0 on success or a negative errno.  On failure the I/O
 * region stays claimed; the caller (dmascc_init) releases it.
 */
static int __init setup_adapter(int card_base, int type, int n)
{
	int i, irq, chip, err;
	struct scc_info *info;
	struct net_device *dev;
	struct scc_priv *priv;
	unsigned long time;
	unsigned int irqs;
	int tmr_base = card_base + hw[type].tmr_offset;
	int scc_base = card_base + hw[type].scc_offset;
	char *chipnames[] = CHIPNAMES;

	/* Initialize what is necessary for write_scc and write_scc_data */
	info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[0]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out1;
	}

	info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
	if (!info->dev[1]) {
		printk(KERN_ERR "dmascc: "
		       "could not allocate memory for %s at %#3x\n",
		       hw[type].name, card_base);
		err = -ENOMEM;
		goto out2;
	}
	spin_lock_init(&info->register_lock);

	/* Temporary channel-A state so write_scc()/read_scc() work
	 * before the full per-channel setup below. */
	priv = &info->priv[0];
	priv->type = type;
	priv->card_base = card_base;
	priv->scc_cmd = scc_base + SCCA_CMD;
	priv->scc_data = scc_base + SCCA_DATA;
	priv->register_lock = &info->register_lock;

	/* Reset SCC */
	write_scc(priv, R9, FHWRES | MIE | NV);

	/* Determine type of chip by enabling SDLC/HDLC enhancements */
	write_scc(priv, R15, SHDLCE);
	if (!read_scc(priv, R15)) {
		/* WR7' not present. This is an ordinary Z8530 SCC. */
		chip = Z8530;
	} else {
		/* Put one character in TX FIFO */
		write_scc_data(priv, 0, 0);
		if (read_scc(priv, R0) & Tx_BUF_EMP) {
			/* TX FIFO not full. This is a Z85230 ESCC with a 4-byte FIFO. */
			chip = Z85230;
		} else {
			/* TX FIFO full. This is a Z85C30 SCC with a 1-byte FIFO. */
			chip = Z85C30;
		}
	}
	write_scc(priv, R15, 0);

	/* Start IRQ auto-detection */
	irqs = probe_irq_on();

	/* Enable interrupts */
	if (type == TYPE_TWIN) {
		outb(0, card_base + TWIN_DMA_CFG);
		inb(card_base + TWIN_CLR_TMR1);
		inb(card_base + TWIN_CLR_TMR2);
		info->twin_serial_cfg = TWIN_EI;
		outb(info->twin_serial_cfg, card_base + TWIN_SERIAL_CFG);
	} else {
		write_scc(priv, R15, CTSIE);
		write_scc(priv, R0, RES_EXT_INT);
		write_scc(priv, R1, EXT_INT_ENAB);
	}

	/* Start timer: load counter 1 with LSB=1, MSB=0 (count of 1) so
	 * it expires almost immediately and raises the interrupt we are
	 * trying to detect. */
	outb(1, tmr_base + TMR_CNT1);
	outb(0, tmr_base + TMR_CNT1);

	/* Wait and detect IRQ (busy-wait a couple of jiffies) */
	time = jiffies;
	while (jiffies - time < 2 + HZ / TMR_0_HZ);
	irq = probe_irq_off(irqs);

	/* Clear pending interrupt, disable interrupts */
	if (type == TYPE_TWIN) {
		inb(card_base + TWIN_CLR_TMR1);
	} else {
		write_scc(priv, R1, 0);
		write_scc(priv, R15, 0);
		write_scc(priv, R0, RES_EXT_INT);
	}

	if (irq <= 0) {
		printk(KERN_ERR
		       "dmascc: could not find irq of %s at %#3x (irq=%d)\n",
		       hw[type].name, card_base, irq);
		err = -ENODEV;
		goto out3;
	}

	/* Set up data structures for both channels (A = 0, B = 1) */
	for (i = 0; i < 2; i++) {
		dev = info->dev[i];
		priv = &info->priv[i];
		priv->type = type;
		priv->chip = chip;
		priv->dev = dev;
		priv->info = info;
		priv->channel = i;
		spin_lock_init(&priv->ring_lock);
		priv->register_lock = &info->register_lock;
		priv->card_base = card_base;
		priv->scc_cmd = scc_base + (i ? SCCB_CMD : SCCA_CMD);
		priv->scc_data = scc_base + (i ? SCCB_DATA : SCCA_DATA);
		/* Channel A uses timer counter 1, channel B counter 2;
		 * tmr_mode is the matching 8253/8254 control word
		 * (LSB+MSB, Mode 0). */
		priv->tmr_cnt = tmr_base + (i ? TMR_CNT2 : TMR_CNT1);
		priv->tmr_ctrl = tmr_base + TMR_CTRL;
		priv->tmr_mode = i ? 0xb0 : 0x70;
		priv->param.pclk_hz = hw[type].pclk_hz;
		priv->param.brg_tc = -1;
		priv->param.clocks = TCTRxCP | RCRTxCP;
		priv->param.persist = 256;
		priv->param.dma = -1;
		INIT_WORK(&priv->rx_work, rx_bh);
		dev->ml_priv = priv;
		/* Devices are named dmascc0/1, dmascc2/3, ... per board */
		snprintf(dev->name, sizeof(dev->name), "dmascc%i", 2 * n + i);
		dev->base_addr = card_base;
		dev->irq = irq;
		dev->netdev_ops = &scc_netdev_ops;
		dev->header_ops = &ax25_header_ops;
	}
	if (register_netdev(info->dev[0])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[0]->name);
		err = -ENODEV;
		goto out3;
	}
	if (register_netdev(info->dev[1])) {
		printk(KERN_ERR "dmascc: could not register %s\n",
		       info->dev[1]->name);
		err = -ENODEV;
		goto out4;
	}


	/* Success: link the board into the global list */
	info->next = first;
	first = info;
	printk(KERN_INFO "dmascc: found %s (%s) at %#3x, irq %d\n",
	       hw[type].name, chipnames[chip], card_base, irq);
	return 0;

	/* Error unwinding: reset the hardware, then free in reverse
	 * order of allocation. */
      out4:
	unregister_netdev(info->dev[0]);
      out3:
	if (info->priv[0].type == TYPE_TWIN)
		outb(0, info->dev[0]->base_addr + TWIN_SERIAL_CFG);
	write_scc(&info->priv[0], R9, FHWRES);
	free_netdev(info->dev[1]);
      out2:
	free_netdev(info->dev[0]);
      out1:
	kfree(info);
      out:
	return err;
}
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun /* Driver functions */
614*4882a593Smuzhiyun 
write_scc(struct scc_priv * priv,int reg,int val)615*4882a593Smuzhiyun static void write_scc(struct scc_priv *priv, int reg, int val)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun 	unsigned long flags;
618*4882a593Smuzhiyun 	switch (priv->type) {
619*4882a593Smuzhiyun 	case TYPE_S5:
620*4882a593Smuzhiyun 		if (reg)
621*4882a593Smuzhiyun 			outb(reg, priv->scc_cmd);
622*4882a593Smuzhiyun 		outb(val, priv->scc_cmd);
623*4882a593Smuzhiyun 		return;
624*4882a593Smuzhiyun 	case TYPE_TWIN:
625*4882a593Smuzhiyun 		if (reg)
626*4882a593Smuzhiyun 			outb_p(reg, priv->scc_cmd);
627*4882a593Smuzhiyun 		outb_p(val, priv->scc_cmd);
628*4882a593Smuzhiyun 		return;
629*4882a593Smuzhiyun 	default:
630*4882a593Smuzhiyun 		spin_lock_irqsave(priv->register_lock, flags);
631*4882a593Smuzhiyun 		outb_p(0, priv->card_base + PI_DREQ_MASK);
632*4882a593Smuzhiyun 		if (reg)
633*4882a593Smuzhiyun 			outb_p(reg, priv->scc_cmd);
634*4882a593Smuzhiyun 		outb_p(val, priv->scc_cmd);
635*4882a593Smuzhiyun 		outb(1, priv->card_base + PI_DREQ_MASK);
636*4882a593Smuzhiyun 		spin_unlock_irqrestore(priv->register_lock, flags);
637*4882a593Smuzhiyun 		return;
638*4882a593Smuzhiyun 	}
639*4882a593Smuzhiyun }
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 
write_scc_data(struct scc_priv * priv,int val,int fast)642*4882a593Smuzhiyun static void write_scc_data(struct scc_priv *priv, int val, int fast)
643*4882a593Smuzhiyun {
644*4882a593Smuzhiyun 	unsigned long flags;
645*4882a593Smuzhiyun 	switch (priv->type) {
646*4882a593Smuzhiyun 	case TYPE_S5:
647*4882a593Smuzhiyun 		outb(val, priv->scc_data);
648*4882a593Smuzhiyun 		return;
649*4882a593Smuzhiyun 	case TYPE_TWIN:
650*4882a593Smuzhiyun 		outb_p(val, priv->scc_data);
651*4882a593Smuzhiyun 		return;
652*4882a593Smuzhiyun 	default:
653*4882a593Smuzhiyun 		if (fast)
654*4882a593Smuzhiyun 			outb_p(val, priv->scc_data);
655*4882a593Smuzhiyun 		else {
656*4882a593Smuzhiyun 			spin_lock_irqsave(priv->register_lock, flags);
657*4882a593Smuzhiyun 			outb_p(0, priv->card_base + PI_DREQ_MASK);
658*4882a593Smuzhiyun 			outb_p(val, priv->scc_data);
659*4882a593Smuzhiyun 			outb(1, priv->card_base + PI_DREQ_MASK);
660*4882a593Smuzhiyun 			spin_unlock_irqrestore(priv->register_lock, flags);
661*4882a593Smuzhiyun 		}
662*4882a593Smuzhiyun 		return;
663*4882a593Smuzhiyun 	}
664*4882a593Smuzhiyun }
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 
/* Read an SCC register.
 *
 * A non-zero register number is written to the command port first to
 * select the register; the value is then read back from the same
 * port (register 0 can be read directly).  On card types other than
 * S5SCC/DMA and PackeTwin, DREQ is masked so a DMA request cannot
 * interleave with the two-step select/read sequence. */
static int read_scc(struct scc_priv *priv, int reg)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		if (reg)
			outb(reg, priv->scc_cmd);
		return inb(priv->scc_cmd);
	case TYPE_TWIN:
		if (reg)
			outb_p(reg, priv->scc_cmd);
		return inb_p(priv->scc_cmd);
	default:
		/* mask DREQ around the select/read pair */
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		if (reg)
			outb_p(reg, priv->scc_cmd);
		rc = inb_p(priv->scc_cmd);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 
/* Read one byte from the channel's SCC data port, with the same
 * per-card DREQ masking discipline as write_scc_data(). */
static int read_scc_data(struct scc_priv *priv)
{
	int rc;
	unsigned long flags;
	switch (priv->type) {
	case TYPE_S5:
		return inb(priv->scc_data);
	case TYPE_TWIN:
		return inb_p(priv->scc_data);
	default:
		/* mask DREQ for the duration of the access */
		spin_lock_irqsave(priv->register_lock, flags);
		outb_p(0, priv->card_base + PI_DREQ_MASK);
		rc = inb_p(priv->scc_data);
		outb(1, priv->card_base + PI_DREQ_MASK);
		spin_unlock_irqrestore(priv->register_lock, flags);
		return rc;
	}
}
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 
/* ndo_open: bring the interface up.
 *
 * Acquires the IRQ (shared between the two channels of a card —
 * info->irq_used is the reference count) and, if configured, the ISA
 * DMA channel; resets the TX/RX ring state; then programs the
 * Z8530/Z85C30/Z85230 for SDLC operation with the configured
 * encoding, clocks and baud rate generator.  Returns 0 on success or
 * -EAGAIN if the IRQ or DMA channel cannot be obtained. */
static int scc_open(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	/* Request IRQ if not already used by other channel */
	if (!info->irq_used) {
		if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
			return -EAGAIN;
		}
	}
	info->irq_used++;

	/* Request DMA if required */
	if (priv->param.dma >= 0) {
		if (request_dma(priv->param.dma, "dmascc")) {
			/* undo the IRQ refcount taken above */
			if (--info->irq_used == 0)
				free_irq(dev->irq, info);
			return -EAGAIN;
		} else {
			unsigned long flags = claim_dma_lock();
			clear_dma_ff(priv->param.dma);
			release_dma_lock(flags);
		}
	}

	/* Initialize local variables */
	priv->rx_ptr = 0;
	priv->rx_over = 0;
	priv->rx_head = priv->rx_tail = priv->rx_count = 0;
	priv->state = IDLE;
	priv->tx_head = priv->tx_tail = priv->tx_count = 0;
	priv->tx_ptr = 0;

	/* Reset channel */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	/* X1 clock, SDLC mode */
	write_scc(priv, R4, SDLC | X1CLK);
	/* DMA */
	write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* 8 bit RX char, RX disable */
	write_scc(priv, R3, Rx8);
	/* 8 bit TX char, TX disable */
	write_scc(priv, R5, Tx8);
	/* SDLC address field */
	write_scc(priv, R6, 0);
	/* SDLC flag */
	write_scc(priv, R7, FLAG);
	switch (priv->chip) {
	case Z85C30:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* Auto EOM reset */
		write_scc(priv, R7, AUTOEOM);
		write_scc(priv, R15, 0);
		break;
	case Z85230:
		/* Select WR7' */
		write_scc(priv, R15, SHDLCE);
		/* The following bits are set (see 2.5.2.1):
		   - Automatic EOM reset
		   - Interrupt request if RX FIFO is half full
		   This bit should be ignored in DMA mode (according to the
		   documentation), but actually isn't. The receiver doesn't work if
		   it is set. Thus, we have to clear it in DMA mode.
		   - Interrupt/DMA request if TX FIFO is completely empty
		   a) If set, the ESCC behaves as if it had no TX FIFO (Z85C30
		   compatibility).
		   b) If cleared, DMA requests may follow each other very quickly,
		   filling up the TX FIFO.
		   Advantage: TX works even in case of high bus latency.
		   Disadvantage: Edge-triggered DMA request circuitry may miss
		   a request. No more data is delivered, resulting
		   in a TX FIFO underrun.
		   Both PI2 and S5SCC/DMA seem to work fine with TXFIFOE cleared.
		   The PackeTwin doesn't. I don't know about the PI, but let's
		   assume it behaves like the PI2.
		 */
		if (priv->param.dma >= 0) {
			if (priv->type == TYPE_TWIN)
				write_scc(priv, R7, AUTOEOM | TXFIFOE);
			else
				write_scc(priv, R7, AUTOEOM);
		} else {
			write_scc(priv, R7, AUTOEOM | RXFIFOH);
		}
		write_scc(priv, R15, 0);
		break;
	}
	/* Preset CRC, NRZ(I) encoding */
	write_scc(priv, R10, CRCPS | (priv->param.nrzi ? NRZI : NRZ));

	/* Configure baud rate generator */
	if (priv->param.brg_tc >= 0) {
		/* Program BR generator */
		write_scc(priv, R12, priv->param.brg_tc & 0xFF);
		write_scc(priv, R13, (priv->param.brg_tc >> 8) & 0xFF);
		/* BRG source = SYS CLK; enable BRG; DTR REQ function (required by
		   PackeTwin, not connected on the PI2); set DPLL source to BRG */
		write_scc(priv, R14, SSBR | DTRREQ | BRSRC | BRENABL);
		/* Enable DPLL */
		write_scc(priv, R14, SEARCH | DTRREQ | BRSRC | BRENABL);
	} else {
		/* Disable BR generator */
		write_scc(priv, R14, DTRREQ | BRSRC);
	}

	/* Configure clocks */
	if (priv->type == TYPE_TWIN) {
		/* Disable external TX clock receiver */
		outb((info->twin_serial_cfg &=
		      ~(priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}
	write_scc(priv, R11, priv->param.clocks);
	if ((priv->type == TYPE_TWIN) && !(priv->param.clocks & TRxCOI)) {
		/* Enable external TX clock receiver */
		outb((info->twin_serial_cfg |=
		      (priv->channel ? TWIN_EXTCLKB : TWIN_EXTCLKA)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Configure PackeTwin */
	if (priv->type == TYPE_TWIN) {
		/* Assert DTR, enable interrupts */
		outb((info->twin_serial_cfg |= TWIN_EI |
		      (priv->channel ? TWIN_DTRB_ON : TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Read current status */
	priv->rr0 = read_scc(priv, R0);
	/* Enable DCD interrupt */
	write_scc(priv, R15, DCDIE);

	netif_start_queue(dev);

	return 0;
}
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 
/* ndo_stop: shut the interface down.
 *
 * Stops the queue, drops DTR on the PackeTwin, resets the SCC
 * channel, and releases the DMA channel and (when the last channel
 * closes) the shared IRQ. */
static int scc_close(struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	struct scc_info *info = priv->info;
	int card_base = priv->card_base;

	netif_stop_queue(dev);

	if (priv->type == TYPE_TWIN) {
		/* Drop DTR */
		outb((info->twin_serial_cfg &=
		      (priv->channel ? ~TWIN_DTRB_ON : ~TWIN_DTRA_ON)),
		     card_base + TWIN_SERIAL_CFG);
	}

	/* Reset channel, free DMA and IRQ */
	write_scc(priv, R9, (priv->channel ? CHRB : CHRA) | MIE | NV);
	if (priv->param.dma >= 0) {
		if (priv->type == TYPE_TWIN)
			outb(0, card_base + TWIN_DMA_CFG);
		free_dma(priv->param.dma);
	}
	/* the IRQ is shared by both channels; free only on last close */
	if (--info->irq_used == 0)
		free_irq(dev->irq, info);

	return 0;
}
882*4882a593Smuzhiyun 
883*4882a593Smuzhiyun 
scc_ioctl(struct net_device * dev,struct ifreq * ifr,int cmd)884*4882a593Smuzhiyun static int scc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
885*4882a593Smuzhiyun {
886*4882a593Smuzhiyun 	struct scc_priv *priv = dev->ml_priv;
887*4882a593Smuzhiyun 
888*4882a593Smuzhiyun 	switch (cmd) {
889*4882a593Smuzhiyun 	case SIOCGSCCPARAM:
890*4882a593Smuzhiyun 		if (copy_to_user
891*4882a593Smuzhiyun 		    (ifr->ifr_data, &priv->param,
892*4882a593Smuzhiyun 		     sizeof(struct scc_param)))
893*4882a593Smuzhiyun 			return -EFAULT;
894*4882a593Smuzhiyun 		return 0;
895*4882a593Smuzhiyun 	case SIOCSSCCPARAM:
896*4882a593Smuzhiyun 		if (!capable(CAP_NET_ADMIN))
897*4882a593Smuzhiyun 			return -EPERM;
898*4882a593Smuzhiyun 		if (netif_running(dev))
899*4882a593Smuzhiyun 			return -EAGAIN;
900*4882a593Smuzhiyun 		if (copy_from_user
901*4882a593Smuzhiyun 		    (&priv->param, ifr->ifr_data,
902*4882a593Smuzhiyun 		     sizeof(struct scc_param)))
903*4882a593Smuzhiyun 			return -EFAULT;
904*4882a593Smuzhiyun 		return 0;
905*4882a593Smuzhiyun 	default:
906*4882a593Smuzhiyun 		return -EINVAL;
907*4882a593Smuzhiyun 	}
908*4882a593Smuzhiyun }
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 
/* ndo_start_xmit: queue an AX.25 frame for transmission.
 *
 * The first skb byte is skipped (presumably the KISS command byte —
 * the payload is copied starting at offset 1) into the next free TX
 * ring buffer.  If the transmitter is idle, RTS is asserted and the
 * TX-delay timer started; actual data transmission is driven from
 * the timer/ISR path. */
static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct scc_priv *priv = dev->ml_priv;
	unsigned long flags;
	int i;

	if (skb->protocol == htons(ETH_P_IP))
		return ax25_ip_xmit(skb);

	/* Temporarily stop the scheduler feeding us packets */
	netif_stop_queue(dev);

	/* Transfer data to DMA buffer */
	i = priv->tx_head;
	skb_copy_from_linear_data_offset(skb, 1, priv->tx_buf[i], skb->len - 1);
	priv->tx_len[i] = skb->len - 1;

	/* Clear interrupts while we touch our circular buffers */

	spin_lock_irqsave(&priv->ring_lock, flags);
	/* Move the ring buffer's head */
	priv->tx_head = (i + 1) % NUM_TX_BUF;
	priv->tx_count++;

	/* If we just filled up the last buffer, leave queue stopped.
	   The higher layers must wait until we have a DMA buffer
	   to accept the data. */
	if (priv->tx_count < NUM_TX_BUF)
		netif_wake_queue(dev);

	/* Set new TX state */
	if (priv->state == IDLE) {
		/* Assert RTS, start timer */
		priv->state = TX_HEAD;
		priv->tx_start = jiffies;
		write_scc(priv, R5, TxCRC_ENAB | RTS | TxENAB | Tx8);
		write_scc(priv, R15, 0);
		start_timer(priv, priv->param.txdelay, 0);
	}

	/* Turn interrupts back on and free buffer */
	spin_unlock_irqrestore(&priv->ring_lock, flags);
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 
scc_set_mac_address(struct net_device * dev,void * sa)959*4882a593Smuzhiyun static int scc_set_mac_address(struct net_device *dev, void *sa)
960*4882a593Smuzhiyun {
961*4882a593Smuzhiyun 	memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
962*4882a593Smuzhiyun 	       dev->addr_len);
963*4882a593Smuzhiyun 	return 0;
964*4882a593Smuzhiyun }
965*4882a593Smuzhiyun 
966*4882a593Smuzhiyun 
/* Start transmitting the frame at the TX ring tail.
 *
 * DMA mode: program the DMA controller with the buffer (minus the
 * first byte(s), which are written to the SCC by hand to kick off the
 * transfer — 3 bytes on the deeper-FIFO Z85230, 1 otherwise), enable
 * the TX underrun interrupt and the board's DREQ path, then enable
 * DMA.  Interrupt mode: enable TX interrupts and call tx_isr() to
 * load the first characters. */
static inline void tx_on(struct scc_priv *priv)
{
	int i, n;
	unsigned long flags;

	if (priv->param.dma >= 0) {
		/* Z85230 gets 3 bytes primed by hand, others 1 */
		n = (priv->chip == Z85230) ? 3 : 1;
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_WRITE);
		set_dma_addr(priv->param.dma,
			     (int) priv->tx_buf[priv->tx_tail] + n);
		set_dma_count(priv->param.dma,
			      priv->tx_len[priv->tx_tail] - n);
		release_dma_lock(flags);
		/* Enable TX underrun interrupt */
		write_scc(priv, R15, TxUIE);
		/* Configure DREQ */
		if (priv->type == TYPE_TWIN)
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_T1 : TWIN_DMA_HDX_T3,
			     priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1,
				  EXT_INT_ENAB | WT_FN_RDYFN |
				  WT_RDY_ENAB);
		/* Write first byte(s) — fast path: we hold register_lock
		   ourselves, so write_scc_data() must not retake it */
		spin_lock_irqsave(priv->register_lock, flags);
		for (i = 0; i < n; i++)
			write_scc_data(priv,
				       priv->tx_buf[priv->tx_tail][i], 1);
		enable_dma(priv->param.dma);
		spin_unlock_irqrestore(priv->register_lock, flags);
	} else {
		write_scc(priv, R15, TxUIE);
		write_scc(priv, R1,
			  EXT_INT_ENAB | WT_FN_RDYFN | TxINT_ENAB);
		tx_isr(priv);
	}
	/* Reset EOM latch if we do not have the AUTOEOM feature */
	if (priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);
}
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 
/* Enable the receiver.
 *
 * Flushes stale bytes from the RX FIFO, then either programs the DMA
 * controller to deliver into the ring buffer at rx_head (DMA mode) or
 * arms per-character RX interrupts (interrupt mode), and finally
 * enables the receiver with CRC checking. */
static inline void rx_on(struct scc_priv *priv)
{
	unsigned long flags;

	/* Clear RX FIFO */
	while (read_scc(priv, R0) & Rx_CH_AV)
		read_scc_data(priv);
	priv->rx_over = 0;
	if (priv->param.dma >= 0) {
		/* Program DMA controller */
		flags = claim_dma_lock();
		set_dma_mode(priv->param.dma, DMA_MODE_READ);
		set_dma_addr(priv->param.dma,
			     (int) priv->rx_buf[priv->rx_head]);
		set_dma_count(priv->param.dma, BUF_SIZE);
		release_dma_lock(flags);
		enable_dma(priv->param.dma);
		/* Configure PackeTwin DMA */
		if (priv->type == TYPE_TWIN) {
			outb((priv->param.dma ==
			      1) ? TWIN_DMA_HDX_R1 : TWIN_DMA_HDX_R3,
			     priv->card_base + TWIN_DMA_CFG);
		}
		/* Sp. cond. intr. only, ext int enable, RX DMA enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ERR_Rx |
			  WT_RDY_RT | WT_FN_RDYFN | WT_RDY_ENAB);
	} else {
		/* Reset current frame */
		priv->rx_ptr = 0;
		/* Intr. on all Rx characters and Sp. cond., ext int enable */
		write_scc(priv, R1, EXT_INT_ENAB | INT_ALL_Rx | WT_RDY_RT |
			  WT_FN_RDYFN);
	}
	write_scc(priv, R0, ERR_RES);
	write_scc(priv, R3, RxENABLE | Rx8 | RxCRC_ENAB);
}
1048*4882a593Smuzhiyun 
1049*4882a593Smuzhiyun 
/* Disable the receiver, its DREQ/interrupt source, and — if one is
 * in use — the DMA channel. */
static inline void rx_off(struct scc_priv *priv)
{
	/* Disable receiver */
	write_scc(priv, R3, Rx8);
	/* Disable DREQ / RX interrupt */
	if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
		outb(0, priv->card_base + TWIN_DMA_CFG);
	else
		write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
	/* Disable DMA */
	if (priv->param.dma >= 0)
		disable_dma(priv->param.dma);
}
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 
/* Arm the channel's hardware timer with count t.
 *
 * t == 0 runs the timer handler synchronously; t > 0 loads the
 * 16-bit counter (low byte first) and, on non-PackeTwin cards,
 * enables the CTS interrupt (the expiry is apparently signalled via
 * the CTS input — rr0 is primed so the transition is detected); a
 * negative t only rewrites the timer mode, i.e. sets no timeout. */
static void start_timer(struct scc_priv *priv, int t, int r15)
{
	outb(priv->tmr_mode, priv->tmr_ctrl);
	if (t == 0) {
		tm_isr(priv);
	} else if (t > 0) {
		outb(t & 0xFF, priv->tmr_cnt);
		outb((t >> 8) & 0xFF, priv->tmr_cnt);
		if (priv->type != TYPE_TWIN) {
			write_scc(priv, R15, r15 | CTSIE);
			priv->rr0 |= CTS;
		}
	}
}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 
random(void)1081*4882a593Smuzhiyun static inline unsigned char random(void)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	/* See "Numerical Recipes in C", second edition, p. 284 */
1084*4882a593Smuzhiyun 	rand = rand * 1664525L + 1013904223L;
1085*4882a593Smuzhiyun 	return (unsigned char) (rand >> 24);
1086*4882a593Smuzhiyun }
1087*4882a593Smuzhiyun 
z8530_isr(struct scc_info * info)1088*4882a593Smuzhiyun static inline void z8530_isr(struct scc_info *info)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun 	int is, i = 100;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	while ((is = read_scc(&info->priv[0], R3)) && i--) {
1093*4882a593Smuzhiyun 		if (is & CHARxIP) {
1094*4882a593Smuzhiyun 			rx_isr(&info->priv[0]);
1095*4882a593Smuzhiyun 		} else if (is & CHATxIP) {
1096*4882a593Smuzhiyun 			tx_isr(&info->priv[0]);
1097*4882a593Smuzhiyun 		} else if (is & CHAEXT) {
1098*4882a593Smuzhiyun 			es_isr(&info->priv[0]);
1099*4882a593Smuzhiyun 		} else if (is & CHBRxIP) {
1100*4882a593Smuzhiyun 			rx_isr(&info->priv[1]);
1101*4882a593Smuzhiyun 		} else if (is & CHBTxIP) {
1102*4882a593Smuzhiyun 			tx_isr(&info->priv[1]);
1103*4882a593Smuzhiyun 		} else {
1104*4882a593Smuzhiyun 			es_isr(&info->priv[1]);
1105*4882a593Smuzhiyun 		}
1106*4882a593Smuzhiyun 		write_scc(&info->priv[0], R0, RES_H_IUS);
1107*4882a593Smuzhiyun 		i++;
1108*4882a593Smuzhiyun 	}
1109*4882a593Smuzhiyun 	if (i < 0) {
1110*4882a593Smuzhiyun 		printk(KERN_ERR "dmascc: stuck in ISR with RR3=0x%02x.\n",
1111*4882a593Smuzhiyun 		       is);
1112*4882a593Smuzhiyun 	}
1113*4882a593Smuzhiyun 	/* Ok, no interrupts pending from this 8530. The INT line should
1114*4882a593Smuzhiyun 	   be inactive now. */
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 
/* Top-level interrupt handler, shared by both channels of a card.
 *
 * register_lock serializes the ISR against SCC register accesses
 * made outside interrupt context.  On the PackeTwin the board's own
 * interrupt register (read inverted — apparently active-low) is
 * polled and dispatched to the SCC or one of the two on-board
 * timers; other cards service the 8530 directly. */
static irqreturn_t scc_isr(int irq, void *dev_id)
{
	struct scc_info *info = dev_id;

	spin_lock(info->priv[0].register_lock);
	/* At this point interrupts are enabled, and the interrupt under service
	   is already acknowledged, but masked off.

	   Interrupt processing: We loop until we know that the IRQ line is
	   low. If another positive edge occurs afterwards during the ISR,
	   another interrupt will be triggered by the interrupt controller
	   as soon as the IRQ level is enabled again (see asm/irq.h).

	   Bottom-half handlers will be processed after scc_isr(). This is
	   important, since we only have small ringbuffers and want new data
	   to be fetched/delivered immediately. */

	if (info->priv[0].type == TYPE_TWIN) {
		int is, card_base = info->priv[0].card_base;
		while ((is = ~inb(card_base + TWIN_INT_REG)) &
		       TWIN_INT_MSK) {
			if (is & TWIN_SCC_MSK) {
				z8530_isr(info);
			} else if (is & TWIN_TMR1_MSK) {
				/* acknowledge timer 1, then handle it */
				inb(card_base + TWIN_CLR_TMR1);
				tm_isr(&info->priv[0]);
			} else {
				/* acknowledge timer 2, then handle it */
				inb(card_base + TWIN_CLR_TMR2);
				tm_isr(&info->priv[1]);
			}
		}
	} else
		z8530_isr(info);
	spin_unlock(info->priv[0].register_lock);
	return IRQ_HANDLED;
}
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun 
/* RX interrupt.
 *
 * In DMA mode the data itself is moved by the DMA controller, so only
 * the RR1 special conditions need handling here.  In interrupt mode
 * every available character is copied from the FIFO into the ring
 * buffer at rx_head; once the buffer is full, further bytes are
 * drained and discarded with rx_over set to 2 (frame too long). */
static void rx_isr(struct scc_priv *priv)
{
	if (priv->param.dma >= 0) {
		/* Check special condition and perform error reset. See 2.4.7.5. */
		special_condition(priv, read_scc(priv, R1));
		write_scc(priv, R0, ERR_RES);
	} else {
		/* Check special condition for each character. Error reset not necessary.
		   Same algorithm for SCC and ESCC. See 2.4.7.1 and 2.4.7.4. */
		int rc;
		while (read_scc(priv, R0) & Rx_CH_AV) {
			rc = read_scc(priv, R1);
			if (priv->rx_ptr < BUF_SIZE)
				priv->rx_buf[priv->rx_head][priv->
							    rx_ptr++] =
				    read_scc_data(priv);
			else {
				priv->rx_over = 2;
				read_scc_data(priv);
			}
			special_condition(priv, rc);
		}
	}
}
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 
/* Handle RR1 special conditions for the receiver: overrun and end of
 * frame.
 *
 * On end of frame, the byte count is derived from the DMA residue
 * (DMA mode) or rx_ptr (interrupt mode), minus 2 trailing bytes
 * (presumably the SDLC FCS).  Frames shorter than 15 bytes are
 * ignored; good frames are pushed into the RX ring and rx_bh() is
 * scheduled.  Finally the DMA controller or rx_ptr is reset for the
 * next frame. */
static void special_condition(struct scc_priv *priv, int rc)
{
	int cb;
	unsigned long flags;

	/* See Figure 2-15. Only overrun and EOF need to be checked. */

	if (rc & Rx_OVR) {
		/* Receiver overrun */
		priv->rx_over = 1;
		if (priv->param.dma < 0)
			write_scc(priv, R0, ERR_RES);
	} else if (rc & END_FR) {
		/* End of frame. Get byte count */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			cb = BUF_SIZE - get_dma_residue(priv->param.dma) -
			    2;
			release_dma_lock(flags);
		} else {
			cb = priv->rx_ptr - 2;
		}
		if (priv->rx_over) {
			/* We had an overrun */
			priv->dev->stats.rx_errors++;
			if (priv->rx_over == 2)
				priv->dev->stats.rx_length_errors++;
			else
				priv->dev->stats.rx_fifo_errors++;
			priv->rx_over = 0;
		} else if (rc & CRC_ERR) {
			/* Count invalid CRC only if packet length >= minimum */
			if (cb >= 15) {
				priv->dev->stats.rx_errors++;
				priv->dev->stats.rx_crc_errors++;
			}
		} else {
			if (cb >= 15) {
				/* keep one ring slot free as the landing
				   zone for the frame now being received */
				if (priv->rx_count < NUM_RX_BUF - 1) {
					/* Put good frame in FIFO */
					priv->rx_len[priv->rx_head] = cb;
					priv->rx_head =
					    (priv->rx_head +
					     1) % NUM_RX_BUF;
					priv->rx_count++;
					schedule_work(&priv->rx_work);
				} else {
					priv->dev->stats.rx_errors++;
					priv->dev->stats.rx_over_errors++;
				}
			}
		}
		/* Get ready for new frame */
		if (priv->param.dma >= 0) {
			flags = claim_dma_lock();
			set_dma_addr(priv->param.dma,
				     (int) priv->rx_buf[priv->rx_head]);
			set_dma_count(priv->param.dma, BUF_SIZE);
			release_dma_lock(flags);
		} else {
			priv->rx_ptr = 0;
		}
	}
}
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 
/* Workqueue bottom half for reception: deliver every completed frame
 * in the RX ring to the network stack.
 *
 * The ring lock is dropped while the (potentially slow) skb
 * allocation, copy and netif_rx() run; only the tail/count updates
 * are performed under the lock. */
static void rx_bh(struct work_struct *ugli_api)
{
	struct scc_priv *priv = container_of(ugli_api, struct scc_priv, rx_work);
	int i = priv->rx_tail;
	int cb;
	unsigned long flags;
	struct sk_buff *skb;
	unsigned char *data;

	spin_lock_irqsave(&priv->ring_lock, flags);
	while (priv->rx_count) {
		spin_unlock_irqrestore(&priv->ring_lock, flags);
		cb = priv->rx_len[i];
		/* Allocate buffer */
		skb = dev_alloc_skb(cb + 1);
		if (skb == NULL) {
			/* Drop packet */
			priv->dev->stats.rx_dropped++;
		} else {
			/* Fill buffer: zero prefix byte (presumably the
			   KISS command field), then the frame data */
			data = skb_put(skb, cb + 1);
			data[0] = 0;
			memcpy(&data[1], priv->rx_buf[i], cb);
			skb->protocol = ax25_type_trans(skb, priv->dev);
			netif_rx(skb);
			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += cb;
		}
		spin_lock_irqsave(&priv->ring_lock, flags);
		/* Move tail */
		priv->rx_tail = i = (i + 1) % NUM_RX_BUF;
		priv->rx_count--;
	}
	spin_unlock_irqrestore(&priv->ring_lock, flags);
}
1283*4882a593Smuzhiyun 
1284*4882a593Smuzhiyun 
/* TX interrupt (interrupt-driven mode): refill the SCC TX FIFO from
 * the ring entry at tx_tail for as long as the transmit-buffer-empty
 * bit is set, or suspend TX interrupts when the frame is done. */
static void tx_isr(struct scc_priv *priv)
{
	int i = priv->tx_tail, p = priv->tx_ptr;

	/* Suspend TX interrupts if we don't want to send anything.
	   See Figure 2-22. */
	if (p == priv->tx_len[i]) {
		write_scc(priv, R0, RES_Tx_P);
		return;
	}

	/* Write characters */
	while ((read_scc(priv, R0) & Tx_BUF_EMP) && p < priv->tx_len[i]) {
		write_scc_data(priv, priv->tx_buf[i][p++], 0);
	}

	/* Reset EOM latch of Z8530 (only after the very first byte(s)
	   of the frame have been written) */
	if (!priv->tx_ptr && p && priv->chip == Z8530)
		write_scc(priv, R0, RES_EOM_L);

	priv->tx_ptr = p;
}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 
/*
 * External/status interrupt handler.
 *
 * Handles the Z8530 external/status conditions this driver uses:
 * transmit underrun / end-of-message (end of a transmitted frame), DCD
 * transitions (carrier detect), and falling CTS edges (the timer tick
 * source on non-TWIN boards).
 */
static void es_isr(struct scc_priv *priv)
{
	int i, rr0, drr0, res;
	unsigned long flags;

	/* Read status, reset interrupt bit (open latches) */
	rr0 = read_scc(priv, R0);
	write_scc(priv, R0, RES_EXT_INT);
	/* drr0: status bits that changed since the previous ES interrupt */
	drr0 = priv->rr0 ^ rr0;
	priv->rr0 = rr0;

	/* Transmit underrun (2.4.9.6). We can't check the TxEOM flag, since
	   it might have already been cleared again by AUTOEOM. */
	if (priv->state == TX_DATA) {
		/* Get remaining bytes */
		i = priv->tx_tail;
		if (priv->param.dma >= 0) {
			/* DMA mode: the residue count is what was not sent */
			disable_dma(priv->param.dma);
			flags = claim_dma_lock();
			res = get_dma_residue(priv->param.dma);
			release_dma_lock(flags);
		} else {
			/* PIO mode: whatever tx_isr() had not written yet */
			res = priv->tx_len[i] - priv->tx_ptr;
			priv->tx_ptr = 0;
		}
		/* Disable DREQ / TX interrupt */
		if (priv->param.dma >= 0 && priv->type == TYPE_TWIN)
			outb(0, priv->card_base + TWIN_DMA_CFG);
		else
			write_scc(priv, R1, EXT_INT_ENAB | WT_FN_RDYFN);
		if (res) {
			/* Bytes were left over: a real underrun cut the
			   frame short.  Update packet statistics */
			priv->dev->stats.tx_errors++;
			priv->dev->stats.tx_fifo_errors++;
			/* Other underrun interrupts may already be waiting */
			write_scc(priv, R0, RES_EXT_INT);
			write_scc(priv, R0, RES_EXT_INT);
		} else {
			/* Frame completed normally.
			   Update packet statistics */
			priv->dev->stats.tx_packets++;
			priv->dev->stats.tx_bytes += priv->tx_len[i];
			/* Remove frame from FIFO */
			priv->tx_tail = (i + 1) % NUM_TX_BUF;
			priv->tx_count--;
			/* Inform upper layers */
			netif_wake_queue(priv->dev);
		}
		/* Switch state */
		write_scc(priv, R15, 0);
		if (priv->tx_count &&
		    (jiffies - priv->tx_start) < priv->param.txtimeout) {
			/* More frames queued and still inside the TX window:
			   pause, then transmit the next frame */
			priv->state = TX_PAUSE;
			start_timer(priv, priv->param.txpause, 0);
		} else {
			/* Queue empty (or TX window exhausted): send the
			   frame tail, then drop RTS */
			priv->state = TX_TAIL;
			start_timer(priv, priv->param.txtail, 0);
		}
	}

	/* DCD transition */
	if (drr0 & DCD) {
		if (rr0 & DCD) {
			/* Carrier appeared: debounce via the dcdon timer */
			switch (priv->state) {
			case IDLE:
			case WAIT:
				priv->state = DCD_ON;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdon, 0);
			}
		} else {
			/* Carrier vanished: debounce via the dcdoff timer */
			switch (priv->state) {
			case RX_ON:
				rx_off(priv);
				priv->state = DCD_OFF;
				write_scc(priv, R15, 0);
				start_timer(priv, priv->param.dcdoff, 0);
			}
		}
	}

	/* CTS transition (falling edge only): boards other than the TWIN
	   deliver their timer tick this way */
	if ((drr0 & CTS) && (~rr0 & CTS) && priv->type != TYPE_TWIN)
		tm_isr(priv);

}
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 
/*
 * Timer interrupt handler: advances the half-duplex channel-access
 * state machine.  Also called from es_isr() on a falling CTS edge for
 * non-TWIN boards.
 */
static void tm_isr(struct scc_priv *priv)
{
	switch (priv->state) {
	case TX_HEAD:
	case TX_PAUSE:
		/* TX delay / inter-frame pause elapsed: send frame data */
		tx_on(priv);
		priv->state = TX_DATA;
		break;
	case TX_TAIL:
		/* Frame tail sent: disable the transmitter and wait out
		   the RTS-off delay */
		write_scc(priv, R5, TxCRC_ENAB | Tx8);
		priv->state = RTS_OFF;
		if (priv->type != TYPE_TWIN)
			write_scc(priv, R15, 0);
		start_timer(priv, priv->param.rtsoff, 0);
		break;
	case RTS_OFF:
		/* RTS dropped: re-enable DCD interrupts and sample carrier */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			/* Carrier present right after our transmission:
			   count it as a collision and go to receive */
			priv->dev->stats.collisions++;
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			priv->state = WAIT;
			start_timer(priv, priv->param.waittime, DCDIE);
		}
		break;
	case WAIT:
		if (priv->tx_count) {
			/* Frames queued: raise RTS, enable the transmitter,
			   and start the TX delay */
			priv->state = TX_HEAD;
			priv->tx_start = jiffies;
			write_scc(priv, R5,
				  TxCRC_ENAB | RTS | TxENAB | Tx8);
			write_scc(priv, R15, 0);
			start_timer(priv, priv->param.txdelay, 0);
		} else {
			/* Nothing to send: go idle, watching DCD */
			priv->state = IDLE;
			if (priv->type != TYPE_TWIN)
				write_scc(priv, R15, DCDIE);
		}
		break;
	case DCD_ON:
	case DCD_OFF:
		/* DCD debounce interval elapsed: sample carrier again */
		write_scc(priv, R15, DCDIE);
		priv->rr0 = read_scc(priv, R0);
		if (priv->rr0 & DCD) {
			rx_on(priv);
			priv->state = RX_ON;
		} else {
			/* p-persistence back-off before the next TX attempt */
			priv->state = WAIT;
			start_timer(priv,
				    random() / priv->param.persist *
				    priv->param.slottime, DCDIE);
		}
		break;
	}
}
1453