// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	(c) Copyright 1998 Alan Cox <alan@lxorguk.ukuu.org.uk>
 *	(c) Copyright 2000, 2001 Red Hat Inc
 *
 *	Development of this driver was funded by Equiinet Ltd
 *			http://www.equiinet.com
 *
 *	ChangeLog:
 *
 *	Asynchronous mode dropped for 2.2. For 2.5 we will attempt the
 *	unification of all the Z85x30 asynchronous drivers for real.
 *
 *	DMA now uses get_free_page as kmalloc buffers may span a 64K
 *	boundary.
 *
 *	Modified for SMP safety and SMP locking by Alan Cox
 *	<alan@lxorguk.ukuu.org.uk>
 *
 *	Performance
 *
 *	Z85230:
 *	Without DMA you want a 486DX50 or better to do 64Kbit/s. 9600 baud
 *	X.25 is not unrealistic on all machines. DMA mode can in theory
 *	handle T1/E1 quite nicely. In practice the limit seems to be about
 *	512Kbit->1Mbit depending on the motherboard.
 *
 *	Z85C30:
 *	64K will take DMA, 9600 baud X.25 should be ok.
 *
 *	Z8530:
 *	Synchronous mode without DMA is unlikely to pass much above 2400 baud.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/hdlc.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <asm/dma.h>
#include <asm/io.h>
#define RT_LOCK
#define RT_UNLOCK
#include <linux/spinlock.h>

#include "z85230.h"

/**
 * z8530_read_port - Architecture specific interface function
 * @p: port to read
 *
 * Provided port access methods. The Comtrol SV11 requires no delays
 * between accesses and uses PC I/O. Some drivers may need a 5uS delay.
 *
 * In the longer term this should become an architecture specific
 * section so that this can become a generic driver interface for all
 * platforms. For now we only handle PC I/O ports with or without the
 * dreaded 5uS sanity delay.
 *
 * The caller must hold sufficient locks to avoid violating the horrible
 * 5uS delay rule.
 */

static inline int z8530_read_port(unsigned long p)
{
	u8 r=inb(Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)	/* gcc should figure this out efficiently ! */
		udelay(5);
	return r;
}
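
/*
 * A minimal usage sketch (not from the original source), assuming the
 * Z8530_PORT_OF()/Z8530_PORT_SLEEP encoding from z85230.h: the low bits
 * carry the I/O address and a flag bit requests the 5uS settle time.
 * The port value below is hypothetical.
 *
 *	unsigned long port = 0x238 | Z8530_PORT_SLEEP;
 *	u8 v = z8530_read_port(port);	- inb(0x238), then udelay(5)
 */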

/**
 * z8530_write_port - Architecture specific interface function
 * @p: port to write
 * @d: value to write
 *
 * Write a value to a port with delays if need be. Note that the
 * caller must hold locks to avoid read/writes from other contexts
 * violating the 5uS rule.
 *
 * In the longer term this should become an architecture specific
 * section so that this can become a generic driver interface for all
 * platforms. For now we only handle PC I/O ports with or without the
 * dreaded 5uS sanity delay.
 */

static inline void z8530_write_port(unsigned long p, u8 d)
{
	outb(d,Z8530_PORT_OF(p));
	if(p&Z8530_PORT_SLEEP)
		udelay(5);
}

static void z8530_rx_done(struct z8530_channel *c);
static void z8530_tx_done(struct z8530_channel *c);

/**
 * read_zsreg - Read a register from a Z85230
 * @c: Z8530 channel to read from (2 per chip)
 * @reg: Register to read
 * FIXME: Use a spinlock.
 *
 * Most of the Z8530 registers are indexed off the control registers.
 * A read is done by writing to the control register and reading the
 * register back. The caller must hold the lock.
 */

static inline u8 read_zsreg(struct z8530_channel *c, u8 reg)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	return z8530_read_port(c->ctrlio);
}

/**
 * read_zsdata - Read the data port of a Z8530 channel
 * @c: The Z8530 channel to read the data port from
 *
 * The data port provides fast access to some things. We still
 * have all the 5uS delays to worry about.
 */

static inline u8 read_zsdata(struct z8530_channel *c)
{
	u8 r;
	r=z8530_read_port(c->dataio);
	return r;
}

/**
 * write_zsreg - Write to a Z8530 channel register
 * @c: The Z8530 channel
 * @reg: Register number
 * @val: Value to write
 *
 * Write a value to an indexed register. The caller must hold the lock
 * to honour the irritating delay rules. We know about register 0
 * being fast to access.
 *
 * Assumes c->lock is held.
 */

static inline void write_zsreg(struct z8530_channel *c, u8 reg, u8 val)
{
	if(reg)
		z8530_write_port(c->ctrlio, reg);
	z8530_write_port(c->ctrlio, val);
}

/**
 * write_zsctrl - Write to a Z8530 control register
 * @c: The Z8530 channel
 * @val: Value to write
 *
 * Write directly to the control register on the Z8530
 */

static inline void write_zsctrl(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->ctrlio, val);
}

/**
 * write_zsdata - Write to a Z8530 data register
 * @c: The Z8530 channel
 * @val: Value to write
 *
 * Write directly to the data register on the Z8530
 */

static inline void write_zsdata(struct z8530_channel *c, u8 val)
{
	z8530_write_port(c->dataio, val);
}

/*
 *	Register loading parameters for a dead port
 */

u8 z8530_dead_port[]=
{
	255
};

EXPORT_SYMBOL(z8530_dead_port);

/*
 *	Register loading parameters for currently supported circuit types
 */

/*
 *	Data clocked by telco end. This is the correct data for the UK
 *	"kilostream" service, and most other similar services.
 */

u8 z8530_hdlc_kilostream[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE ?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream);
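
/*
 * Format sketch for these tables (an inference from the data above and
 * the z8530_channel_load() documentation below): flat (register, value)
 * byte pairs, terminated by 255, with registers 16-31 selecting the
 * "prime" shadow set. A hypothetical minimal table programming only WR4:
 *
 *	u8 example_load[] = { 4, SYNC_ENAB|SDLC|X1CLK, 255 };
 *	z8530_channel_load(&dev->chanA, example_load);
 */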

/*
 *	As above but for enhanced chips.
 */

u8 z8530_hdlc_kilostream_85230[]=
{
	4,	SYNC_ENAB|SDLC|X1CLK,
	2,	0,	/* No vector */
	1,	0,
	3,	ENT_HM|RxCRC_ENAB|Rx8,
	5,	TxCRC_ENAB|RTS|TxENAB|Tx8|DTR,
	9,	0,		/* Disable interrupts */
	6,	0xFF,
	7,	FLAG,
	10,	ABUNDER|NRZ|CRCPS,	/* MARKIDLE?? */
	11,	TCTRxCP,
	14,	DISDPLL,
	15,	DCDIE|SYNCIE|CTSIE|TxUIE|BRKIE,
	1,	EXT_INT_ENAB|TxINT_ENAB|INT_ALL_Rx,
	9,	NV|MIE|NORESET,
	23,	3,		/* Extended mode AUTO TX and EOM */

	255
};

EXPORT_SYMBOL(z8530_hdlc_kilostream_85230);

/**
 * z8530_flush_fifo - Flush on chip RX FIFO
 * @c: Channel to flush
 *
 * Flush the receive FIFO. There is no specific option for this, we
 * blindly read bytes and discard them. Reading when there is no data
 * is harmless. The 8530 has a 4 byte FIFO, the 85230 has 8 bytes.
 *
 * All locking is handled for the caller. On return data may still be
 * present if it arrived during the flush.
 */

static void z8530_flush_fifo(struct z8530_channel *c)
{
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	read_zsreg(c, R1);
	if(c->dev->type==Z85230)
	{
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
		read_zsreg(c, R1);
	}
}

/**
 * z8530_rtsdtr - Control the outgoing DTR/RTS lines
 * @c: The Z8530 channel to control
 * @set: 1 to set, 0 to clear
 *
 * Sets or clears DTR/RTS on the requested line. All locking is handled
 * by the caller. For now we assume all boards use the actual RTS/DTR
 * on the chip. Apparently one or two don't. We'll scream about them
 * later.
 */

static void z8530_rtsdtr(struct z8530_channel *c, int set)
{
	if (set)
		c->regs[5] |= (RTS | DTR);
	else
		c->regs[5] &= ~(RTS | DTR);
	write_zsreg(c, R5, c->regs[5]);
}

/**
 * z8530_rx - Handle a PIO receive event
 * @c: Z8530 channel to process
 *
 * Receive handler for receiving in PIO mode. This is much like the
 * async one but not quite the same or as complex.
 *
 * Note: It's intended that this handler can easily be separated from
 * the main code to run realtime. That'll be needed for some machines
 * (eg to ever clock 64kbits on a sparc ;)).
 *
 * The RT_LOCK macros don't do anything now. Keep the code covered
 * by them as short as possible in all circumstances - clocks cost
 * baud. The interrupt handler is assumed to be atomic w.r.t. other
 * code - this is true in the RT case too.
 *
 * We only cover the sync cases for this. If you want 2Mbit async
 * do it yourself but consider medical assistance first. This non DMA
 * synchronous mode is portable code. The DMA mode assumes PCI like
 * ISA DMA.
 *
 * Called with the device lock held.
 */

static void z8530_rx(struct z8530_channel *c)
{
	u8 ch,stat;

	while(1)
	{
		/* FIFO empty ? */
		if(!(read_zsreg(c, R0)&1))
			break;
		ch=read_zsdata(c);
		stat=read_zsreg(c, R1);

		/*
		 *	Overrun ?
		 */
		if(c->count < c->max)
		{
			*c->dptr++=ch;
			c->count++;
		}

		if(stat&END_FR)
		{
			/*
			 *	Error ?
			 */
			if(stat&(Rx_OVR|CRC_ERR))
			{
				/* Rewind the buffer and return */
				if(c->skb)
					c->dptr=c->skb->data;
				c->count=0;
				if(stat&Rx_OVR)
				{
					pr_warn("%s: overrun\n", c->dev->name);
					c->rx_overrun++;
				}
				if(stat&CRC_ERR)
				{
					c->rx_crc_err++;
					/* printk("crc error\n"); */
				}
				/* Shove the frame upstream */
			}
			else
			{
				/*
				 *	Drop the lock for RX processing, or
				 *	there are deadlocks
				 */
				z8530_rx_done(c);
				write_zsctrl(c, RES_Rx_CRC);
			}
		}
	}
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_tx - Handle a PIO transmit event
 * @c: Z8530 channel to process
 *
 * Z8530 transmit interrupt handler for the PIO mode. The basic
 * idea is to attempt to keep the FIFO fed. We fill as many bytes
 * in as possible; it's quite possible that we won't keep up with the
 * data rate otherwise.
 */

static void z8530_tx(struct z8530_channel *c)
{
	while(c->txcount) {
		/* FIFO full ? */
		if(!(read_zsreg(c, R0)&4))
			return;
		c->txcount--;
		/*
		 *	Shovel out the byte
		 */
		write_zsreg(c, R8, *c->tx_ptr++);
		write_zsctrl(c, RES_H_IUS);
		/* We are about to underflow */
		if(c->txcount==0)
		{
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
		}
	}

	/*
	 *	End of frame TX - fire another one
	 */

	write_zsctrl(c, RES_Tx_P);

	z8530_tx_done(c);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_status - Handle a PIO status exception
 * @chan: Z8530 channel to process
 *
 * A status event occurred in PIO synchronous mode. There are several
 * reasons the chip will bother us here. A transmit underrun means we
 * failed to feed the chip fast enough and just broke a packet. A DCD
 * change is a line up or down.
 */

static void z8530_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status = read_zsreg(chan, R0);
	altered = chan->status ^ status;

	chan->status = status;

	if (status & TxEOM) {
		/* printk("%s: Tx underrun.\n", chan->dev->name); */
		chan->netdevice->stats.tx_fifo_errors++;
		write_zsctrl(chan, ERR_RES);
		z8530_tx_done(chan);
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_sync = {
	.rx = z8530_rx,
	.tx = z8530_tx,
	.status = z8530_status,
};

EXPORT_SYMBOL(z8530_sync);

/**
 * z8530_dma_rx - Handle a DMA RX event
 * @chan: Channel to handle
 *
 * Non bus mastering DMA interfaces for the Z8x30 devices. This
 * is really pretty PC specific. The DMA mode means that most receive
 * events are handled by the DMA hardware. We get a kick here only if
 * a frame ended.
 */

static void z8530_dma_rx(struct z8530_channel *chan)
{
	if(chan->rxdma_on)
	{
		/* Special condition check only */
		u8 status;

		read_zsreg(chan, R7);
		read_zsreg(chan, R6);

		status=read_zsreg(chan, R1);

		if(status&END_FR)
		{
			z8530_rx_done(chan);	/* Fire up the next one */
		}
		write_zsctrl(chan, ERR_RES);
		write_zsctrl(chan, RES_H_IUS);
	}
	else
	{
		/* DMA is off right now, drain the slow way */
		z8530_rx(chan);
	}
}

/**
 * z8530_dma_tx - Handle a DMA TX event
 * @chan: The Z8530 channel to handle
 *
 * We have received an interrupt while doing DMA transmissions. It
 * shouldn't happen. Scream loudly if it does.
 */

static void z8530_dma_tx(struct z8530_channel *chan)
{
	if(!chan->dma_tx)
	{
		pr_warn("Hey who turned the DMA off?\n");
		z8530_tx(chan);
		return;
	}
	/* This shouldn't occur in DMA mode */
	pr_err("DMA tx - bogus event!\n");
	z8530_tx(chan);
}

/**
 * z8530_dma_status - Handle a DMA status exception
 * @chan: Z8530 channel to process
 *
 * A status event occurred on the Z8530. We receive these for two reasons
 * when in DMA mode. Firstly if we finished a packet transfer we get one
 * and kick the next packet out. Secondly we may see a DCD change.
 */

static void z8530_dma_status(struct z8530_channel *chan)
{
	u8 status, altered;

	status=read_zsreg(chan, R0);
	altered=chan->status^status;

	chan->status=status;

	if(chan->dma_tx)
	{
		if(status&TxEOM)
		{
			unsigned long flags;

			flags=claim_dma_lock();
			disable_dma(chan->txdma);
			clear_dma_ff(chan->txdma);
			chan->txdma_on=0;
			release_dma_lock(flags);
			z8530_tx_done(chan);
		}
	}

	if (altered & chan->dcdcheck)
	{
		if (status & chan->dcdcheck) {
			pr_info("%s: DCD raised\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] | RxENABLE);
			if (chan->netdevice)
				netif_carrier_on(chan->netdevice);
		} else {
			pr_info("%s: DCD lost\n", chan->dev->name);
			write_zsreg(chan, R3, chan->regs[3] & ~RxENABLE);
			z8530_flush_fifo(chan);
			if (chan->netdevice)
				netif_carrier_off(chan->netdevice);
		}
	}

	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

static struct z8530_irqhandler z8530_dma_sync = {
	.rx = z8530_dma_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

static struct z8530_irqhandler z8530_txdma_sync = {
	.rx = z8530_rx,
	.tx = z8530_dma_tx,
	.status = z8530_dma_status,
};

/**
 * z8530_rx_clear - Handle RX events from a stopped chip
 * @c: Z8530 channel to shut up
 *
 * Receive interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_rx_clear(struct z8530_channel *c)
{
	/*
	 *	Data and status bytes
	 */
	u8 stat;

	read_zsdata(c);
	stat=read_zsreg(c, R1);

	if(stat&END_FR)
		write_zsctrl(c, RES_Rx_CRC);
	/*
	 *	Clear irq
	 */
	write_zsctrl(c, ERR_RES);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_tx_clear - Handle TX events from a stopped chip
 * @c: Z8530 channel to shut up
 *
 * Transmit interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_tx_clear(struct z8530_channel *c)
{
	write_zsctrl(c, RES_Tx_P);
	write_zsctrl(c, RES_H_IUS);
}

/**
 * z8530_status_clear - Handle status events from a stopped chip
 * @chan: Z8530 channel to shut up
 *
 * Status interrupt vectors for a Z8530 that is in 'parked' mode.
 * For machines with PCI Z85x30 cards, or level triggered interrupts
 * (eg the MacII) we must clear the interrupt cause or die.
 */

static void z8530_status_clear(struct z8530_channel *chan)
{
	u8 status=read_zsreg(chan, R0);
	if(status&TxEOM)
		write_zsctrl(chan, ERR_RES);
	write_zsctrl(chan, RES_EXT_INT);
	write_zsctrl(chan, RES_H_IUS);
}

struct z8530_irqhandler z8530_nop = {
	.rx = z8530_rx_clear,
	.tx = z8530_tx_clear,
	.status = z8530_status_clear,
};

EXPORT_SYMBOL(z8530_nop);

/**
 * z8530_interrupt - Handle an interrupt from a Z8530
 * @irq: Interrupt number
 * @dev_id: The Z8530 device that is interrupting.
 *
 * A Z85[2]30 device has stuck its hand in the air for attention.
 * We scan both the channels on the chip for events and then call
 * the channel specific call backs for each channel that has events.
 * We have to use callback functions because the two channels can be
 * in different modes.
 *
 * Locking is done for the handlers. Note that locking is done
 * at the chip level (the 5uS delay issue is per chip not per
 * channel). c->lock for both channels points to dev->lock
 */

irqreturn_t z8530_interrupt(int irq, void *dev_id)
{
	struct z8530_dev *dev=dev_id;
	u8 intr;
	static volatile int locker=0;
	int work=0;
	struct z8530_irqhandler *irqs;

	if(locker)
	{
		pr_err("IRQ re-enter\n");
		return IRQ_NONE;
	}
	locker=1;

	spin_lock(&dev->lock);

	while(++work<5000)
	{
		/* This register holds the IRQ status. On the 8530 you must
		   read it from channel A even though it applies to the
		   whole chip */
		intr = read_zsreg(&dev->chanA, R3);
		if(!(intr & (CHARxIP|CHATxIP|CHAEXT|CHBRxIP|CHBTxIP|CHBEXT)))
			break;

		/* Now walk the chip and see what it is wanting - it may be
		   an IRQ for someone else remember */

		irqs=dev->chanA.irqs;

		if(intr & (CHARxIP|CHATxIP|CHAEXT))
		{
			if(intr&CHARxIP)
				irqs->rx(&dev->chanA);
			if(intr&CHATxIP)
				irqs->tx(&dev->chanA);
			if(intr&CHAEXT)
				irqs->status(&dev->chanA);
		}

		irqs=dev->chanB.irqs;

		if(intr & (CHBRxIP|CHBTxIP|CHBEXT))
		{
			if(intr&CHBRxIP)
				irqs->rx(&dev->chanB);
			if(intr&CHBTxIP)
				irqs->tx(&dev->chanB);
			if(intr&CHBEXT)
				irqs->status(&dev->chanB);
		}
	}
	spin_unlock(&dev->lock);
	if(work==5000)
		pr_err("%s: interrupt jammed - abort(0x%X)!\n",
		       dev->name, intr);
	/* Ok all done */
	locker=0;
	return IRQ_HANDLED;
}

EXPORT_SYMBOL(z8530_interrupt);
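
/*
 * Usage sketch (not part of this file): a card driver wires the handler
 * up at probe time with dev_id pointing at its struct z8530_dev. The
 * flags and name below are illustrative assumptions.
 *
 *	if (request_irq(dev->irq, z8530_interrupt, 0, "z85230", dev))
 *		goto irq_fail;
 */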

static const u8 reg_init[16]=
{
	0,0,0,0,
	0,0,0,0,
	0,0,0,0,
	0x55,0,0,0
};

/**
 * z8530_sync_open - Open a Z8530 channel for PIO
 * @dev: The network interface we are using
 * @c: The Z8530 channel to open in synchronous PIO mode
 *
 * Switch a Z8530 into synchronous mode without DMA assist. We
 * raise the RTS/DTR and commence network operation.
 */

int z8530_sync_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;
	c->irqs = &z8530_sync;

	/* This loads the double buffer up */
	z8530_rx_done(c);	/* Load the frame ring */
	z8530_rx_done(c);	/* Load the backup frame */
	z8530_rtsdtr(c,1);
	c->dma_tx = 0;
	c->regs[R1]|=TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_open);
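
/*
 * Sketch of a card driver's ndo_open path using this call (assumed
 * ordering, error handling elided):
 *
 *	err = z8530_sync_open(netdev, &dev->chanA);
 *	if (!err)
 *		err = z8530_channel_load(&dev->chanA, z8530_hdlc_kilostream);
 *	if (!err)
 *		netif_start_queue(netdev);
 */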

/**
 * z8530_sync_close - Close a PIO Z8530 channel
 * @dev: Network device to close
 * @c: Z8530 channel to disassociate and move to idle
 *
 * Close down a Z8530 interface and switch its interrupt handlers
 * to discard future events.
 */

int z8530_sync_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);
	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_close);

/**
 * z8530_sync_dma_open - Open a Z8530 for DMA I/O
 * @dev: The network device to attach
 * @c: The Z8530 channel to configure in sync DMA mode.
 *
 * Set up a Z85x30 device for synchronous DMA in both directions. Two
 * ISA DMA channels must be available for this to work. We assume ISA
 * DMA driven I/O and PC limits on access.
 */

int z8530_sync_dma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on WAN links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->rx_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->rx_buf[0]==NULL)
		return -ENOBUFS;
	c->rx_buf[1]=c->rx_buf[0]+PAGE_SIZE/2;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
		return -ENOBUFS;
	}
	c->tx_dma_buf[1]=c->tx_dma_buf[0]+PAGE_SIZE/2;

	c->tx_dma_used=0;
	c->dma_tx = 1;
	c->dma_num=0;
	c->dma_ready=1;

	/*
	 *	Enable DMA control mode
	 */

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	TX DMA via DIR/REQ
	 */

	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	RX DMA via W/Req
	 */

	c->regs[R1]|= WT_FN_RDYFN;
	c->regs[R1]|= WT_RDY_RT;
	c->regs[R1]|= INT_ERR_Rx;
	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]|= WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	DMA interrupts
	 */

	/*
	 *	Set up the DMA configuration
	 */

	dflags=claim_dma_lock();

	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);
	set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
	set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[0]));
	set_dma_count(c->rxdma, c->mtu);
	enable_dma(c->rxdma);

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 1;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_dma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_open);
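
/*
 * Worked example of the flip buffer limit in z8530_sync_dma_open():
 * each direction gets two PAGE_SIZE/2 halves of a single page, so with
 * a (typical, assumed) 4096 byte page the largest usable c->mtu, i.e.
 * dev->mtu + 64 bytes of slack, is 2048 - hence the -EMSGSIZE check.
 */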

/**
 * z8530_sync_dma_close - Close down DMA I/O
 * @dev: Network device to detach
 * @c: Z8530 channel to move into discard mode
 *
 * Shut down a DMA mode synchronous interface. Halt the DMA, and
 * free the buffers.
 */

int z8530_sync_dma_close(struct net_device *dev, struct z8530_channel *c)
{
	u8 chk;
	unsigned long flags;

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	flags=claim_dma_lock();
	disable_dma(c->rxdma);
	clear_dma_ff(c->rxdma);

	c->rxdma_on = 0;

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	release_dma_lock(flags);

	c->txdma_on = 0;
	c->tx_dma_used = 0;

	spin_lock_irqsave(c->lock, flags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->rx_buf[0])
	{
		free_page((unsigned long)c->rx_buf[0]);
		c->rx_buf[0]=NULL;
	}
	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, flags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_dma_close);

/**
 * z8530_sync_txdma_open - Open a Z8530 for TX driven DMA
 * @dev: The network device to attach
 * @c: The Z8530 channel to configure in sync DMA mode.
 *
 * Set up a Z85x30 device for synchronous DMA transmission. One
 * ISA DMA channel must be available for this to work. The receive
 * side is run in PIO mode, but then it has the bigger FIFO.
 */

int z8530_sync_txdma_open(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long cflags, dflags;

	pr_debug("Opening sync interface for TX-DMA\n");
	c->sync = 1;
	c->mtu = dev->mtu+64;
	c->count = 0;
	c->skb = NULL;
	c->skb2 = NULL;

	/*
	 *	Allocate the DMA flip buffers. Limit by page size.
	 *	Everyone runs 1500 mtu or less on WAN links so this
	 *	should be fine.
	 */

	if(c->mtu > PAGE_SIZE/2)
		return -EMSGSIZE;

	c->tx_dma_buf[0]=(void *)get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if(c->tx_dma_buf[0]==NULL)
		return -ENOBUFS;

	c->tx_dma_buf[1] = c->tx_dma_buf[0] + PAGE_SIZE/2;

	spin_lock_irqsave(c->lock, cflags);

	/*
	 *	Load the PIO receive ring
	 */

	z8530_rx_done(c);
	z8530_rx_done(c);

	/*
	 *	Load the DMA interfaces up
	 */

	c->rxdma_on = 0;
	c->txdma_on = 0;

	c->tx_dma_used=0;
	c->dma_num=0;
	c->dma_ready=1;
	c->dma_tx = 1;

	/*
	 *	Enable DMA control mode
	 */

	/*
	 *	TX DMA via DIR/REQ
	 */
	c->regs[R14]|= DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	c->regs[R1]&= ~TxINT_ENAB;
	write_zsreg(c, R1, c->regs[R1]);

	/*
	 *	Set up the DMA configuration
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	set_dma_mode(c->txdma, DMA_MODE_WRITE);
	disable_dma(c->txdma);

	release_dma_lock(dflags);

	/*
	 *	Select the DMA interrupt handlers
	 */

	c->rxdma_on = 0;
	c->txdma_on = 1;
	c->tx_dma_used = 1;

	c->irqs = &z8530_txdma_sync;
	z8530_rtsdtr(c,1);
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);
	spin_unlock_irqrestore(c->lock, cflags);

	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_open);

/**
 * z8530_sync_txdma_close - Close down a TX driven DMA channel
 * @dev: Network device to detach
 * @c: Z8530 channel to move into discard mode
 *
 * Shut down a DMA/PIO split mode synchronous interface. Halt the DMA,
 * and free the buffers.
 */

int z8530_sync_txdma_close(struct net_device *dev, struct z8530_channel *c)
{
	unsigned long dflags, cflags;
	u8 chk;

	spin_lock_irqsave(c->lock, cflags);

	c->irqs = &z8530_nop;
	c->max = 0;
	c->sync = 0;

	/*
	 *	Disable the PC DMA channels
	 */

	dflags = claim_dma_lock();

	disable_dma(c->txdma);
	clear_dma_ff(c->txdma);
	c->txdma_on = 0;
	c->tx_dma_used = 0;

	release_dma_lock(dflags);

	/*
	 *	Disable DMA control mode
	 */

	c->regs[R1]&= ~WT_RDY_ENAB;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R1]&= ~(WT_RDY_RT|WT_FN_RDYFN|INT_ERR_Rx);
	c->regs[R1]|= INT_ALL_Rx;
	write_zsreg(c, R1, c->regs[R1]);
	c->regs[R14]&= ~DTRREQ;
	write_zsreg(c, R14, c->regs[R14]);

	if(c->tx_dma_buf[0])
	{
		free_page((unsigned long)c->tx_dma_buf[0]);
		c->tx_dma_buf[0]=NULL;
	}
	chk=read_zsreg(c,R0);
	write_zsreg(c, R3, c->regs[R3]);
	z8530_rtsdtr(c,0);

	spin_unlock_irqrestore(c->lock, cflags);
	return 0;
}

EXPORT_SYMBOL(z8530_sync_txdma_close);

/*
 *	Name strings for Z8530 chips. SGI claim to have a 130, Zilog deny
 *	it exists...
 */

static const char *z8530_type_name[]={
	"Z8530",
	"Z85C30",
	"Z85230"
};

/**
 * z8530_describe - Uniformly describe a Z8530 port
 * @dev: Z8530 device to describe
 * @mapping: string holding mapping type (eg "I/O" or "Mem")
 * @io: the port value in question
 *
 * Describe a Z8530 in a standard format. We must pass the I/O as
 * the port offset isn't predictable. The main reason for this function
 * is to try and get a common format of report.
 */

void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io)
{
	pr_info("%s: %s found at %s 0x%lX, IRQ %d\n",
		dev->name,
		z8530_type_name[dev->type],
		mapping,
		Z8530_PORT_OF(io),
		dev->irq);
}

EXPORT_SYMBOL(z8530_describe);
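
/*
 * Illustrative call and output (values are made up):
 *
 *	z8530_describe(dev, "I/O", iobase);
 *
 * logs something like "hostess0: Z85230 found at I/O 0x200, IRQ 5"
 * through pr_info, with the chip name taken from z8530_type_name[].
 */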
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun /*
1237*4882a593Smuzhiyun * Locked operation part of the z8530 init code
1238*4882a593Smuzhiyun */
1239*4882a593Smuzhiyun
do_z8530_init(struct z8530_dev * dev)1240*4882a593Smuzhiyun static inline int do_z8530_init(struct z8530_dev *dev)
1241*4882a593Smuzhiyun {
1242*4882a593Smuzhiyun /* NOP the interrupt handlers first - we might get a
1243*4882a593Smuzhiyun floating IRQ transition when we reset the chip */
1244*4882a593Smuzhiyun dev->chanA.irqs=&z8530_nop;
1245*4882a593Smuzhiyun dev->chanB.irqs=&z8530_nop;
1246*4882a593Smuzhiyun dev->chanA.dcdcheck=DCD;
1247*4882a593Smuzhiyun dev->chanB.dcdcheck=DCD;
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /* Reset the chip */
1250*4882a593Smuzhiyun write_zsreg(&dev->chanA, R9, 0xC0);
1251*4882a593Smuzhiyun udelay(200);
1252*4882a593Smuzhiyun /* Now check its valid */
1253*4882a593Smuzhiyun write_zsreg(&dev->chanA, R12, 0xAA);
1254*4882a593Smuzhiyun if(read_zsreg(&dev->chanA, R12)!=0xAA)
1255*4882a593Smuzhiyun return -ENODEV;
1256*4882a593Smuzhiyun write_zsreg(&dev->chanA, R12, 0x55);
1257*4882a593Smuzhiyun if(read_zsreg(&dev->chanA, R12)!=0x55)
1258*4882a593Smuzhiyun return -ENODEV;
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun dev->type=Z8530;
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun /*
1263*4882a593Smuzhiyun * See the application note.
1264*4882a593Smuzhiyun */
1265*4882a593Smuzhiyun
1266*4882a593Smuzhiyun write_zsreg(&dev->chanA, R15, 0x01);
1267*4882a593Smuzhiyun
1268*4882a593Smuzhiyun /*
1269*4882a593Smuzhiyun * If we can set the low bit of R15 then
1270*4882a593Smuzhiyun * the chip is enhanced.
1271*4882a593Smuzhiyun */
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun if(read_zsreg(&dev->chanA, R15)==0x01)
1274*4882a593Smuzhiyun {
1275*4882a593Smuzhiyun /* This C30 versus 230 detect is from Klaus Kudielka's dmascc */
1276*4882a593Smuzhiyun /* Put a char in the fifo */
1277*4882a593Smuzhiyun write_zsreg(&dev->chanA, R8, 0);
1278*4882a593Smuzhiyun if(read_zsreg(&dev->chanA, R0)&Tx_BUF_EMP)
1279*4882a593Smuzhiyun dev->type = Z85230; /* Has a FIFO */
1280*4882a593Smuzhiyun else
1281*4882a593Smuzhiyun dev->type = Z85C30; /* Z85C30, 1 byte FIFO */
1282*4882a593Smuzhiyun }
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun /*
1285*4882a593Smuzhiyun * The code assumes R7' and friends are
1286*4882a593Smuzhiyun * off. Use write_zsext() for these and keep
1287*4882a593Smuzhiyun * this bit clear.
1288*4882a593Smuzhiyun */
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun write_zsreg(&dev->chanA, R15, 0);
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /*
1293*4882a593Smuzhiyun * At this point it looks like the chip is behaving
1294*4882a593Smuzhiyun */
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun memcpy(dev->chanA.regs, reg_init, 16);
1297*4882a593Smuzhiyun memcpy(dev->chanB.regs, reg_init ,16);
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun return 0;
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun
/**
 *	z8530_init - Initialise a Z8530 device
 *	@dev: Z8530 device to initialise.
 *
 *	Configure up a Z8530/Z85C30 or Z85230 chip. We check the device
 *	is present, identify the type and then program it to hopefully
 *	keep quiet and behave. This matters a lot, a Z8530 in the wrong
 *	state will sometimes get into stupid modes generating 10kHz
 *	interrupt streams and the like.
 *
 *	We set the interrupt handler up to discard any events, in case
 *	we get them during reset or setup.
 *
 *	Return 0 for success, or a negative value indicating the problem
 *	in errno form.
 */

int z8530_init(struct z8530_dev *dev)
{
	unsigned long flags;
	int ret;

	/* Set up the chip level lock */
	spin_lock_init(&dev->lock);
	dev->chanA.lock = &dev->lock;
	dev->chanB.lock = &dev->lock;

	spin_lock_irqsave(&dev->lock, flags);
	ret = do_z8530_init(dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	return ret;
}

EXPORT_SYMBOL(z8530_init);
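
/*
 *	A minimal sketch of how a board driver might bring a chip up and
 *	down with z8530_init()/z8530_shutdown(). The port assignments and
 *	IRQ below are illustrative placeholders, not values from any real
 *	card, and example_dev/example_probe are hypothetical names.
 */
#if 0
static struct z8530_dev example_dev;

static int example_probe(void)
{
	example_dev.chanA.ctrlio = 0x200;	/* placeholder I/O ports */
	example_dev.chanA.dataio = 0x201;
	example_dev.chanB.ctrlio = 0x202;
	example_dev.chanB.dataio = 0x203;
	example_dev.irq = 5;

	if (z8530_init(&example_dev))
		return -ENODEV;	/* no responding chip at that address */

	/* ... request_irq(), attach netdevices, load channel tables ... */
	return 0;
}

static void example_remove(void)
{
	z8530_shutdown(&example_dev);	/* quiesce and reset the chip */
}
#endif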

/**
 *	z8530_shutdown - Shutdown a Z8530 device
 *	@dev: The Z8530 chip to shutdown
 *
 *	We set the interrupt handlers to silence any interrupts. We then
 *	reset the chip and wait 100uS to be sure the reset has completed,
 *	just in case the caller immediately tries to touch the chip again.
 *
 *	This is called without the lock held.
 */

int z8530_shutdown(struct z8530_dev *dev)
{
	unsigned long flags;
	/* Reset the chip */

	spin_lock_irqsave(&dev->lock, flags);
	dev->chanA.irqs=&z8530_nop;
	dev->chanB.irqs=&z8530_nop;
	write_zsreg(&dev->chanA, R9, 0xC0);
	/* We must hold the lock across the udelay, the chip is off-limits here */
	udelay(100);
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_shutdown);

/**
 *	z8530_channel_load - Load channel data
 *	@c: Z8530 channel to configure
 *	@rtable: table of register, value pairs
 *	FIXME: ioctl to allow user uploaded tables
 *
 *	Load a Z8530 channel up from the system data. We use +16 to
 *	indicate the "prime" registers. The value 255 terminates the
 *	table.
 */

int z8530_channel_load(struct z8530_channel *c, u8 *rtable)
{
	unsigned long flags;

	spin_lock_irqsave(c->lock, flags);

	while(*rtable!=255)
	{
		int reg=*rtable++;
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]|1);
		write_zsreg(c, reg&0x0F, *rtable);
		if(reg>0x0F)
			write_zsreg(c, R15, c->regs[15]&~1);
		c->regs[reg]=*rtable++;
	}
	c->rx_function=z8530_null_rx;
	c->skb=NULL;
	c->tx_skb=NULL;
	c->tx_next_skb=NULL;
	c->mtu=1500;
	c->max=0;
	c->count=0;
	c->status=read_zsreg(c, R0);
	c->sync=1;
	write_zsreg(c, R3, c->regs[R3]|RxENABLE);

	spin_unlock_irqrestore(c->lock, flags);
	return 0;
}

EXPORT_SYMBOL(z8530_channel_load);
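
/*
 *	A sketch of the table format consumed above: flat register/value
 *	pairs, 16 added to a register index to select its "prime" shadow,
 *	and 255 as the terminator. The values are placeholders chosen for
 *	illustration, not a validated line configuration.
 */
#if 0
static u8 example_rtable[] = {
	R4,	0x20,	/* placeholder mode bits for WR4 */
	R10,	0x80,	/* placeholder encoding bits for WR10 */
	16 + R7, 0x7E,	/* R7' - the prime select bit is handled for us */
	255		/* end of table */
};

static void example_load(struct z8530_dev *dev)
{
	z8530_channel_load(&dev->chanA, example_rtable);
}
#endif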

/**
 *	z8530_tx_begin - Begin packet transmission
 *	@c: The Z8530 channel to kick
 *
 *	This is the speed sensitive side of transmission. If we are called
 *	and no buffer is being transmitted we commence the next buffer. If
 *	nothing is queued we idle the sync.
 *
 *	Note: this code path runs in interrupt context; keep it fast or
 *	bad things will happen.
 *
 *	Called with the lock held.
 */

static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check whether the DMA completed cleanly.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;

		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
			 */

			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}
		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}

/**
 *	z8530_tx_done - TX complete callback
 *	@c: The channel that completed a transmit.
 *
 *	This is called when we complete a packet send. We wake the queue,
 *	start the next packet going and then free the buffer of the
 *	transmitted packet. This code is fairly timing sensitive.
 *
 *	Called with the register lock held.
 */

static void z8530_tx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;

	/* Actually this can happen. */
	if (c->tx_skb == NULL)
		return;

	skb = c->tx_skb;
	c->tx_skb = NULL;
	z8530_tx_begin(c);
	c->netdevice->stats.tx_packets++;
	c->netdevice->stats.tx_bytes += skb->len;
	dev_consume_skb_irq(skb);
}

/**
 *	z8530_null_rx - Discard a packet
 *	@c: The channel the packet arrived on
 *	@skb: The buffer
 *
 *	We point the receive handler at this function when idle. Instead
 *	of processing the frames we get to throw them away.
 */

void z8530_null_rx(struct z8530_channel *c, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

EXPORT_SYMBOL(z8530_null_rx);
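
/*
 *	A sketch of the rx_function a board driver installs in place of
 *	z8530_null_rx() once the interface is up, modelled on how existing
 *	users of this core hand frames to generic HDLC. The CRC trim and
 *	the example_input name are illustrative, not defined by this file.
 */
#if 0
static void example_input(struct z8530_channel *c, struct sk_buff *skb)
{
	/* Drop the trailing 2 byte checksum the SCC leaves on the frame */
	skb_trim(skb, skb->len - 2);
	skb->protocol = hdlc_type_trans(skb, c->netdevice);
	skb_reset_mac_header(skb);
	skb->dev = c->netdevice;
	netif_rx(skb);
}

/* ...and at open time: c->rx_function = example_input; */
#endif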

/**
 *	z8530_rx_done - Receive completion callback
 *	@c: The channel that completed a receive
 *
 *	A new packet is complete. Our goal here is to get back into receive
 *	mode as fast as possible. On the Z85230 we could change to using
 *	ESCC mode, but on the older chips we have no choice. We flip to the
 *	new buffer immediately in DMA mode so that the DMA of the next
 *	frame can occur while we are copying the previous buffer to an
 *	sk_buff.
 *
 *	Called with the lock held.
 */

static void z8530_rx_done(struct z8530_channel *c)
{
	struct sk_buff *skb;
	int ct;

	/*
	 *	Is our receive engine in DMA mode?
	 */

	if(c->rxdma_on)
	{
		/*
		 *	Save the ready state and the buffer currently
		 *	being used as the DMA target
		 */

		int ready=c->dma_ready;
		unsigned char *rxb=c->rx_buf[c->dma_num];
		unsigned long flags;

		/*
		 *	Complete this DMA. Necessary to find the length
		 */

		flags=claim_dma_lock();

		disable_dma(c->rxdma);
		clear_dma_ff(c->rxdma);
		c->rxdma_on=0;
		ct=c->mtu-get_dma_residue(c->rxdma);
		if(ct<0)
			ct=2;	/* Shouldn't happen, but clamp it sanely */
		c->dma_ready=0;

		/*
		 *	Normal case: the other slot is free, start the next DMA
		 *	into it immediately.
		 */

		if(ready)
		{
			c->dma_num^=1;
			set_dma_mode(c->rxdma, DMA_MODE_READ|0x10);
			set_dma_addr(c->rxdma, virt_to_bus(c->rx_buf[c->dma_num]));
			set_dma_count(c->rxdma, c->mtu);
			c->rxdma_on = 1;
			enable_dma(c->rxdma);
			/* Stop any frames that we missed the head of
			   from passing */
			write_zsreg(c, R0, RES_Rx_CRC);
		}
		else
			/* Can't occur as we don't re-enable the DMA irq
			   until after the flip is done */
			netdev_warn(c->netdevice, "DMA flip overrun!\n");

		release_dma_lock(flags);

		/*
		 *	Shove the old buffer into an sk_buff. We can't DMA
		 *	directly into one on a PC - it might be above the 16Mb
		 *	boundary. Optimisation - we could check to see if we
		 *	can avoid the copy. Optimisation 2 - make the memcpy
		 *	a copychecksum.
		 */

		skb = dev_alloc_skb(ct);
		if (skb == NULL) {
			c->netdevice->stats.rx_dropped++;
			netdev_warn(c->netdevice, "Memory squeeze\n");
		} else {
			skb_put(skb, ct);
			skb_copy_to_linear_data(skb, rxb, ct);
			c->netdevice->stats.rx_packets++;
			c->netdevice->stats.rx_bytes += ct;
		}
		c->dma_ready = 1;
	} else {
		RT_LOCK;
		skb = c->skb;

		/*
		 *	The game we play for non DMA is similar. We want to
		 *	get the controller set up for the next packet as fast
		 *	as possible. We potentially only have one byte + the
		 *	fifo length for this. Thus we want to flip to the new
		 *	buffer and then mess around copying and allocating
		 *	things. For the current case it doesn't matter but
		 *	if you build a system where the sync irq isn't blocked
		 *	by the kernel IRQ disable then you need only block the
		 *	sync IRQ for the RT_LOCK area.
		 */
		ct=c->count;

		c->skb = c->skb2;
		c->count = 0;
		c->max = c->mtu;
		if (c->skb) {
			c->dptr = c->skb->data;
			c->max = c->mtu;
		} else {
			c->count = 0;
			c->max = 0;
		}
		RT_UNLOCK;

		c->skb2 = dev_alloc_skb(c->mtu);
		if (c->skb2 == NULL)
			netdev_warn(c->netdevice, "memory squeeze\n");
		else
			skb_put(c->skb2, c->mtu);
		c->netdevice->stats.rx_packets++;
		c->netdevice->stats.rx_bytes += ct;
	}
	/*
	 *	If we received a frame we must now process it.
	 */
	if (skb) {
		skb_trim(skb, ct);
		c->rx_function(c, skb);
	} else {
		c->netdevice->stats.rx_dropped++;
		netdev_err(c->netdevice, "Lost a frame\n");
	}
}

/**
 *	spans_boundary - Check a packet can be ISA DMA'd
 *	@skb: The buffer to check
 *
 *	Returns true if the buffer crosses a DMA boundary on a PC. The poor
 *	thing can only DMA within a 64K block, not across its edges.
 */

static inline int spans_boundary(struct sk_buff *skb)
{
	unsigned long a=(unsigned long)skb->data;
	a^=(a+skb->len);
	if(a&0x00010000)	/* If the 64K bit differs, we cross a boundary */
		return 1;
	return 0;
}
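
/*
 *	Worked example of the XOR test above: a buffer at 0xFFF0 of length
 *	0x20 ends at 0x10010, and 0xFFF0 ^ 0x10010 = 0x1FFE0, which has bit
 *	16 set - the buffer straddles a 64K edge. A buffer at 0x8000 of the
 *	same length gives 0x8000 ^ 0x8020 = 0x0020, bit 16 clear - safe.
 */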

/**
 *	z8530_queue_xmit - Queue a packet
 *	@c: The channel to use
 *	@skb: The packet to kick down the channel
 *
 *	Queue a packet for transmission. Because the per-packet interrupt
 *	latencies of the Z85230 are hard to meet even in DMA mode, we do
 *	the flip to the DMA bounce buffer, if one is needed, here rather
 *	than in the IRQ handler.
 *
 *	Called from the network code. The lock is not held at this
 *	point.
 */

netdev_tx_t z8530_queue_xmit(struct z8530_channel *c, struct sk_buff *skb)
{
	unsigned long flags;

	netif_stop_queue(c->netdevice);
	if(c->tx_next_skb)
		return NETDEV_TX_BUSY;

	/* PC SPECIFIC - DMA limits */

	/*
	 *	If we will DMA the transmit and it's gone over the ISA bus
	 *	limit, then copy to the flip buffer
	 */

	if(c->dma_tx && ((unsigned long)(virt_to_bus(skb->data+skb->len))>=16*1024*1024 || spans_boundary(skb)))
	{
		/*
		 *	Send the flip buffer, and flip the flippy bit.
		 *	We don't care which is used when just so long as
		 *	we never use the same buffer twice in a row. Since
		 *	only one buffer can be going out at a time the other
		 *	has to be safe.
		 */
		c->tx_next_ptr=c->tx_dma_buf[c->tx_dma_used];
		c->tx_dma_used^=1;	/* Flip temp buffer */
		skb_copy_from_linear_data(skb, c->tx_next_ptr, skb->len);
	}
	else
		c->tx_next_ptr=skb->data;
	RT_LOCK;
	c->tx_next_skb=skb;
	RT_UNLOCK;

	spin_lock_irqsave(c->lock, flags);
	z8530_tx_begin(c);
	spin_unlock_irqrestore(c->lock, flags);

	return NETDEV_TX_OK;
}

EXPORT_SYMBOL(z8530_queue_xmit);
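
/*
 *	A sketch of how a board driver's ndo_start_xmit hands packets to
 *	this core. The netdev_priv() layout is a hypothetical example; real
 *	users keep the channel pointer in their own private structure.
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct z8530_channel *chan = netdev_priv(dev);	/* hypothetical */

	return z8530_queue_xmit(chan, skb);
}
#endif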

/*
 *	Module support
 */
static const char banner[] __initconst =
	KERN_INFO "Generic Z85C30/Z85230 interface driver v0.02\n";

static int __init z85230_init_driver(void)
{
	printk(banner);
	return 0;
}
module_init(z85230_init_driver);

static void __exit z85230_cleanup_driver(void)
{
}
module_exit(z85230_cleanup_driver);

MODULE_AUTHOR("Red Hat Inc.");
MODULE_DESCRIPTION("Z85x30 synchronous driver core");
MODULE_LICENSE("GPL");