/* ppa.c -- low level driver for the IOMEGA PPA3
 * parallel port SCSI host adapter.
 *
 * (The PPA3 is the embedded controller in the ZIP drive.)
 *
 * (c) 1995,1996 Grant R. Guenther, grant@torque.net,
 * under the terms of the GNU General Public License.
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/parport.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>


static void ppa_reset_pulse(unsigned int base);

typedef struct {
	struct pardevice *dev;	/* Parport device entry */
	int base;		/* Actual port address */
	int mode;		/* Transfer mode */
	struct scsi_cmnd *cur_cmd;	/* Current queued command */
	struct delayed_work ppa_tq;	/* Polling interrupt stuff */
	unsigned long jstart;	/* Jiffies at start */
	unsigned long recon_tmo;	/* How many usecs to wait for reconnection (6th bit) */
	unsigned int failed:1;	/* Failure flag */
	unsigned wanted:1;	/* Parport sharing busy flag */
	unsigned int dev_no;	/* Device number */
	wait_queue_head_t *waiting;
	struct Scsi_Host *host;
	struct list_head list;
} ppa_struct;

#include "ppa.h"

static inline ppa_struct *ppa_dev(struct Scsi_Host *host)
{
	return *(ppa_struct **)&host->hostdata;
}

static DEFINE_SPINLOCK(arbitration_lock);

static void got_it(ppa_struct *dev)
{
	dev->base = dev->dev->port->base;
	if (dev->cur_cmd)
		dev->cur_cmd->SCp.phase = 1;
	else
		wake_up(dev->waiting);
}

static void ppa_wakeup(void *ref)
{
	ppa_struct *dev = (ppa_struct *) ref;
	unsigned long flags;

	spin_lock_irqsave(&arbitration_lock, flags);
	if (dev->wanted) {
		parport_claim(dev->dev);
		got_it(dev);
		dev->wanted = 0;
	}
	spin_unlock_irqrestore(&arbitration_lock, flags);
	return;
}

static int ppa_pb_claim(ppa_struct *dev)
{
	unsigned long flags;
	int res = 1;
	spin_lock_irqsave(&arbitration_lock, flags);
	if (parport_claim(dev->dev) == 0) {
		got_it(dev);
		res = 0;
	}
	dev->wanted = res;
	spin_unlock_irqrestore(&arbitration_lock, flags);
	return res;
}

static void ppa_pb_dismiss(ppa_struct *dev)
{
	unsigned long flags;
	int wanted;
	spin_lock_irqsave(&arbitration_lock, flags);
	wanted = dev->wanted;
	dev->wanted = 0;
	spin_unlock_irqrestore(&arbitration_lock, flags);
	if (!wanted)
		parport_release(dev->dev);
}

static inline void ppa_pb_release(ppa_struct *dev)
{
	parport_release(dev->dev);
}

/*
 * Start of Chipset kludges
 */

/* This is to give the ppa driver a way to modify the timings (and other
 * parameters) by writing to the /proc/scsi/ppa/0 file.
 * Very simple method really... (Too simple, no error checking :( )
 * Reason: Kernel hackers HATE having to unload and reload modules for
 * testing...
 * Also gives a method to use a script to obtain optimum timings (TODO)
 */
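/* For illustration only (the exact host number depends on the system): the
 * transfer mode or reconnection timeout can be tweaked at run time with
 * something like
 *	echo "mode=3"        > /proc/scsi/ppa/0
 *	echo "recon_tmo=500" > /proc/scsi/ppa/0
 * where the strings are parsed by ppa_write_info() below.
 */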

static inline int ppa_write_info(struct Scsi_Host *host, char *buffer, int length)
{
	ppa_struct *dev = ppa_dev(host);
	unsigned long x;

	if ((length > 5) && (strncmp(buffer, "mode=", 5) == 0)) {
		x = simple_strtoul(buffer + 5, NULL, 0);
		dev->mode = x;
		return length;
	}
	if ((length > 10) && (strncmp(buffer, "recon_tmo=", 10) == 0)) {
		x = simple_strtoul(buffer + 10, NULL, 0);
		dev->recon_tmo = x;
		printk(KERN_INFO "ppa: recon_tmo set to %ld\n", x);
		return length;
	}
	printk(KERN_WARNING "ppa /proc: invalid variable\n");
	return -EINVAL;
}

static int ppa_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	ppa_struct *dev = ppa_dev(host);

	seq_printf(m, "Version : %s\n", PPA_VERSION);
	seq_printf(m, "Parport : %s\n", dev->dev->port->name);
	seq_printf(m, "Mode    : %s\n", PPA_MODE_STRING[dev->mode]);
#if PPA_DEBUG > 0
	seq_printf(m, "recon_tmo : %lu\n", dev->recon_tmo);
#endif
	return 0;
}

static int device_check(ppa_struct *dev);

#if PPA_DEBUG > 0
#define ppa_fail(x,y) printk("ppa: ppa_fail(%i) from %s at line %d\n",\
	   y, __func__, __LINE__); ppa_fail_func(x,y);
static inline void ppa_fail_func(ppa_struct *dev, int error_code)
#else
static inline void ppa_fail(ppa_struct *dev, int error_code)
#endif
{
	/* If we fail a device then we trash status / message bytes */
	if (dev->cur_cmd) {
		dev->cur_cmd->result = error_code << 16;
		dev->failed = 1;
	}
}

/*
 * Wait for the high bit to be set.
 *
 * In principle, this could be tied to an interrupt, but the adapter
 * doesn't appear to be designed to support interrupts.  We spin on
 * the 0x80 ready bit.
 */
static unsigned char ppa_wait(ppa_struct *dev)
{
	int k;
	unsigned short ppb = dev->base;
	unsigned char r;

	k = PPA_SPIN_TMO;
	/* Wait for bit 6 and 7 - PJC */
	for (r = r_str(ppb); ((r & 0xc0) != 0xc0) && (k); k--) {
		udelay(1);
		r = r_str(ppb);
	}

	/*
	 * return some status information.
	 * Semantics: 0xc0 = ZIP wants more data
	 *            0xd0 = ZIP wants to send more data
	 *            0xe0 = ZIP is expecting SCSI command data
	 *            0xf0 = end of transfer, ZIP is sending status
	 */
	if (k)
		return (r & 0xf0);

	/* Counter expired - Time out occurred */
	ppa_fail(dev, DID_TIME_OUT);
	printk(KERN_WARNING "ppa timeout in ppa_wait\n");
	return 0;		/* command timed out */
}

/*
 * Clear EPP Timeout Bit
 */
static inline void epp_reset(unsigned short ppb)
{
	int i;

	i = r_str(ppb);
	w_str(ppb, i);
	w_str(ppb, i & 0xfe);
}

/*
 * Wait for empty ECP fifo (if we are in ECP fifo mode only)
 */
static inline void ecp_sync(ppa_struct *dev)
{
	int i, ppb_hi = dev->dev->port->base_hi;

	if (ppb_hi == 0)
		return;

	if ((r_ecr(ppb_hi) & 0xe0) == 0x60) {	/* mode 011 == ECP fifo mode */
		for (i = 0; i < 100; i++) {
			if (r_ecr(ppb_hi) & 0x01)
				return;
			udelay(5);
		}
		printk(KERN_WARNING "ppa: ECP sync failed as data still present in FIFO.\n");
	}
}

static int ppa_byte_out(unsigned short base, const char *buffer, int len)
{
	int i;

	for (i = len; i; i--) {
		w_dtr(base, *buffer++);
		w_ctr(base, 0xe);
		w_ctr(base, 0xc);
	}
	return 1;		/* All went well - we hope! */
}

static int ppa_byte_in(unsigned short base, char *buffer, int len)
{
	int i;

	for (i = len; i; i--) {
		*buffer++ = r_dtr(base);
		w_ctr(base, 0x27);
		w_ctr(base, 0x25);
	}
	return 1;		/* All went well - we hope! */
}

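/*
 * Nibble mode: each byte is assembled from two 4-bit reads of the parallel
 * port status lines (high nibble first, then low nibble).
 */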
static int ppa_nibble_in(unsigned short base, char *buffer, int len)
{
	for (; len; len--) {
		unsigned char h;

		w_ctr(base, 0x4);
		h = r_str(base) & 0xf0;
		w_ctr(base, 0x6);
		*buffer++ = h | ((r_str(base) & 0xf0) >> 4);
	}
	return 1;		/* All went well - we hope! */
}

static int ppa_out(ppa_struct *dev, char *buffer, int len)
{
	int r;
	unsigned short ppb = dev->base;

	r = ppa_wait(dev);

	if ((r & 0x50) != 0x40) {
		ppa_fail(dev, DID_ERROR);
		return 0;
	}
	switch (dev->mode) {
	case PPA_NIBBLE:
	case PPA_PS2:
		/* 8 bit output, with a loop */
		r = ppa_byte_out(ppb, buffer, len);
		break;

	case PPA_EPP_32:
	case PPA_EPP_16:
	case PPA_EPP_8:
		epp_reset(ppb);
		w_ctr(ppb, 0x4);
#ifdef CONFIG_SCSI_IZIP_EPP16
		if (!(((long) buffer | len) & 0x01))
			outsw(ppb + 4, buffer, len >> 1);
#else
		if (!(((long) buffer | len) & 0x03))
			outsl(ppb + 4, buffer, len >> 2);
#endif
		else
			outsb(ppb + 4, buffer, len);
		w_ctr(ppb, 0xc);
		r = !(r_str(ppb) & 0x01);
		w_ctr(ppb, 0xc);
		ecp_sync(dev);
		break;

	default:
		printk(KERN_ERR "PPA: bug in ppa_out()\n");
		r = 0;
	}
	return r;
}

static int ppa_in(ppa_struct *dev, char *buffer, int len)
{
	int r;
	unsigned short ppb = dev->base;

	r = ppa_wait(dev);

	if ((r & 0x50) != 0x50) {
		ppa_fail(dev, DID_ERROR);
		return 0;
	}
	switch (dev->mode) {
	case PPA_NIBBLE:
		/* 4 bit input, with a loop */
		r = ppa_nibble_in(ppb, buffer, len);
		w_ctr(ppb, 0xc);
		break;

	case PPA_PS2:
		/* 8 bit input, with a loop */
		w_ctr(ppb, 0x25);
		r = ppa_byte_in(ppb, buffer, len);
		w_ctr(ppb, 0x4);
		w_ctr(ppb, 0xc);
		break;

	case PPA_EPP_32:
	case PPA_EPP_16:
	case PPA_EPP_8:
		epp_reset(ppb);
		w_ctr(ppb, 0x24);
#ifdef CONFIG_SCSI_IZIP_EPP16
		if (!(((long) buffer | len) & 0x01))
			insw(ppb + 4, buffer, len >> 1);
#else
		if (!(((long) buffer | len) & 0x03))
			insl(ppb + 4, buffer, len >> 2);
#endif
		else
			insb(ppb + 4, buffer, len);
		w_ctr(ppb, 0x2c);
		r = !(r_str(ppb) & 0x01);
		w_ctr(ppb, 0x2c);
		ecp_sync(dev);
		break;

	default:
		printk(KERN_ERR "PPA: bug in ppa_in()\n");
		r = 0;
		break;
	}
	return r;
}

/* end of ppa_io.h */
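/*
 * The connect/disconnect sequences below clock the magic bytes 0x00, 0x3c,
 * 0x20 and a final command byte onto the data lines, each framed by short
 * pulses on the control lines, to switch the PPA3 on and off the bus.
 */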
static inline void ppa_d_pulse(unsigned short ppb, unsigned char b)
{
	w_dtr(ppb, b);
	w_ctr(ppb, 0xc);
	w_ctr(ppb, 0xe);
	w_ctr(ppb, 0xc);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0xc);
}

static void ppa_disconnect(ppa_struct *dev)
{
	unsigned short ppb = dev->base;

	ppa_d_pulse(ppb, 0);
	ppa_d_pulse(ppb, 0x3c);
	ppa_d_pulse(ppb, 0x20);
	ppa_d_pulse(ppb, 0xf);
}

static inline void ppa_c_pulse(unsigned short ppb, unsigned char b)
{
	w_dtr(ppb, b);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0x6);
	w_ctr(ppb, 0x4);
	w_ctr(ppb, 0xc);
}

static inline void ppa_connect(ppa_struct *dev, int flag)
{
	unsigned short ppb = dev->base;

	ppa_c_pulse(ppb, 0);
	ppa_c_pulse(ppb, 0x3c);
	ppa_c_pulse(ppb, 0x20);
	if ((flag == CONNECT_EPP_MAYBE) && IN_EPP_MODE(dev->mode))
		ppa_c_pulse(ppb, 0xcf);
	else
		ppa_c_pulse(ppb, 0x8f);
}

static int ppa_select(ppa_struct *dev, int target)
{
	int k;
	unsigned short ppb = dev->base;

	/*
	 * Bit 6 (0x40) is the device selected bit.
	 * First we must wait till the current device goes off line...
	 */
	k = PPA_SELECT_TMO;
	do {
		k--;
		udelay(1);
	} while ((r_str(ppb) & 0x40) && (k));
	if (!k)
		return 0;

	w_dtr(ppb, (1 << target));
	w_ctr(ppb, 0xe);
	w_ctr(ppb, 0xc);
	w_dtr(ppb, 0x80);	/* This is NOT the initiator */
	w_ctr(ppb, 0x8);

	k = PPA_SELECT_TMO;
	do {
		k--;
		udelay(1);
	}
	while (!(r_str(ppb) & 0x40) && (k));
	if (!k)
		return 0;

	return 1;
}

/*
 * This is based on a trace of what the Iomega DOS 'guest' driver does.
 * I've tried several different kinds of parallel ports with guest and
 * coded this to react in the same ways that it does.
 *
 * The return value from this function is just a hint about where the
 * handshaking failed.
 *
 */
static int ppa_init(ppa_struct *dev)
{
	int retv;
	unsigned short ppb = dev->base;

	ppa_disconnect(dev);
	ppa_connect(dev, CONNECT_NORMAL);

	retv = 2;		/* Failed */

	w_ctr(ppb, 0xe);
	if ((r_str(ppb) & 0x08) == 0x08)
		retv--;

	w_ctr(ppb, 0xc);
	if ((r_str(ppb) & 0x08) == 0x00)
		retv--;

	if (!retv)
		ppa_reset_pulse(ppb);
	udelay(1000);		/* Allow devices to settle down */
	ppa_disconnect(dev);
	udelay(1000);		/* Another delay to allow devices to settle */

	if (retv)
		return -EIO;

	return device_check(dev);
}

static inline int ppa_send_command(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);
	int k;

	w_ctr(dev->base, 0x0c);

	for (k = 0; k < cmd->cmd_len; k++)
		if (!ppa_out(dev, &cmd->cmnd[k], 1))
			return 0;
	return 1;
}

/*
 * The bulk flag enables some optimisations in the data transfer loops,
 * it should be true for any command that transfers data in integral
 * numbers of sectors.
 *
 * The driver appears to remain stable if we speed up the parallel port
 * i/o in this function, but not elsewhere.
 */
static int ppa_completion(struct scsi_cmnd *cmd)
{
	/* Return codes:
	 * -1     Error
	 *  0     Told to schedule
	 *  1     Finished data transfer
	 */
	ppa_struct *dev = ppa_dev(cmd->device->host);
	unsigned short ppb = dev->base;
	unsigned long start_jiffies = jiffies;

	unsigned char r, v;
	int fast, bulk, status;

	v = cmd->cmnd[0];
	bulk = ((v == READ_6) ||
		(v == READ_10) || (v == WRITE_6) || (v == WRITE_10));

	/*
	 * We only get here if the drive is ready to communicate,
	 * hence no need for a full ppa_wait.
	 */
	r = (r_str(ppb) & 0xf0);

	while (r != (unsigned char) 0xf0) {
		/*
		 * If we have been running for more than a full timer tick
		 * then take a rest.
		 */
		if (time_after(jiffies, start_jiffies + 1))
			return 0;

		if ((cmd->SCp.this_residual <= 0)) {
			ppa_fail(dev, DID_ERROR);
			return -1;	/* ERROR_RETURN */
		}

		/* On some hardware we have SCSI disconnected (6th bit low)
		 * for about 100usecs.  It is too expensive to wait a
		 * tick on every loop, so we busy wait for no more than
		 * 500usecs to give the drive a chance first.  We do not
		 * change things for "normal" hardware since generally
		 * the 6th bit is always high.
		 * This makes the CPU load higher on some hardware
		 * but otherwise we cannot get more than 50K/sec
		 * on this problem hardware.
		 */
		if ((r & 0xc0) != 0xc0) {
			/* Wait for reconnection should be no more than
			 * jiffy/2 = 5ms = 5000 loops
			 */
			unsigned long k = dev->recon_tmo;
			for (; k && ((r = (r_str(ppb) & 0xf0)) & 0xc0) != 0xc0;
			     k--)
				udelay(1);

			if (!k)
				return 0;
		}

		/* determine if we should use burst I/O */
		fast = (bulk && (cmd->SCp.this_residual >= PPA_BURST_SIZE))
		    ? PPA_BURST_SIZE : 1;

		if (r == (unsigned char) 0xc0)
			status = ppa_out(dev, cmd->SCp.ptr, fast);
		else
			status = ppa_in(dev, cmd->SCp.ptr, fast);

		cmd->SCp.ptr += fast;
		cmd->SCp.this_residual -= fast;

		if (!status) {
			ppa_fail(dev, DID_BUS_BUSY);
			return -1;	/* ERROR_RETURN */
		}
		if (cmd->SCp.buffer && !cmd->SCp.this_residual) {
			/* if scatter/gather, advance to the next segment */
			if (cmd->SCp.buffers_residual--) {
				cmd->SCp.buffer = sg_next(cmd->SCp.buffer);
				cmd->SCp.this_residual =
				    cmd->SCp.buffer->length;
				cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
			}
		}
		/* Now check to see if the drive is ready to communicate */
		r = (r_str(ppb) & 0xf0);
		/* If not, drop back down to the scheduler and wait a timer tick */
		if (!(r & 0x80))
			return 0;
	}
	return 1;		/* FINISH_RETURN */
}

/*
 * Since the PPA itself doesn't generate interrupts, we use
 * the scheduler's task queue to generate a stream of call-backs and
 * complete the request when the drive is ready.
 */
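/*
 * ppa_interrupt() is the delayed-work handler: while ppa_engine() reports
 * that the command is still in progress it re-queues itself one jiffy later;
 * once the command finishes it releases the port and completes the scsi_cmnd.
 */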
static void ppa_interrupt(struct work_struct *work)
{
	ppa_struct *dev = container_of(work, ppa_struct, ppa_tq.work);
	struct scsi_cmnd *cmd = dev->cur_cmd;

	if (!cmd) {
		printk(KERN_ERR "PPA: bug in ppa_interrupt\n");
		return;
	}
	if (ppa_engine(dev, cmd)) {
		schedule_delayed_work(&dev->ppa_tq, 1);
		return;
	}
	/* Command must have completed, hence it is safe to let go... */
#if PPA_DEBUG > 0
	switch ((cmd->result >> 16) & 0xff) {
	case DID_OK:
		break;
	case DID_NO_CONNECT:
		printk(KERN_DEBUG "ppa: no device at SCSI ID %i\n", cmd->device->target);
		break;
	case DID_BUS_BUSY:
		printk(KERN_DEBUG "ppa: BUS BUSY - EPP timeout detected\n");
		break;
	case DID_TIME_OUT:
		printk(KERN_DEBUG "ppa: unknown timeout\n");
		break;
	case DID_ABORT:
		printk(KERN_DEBUG "ppa: told to abort\n");
		break;
	case DID_PARITY:
		printk(KERN_DEBUG "ppa: parity error (???)\n");
		break;
	case DID_ERROR:
		printk(KERN_DEBUG "ppa: internal driver error\n");
		break;
	case DID_RESET:
		printk(KERN_DEBUG "ppa: told to reset device\n");
		break;
	case DID_BAD_INTR:
		printk(KERN_WARNING "ppa: bad interrupt (???)\n");
		break;
	default:
		printk(KERN_WARNING "ppa: bad return code (%02x)\n",
		       (cmd->result >> 16) & 0xff);
	}
#endif

	if (cmd->SCp.phase > 1)
		ppa_disconnect(dev);

	ppa_pb_dismiss(dev);

	dev->cur_cmd = NULL;

	cmd->scsi_done(cmd);
}

static int ppa_engine(ppa_struct *dev, struct scsi_cmnd *cmd)
{
	unsigned short ppb = dev->base;
	unsigned char l = 0, h = 0;
	int retv;

	/* First check for any errors that may have occurred
	 * Here we check for internal errors
	 */
	if (dev->failed)
		return 0;

	switch (cmd->SCp.phase) {
	case 0:		/* Phase 0 - Waiting for parport */
		if (time_after(jiffies, dev->jstart + HZ)) {
			/*
			 * We waited more than a second
			 * for parport to call us
			 */
			ppa_fail(dev, DID_BUS_BUSY);
			return 0;
		}
		return 1;	/* wait until ppa_wakeup claims parport */
	case 1:		/* Phase 1 - Connected */
		{	/* Perform a sanity check for cable unplugged */
			int retv = 2;	/* Failed */

			ppa_connect(dev, CONNECT_EPP_MAYBE);

			w_ctr(ppb, 0xe);
			if ((r_str(ppb) & 0x08) == 0x08)
				retv--;

			w_ctr(ppb, 0xc);
			if ((r_str(ppb) & 0x08) == 0x00)
				retv--;

			if (retv) {
				if (time_after(jiffies, dev->jstart + (1 * HZ))) {
					printk(KERN_ERR "ppa: Parallel port cable is unplugged.\n");
					ppa_fail(dev, DID_BUS_BUSY);
					return 0;
				} else {
					ppa_disconnect(dev);
					return 1;	/* Try again in a jiffy */
				}
			}
			cmd->SCp.phase++;
		}
		fallthrough;

	case 2:		/* Phase 2 - We are now talking to the scsi bus */
		if (!ppa_select(dev, scmd_id(cmd))) {
			ppa_fail(dev, DID_NO_CONNECT);
			return 0;
		}
		cmd->SCp.phase++;
		fallthrough;

	case 3:		/* Phase 3 - Ready to accept a command */
		w_ctr(ppb, 0x0c);
		if (!(r_str(ppb) & 0x80))
			return 1;

		if (!ppa_send_command(cmd))
			return 0;
		cmd->SCp.phase++;
		fallthrough;

	case 4:		/* Phase 4 - Setup scatter/gather buffers */
		if (scsi_bufflen(cmd)) {
			cmd->SCp.buffer = scsi_sglist(cmd);
			cmd->SCp.this_residual = cmd->SCp.buffer->length;
			cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
		} else {
			cmd->SCp.buffer = NULL;
			cmd->SCp.this_residual = 0;
			cmd->SCp.ptr = NULL;
		}
		cmd->SCp.buffers_residual = scsi_sg_count(cmd) - 1;
		cmd->SCp.phase++;
		fallthrough;

	case 5:		/* Phase 5 - Data transfer stage */
		w_ctr(ppb, 0x0c);
		if (!(r_str(ppb) & 0x80))
			return 1;

		retv = ppa_completion(cmd);
		if (retv == -1)
			return 0;
		if (retv == 0)
			return 1;
		cmd->SCp.phase++;
		fallthrough;

	case 6:		/* Phase 6 - Read status/message */
		cmd->result = DID_OK << 16;
		/* Check for data overrun */
		if (ppa_wait(dev) != (unsigned char) 0xf0) {
			ppa_fail(dev, DID_ERROR);
			return 0;
		}
		if (ppa_in(dev, &l, 1)) {	/* read status byte */
			/* Check for optional message byte */
			if (ppa_wait(dev) == (unsigned char) 0xf0)
				ppa_in(dev, &h, 1);
			cmd->result =
			    (DID_OK << 16) + (h << 8) + (l & STATUS_MASK);
		}
		return 0;	/* Finished */

	default:
		printk(KERN_ERR "ppa: Invalid scsi phase\n");
	}
	return 0;
}

static int ppa_queuecommand_lck(struct scsi_cmnd *cmd,
				void (*done) (struct scsi_cmnd *))
{
	ppa_struct *dev = ppa_dev(cmd->device->host);

	if (dev->cur_cmd) {
		printk(KERN_ERR "PPA: bug in ppa_queuecommand\n");
		return 0;
	}
	dev->failed = 0;
	dev->jstart = jiffies;
	dev->cur_cmd = cmd;
	cmd->scsi_done = done;
	cmd->result = DID_ERROR << 16;	/* default return code */
	cmd->SCp.phase = 0;	/* bus free */

	schedule_delayed_work(&dev->ppa_tq, 0);

	ppa_pb_claim(dev);

	return 0;
}

static DEF_SCSI_QCMD(ppa_queuecommand)

/*
 * Apparently the disk->capacity attribute is off by 1 sector
 * for all disk drives.  We add the one here, but it should really
 * be done in sd.c.  Even if it gets fixed there, this will still
 * work.
 */
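/*
 * The geometry reported below is a convention, not a hardware fact: 64 heads
 * and 32 sectors per track, falling back to 255 heads and 63 sectors when
 * that mapping would need more than 1024 cylinders.
 */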
static int ppa_biosparam(struct scsi_device *sdev, struct block_device *dev,
			 sector_t capacity, int ip[])
{
	ip[0] = 0x40;
	ip[1] = 0x20;
	ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
	if (ip[2] > 1024) {
		ip[0] = 0xff;
		ip[1] = 0x3f;
		ip[2] = ((unsigned long) capacity + 1) / (ip[0] * ip[1]);
		if (ip[2] > 1023)
			ip[2] = 1023;
	}
	return 0;
}

static int ppa_abort(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);
	/*
	 * There is no method for aborting commands since Iomega
	 * have tied the SCSI_MESSAGE line high in the interface
	 */

	switch (cmd->SCp.phase) {
	case 0:		/* Do not have access to parport */
	case 1:		/* Have not connected to interface */
		dev->cur_cmd = NULL;	/* Forget the problem */
		return SUCCESS;
	default:		/* SCSI command sent, can not abort */
		return FAILED;
	}
}

static void ppa_reset_pulse(unsigned int base)
{
	w_dtr(base, 0x40);
	w_ctr(base, 0x8);
	udelay(30);
	w_ctr(base, 0xc);
}

static int ppa_reset(struct scsi_cmnd *cmd)
{
	ppa_struct *dev = ppa_dev(cmd->device->host);

	if (cmd->SCp.phase)
		ppa_disconnect(dev);
	dev->cur_cmd = NULL;	/* Forget the problem */

	ppa_connect(dev, CONNECT_NORMAL);
	ppa_reset_pulse(dev->base);
	mdelay(1);		/* device settle delay */
	ppa_disconnect(dev);
	mdelay(1);		/* device settle delay */
	return SUCCESS;
}

static int device_check(ppa_struct *dev)
{
	/* This routine looks for a device and then attempts to use EPP
	   to send a command.  If all goes as planned then EPP is available. */

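	/* An all-zero six-byte CDB is a TEST UNIT READY command. */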
	static u8 cmd[6] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	int loop, old_mode, status, k, ppb = dev->base;
	unsigned char l;

	old_mode = dev->mode;
	for (loop = 0; loop < 8; loop++) {
		/* Attempt to use EPP for Test Unit Ready */
		if ((ppb & 0x0007) == 0x0000)
			dev->mode = PPA_EPP_32;

	      second_pass:
		ppa_connect(dev, CONNECT_EPP_MAYBE);
		/* Select SCSI device */
		if (!ppa_select(dev, loop)) {
			ppa_disconnect(dev);
			continue;
		}
		printk(KERN_INFO "ppa: Found device at ID %i, Attempting to use %s\n",
		       loop, PPA_MODE_STRING[dev->mode]);

		/* Send SCSI command */
		status = 1;
		w_ctr(ppb, 0x0c);
		for (l = 0; (l < 6) && (status); l++)
			status = ppa_out(dev, cmd, 1);

		if (!status) {
			ppa_disconnect(dev);
			ppa_connect(dev, CONNECT_EPP_MAYBE);
			w_dtr(ppb, 0x40);
			w_ctr(ppb, 0x08);
			udelay(30);
			w_ctr(ppb, 0x0c);
			udelay(1000);
			ppa_disconnect(dev);
			udelay(1000);
			if (dev->mode == PPA_EPP_32) {
				dev->mode = old_mode;
				goto second_pass;
			}
			return -EIO;
		}
		w_ctr(ppb, 0x0c);
		k = 1000000;	/* 1 Second */
		do {
			l = r_str(ppb);
			k--;
			udelay(1);
		} while (!(l & 0x80) && (k));

		l &= 0xf0;

		if (l != 0xf0) {
			ppa_disconnect(dev);
			ppa_connect(dev, CONNECT_EPP_MAYBE);
			ppa_reset_pulse(ppb);
			udelay(1000);
			ppa_disconnect(dev);
			udelay(1000);
			if (dev->mode == PPA_EPP_32) {
				dev->mode = old_mode;
				goto second_pass;
			}
			return -EIO;
		}
		ppa_disconnect(dev);
		printk(KERN_INFO "ppa: Communication established with ID %i using %s\n",
		       loop, PPA_MODE_STRING[dev->mode]);
		ppa_connect(dev, CONNECT_EPP_MAYBE);
		ppa_reset_pulse(ppb);
		udelay(1000);
		ppa_disconnect(dev);
		udelay(1000);
		return 0;
	}
	return -ENODEV;
}

static int ppa_adjust_queue(struct scsi_device *device)
{
	blk_queue_bounce_limit(device->request_queue, BLK_BOUNCE_HIGH);
	return 0;
}

static struct scsi_host_template ppa_template = {
	.module = THIS_MODULE,
	.proc_name = "ppa",
	.show_info = ppa_show_info,
	.write_info = ppa_write_info,
	.name = "Iomega VPI0 (ppa) interface",
	.queuecommand = ppa_queuecommand,
	.eh_abort_handler = ppa_abort,
	.eh_host_reset_handler = ppa_reset,
	.bios_param = ppa_biosparam,
	.this_id = -1,
	.sg_tablesize = SG_ALL,
	.can_queue = 1,
	.slave_alloc = ppa_adjust_queue,
};

/***************************************************************************
 *                   Parallel port probing routines                        *
 ***************************************************************************/

static LIST_HEAD(ppa_hosts);

/*
 * Finds the first available device number that can be allotted to the
 * new ppa device and returns the address of the previous node so that
 * we can add to the tail and have a list in the ascending order.
 */

static inline ppa_struct *find_parent(void)
{
	ppa_struct *dev, *par = NULL;
	unsigned int cnt = 0;

	if (list_empty(&ppa_hosts))
		return NULL;

	list_for_each_entry(dev, &ppa_hosts, list) {
		if (dev->dev_no != cnt)
			return par;
		cnt++;
		par = dev;
	}

	return par;
}

static int __ppa_attach(struct parport *pb)
{
	struct Scsi_Host *host;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waiting);
	DEFINE_WAIT(wait);
	ppa_struct *dev, *temp;
	int ports;
	int modes, ppb, ppb_hi;
	int err = -ENOMEM;
	struct pardev_cb ppa_cb;

	dev = kzalloc(sizeof(ppa_struct), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->base = -1;
	dev->mode = PPA_AUTODETECT;
	dev->recon_tmo = PPA_RECON_TMO;
	init_waitqueue_head(&waiting);
	temp = find_parent();
	if (temp)
		dev->dev_no = temp->dev_no + 1;

	memset(&ppa_cb, 0, sizeof(ppa_cb));
	ppa_cb.private = dev;
	ppa_cb.wakeup = ppa_wakeup;

	dev->dev = parport_register_dev_model(pb, "ppa", &ppa_cb, dev->dev_no);

	if (!dev->dev)
		goto out;

	/* Claim the bus so it remembers what we do to the control
	 * registers. [ CTR and ECP ]
	 */
	err = -EBUSY;
	dev->waiting = &waiting;
	prepare_to_wait(&waiting, &wait, TASK_UNINTERRUPTIBLE);
	if (ppa_pb_claim(dev))
		schedule_timeout(3 * HZ);
	if (dev->wanted) {
		printk(KERN_ERR "ppa%d: failed to claim parport because "
		       "another pardevice has held the port for too long!\n",
		       pb->number);
		ppa_pb_dismiss(dev);
		dev->waiting = NULL;
		finish_wait(&waiting, &wait);
		goto out1;
	}
	dev->waiting = NULL;
	finish_wait(&waiting, &wait);
	ppb = dev->base = dev->dev->port->base;
	ppb_hi = dev->dev->port->base_hi;
	w_ctr(ppb, 0x0c);
	modes = dev->dev->port->modes;

	/* Mode detection works up the chain of speed
	 * This avoids a nasty if-then-else-if-... tree
	 */
	dev->mode = PPA_NIBBLE;

	if (modes & PARPORT_MODE_TRISTATE)
		dev->mode = PPA_PS2;

	if (modes & PARPORT_MODE_ECP) {
		w_ecr(ppb_hi, 0x20);
		dev->mode = PPA_PS2;
	}
	if ((modes & PARPORT_MODE_EPP) && (modes & PARPORT_MODE_ECP))
		w_ecr(ppb_hi, 0x80);

	/* Done configuration */

	err = ppa_init(dev);
	ppa_pb_release(dev);

	if (err)
		goto out1;

	/* now the glue ... */
	if (dev->mode == PPA_NIBBLE || dev->mode == PPA_PS2)
		ports = 3;
	else
		ports = 8;

	INIT_DELAYED_WORK(&dev->ppa_tq, ppa_interrupt);

	err = -ENOMEM;
	host = scsi_host_alloc(&ppa_template, sizeof(ppa_struct *));
	if (!host)
		goto out1;
	host->io_port = pb->base;
	host->n_io_port = ports;
	host->dma_channel = -1;
	host->unique_id = pb->number;
	*(ppa_struct **)&host->hostdata = dev;
	dev->host = host;
	list_add_tail(&dev->list, &ppa_hosts);
	err = scsi_add_host(host, NULL);
	if (err)
		goto out2;
	scsi_scan_host(host);
	return 0;
out2:
	list_del_init(&dev->list);
	scsi_host_put(host);
out1:
	parport_unregister_device(dev->dev);
out:
	kfree(dev);
	return err;
}

static void ppa_attach(struct parport *pb)
{
	__ppa_attach(pb);
}

static void ppa_detach(struct parport *pb)
{
	ppa_struct *dev;
	list_for_each_entry(dev, &ppa_hosts, list) {
		if (dev->dev->port == pb) {
			list_del_init(&dev->list);
			scsi_remove_host(dev->host);
			scsi_host_put(dev->host);
			parport_unregister_device(dev->dev);
			kfree(dev);
			break;
		}
	}
}

static struct parport_driver ppa_driver = {
	.name = "ppa",
	.match_port = ppa_attach,
	.detach = ppa_detach,
	.devmodel = true,
};

static int __init ppa_driver_init(void)
{
	printk(KERN_INFO "ppa: Version %s\n", PPA_VERSION);
	return parport_register_driver(&ppa_driver);
}

static void __exit ppa_driver_exit(void)
{
	parport_unregister_driver(&ppa_driver);
}

module_init(ppa_driver_init);
module_exit(ppa_driver_exit);
MODULE_LICENSE("GPL");