1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * pata-legacy.c - Legacy port PATA/SATA controller driver.
4*4882a593Smuzhiyun * Copyright 2005/2006 Red Hat, all rights reserved.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * An ATA driver for the legacy ATA ports.
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Data Sources:
9*4882a593Smuzhiyun * Opti 82C465/82C611 support: Data sheets at opti-inc.com
10*4882a593Smuzhiyun * HT6560 series:
11*4882a593Smuzhiyun * Promise 20230/20620:
12*4882a593Smuzhiyun * http://www.ryston.cz/petr/vlb/pdc20230b.html
13*4882a593Smuzhiyun * http://www.ryston.cz/petr/vlb/pdc20230c.html
14*4882a593Smuzhiyun * http://www.ryston.cz/petr/vlb/pdc20630.html
15*4882a593Smuzhiyun * QDI65x0:
16*4882a593Smuzhiyun * http://www.ryston.cz/petr/vlb/qd6500.html
17*4882a593Smuzhiyun * http://www.ryston.cz/petr/vlb/qd6580.html
18*4882a593Smuzhiyun *
19*4882a593Smuzhiyun * QDI65x0 probe code based on drivers/ide/legacy/qd65xx.c
20*4882a593Smuzhiyun * Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
21*4882a593Smuzhiyun * Samuel Thibault <samuel.thibault@ens-lyon.org>
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * Unsupported but docs exist:
24*4882a593Smuzhiyun * Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * This driver handles legacy (that is "ISA/VLB side") IDE ports found
27*4882a593Smuzhiyun * on PC class systems. There are three hybrid devices that are exceptions
28*4882a593Smuzhiyun * The Cyrix 5510/5520 where a pre SFF ATA device is on the bridge and
29*4882a593Smuzhiyun * the MPIIX where the tuning is PCI side but the IDE is "ISA side".
30*4882a593Smuzhiyun *
31*4882a593Smuzhiyun * Specific support is included for the ht6560a/ht6560b/opti82c611a/
32*4882a593Smuzhiyun * opti82c465mv/promise 20230c/20630/qdi65x0/winbond83759A
33*4882a593Smuzhiyun *
34*4882a593Smuzhiyun * Support for the Winbond 83759A when operating in advanced mode.
35*4882a593Smuzhiyun * Multichip mode is not currently supported.
36*4882a593Smuzhiyun *
37*4882a593Smuzhiyun * Use the autospeed and pio_mask options with:
38*4882a593Smuzhiyun * Appian ADI/2 aka CLPD7220 or AIC25VL01.
39*4882a593Smuzhiyun * Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
40*4882a593Smuzhiyun * Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
41*4882a593Smuzhiyun * Winbond W83759A, Promise PDC20230-B
42*4882a593Smuzhiyun *
43*4882a593Smuzhiyun * For now use autospeed and pio_mask as above with the W83759A. This may
44*4882a593Smuzhiyun * change.
45*4882a593Smuzhiyun */
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun #include <linux/async.h>
48*4882a593Smuzhiyun #include <linux/kernel.h>
49*4882a593Smuzhiyun #include <linux/module.h>
50*4882a593Smuzhiyun #include <linux/pci.h>
51*4882a593Smuzhiyun #include <linux/init.h>
52*4882a593Smuzhiyun #include <linux/blkdev.h>
53*4882a593Smuzhiyun #include <linux/delay.h>
54*4882a593Smuzhiyun #include <scsi/scsi_host.h>
55*4882a593Smuzhiyun #include <linux/ata.h>
56*4882a593Smuzhiyun #include <linux/libata.h>
57*4882a593Smuzhiyun #include <linux/platform_device.h>
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun #define DRV_NAME "pata_legacy"
60*4882a593Smuzhiyun #define DRV_VERSION "0.6.5"
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun #define NR_HOST 6
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun static int all;
65*4882a593Smuzhiyun module_param(all, int, 0444);
66*4882a593Smuzhiyun MODULE_PARM_DESC(all, "Grab all legacy port devices, even if PCI(0=off, 1=on)");
67*4882a593Smuzhiyun
/*
 * Controller personalities handled by this driver. Used to index the
 * controller table and select per-chip ops during probing.
 */
enum controller {
	BIOS = 0,	/* BIOS-configured; driver does not touch timings */
	SNOOP = 1,	/* Hardware snoops commands and self-tunes */
	PDC20230 = 2,	/* Promise 20230/20620 */
	HT6560A = 3,	/* Holtek 6560A */
	HT6560B = 4,	/* Holtek 6560B */
	OPTI611A = 5,	/* Opti 82C611A */
	OPTI46X = 6,	/* Opti 82C465MV family */
	QDI6500 = 7,	/* QDI 6500 */
	QDI6580 = 8,	/* QDI 6580, single channel */
	QDI6580DP = 9,	/* Dual channel mode is different */
	W83759A = 10,	/* Winbond 83759A in advanced mode */

	UNKNOWN = -1
};
83*4882a593Smuzhiyun
/*
 * Per-host private data, stored in ata_host->private_data.
 */
struct legacy_data {
	unsigned long timing;		/* Timing register I/O base (QDI chips) */
	u8 clock[2];			/* Cached per-device timing bytes */
	u8 last;			/* Last timing byte written (clock switch) */
	int fast;			/* Non-zero: chip runs fast timing variant */
	enum controller type;		/* Which controller personality */
	struct platform_device *platform_dev;	/* Backing platform device */
};
92*4882a593Smuzhiyun
/*
 * One entry in the probe list built up by legacy_probe_add() before
 * the actual host registration pass.
 */
struct legacy_probe {
	unsigned char *name;		/* Controller name (informational) */
	unsigned long port;		/* Command block I/O base, 0 = free slot */
	unsigned int irq;		/* IRQ line to use */
	unsigned int slot;		/* Slot index within the probe table */
	enum controller type;		/* Detected controller personality */
	unsigned long private;		/* Controller-specific cookie */
};
101*4882a593Smuzhiyun
/*
 * Static description of a controller personality: the libata ops to
 * use plus capability flags and an optional setup hook run at probe.
 */
struct legacy_controller {
	const char *name;		/* Human-readable chip name */
	struct ata_port_operations *ops;	/* libata port operations */
	unsigned int pio_mask;		/* Supported PIO modes */
	unsigned int flags;		/* ATA_FLAG_* for the port */
	unsigned int pflags;		/* ATA_PFLAG_* for the port */
	/* Optional chip-specific probe-time setup; may be NULL */
	int (*setup)(struct platform_device *, struct legacy_probe *probe,
		struct legacy_data *data);
};
111*4882a593Smuzhiyun
/* Canonical ISA legacy port bases, in stable probe order */
static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };

static struct legacy_probe probe_list[NR_HOST];	/* Pending probes */
static struct legacy_data legacy_data[NR_HOST];	/* Per-host private data */
static struct ata_host *legacy_host[NR_HOST];	/* Registered hosts */
static int nr_legacy_host;			/* Count of registered hosts */


/* Module parameters controlling which chips/ports get probed */
static int probe_all;			/* Set to check all ISA port ranges */
static int ht6560a;			/* HT 6560A on primary 1, second 2, both 3 */
static int ht6560b;			/* HT 6560B on primary 1, second 2, both 3 */
static int opti82c611a;			/* Opti82c611A on primary 1, sec 2, both 3 */
static int opti82c46x;			/* Opti 82c465MV present(pri/sec autodetect) */
static int autospeed;			/* Chip present which snoops speed changes */
static int pio_mask = ATA_PIO4;		/* PIO range for autospeed devices */
static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */

/* Set to probe QDI controllers (defaults on when built as the QDI module) */
#ifdef CONFIG_PATA_QDI_MODULE
static int qdi = 1;
#else
static int qdi;
#endif

#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
static int winbond = 1;		/* Set to probe Winbond controllers,
					give I/O port if non standard */
#else
static int winbond;		/* Set to probe Winbond controllers,
					give I/O port if non standard */
#endif
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun /**
145*4882a593Smuzhiyun * legacy_probe_add - Add interface to probe list
146*4882a593Smuzhiyun * @port: Controller port
147*4882a593Smuzhiyun * @irq: IRQ number
148*4882a593Smuzhiyun * @type: Controller type
149*4882a593Smuzhiyun * @private: Controller specific info
150*4882a593Smuzhiyun *
151*4882a593Smuzhiyun * Add an entry into the probe list for ATA controllers. This is used
152*4882a593Smuzhiyun * to add the default ISA slots and then to build up the table
153*4882a593Smuzhiyun * further according to other ISA/VLB/Weird device scans
154*4882a593Smuzhiyun *
155*4882a593Smuzhiyun * An I/O port list is used to keep ordering stable and sane, as we
156*4882a593Smuzhiyun * don't have any good way to talk about ordering otherwise
157*4882a593Smuzhiyun */
158*4882a593Smuzhiyun
legacy_probe_add(unsigned long port,unsigned int irq,enum controller type,unsigned long private)159*4882a593Smuzhiyun static int legacy_probe_add(unsigned long port, unsigned int irq,
160*4882a593Smuzhiyun enum controller type, unsigned long private)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun struct legacy_probe *lp = &probe_list[0];
163*4882a593Smuzhiyun int i;
164*4882a593Smuzhiyun struct legacy_probe *free = NULL;
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun for (i = 0; i < NR_HOST; i++) {
167*4882a593Smuzhiyun if (lp->port == 0 && free == NULL)
168*4882a593Smuzhiyun free = lp;
169*4882a593Smuzhiyun /* Matching port, or the correct slot for ordering */
170*4882a593Smuzhiyun if (lp->port == port || legacy_port[i] == port) {
171*4882a593Smuzhiyun free = lp;
172*4882a593Smuzhiyun break;
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun lp++;
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun if (free == NULL) {
177*4882a593Smuzhiyun printk(KERN_ERR "pata_legacy: Too many interfaces.\n");
178*4882a593Smuzhiyun return -1;
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun /* Fill in the entry for later probing */
181*4882a593Smuzhiyun free->port = port;
182*4882a593Smuzhiyun free->irq = irq;
183*4882a593Smuzhiyun free->type = type;
184*4882a593Smuzhiyun free->private = private;
185*4882a593Smuzhiyun return 0;
186*4882a593Smuzhiyun }
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun /**
190*4882a593Smuzhiyun * legacy_set_mode - mode setting
191*4882a593Smuzhiyun * @link: IDE link
192*4882a593Smuzhiyun * @unused: Device that failed when error is returned
193*4882a593Smuzhiyun *
194*4882a593Smuzhiyun * Use a non standard set_mode function. We don't want to be tuned.
195*4882a593Smuzhiyun *
196*4882a593Smuzhiyun * The BIOS configured everything. Our job is not to fiddle. Just use
197*4882a593Smuzhiyun * whatever PIO the hardware is using and leave it at that. When we
198*4882a593Smuzhiyun * get some kind of nice user driven API for control then we can
199*4882a593Smuzhiyun * expand on this as per hdparm in the base kernel.
200*4882a593Smuzhiyun */
201*4882a593Smuzhiyun
legacy_set_mode(struct ata_link * link,struct ata_device ** unused)202*4882a593Smuzhiyun static int legacy_set_mode(struct ata_link *link, struct ata_device **unused)
203*4882a593Smuzhiyun {
204*4882a593Smuzhiyun struct ata_device *dev;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun ata_for_each_dev(dev, link, ENABLED) {
207*4882a593Smuzhiyun ata_dev_info(dev, "configured for PIO\n");
208*4882a593Smuzhiyun dev->pio_mode = XFER_PIO_0;
209*4882a593Smuzhiyun dev->xfer_mode = XFER_PIO_0;
210*4882a593Smuzhiyun dev->xfer_shift = ATA_SHIFT_PIO;
211*4882a593Smuzhiyun dev->flags |= ATA_DFLAG_PIO;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun return 0;
214*4882a593Smuzhiyun }
215*4882a593Smuzhiyun
/* Standard PIO-only SCSI host template */
static struct scsi_host_template legacy_sht = {
	ATA_PIO_SHT(DRV_NAME),
};

/* Base ops shared by all personalities: SFF with a 40-wire cable */
static const struct ata_port_operations legacy_base_port_ops = {
	.inherits	= &ata_sff_port_ops,
	.cable_detect	= ata_cable_40wire,
};
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun /*
226*4882a593Smuzhiyun * These ops are used if the user indicates the hardware
227*4882a593Smuzhiyun * snoops the commands to decide on the mode and handles the
228*4882a593Smuzhiyun * mode selection "magically" itself. Several legacy controllers
229*4882a593Smuzhiyun * do this. The mode range can be set if it is not 0x1F by setting
230*4882a593Smuzhiyun * pio_mask as well.
231*4882a593Smuzhiyun */
232*4882a593Smuzhiyun
/* Ops for hardware that snoops commands and tunes itself (32-bit xfer) */
static struct ata_port_operations simple_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,
};

/* Ops for BIOS-configured hardware: PIO0 reporting via legacy_set_mode */
static struct ata_port_operations legacy_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.sff_data_xfer	= ata_sff_data_xfer32,
	.set_mode	= legacy_set_mode,
};
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun /*
245*4882a593Smuzhiyun * Promise 20230C and 20620 support
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * This controller supports PIO0 to PIO2. We set PIO timings
248*4882a593Smuzhiyun * conservatively to allow for 50MHz Vesa Local Bus. The 20620 DMA
249*4882a593Smuzhiyun * support is weird being DMA to controller and PIO'd to the host
250*4882a593Smuzhiyun * and not supported.
251*4882a593Smuzhiyun */
252*4882a593Smuzhiyun
/*
 * pdc20230_set_piomode - PIO timing setup for the Promise 20230/20620
 * @ap: port being configured
 * @adev: device whose PIO mode is being set
 *
 * Programs the chip's per-device timing field. The control interface
 * is unlocked by a fixed sequence of reads/writes to the legacy task
 * file ports; the sequence is order-sensitive and must not be
 * interleaved with other I/O, hence the irq disable.
 */
static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	int tries = 5;
	int pio = adev->pio_mode - XFER_PIO_0;
	u8 rt;
	unsigned long flags;

	/* Safe as UP only. Force I/Os to occur together */

	local_irq_save(flags);

	/* Unlock the control interface */
	do {
		inb(0x1F5);
		outb(inb(0x1F2) | 0x80, 0x1F2);
		inb(0x1F2);
		inb(0x3F6);
		inb(0x3F6);
		inb(0x1F2);
		inb(0x1F2);
	}
	/* Bit 7 of 0x1F2 clears when the chip has entered config mode;
	   give up after 5 attempts */
	while ((inb(0x1F2) & 0x80) && --tries);

	local_irq_restore(flags);

	outb(inb(0x1F4) & 0x07, 0x1F4);

	/* Each device owns a 3-bit timing field in register 0x1F3:
	   device 0 in bits 3-5, device 1 in bits 0-2 (note the !devno) */
	rt = inb(0x1F3);
	rt &= ~(0x07 << (3 * !adev->devno));
	if (pio)
		rt |= (1 + 3 * pio) << (3 * !adev->devno);
	outb(rt, 0x1F3);

	/* Kick the chip back out of config mode */
	udelay(100);
	outb(inb(0x1F2) | 0x01, 0x1F2);
	udelay(100);
	inb(0x1F5);

}
292*4882a593Smuzhiyun
/*
 * pdc_data_xfer_vlb - 32-bit VLB data transfer for the Promise chips
 * @qc: queued command
 * @buf: data buffer
 * @buflen: length in bytes
 * @rw: READ or WRITE
 *
 * Uses 32-bit PIO when the device advertises dword I/O and the port
 * allows it; otherwise falls back to the standard 32-bit SFF helper.
 * Returns the number of bytes actually consumed (rounded up to a
 * dword when a partial tail was transferred).
 */
static unsigned int pdc_data_xfer_vlb(struct ata_queued_cmd *qc,
			unsigned char *buf, unsigned int buflen, int rw)
{
	struct ata_device *dev = qc->dev;
	struct ata_port *ap = dev->link->ap;
	int slop = buflen & 3;	/* bytes beyond the last whole dword */

	/* 32bit I/O capable *and* we need to write a whole number of dwords */
	if (ata_id_has_dword_io(dev->id) && (slop == 0 || slop == 3)
					&& (ap->pflags & ATA_PFLAG_PIO32)) {
		unsigned long flags;

		/* Keep the sync sequence and data burst atomic on UP */
		local_irq_save(flags);

		/* Perform the 32bit I/O synchronization sequence */
		ioread8(ap->ioaddr.nsect_addr);
		ioread8(ap->ioaddr.nsect_addr);
		ioread8(ap->ioaddr.nsect_addr);

		/* Now the data */
		if (rw == READ)
			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
		else
			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

		/* Handle the trailing 1-3 bytes via one padded dword */
		if (unlikely(slop)) {
			__le32 pad = 0;

			if (rw == READ) {
				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
				memcpy(buf + buflen - slop, &pad, slop);
			} else {
				memcpy(&pad, buf + buflen - slop, slop);
				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
			}
			buflen += 4 - slop;
		}
		local_irq_restore(flags);
	} else
		buflen = ata_sff_data_xfer32(qc, buf, buflen, rw);

	return buflen;
}
336*4882a593Smuzhiyun
/* Promise 20230/20620: custom PIO timing load and VLB 32-bit transfer */
static struct ata_port_operations pdc20230_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.set_piomode	= pdc20230_set_piomode,
	.sff_data_xfer	= pdc_data_xfer_vlb,
};
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun /*
344*4882a593Smuzhiyun * Holtek 6560A support
345*4882a593Smuzhiyun *
346*4882a593Smuzhiyun * This controller supports PIO0 to PIO2 (no IORDY even though higher
347*4882a593Smuzhiyun * timings can be loaded).
348*4882a593Smuzhiyun */
349*4882a593Smuzhiyun
/*
 * ht6560a_set_piomode - PIO timing setup for the Holtek 6560A
 * @ap: port being configured
 * @adev: device whose PIO mode is being set
 *
 * Computes active/recovery clocks for a conservative 50MHz bus and
 * loads them as a packed byte through the device register. The four
 * reads of port 0x3E6 put the chip into its timing-load state —
 * presumably a datasheet unlock sequence; do not reorder.
 */
static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 active, recover;
	struct ata_timing t;

	/* Get the timing data in cycles. For now play safe at 50Mhz */
	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);

	/* Clamp to the 4-bit fields the chip accepts */
	active = clamp_val(t.active, 2, 15);
	recover = clamp_val(t.recover, 4, 15);

	inb(0x3E6);
	inb(0x3E6);
	inb(0x3E6);
	inb(0x3E6);

	/* High nibble recovery, low nibble active clocks */
	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
	ioread8(ap->ioaddr.status_addr);
}
369*4882a593Smuzhiyun
/* Holtek 6560A: custom PIO timing load */
static struct ata_port_operations ht6560a_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.set_piomode	= ht6560a_set_piomode,
};
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun /*
376*4882a593Smuzhiyun * Holtek 6560B support
377*4882a593Smuzhiyun *
378*4882a593Smuzhiyun * This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO
379*4882a593Smuzhiyun * setting unless we see an ATAPI device in which case we force it off.
380*4882a593Smuzhiyun *
381*4882a593Smuzhiyun * FIXME: need to implement 2nd channel support.
382*4882a593Smuzhiyun */
383*4882a593Smuzhiyun
/*
 * ht6560b_set_piomode - PIO timing setup for the Holtek 6560B
 * @ap: port being configured
 * @adev: device whose PIO mode is being set
 *
 * Like the 6560A but supports PIO0-4 and has a FIFO that must be
 * disabled for ATAPI devices (read/modify/write of port 0x3E6).
 */
static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 active, recover;
	struct ata_timing t;

	/* Get the timing data in cycles. For now play safe at 50Mhz */
	ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);

	active = clamp_val(t.active, 2, 15);
	/* A recovery of 16 encodes as 0 in the 4-bit field */
	recover = clamp_val(t.recover, 2, 16) & 0x0F;

	/* Timing-load unlock sequence, as on the 6560A */
	inb(0x3E6);
	inb(0x3E6);
	inb(0x3E6);
	inb(0x3E6);

	iowrite8(recover << 4 | active, ap->ioaddr.device_addr);

	/* Force the FIFO off for non-ATA (i.e. ATAPI) devices */
	if (adev->class != ATA_DEV_ATA) {
		u8 rconf = inb(0x3E6);
		if (rconf & 0x24) {
			rconf &= ~0x24;
			outb(rconf, 0x3E6);
		}
	}
	ioread8(ap->ioaddr.status_addr);
}
411*4882a593Smuzhiyun
/* Holtek 6560B: custom PIO timing load with ATAPI FIFO handling */
static struct ata_port_operations ht6560b_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.set_piomode	= ht6560b_set_piomode,
};
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun /*
418*4882a593Smuzhiyun * Opti core chipset helpers
419*4882a593Smuzhiyun */
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun /**
422*4882a593Smuzhiyun * opti_syscfg - read OPTI chipset configuration
423*4882a593Smuzhiyun * @reg: Configuration register to read
424*4882a593Smuzhiyun *
425*4882a593Smuzhiyun * Returns the value of an OPTI system board configuration register.
426*4882a593Smuzhiyun */
427*4882a593Smuzhiyun
/**
 * opti_syscfg - read OPTI chipset configuration
 * @reg: Configuration register to read
 *
 * Returns the value of an OPTI system board configuration register,
 * accessed through the index (0x22) / data (0x24) port pair.
 */
static u8 opti_syscfg(u8 reg)
{
	unsigned long flags;
	u8 val;

	/* Uniprocessor chipset and must force cycles adjacent */
	local_irq_save(flags);
	outb(reg, 0x22);
	val = inb(0x24);
	local_irq_restore(flags);

	return val;
}
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun /*
442*4882a593Smuzhiyun * Opti 82C611A
443*4882a593Smuzhiyun *
444*4882a593Smuzhiyun * This controller supports PIO0 to PIO3.
445*4882a593Smuzhiyun */
446*4882a593Smuzhiyun
/*
 * opti82c611a_set_piomode - PIO timing setup for the Opti 82C611A
 * @ap: port being configured
 * @adev: device whose PIO mode is being set
 *
 * The chip exposes bank-switched timing registers overlaid on the
 * task file while in configuration mode. The sequence of reads and
 * writes below is the chip's documented access protocol and is
 * order-sensitive.
 */
static void opti82c611a_set_piomode(struct ata_port *ap,
						struct ata_device *adev)
{
	u8 active, recover, setup;
	struct ata_timing t;
	struct ata_device *pair = ata_dev_pair(adev);
	int clock;
	int khz[4] = { 50000, 40000, 33000, 25000 };
	u8 rc;

	/* Enter configuration mode */
	ioread16(ap->ioaddr.error_addr);
	ioread16(ap->ioaddr.error_addr);
	iowrite8(3, ap->ioaddr.nsect_addr);

	/* Read VLB clock strapping */
	clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];

	/* Get the timing data in cycles */
	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);

	/* Setup timing is shared between both devices on the channel,
	   so merge with the pair's requirements if one exists */
	if (pair) {
		struct ata_timing tp;
		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);

		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
	}

	/* Convert clocks to the chip's zero-based register encodings */
	active = clamp_val(t.active, 2, 17) - 2;
	recover = clamp_val(t.recover, 1, 16) - 1;
	setup = clamp_val(t.setup, 1, 4) - 1;

	/* Select the right timing bank for write timing */
	rc = ioread8(ap->ioaddr.lbal_addr);
	rc &= 0x7F;
	rc |= (adev->devno << 7);
	iowrite8(rc, ap->ioaddr.lbal_addr);

	/* Write the timings */
	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);

	/* Select the right bank for read timings, also
	   load the shared timings for address */
	rc = ioread8(ap->ioaddr.device_addr);
	rc &= 0xC0;
	rc |= adev->devno;	/* Index select */
	rc |= (setup << 4) | 0x04;
	iowrite8(rc, ap->ioaddr.device_addr);

	/* Load the read timings */
	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);

	/* Ensure the timing register mode is right */
	rc = ioread8(ap->ioaddr.lbal_addr);
	rc &= 0x73;
	rc |= 0x84;
	iowrite8(rc, ap->ioaddr.lbal_addr);

	/* Exit command mode */
	iowrite8(0x83, ap->ioaddr.nsect_addr);
}
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun
/* Opti 82C611A: custom PIO timing load */
static struct ata_port_operations opti82c611a_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.set_piomode	= opti82c611a_set_piomode,
};
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun /*
517*4882a593Smuzhiyun * Opti 82C465MV
518*4882a593Smuzhiyun *
519*4882a593Smuzhiyun * This controller supports PIO0 to PIO3. Unlike the 611A the MVB
520*4882a593Smuzhiyun * version is dual channel but doesn't have a lot of unique registers.
521*4882a593Smuzhiyun */
522*4882a593Smuzhiyun
/*
 * opti82c46x_set_piomode - PIO timing setup for the Opti 82C465MV
 * @ap: port being configured
 * @adev: device whose PIO mode is being set
 *
 * Same bank-switched register protocol as the 82C611A, except the
 * bus clock is read from the BIOS-set system configuration register
 * rather than from chip strapping. Records which port last loaded
 * the (shared) timing registers so qc_issue can reload on a channel
 * switch.
 */
static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	u8 active, recover, setup;
	struct ata_timing t;
	struct ata_device *pair = ata_dev_pair(adev);
	int clock;
	int khz[4] = { 50000, 40000, 33000, 25000 };
	u8 rc;
	u8 sysclk;

	/* Get the clock */
	sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6;	/* BIOS set */

	/* Enter configuration mode */
	ioread16(ap->ioaddr.error_addr);
	ioread16(ap->ioaddr.error_addr);
	iowrite8(3, ap->ioaddr.nsect_addr);

	/* Read VLB clock strapping */
	clock = 1000000000 / khz[sysclk];

	/* Get the timing data in cycles */
	ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);

	/* Setup timing is shared between both devices on the channel */
	if (pair) {
		struct ata_timing tp;
		ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);

		ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
	}

	/* Convert clocks to the chip's zero-based register encodings */
	active = clamp_val(t.active, 2, 17) - 2;
	recover = clamp_val(t.recover, 1, 16) - 1;
	setup = clamp_val(t.setup, 1, 4) - 1;

	/* Select the right timing bank for write timing */
	rc = ioread8(ap->ioaddr.lbal_addr);
	rc &= 0x7F;
	rc |= (adev->devno << 7);
	iowrite8(rc, ap->ioaddr.lbal_addr);

	/* Write the timings */
	iowrite8(active << 4 | recover, ap->ioaddr.error_addr);

	/* Select the right bank for read timings, also
	   load the shared timings for address */
	rc = ioread8(ap->ioaddr.device_addr);
	rc &= 0xC0;
	rc |= adev->devno;	/* Index select */
	rc |= (setup << 4) | 0x04;
	iowrite8(rc, ap->ioaddr.device_addr);

	/* Load the read timings */
	iowrite8(active << 4 | recover, ap->ioaddr.data_addr);

	/* Ensure the timing register mode is right */
	rc = ioread8(ap->ioaddr.lbal_addr);
	rc &= 0x73;
	rc |= 0x84;
	iowrite8(rc, ap->ioaddr.lbal_addr);

	/* Exit command mode */
	iowrite8(0x83, ap->ioaddr.nsect_addr);

	/* We need to know this for quad device on the MVB */
	ap->host->private_data = ap;
}
591*4882a593Smuzhiyun
592*4882a593Smuzhiyun /**
593*4882a593Smuzhiyun * opt82c465mv_qc_issue - command issue
594*4882a593Smuzhiyun * @qc: command pending
595*4882a593Smuzhiyun *
596*4882a593Smuzhiyun * Called when the libata layer is about to issue a command. We wrap
597*4882a593Smuzhiyun * this interface so that we can load the correct ATA timings. The
598*4882a593Smuzhiyun * MVB has a single set of timing registers and these are shared
599*4882a593Smuzhiyun * across channels. As there are two registers we really ought to
600*4882a593Smuzhiyun * track the last two used values as a sort of register window. For
601*4882a593Smuzhiyun * now we just reload on a channel switch. On the single channel
602*4882a593Smuzhiyun * setup this condition never fires so we do nothing extra.
603*4882a593Smuzhiyun *
604*4882a593Smuzhiyun * FIXME: dual channel needs ->serialize support
605*4882a593Smuzhiyun */
606*4882a593Smuzhiyun
/*
 * opti82c46x_qc_issue - command issue wrapper for the 82C465MV
 * @qc: command pending
 *
 * The MVB shares one set of timing registers across both channels,
 * so before issuing a command check whether another port loaded the
 * timings last (host->private_data tracks the last loader) and, if
 * so, reload them for this port's device. On single channel setups
 * the condition never fires.
 */
static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;

	/* If timings are set and for the wrong channel (2nd test is
	   due to a libata shortcoming and will eventually go I hope) */
	if (ap->host->private_data != ap->host
	    && ap->host->private_data != NULL)
		opti82c46x_set_piomode(ap, adev);

	return ata_sff_qc_issue(qc);
}
620*4882a593Smuzhiyun
/* Opti 82C465MV: timing load plus channel-switch reload on issue */
static struct ata_port_operations opti82c46x_port_ops = {
	.inherits	= &legacy_base_port_ops,
	.set_piomode	= opti82c46x_set_piomode,
	.qc_issue	= opti82c46x_qc_issue,
};
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun /**
628*4882a593Smuzhiyun * qdi65x0_set_piomode - PIO setup for QDI65x0
629*4882a593Smuzhiyun * @ap: Port
630*4882a593Smuzhiyun * @adev: Device
631*4882a593Smuzhiyun *
632*4882a593Smuzhiyun * In single channel mode the 6580 has one clock per device and we can
633*4882a593Smuzhiyun * avoid the requirement to clock switch. We also have to load the timing
634*4882a593Smuzhiyun * into the right clock according to whether we are master or slave.
635*4882a593Smuzhiyun *
636*4882a593Smuzhiyun * In dual channel mode the 6580 has one clock per channel and we have
637*4882a593Smuzhiyun * to software clockswitch in qc_issue.
638*4882a593Smuzhiyun */
639*4882a593Smuzhiyun
/*
 * qdi65x0_set_piomode - PIO setup for QDI65x0
 * @ap: Port
 * @adev: Device
 *
 * In single channel mode the 6580 has one clock per device and we can
 * avoid the requirement to clock switch. We also have to load the timing
 * into the right clock according to whether we are master or slave.
 *
 * In dual channel mode the 6580 has one clock per channel and we have
 * to software clockswitch in qc_issue.
 */
static void qdi65x0_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	struct legacy_data *ld_qdi = ap->host->private_data;
	int active, recovery;
	u8 timing;

	/* Get the timing data in cycles (33MHz-class cycle time) */
	ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);

	/* Fast and slow chip variants encode the clock counts with
	   different offsets and field limits */
	if (ld_qdi->fast) {
		active = 8 - clamp_val(t.active, 1, 8);
		recovery = 18 - clamp_val(t.recover, 3, 18);
	} else {
		active = 9 - clamp_val(t.active, 2, 9);
		recovery = 15 - clamp_val(t.recover, 0, 15);
	}
	timing = (recovery << 4) | active | 0x08;
	/* Cache so qdi_qc_issue can clock-switch without recomputing */
	ld_qdi->clock[adev->devno] = timing;

	/* 6580 single channel: one timing register per device;
	   other variants: one per channel */
	if (ld_qdi->type == QDI6580)
		outb(timing, ld_qdi->timing + 2 * adev->devno);
	else
		outb(timing, ld_qdi->timing + 2 * ap->port_no);

	/* Clear the FIFO */
	if (ld_qdi->type != QDI6500 && adev->class != ATA_DEV_ATA)
		outb(0x5F, (ld_qdi->timing & 0xFFF0) + 3);
}
669*4882a593Smuzhiyun
670*4882a593Smuzhiyun /**
671*4882a593Smuzhiyun * qdi_qc_issue - command issue
672*4882a593Smuzhiyun * @qc: command pending
673*4882a593Smuzhiyun *
674*4882a593Smuzhiyun * Called when the libata layer is about to issue a command. We wrap
675*4882a593Smuzhiyun * this interface so that we can load the correct ATA timings.
676*4882a593Smuzhiyun */
677*4882a593Smuzhiyun
qdi_qc_issue(struct ata_queued_cmd * qc)678*4882a593Smuzhiyun static unsigned int qdi_qc_issue(struct ata_queued_cmd *qc)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun struct ata_port *ap = qc->ap;
681*4882a593Smuzhiyun struct ata_device *adev = qc->dev;
682*4882a593Smuzhiyun struct legacy_data *ld_qdi = ap->host->private_data;
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun if (ld_qdi->clock[adev->devno] != ld_qdi->last) {
685*4882a593Smuzhiyun if (adev->pio_mode) {
686*4882a593Smuzhiyun ld_qdi->last = ld_qdi->clock[adev->devno];
687*4882a593Smuzhiyun outb(ld_qdi->clock[adev->devno], ld_qdi->timing +
688*4882a593Smuzhiyun 2 * ap->port_no);
689*4882a593Smuzhiyun }
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun return ata_sff_qc_issue(qc);
692*4882a593Smuzhiyun }
693*4882a593Smuzhiyun
/**
 * vlb32_data_xfer - PIO data transfer over a 32bit VLB interface
 * @qc: queued command in progress
 * @buf: data buffer
 * @buflen: length of @buf in bytes
 * @rw: READ or WRITE
 *
 * Use 32bit I/O cycles on the data register when the device reports
 * DWORD I/O capability in its identify data, 32bit PIO is enabled on
 * the port, and the byte count modulo four is 0 or 3 (a 1 or 2 byte
 * tail is not handled by the padded-dword path below); otherwise hand
 * the transfer to the standard 16bit SFF helper.
 *
 * Returns the number of bytes consumed: @buflen rounded up to a
 * multiple of four on the 32bit path, or whatever the SFF helper
 * reports otherwise.
 */
static unsigned int vlb32_data_xfer(struct ata_queued_cmd *qc,
				unsigned char *buf,
				unsigned int buflen, int rw)
{
	struct ata_device *adev = qc->dev;
	struct ata_port *ap = adev->link->ap;
	int slop = buflen & 3;	/* bytes left over after whole dwords */

	if (ata_id_has_dword_io(adev->id) && (slop == 0 || slop == 3)
					&& (ap->pflags & ATA_PFLAG_PIO32)) {
		/* Burst the whole dwords */
		if (rw == WRITE)
			iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
		else
			ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

		if (unlikely(slop)) {
			__le32 pad = 0;

			/* Move the 3 byte tail as a single zero-padded
			   little-endian dword */
			if (rw == WRITE) {
				memcpy(&pad, buf + buflen - slop, slop);
				iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
			} else {
				pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
				memcpy(buf + buflen - slop, &pad, slop);
			}
		}
		return (buflen + 3) & ~3;
	} else
		return ata_sff_data_xfer(qc, buf, buflen, rw);
}
724*4882a593Smuzhiyun
qdi_port(struct platform_device * dev,struct legacy_probe * lp,struct legacy_data * ld)725*4882a593Smuzhiyun static int qdi_port(struct platform_device *dev,
726*4882a593Smuzhiyun struct legacy_probe *lp, struct legacy_data *ld)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun if (devm_request_region(&dev->dev, lp->private, 4, "qdi") == NULL)
729*4882a593Smuzhiyun return -EBUSY;
730*4882a593Smuzhiyun ld->timing = lp->private;
731*4882a593Smuzhiyun return 0;
732*4882a593Smuzhiyun }
733*4882a593Smuzhiyun
/* QDI6500: single channel card, but the clock must still be switched
   per device at command issue time */
static struct ata_port_operations qdi6500_port_ops = {
	.inherits = &legacy_base_port_ops,
	.set_piomode = qdi65x0_set_piomode,
	.qc_issue = qdi_qc_issue,
	.sff_data_xfer = vlb32_data_xfer,
};

/* QDI6580 in single channel mode: one clock register per device, so
   no clock switching is needed at issue time */
static struct ata_port_operations qdi6580_port_ops = {
	.inherits = &legacy_base_port_ops,
	.set_piomode = qdi65x0_set_piomode,
	.sff_data_xfer = vlb32_data_xfer,
};

/* QDI6580 in dual channel mode: one clock per channel, software
   clock-switched in qc_issue */
static struct ata_port_operations qdi6580dp_port_ops = {
	.inherits = &legacy_base_port_ops,
	.set_piomode = qdi65x0_set_piomode,
	.qc_issue = qdi_qc_issue,
	.sff_data_xfer = vlb32_data_xfer,
};
753*4882a593Smuzhiyun
/* Serializes accesses to the W83759A index/data configuration pair */
static DEFINE_SPINLOCK(winbond_lock);

/*
 * winbond_writecfg - write a W83759A configuration register
 * @port: configuration port base
 * @reg: register index
 * @val: value to store
 *
 * The chip uses an index register at port + 1 and a data register at
 * port + 2, so the two writes must not be interleaved with another
 * config access - hence the lock.
 */
static void winbond_writecfg(unsigned long port, u8 reg, u8 val)
{
	unsigned long flags;
	spin_lock_irqsave(&winbond_lock, flags);
	outb(reg, port + 0x01);
	outb(val, port + 0x02);
	spin_unlock_irqrestore(&winbond_lock, flags);
}

/*
 * winbond_readcfg - read a W83759A configuration register
 * @port: configuration port base
 * @reg: register index
 *
 * Select the register through the index port then read the data port,
 * under the same lock as the write path.
 */
static u8 winbond_readcfg(unsigned long port, u8 reg)
{
	u8 val;

	unsigned long flags;
	spin_lock_irqsave(&winbond_lock, flags);
	outb(reg, port + 0x01);
	val = inb(port + 0x02);
	spin_unlock_irqrestore(&winbond_lock, flags);

	return val;
}
777*4882a593Smuzhiyun
/**
 * winbond_set_piomode - PIO setup for the Winbond W83759A
 * @ap: Port
 * @adev: Device
 *
 * Program the W83759A timing registers for the device's PIO mode.
 * The VLB clock speed is taken from bit 6 of config register 0x81 so
 * that the cycle counts are computed against the right clock.
 */
static void winbond_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;
	struct legacy_data *ld_winbond = ap->host->private_data;
	int active, recovery;
	u8 reg;
	/* Timing register index for this port/device pair */
	int timing = 0x88 + (ap->port_no * 4) + (adev->devno * 2);

	reg = winbond_readcfg(ld_winbond->timing, 0x81);

	/* Get the timing data in cycles */
	if (reg & 0x40)		/* Fast VLB bus, assume 50MHz */
		ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
	else
		ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);

	active = (clamp_val(t.active, 3, 17) - 1) & 0x0F;
	recovery = (clamp_val(t.recover, 1, 15) + 1) & 0x0F;
	/*
	 * NOTE(review): this assignment clobbers the register index
	 * computed above, so the write below uses the timing byte as the
	 * register number and the old 0x81 contents as the data, and the
	 * setup write targets "timing byte + 1".  This matches the
	 * historic code but looks like the intent was to write
	 * (active << 4) | recovery into register 0x88+... -- verify
	 * against the W83759A data sheet before changing.
	 */
	timing = (active << 4) | recovery;
	winbond_writecfg(ld_winbond->timing, timing, reg);

	/* Load the setup timing */

	reg = 0x35;
	if (adev->class != ATA_DEV_ATA)
		reg |= 0x08;	/* FIFO off */
	if (!ata_pio_need_iordy(adev))
		reg |= 0x02;	/* IORDY off */
	reg |= (clamp_val(t.setup, 0, 3) << 6);
	winbond_writecfg(ld_winbond->timing, timing + 1, reg);
}
809*4882a593Smuzhiyun
winbond_port(struct platform_device * dev,struct legacy_probe * lp,struct legacy_data * ld)810*4882a593Smuzhiyun static int winbond_port(struct platform_device *dev,
811*4882a593Smuzhiyun struct legacy_probe *lp, struct legacy_data *ld)
812*4882a593Smuzhiyun {
813*4882a593Smuzhiyun if (devm_request_region(&dev->dev, lp->private, 4, "winbond") == NULL)
814*4882a593Smuzhiyun return -EBUSY;
815*4882a593Smuzhiyun ld->timing = lp->private;
816*4882a593Smuzhiyun return 0;
817*4882a593Smuzhiyun }
818*4882a593Smuzhiyun
/* W83759A in advanced (programmable timing) mode */
static struct ata_port_operations winbond_port_ops = {
	.inherits = &legacy_base_port_ops,
	.set_piomode = winbond_set_piomode,
	.sff_data_xfer = vlb32_data_xfer,
};
824*4882a593Smuzhiyun
/*
 * Controller personality table.  Entries must stay in the same order
 * as the controller type constants (BIOS, SNOOP, PDC20230, ... as
 * returned by probe_chip_type() and stored in legacy_probe->type),
 * because legacy_init_one() indexes this array with that value.
 * Fields: name, port operations, fastest PIO mode, extra port flags,
 * extra port pflags, and an optional setup hook run before the host
 * is allocated.
 */
static struct legacy_controller controllers[] = {
	{"BIOS",	&legacy_port_ops,	ATA_PIO4,
			ATA_FLAG_NO_IORDY,	0,	NULL },
	{"Snooping",	&simple_port_ops,	ATA_PIO4,
			0,	0,	NULL },
	{"PDC20230",	&pdc20230_port_ops,	ATA_PIO2,
			ATA_FLAG_NO_IORDY,
			ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,	NULL },
	{"HT6560A",	&ht6560a_port_ops,	ATA_PIO2,
			ATA_FLAG_NO_IORDY,	0,	NULL },
	{"HT6560B",	&ht6560b_port_ops,	ATA_PIO4,
			ATA_FLAG_NO_IORDY,	0,	NULL },
	{"OPTI82C611A",	&opti82c611a_port_ops,	ATA_PIO3,
			0,	0,	NULL },
	{"OPTI82C46X",	&opti82c46x_port_ops,	ATA_PIO3,
			0,	0,	NULL },
	{"QDI6500",	&qdi6500_port_ops,	ATA_PIO2,
			ATA_FLAG_NO_IORDY,
			ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,	qdi_port },
	{"QDI6580",	&qdi6580_port_ops,	ATA_PIO4,
			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,	qdi_port },
	{"QDI6580DP",	&qdi6580dp_port_ops,	ATA_PIO4,
			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,	qdi_port },
	{"W83759A",	&winbond_port_ops,	ATA_PIO4,
			0, ATA_PFLAG_PIO32 | ATA_PFLAG_PIO32CHANGE,
			winbond_port }
};
852*4882a593Smuzhiyun
/**
 * probe_chip_type - Discover controller
 * @probe: Probe entry to check
 *
 * Probe an ATA port and identify the type of controller. We don't
 * check if the controller appears to be driveless at this point.
 *
 * Returns one of the controller type constants used to index the
 * controllers[] table; BIOS is the catch-all fallback.
 */

static __init int probe_chip_type(struct legacy_probe *probe)
{
	/* Bit for this slot in the user-supplied chip enable masks */
	int mask = 1 << probe->slot;

	if (winbond && (probe->port == 0x1F0 || probe->port == 0x170)) {
		/* Take the W83759A out of jumpered mode and hand it
		   local, programmable timing control */
		u8 reg = winbond_readcfg(winbond, 0x81);
		reg |= 0x80;	/* jumpered mode off */
		winbond_writecfg(winbond, 0x81, reg);
		reg = winbond_readcfg(winbond, 0x83);
		reg |= 0xF0;	/* local control */
		winbond_writecfg(winbond, 0x83, reg);
		reg = winbond_readcfg(winbond, 0x85);
		reg |= 0xF0;	/* programmable timing */
		winbond_writecfg(winbond, 0x85, reg);

		reg = winbond_readcfg(winbond, 0x81);

		/* NOTE(review): the refreshed 0x81 byte is tested against
		   the slot mask - presumably the channel enable bits line
		   up with the slot numbering; verify against the data
		   sheet */
		if (reg & mask)
			return W83759A;
	}
	if (probe->port == 0x1F0) {
		unsigned long flags;
		local_irq_save(flags);
		/* Probes: this taskfile register I/O sequence exposes a
		   Promise PDC20230 series VLB controller if one is
		   present; it must not be interrupted mid-sequence */
		outb(inb(0x1F2) | 0x80, 0x1F2);
		inb(0x1F5);
		inb(0x1F2);
		inb(0x3F6);
		inb(0x3F6);
		inb(0x1F2);
		inb(0x1F2);

		if ((inb(0x1F2) & 0x80) == 0) {
			/* PDC20230c or 20630 ? */
			printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller"
							" detected.\n");
			udelay(100);
			inb(0x1F5);
			local_irq_restore(flags);
			return PDC20230;
		} else {
			outb(0x55, 0x1F2);
			inb(0x1F2);
			inb(0x1F2);
			/* A 20230-B is recognised but driven as BIOS */
			if (inb(0x1F2) == 0x00)
				printk(KERN_INFO "PDC20230-B VLB ATA "
						"controller detected.\n");
			local_irq_restore(flags);
			return BIOS;
		}
	}

	/* No hardware identified itself: trust whatever chip type the
	   user claimed for this slot via the module parameters */
	if (ht6560a & mask)
		return HT6560A;
	if (ht6560b & mask)
		return HT6560B;
	if (opti82c611a & mask)
		return OPTI611A;
	if (opti82c46x & mask)
		return OPTI46X;
	if (autospeed & mask)
		return SNOOP;
	return BIOS;
}
925*4882a593Smuzhiyun
926*4882a593Smuzhiyun
/**
 * legacy_init_one - attach a legacy interface
 * @probe: probe record
 *
 * Register an ISA bus IDE interface. Such interfaces are PIO and we
 * assume do not support IRQ sharing.
 *
 * On success the host is recorded in legacy_host[] and 0 is returned.
 * If no drive responds the port is torn down again and -ENODEV is
 * returned; resource or allocation failures return the usual negative
 * errno codes.
 */

static __init int legacy_init_one(struct legacy_probe *probe)
{
	struct legacy_controller *controller = &controllers[probe->type];
	int pio_modes = controller->pio_mask;
	unsigned long io = probe->port;
	u32 mask = (1 << probe->slot);
	struct ata_port_operations *ops = controller->ops;
	struct legacy_data *ld = &legacy_data[probe->slot];
	struct ata_host *host = NULL;
	struct ata_port *ap;
	struct platform_device *pdev;
	struct ata_device *dev;
	void __iomem *io_addr, *ctrl_addr;
	/* Per-slot IORDY override from the iordy_mask module option */
	u32 iordy = (iordy_mask & mask) ? 0: ATA_FLAG_NO_IORDY;
	int ret;

	iordy |= controller->flags;

	/* Hang the port off a platform device so devm resources have an
	   owner and unwind automatically with it */
	pdev = platform_device_register_simple(DRV_NAME, probe->slot, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/* Claim the taskfile block and the control port (base + 0x206) */
	ret = -EBUSY;
	if (devm_request_region(&pdev->dev, io, 8, "pata_legacy") == NULL ||
			devm_request_region(&pdev->dev, io + 0x0206, 1,
					"pata_legacy") == NULL)
		goto fail;

	ret = -ENOMEM;
	io_addr = devm_ioport_map(&pdev->dev, io, 8);
	ctrl_addr = devm_ioport_map(&pdev->dev, io + 0x0206, 1);
	if (!io_addr || !ctrl_addr)
		goto fail;
	ld->type = probe->type;
	/* Give the chip-specific setup hook a chance to claim its own
	   extra registers before the host exists */
	if (controller->setup)
		if (controller->setup(pdev, probe, ld) < 0)
			goto fail;
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host)
		goto fail;
	ap = host->ports[0];

	ap->ops = ops;
	ap->pio_mask = pio_modes;
	ap->flags |= ATA_FLAG_SLAVE_POSS | iordy;
	ap->pflags |= controller->pflags;
	ap->ioaddr.cmd_addr = io_addr;
	ap->ioaddr.altstatus_addr = ctrl_addr;
	ap->ioaddr.ctl_addr = ctrl_addr;
	ata_sff_std_ports(&ap->ioaddr);
	ap->host->private_data = ld;

	ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx", io, io + 0x0206);

	ret = ata_host_activate(host, probe->irq, ata_sff_interrupt, 0,
				&legacy_sht);
	if (ret)
		goto fail;
	/* Wait for the async device probes so the drive scan below sees
	   the final device states */
	async_synchronize_full();
	ld->platform_dev = pdev;

	/* Nothing found means we drop the port as its probably not there */

	ret = -ENODEV;
	ata_for_each_dev(dev, &ap->link, ALL) {
		if (!ata_dev_absent(dev)) {
			legacy_host[probe->slot] = host;
			ld->platform_dev = pdev;
			return 0;
		}
	}
	/* Driveless: unwind the fully activated host before failing */
	ata_host_detach(host);
fail:
	platform_device_unregister(pdev);
	return ret;
}
1011*4882a593Smuzhiyun
/**
 * legacy_check_special_cases - ATA special cases
 * @p: PCI device to check
 * @primary: set this if we find an ATA master
 * @secondary: set this if we find an ATA secondary
 *
 * A small number of vendors implemented early PCI ATA interfaces
 * on bridge logic without the ATA interface being PCI visible.
 * Where we have a matching PCI driver we must skip the relevant
 * device here. If we don't know about it then the legacy driver
 * is the right driver anyway.
 */

static void __init legacy_check_special_cases(struct pci_dev *p, int *primary,
								int *secondary)
{
	/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
	if (p->vendor == 0x1078 && p->device == 0x0000) {
		*primary = *secondary = 1;
		return;
	}
	/* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
	if (p->vendor == 0x1078 && p->device == 0x0002) {
		*primary = *secondary = 1;
		return;
	}
	/* Intel MPIIX - PIO ATA on non PCI side of bridge */
	if (p->vendor == 0x8086 && p->device == 0x1234) {
		u16 r;
		/* Config word 0x6C: bit 15 = port enabled,
		   bit 14 = port is the secondary channel */
		pci_read_config_word(p, 0x6C, &r);
		if (r & 0x8000) {
			/* ATA port enabled */
			if (r & 0x4000)
				*secondary = 1;
			else
				*primary = 1;
		}
		return;
	}
}
1052*4882a593Smuzhiyun
/*
 * probe_opti_vlb - probe for an OPTi 82C46x VLB controller
 *
 * Read the OPTi system configuration registers to determine the chip
 * variant and which legacy ports it drives, then queue the matching
 * probe entries.  Also forces the opti82c46x slot mask to cover both
 * standard slots.
 */
static __init void probe_opti_vlb(void)
{
	/* If an OPTI 82C46X is present find out where the channels are */
	static const char *optis[4] = {
		"3/463MV", "5MV",
		"5MVA", "5MVB"
	};
	u8 chans = 1;
	u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;

	opti82c46x = 3;	/* Assume master and slave first */
	printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n",
								optis[ctrl]);
	if (ctrl == 3)
		chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
	ctrl = opti_syscfg(0xAC);
	/* Check enabled and this port is the 465MV port. On the
	   MVB we may have two channels */
	if (ctrl & 8) {
		if (chans == 2) {
			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
			legacy_probe_add(0x170, 15, OPTI46X, 0);
		}
		/* NOTE(review): in the dual channel case one of these two
		   ports is queued a second time here - presumably
		   legacy_probe_add() tolerates duplicate entries; verify */
		if (ctrl & 4)
			legacy_probe_add(0x170, 15, OPTI46X, 0);
		else
			legacy_probe_add(0x1F0, 14, OPTI46X, 0);
	} else
		legacy_probe_add(0x1F0, 14, OPTI46X, 0);
}
1083*4882a593Smuzhiyun
/*
 * qdi65_identify_port - classify a detected QDI65x0 card
 * @r: control register contents read from the card
 * @res: scratch value from the presence test (reused below)
 * @port: config port base (0x30 or 0xB0); the caller holds the
 *        port..port+1 region while we run
 *
 * Decode the card type from the top nibble of @r and queue legacy
 * probe entries for the IDE port(s) it drives.
 */
static __init void qdi65_identify_port(u8 r, u8 res, unsigned long port)
{
	static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
	/* Check card type */
	if ((r & 0xF0) == 0xC0) {
		/* QD6500: single channel */
		if (r & 8)
			/* Disabled ? */
			return;
		legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
								QDI6500, port);
	}
	if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
		/* QD6580: dual channel */
		if (!request_region(port + 2 , 2, "pata_qdi")) {
			/* NOTE(review): this releases the port..port+1
			   region that our caller requested, and the caller
			   releases it again after we return - looks like a
			   double release; verify before relying on it */
			release_region(port, 2);
			return;
		}
		res = inb(port + 3);
		/* Single channel mode ? */
		if (res & 1)
			legacy_probe_add(ide_port[r & 0x01], 14 + (r & 0x01),
								QDI6580, port);
		else { /* Dual channel mode */
			legacy_probe_add(0x1F0, 14, QDI6580DP, port);
			/* port + 0x02, r & 0x04 */
			legacy_probe_add(0x170, 15, QDI6580DP, port + 2);
		}
		release_region(port + 2, 2);
	}
}
1115*4882a593Smuzhiyun
/*
 * probe_qdi_vlb - look for QDI65x0 VLB controllers
 *
 * QDI cards answer at one of two config port bases.  For each base we
 * write a test value and read it back to confirm a card is present,
 * check the port-decode bit agrees with the base we probed, and hand
 * classification over to qdi65_identify_port().
 */
static __init void probe_qdi_vlb(void)
{
	unsigned long flags;
	static const unsigned long qd_port[2] = { 0x30, 0xB0 };
	int i;

	/*
	 * Check each possible QD65xx base address
	 */

	for (i = 0; i < 2; i++) {
		unsigned long port = qd_port[i];
		u8 r, res;


		if (request_region(port, 2, "pata_qdi")) {
			/* Check for a card */
			local_irq_save(flags);
			/* I have no h/w that needs this delay but it
			   is present in the historic code */
			r = inb(port);
			udelay(1);
			outb(0x19, port);
			udelay(1);
			res = inb(port);
			udelay(1);
			/* Restore the original register value */
			outb(r, port);
			udelay(1);
			local_irq_restore(flags);

			/* Fail: the test value stuck, so nothing latched
			   our write back differently */
			if (res == 0x19) {
				release_region(port, 2);
				continue;
			}
			/* Passes the presence test */
			r = inb(port + 1);
			udelay(1);
			/* Check port agrees with port set */
			if ((r & 2) >> 1 == i)
				qdi65_identify_port(r, res, port);
			release_region(port, 2);
		}
	}
}
1161*4882a593Smuzhiyun
/**
 * legacy_init - attach legacy interfaces
 *
 * Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
 * Right now we do not scan the ide0 and ide1 address but should do so
 * for non PCI systems or systems with no PCI IDE legacy mode devices.
 * If you fix that note there are special cases to consider like VLB
 * drivers and CS5510/20.
 *
 * Returns 0 when at least one interface attached, -ENODEV otherwise.
 */

static __init int legacy_init(void)
{
	int i;
	int ct = 0;		/* number of interfaces brought up */
	int primary = 0;	/* 0x1F0 already owned by a PCI device */
	int secondary = 0;	/* 0x170 already owned by a PCI device */
	int pci_present = 0;
	struct legacy_probe *pl = &probe_list[0];
	int slot = 0;

	struct pci_dev *p = NULL;

	for_each_pci_dev(p) {
		int r;
		/* Check for any overlap of the system ATA mappings. Native
		   mode controllers stuck on these addresses or some devices
		   in 'raid' mode won't be found by the storage class test */
		for (r = 0; r < 6; r++) {
			if (pci_resource_start(p, r) == 0x1f0)
				primary = 1;
			if (pci_resource_start(p, r) == 0x170)
				secondary = 1;
		}
		/* Check for special cases */
		legacy_check_special_cases(p, &primary, &secondary);

		/* If PCI bus is present then don't probe for tertiary
		   legacy ports */
		pci_present = 1;
	}

	if (winbond == 1)
		winbond = 0x130;	/* Default port, alt is 1B0 */

	/* Queue the standard ports unless PCI already claimed them */
	if (primary == 0 || all)
		legacy_probe_add(0x1F0, 14, UNKNOWN, 0);
	if (secondary == 0 || all)
		legacy_probe_add(0x170, 15, UNKNOWN, 0);

	if (probe_all || !pci_present) {
		/* ISA/VLB extra ports */
		legacy_probe_add(0x1E8, 11, UNKNOWN, 0);
		legacy_probe_add(0x168, 10, UNKNOWN, 0);
		legacy_probe_add(0x1E0, 8, UNKNOWN, 0);
		legacy_probe_add(0x160, 12, UNKNOWN, 0);
	}

	/* Chip-specific VLB probes queue their own typed entries */
	if (opti82c46x)
		probe_opti_vlb();
	if (qdi)
		probe_qdi_vlb();

	/* Identify and attach everything that was queued */
	for (i = 0; i < NR_HOST; i++, pl++) {
		if (pl->port == 0)
			continue;
		if (pl->type == UNKNOWN)
			pl->type = probe_chip_type(pl);
		pl->slot = slot++;
		if (legacy_init_one(pl) == 0)
			ct++;
	}
	if (ct != 0)
		return 0;
	return -ENODEV;
}
1237*4882a593Smuzhiyun
legacy_exit(void)1238*4882a593Smuzhiyun static __exit void legacy_exit(void)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun int i;
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun for (i = 0; i < nr_legacy_host; i++) {
1243*4882a593Smuzhiyun struct legacy_data *ld = &legacy_data[i];
1244*4882a593Smuzhiyun ata_host_detach(legacy_host[i]);
1245*4882a593Smuzhiyun platform_device_unregister(ld->platform_dev);
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun }
1248*4882a593Smuzhiyun
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/* Aliases so this driver loads in place of the historic standalone
   pata_qdi and pata_winbond drivers */
MODULE_ALIAS("pata_qdi");
MODULE_ALIAS("pata_winbond");

/* Module options: probe_all also scans the extra ISA/VLB ports even
   when PCI is present; autospeed/ht6560a/ht6560b/opti82c611a/
   opti82c46x are per-slot bit masks claiming a chip type for a slot;
   qdi enables the QDI65x0 probe; winbond gives the W83759A config
   port (1 selects the default 0x130); iordy_mask enables IORDY per
   slot; pio_mask is presumably a per-slot PIO limit - its use is
   outside this part of the file, verify there. */
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
module_param(ht6560a, int, 0);
module_param(ht6560b, int, 0);
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(qdi, int, 0);
module_param(winbond, int, 0);
module_param(pio_mask, int, 0);
module_param(iordy_mask, int, 0);

module_init(legacy_init);
module_exit(legacy_exit);
1269