1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * sata_via.c - VIA Serial ATA controllers
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Maintained by: Tejun Heo <tj@kernel.org>
6*4882a593Smuzhiyun * Please ALWAYS copy linux-ide@vger.kernel.org
7*4882a593Smuzhiyun * on emails.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
10*4882a593Smuzhiyun * Copyright 2003-2004 Jeff Garzik
11*4882a593Smuzhiyun *
12*4882a593Smuzhiyun * libata documentation is available via 'make {ps|pdf}docs',
13*4882a593Smuzhiyun * as Documentation/driver-api/libata.rst
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * Hardware documentation available under NDA.
16*4882a593Smuzhiyun */
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <linux/kernel.h>
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/pci.h>
21*4882a593Smuzhiyun #include <linux/blkdev.h>
22*4882a593Smuzhiyun #include <linux/delay.h>
23*4882a593Smuzhiyun #include <linux/device.h>
24*4882a593Smuzhiyun #include <scsi/scsi.h>
25*4882a593Smuzhiyun #include <scsi/scsi_cmnd.h>
26*4882a593Smuzhiyun #include <scsi/scsi_host.h>
27*4882a593Smuzhiyun #include <linux/libata.h>
28*4882a593Smuzhiyun
#define DRV_NAME	"sata_via"
#define DRV_VERSION	"2.6"

/*
 * vt8251 is different from other sata controllers of VIA. It has two
 * channels, each channel has both Master and Slave slot.
 */
enum board_ids_enum {
	vt6420,		/* classic two-channel SATA, unsafe SCR access */
	vt6421,		/* 2 SATA channels + 1 PATA channel */
	vt8251,		/* 2 channels, Master/Slave each; SCRs in PCI config space */
};

/* PCI config registers and bit masks used by this driver */
enum {
	SATA_CHAN_ENAB		= 0x40, /* SATA channel enable */
	SATA_INT_GATE		= 0x41, /* SATA interrupt gating */
	SATA_NATIVE_MODE	= 0x42, /* Native mode enable */
	SVIA_MISC_3		= 0x46, /* Miscellaneous Control III */
	PATA_UDMA_TIMING	= 0xB3, /* PATA timing for DMA/ cable detect */
	PATA_PIO_TIMING		= 0xAB, /* PATA timing register */

	/* bits in SATA_CHAN_ENAB / SATA_INT_GATE */
	PORT0			= (1 << 1),
	PORT1			= (1 << 0),
	ALL_PORTS		= PORT0 | PORT1,

	/* bits in SATA_NATIVE_MODE */
	NATIVE_MODE_ALL		= (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4),

	SATA_EXT_PHY		= (1 << 6), /* 0==use PATA, 1==ext phy */

	SATA_HOTPLUG		= (1 << 5), /* enable IRQ on hotplug (SVIA_MISC_3) */
};
60*4882a593Smuzhiyun
/* Per-host private data. */
struct svia_priv {
	bool			wd_workaround;	/* FIFO watermark fix applied, see svia_wd_fix() */
};

/* Module parameter: opt-in hot-plug support on VT6420 (disabled by default). */
static int vt6420_hotplug;
module_param_named(vt6420_hotplug, vt6420_hotplug, int, 0644);
MODULE_PARM_DESC(vt6420_hotplug, "Enable hot-plug support for VT6420 (0=Don't support, 1=support)");
68*4882a593Smuzhiyun
/* Forward declarations for the ops tables below. */
static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int svia_pci_device_resume(struct pci_dev *pdev);
#endif
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val);
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val);
static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
static void svia_noop_freeze(struct ata_port *ap);
static int vt6420_prereset(struct ata_link *link, unsigned long deadline);
static void vt6420_bmdma_start(struct ata_queued_cmd *qc);
static int vt6421_pata_cable_detect(struct ata_port *ap);
static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev);
static void vt6421_error_handler(struct ata_port *ap);
85*4882a593Smuzhiyun
/* PCI IDs handled by this driver; driver_data selects the board type. */
static const struct pci_device_id svia_pci_tbl[] = {
	{ PCI_VDEVICE(VIA, 0x5337), vt6420 },
	{ PCI_VDEVICE(VIA, 0x0591), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3149), vt6420 },	/* 2 sata chnls (Master) */
	{ PCI_VDEVICE(VIA, 0x3249), vt6421 },	/* 2 sata chnls, 1 pata chnl */
	{ PCI_VDEVICE(VIA, 0x5372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x7372), vt6420 },
	{ PCI_VDEVICE(VIA, 0x5287), vt8251 },	/* 2 sata chnls (Master/Slave) */
	{ PCI_VDEVICE(VIA, 0x9000), vt8251 },

	{ }	/* terminate list */
};
98*4882a593Smuzhiyun
static struct pci_driver svia_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= svia_pci_tbl,
	.probe			= svia_init_one,
#ifdef CONFIG_PM_SLEEP
	/* custom resume so svia_configure()/the WD fix are re-applied */
	.suspend		= ata_pci_device_suspend,
	.resume			= svia_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};

/* Standard BMDMA SCSI host template. */
static struct scsi_host_template svia_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
113*4882a593Smuzhiyun
/* Base ops: all variants need the taskfile-load workaround. */
static struct ata_port_operations svia_base_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.sff_tf_load		= svia_tf_load,
};

/* vt6420: no generic SCR access; SCRs touched only in prereset. */
static struct ata_port_operations vt6420_sata_ops = {
	.inherits		= &svia_base_ops,
	.freeze			= svia_noop_freeze,
	.prereset		= vt6420_prereset,
	.bmdma_start		= vt6420_bmdma_start,
};

/* vt6421 PATA channel ops: cable detect plus timing programming. */
static struct ata_port_operations vt6421_pata_ops = {
	.inherits		= &svia_base_ops,
	.cable_detect		= vt6421_pata_cable_detect,
	.set_piomode		= vt6421_set_pio_mode,
	.set_dmamode		= vt6421_set_dma_mode,
};

/* vt6421 SATA channel ops: memory-mapped SCRs, custom EH for WD fix. */
static struct ata_port_operations vt6421_sata_ops = {
	.inherits		= &svia_base_ops,
	.scr_read		= svia_scr_read,
	.scr_write		= svia_scr_write,
	.error_handler		= vt6421_error_handler,
};

/* vt8251: SCRs live in PCI config space; standard SATA hardreset. */
static struct ata_port_operations vt8251_ops = {
	.inherits		= &svia_base_ops,
	.hardreset		= sata_std_hardreset,
	.scr_read		= vt8251_scr_read,
	.scr_write		= vt8251_scr_write,
};
146*4882a593Smuzhiyun
static const struct ata_port_info vt6420_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6420_sata_ops,
};

static const struct ata_port_info vt6421_sport_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_sata_ops,
};

static const struct ata_port_info vt6421_pport_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	/* No MWDMA */
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt6421_pata_ops,
};

static const struct ata_port_info vt8251_port_info = {
	.flags		= ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA2,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &vt8251_ops,
};

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, svia_pci_tbl);
MODULE_VERSION(DRV_VERSION);
184*4882a593Smuzhiyun
/* Read an SCR register; only SStatus/SError/SControl are mapped. */
static int svia_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	/* SCRs are packed as consecutive 32-bit registers */
	*val = ioread32(scr_base + 4 * sc_reg);
	return 0;
}
192*4882a593Smuzhiyun
/* Write an SCR register; only SStatus/SError/SControl are mapped. */
static int svia_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
{
	void __iomem *scr_base = link->ap->ioaddr.scr_addr;

	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	/* SCRs are packed as consecutive 32-bit registers */
	iowrite32(val, scr_base + 4 * sc_reg);
	return 0;
}
200*4882a593Smuzhiyun
/*
 * vt8251 exposes SCR state through PCI config space rather than a
 * memory-mapped BAR; each Master/Slave slot has its own config bytes.
 */
static int vt8251_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
{
	/* maps the 2-bit IPM config field to standard SStatus IPM values */
	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	/* each channel hosts a Master/Slave pair: slot = 2*port + pmp */
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;
	u8 raw;

	switch (scr) {
	case SCR_STATUS:
		pci_read_config_byte(pdev, 0xA0 + slot, &raw);

		/* read the DET field, bit0 and 1 of the config byte */
		v |= raw & 0x03;

		/* read the SPD field, bit4 of the configure byte */
		if (raw & (1 << 4))
			v |= 0x02 << 4;
		else
			v |= 0x01 << 4;

		/* read the IPM field, bit2 and 3 of the config byte */
		v |= ipm_tbl[(raw >> 2) & 0x3];
		break;

	case SCR_ERROR:
		/* devices other than 5287 uses 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_read_config_dword(pdev, 0xB0 + slot * 4, &v);
		break;

	case SCR_CONTROL:
		pci_read_config_byte(pdev, 0xA4 + slot, &raw);

		/* read the DET field, bit0 and bit1 */
		v |= ((raw & 0x02) << 1) | (raw & 0x01);

		/* read the IPM field, bit2 and bit3 */
		v |= ((raw >> 2) & 0x03) << 8;
		break;

	default:
		return -EINVAL;
	}

	*val = v;
	return 0;
}
249*4882a593Smuzhiyun
/* Write an SCR value into the vt8251's config-space representation. */
static int vt8251_scr_write(struct ata_link *link, unsigned int scr, u32 val)
{
	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
	/* each channel hosts a Master/Slave pair: slot = 2*port + pmp */
	int slot = 2 * link->ap->port_no + link->pmp;
	u32 v = 0;

	switch (scr) {
	case SCR_ERROR:
		/* devices other than 5287 uses 0xA8 as base */
		WARN_ON(pdev->device != 0x5287);
		pci_write_config_dword(pdev, 0xB0 + slot * 4, val);
		return 0;

	case SCR_CONTROL:
		/* set the DET field */
		v |= ((val & 0x4) >> 1) | (val & 0x1);

		/* set the IPM field */
		v |= ((val >> 8) & 0x3) << 2;

		pci_write_config_byte(pdev, 0xA4 + slot, v);
		return 0;

	default:
		/* SCR_STATUS is read-only; everything else is unsupported */
		return -EINVAL;
	}
}
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun /**
279*4882a593Smuzhiyun * svia_tf_load - send taskfile registers to host controller
280*4882a593Smuzhiyun * @ap: Port to which output is sent
281*4882a593Smuzhiyun * @tf: ATA taskfile register set
282*4882a593Smuzhiyun *
283*4882a593Smuzhiyun * Outputs ATA taskfile to standard ATA host controller.
284*4882a593Smuzhiyun *
285*4882a593Smuzhiyun * This is to fix the internal bug of via chipsets, which will
286*4882a593Smuzhiyun * reset the device register after changing the IEN bit on ctl
287*4882a593Smuzhiyun * register.
288*4882a593Smuzhiyun */
svia_tf_load(struct ata_port * ap,const struct ata_taskfile * tf)289*4882a593Smuzhiyun static void svia_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
290*4882a593Smuzhiyun {
291*4882a593Smuzhiyun struct ata_taskfile ttf;
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun if (tf->ctl != ap->last_ctl) {
294*4882a593Smuzhiyun ttf = *tf;
295*4882a593Smuzhiyun ttf.flags |= ATA_TFLAG_DEVICE;
296*4882a593Smuzhiyun tf = &ttf;
297*4882a593Smuzhiyun }
298*4882a593Smuzhiyun ata_sff_tf_load(ap, tf);
299*4882a593Smuzhiyun }
300*4882a593Smuzhiyun
/* Freeze callback that deliberately leaves ATA_NIEN untouched. */
static void svia_noop_freeze(struct ata_port *ap)
{
	/* Some VIA controllers choke if ATA_NIEN is manipulated in
	 * certain way.  Leave it alone and just clear pending IRQ.
	 */
	ap->ops->sff_check_status(ap);
	ata_bmdma_irq_clear(ap);
}
309*4882a593Smuzhiyun
/**
 *	vt6420_prereset - prereset for vt6420
 *	@link: target ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	SCR registers on vt6420 are pieces of shit and may hang the
 *	whole machine completely if accessed with the wrong timing.
 *	To avoid such catastrophe, vt6420 doesn't provide generic SCR
 *	access operations, but uses SStatus and SControl only during
 *	boot probing in controlled way.
 *
 *	As the old (pre EH update) probing code is proven to work, we
 *	strictly follow the access pattern.  Do not reorder the SCR
 *	reads/writes below.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int vt6420_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &ap->link.eh_context;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus, scontrol;
	int online;

	/* don't do any SCR stuff if we're not loading */
	if (!(ap->pflags & ATA_PFLAG_LOADING))
		goto skip_scr;

	/* Resume phy.  This is the old SATA resume sequence */
	svia_scr_write(link, SCR_CONTROL, 0x300);
	svia_scr_read(link, SCR_CONTROL, &scontrol); /* flush */

	/* wait for phy to become ready, if necessary */
	do {
		ata_msleep(link->ap, 200);
		svia_scr_read(link, SCR_STATUS, &sstatus);
		/* DET == 1 means device presence detected but no phy comm yet */
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* open code sata_print_link_status() */
	svia_scr_read(link, SCR_STATUS, &sstatus);
	svia_scr_read(link, SCR_CONTROL, &scontrol);

	online = (sstatus & 0xf) == 0x3;

	ata_port_info(ap,
		      "SATA link %s 1.5 Gbps (SStatus %X SControl %X)\n",
		      online ? "up" : "down", sstatus, scontrol);

	/* SStatus is read one more time */
	svia_scr_read(link, SCR_STATUS, &sstatus);

	if (!online) {
		/* tell EH to bail */
		ehc->i.action &= ~ATA_EH_RESET;
		return 0;
	}

 skip_scr:
	/* wait for !BSY */
	ata_sff_wait_ready(link, deadline);

	return 0;
}
379*4882a593Smuzhiyun
vt6420_bmdma_start(struct ata_queued_cmd * qc)380*4882a593Smuzhiyun static void vt6420_bmdma_start(struct ata_queued_cmd *qc)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun struct ata_port *ap = qc->ap;
383*4882a593Smuzhiyun if ((qc->tf.command == ATA_CMD_PACKET) &&
384*4882a593Smuzhiyun (qc->scsicmd->sc_data_direction == DMA_TO_DEVICE)) {
385*4882a593Smuzhiyun /* Prevents corruption on some ATAPI burners */
386*4882a593Smuzhiyun ata_sff_pause(ap);
387*4882a593Smuzhiyun }
388*4882a593Smuzhiyun ata_bmdma_start(qc);
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
vt6421_pata_cable_detect(struct ata_port * ap)391*4882a593Smuzhiyun static int vt6421_pata_cable_detect(struct ata_port *ap)
392*4882a593Smuzhiyun {
393*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(ap->host->dev);
394*4882a593Smuzhiyun u8 tmp;
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun pci_read_config_byte(pdev, PATA_UDMA_TIMING, &tmp);
397*4882a593Smuzhiyun if (tmp & 0x10)
398*4882a593Smuzhiyun return ATA_CBL_PATA40;
399*4882a593Smuzhiyun return ATA_CBL_PATA80;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
vt6421_set_pio_mode(struct ata_port * ap,struct ata_device * adev)402*4882a593Smuzhiyun static void vt6421_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
403*4882a593Smuzhiyun {
404*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(ap->host->dev);
405*4882a593Smuzhiyun static const u8 pio_bits[] = { 0xA8, 0x65, 0x65, 0x31, 0x20 };
406*4882a593Smuzhiyun pci_write_config_byte(pdev, PATA_PIO_TIMING - adev->devno,
407*4882a593Smuzhiyun pio_bits[adev->pio_mode - XFER_PIO_0]);
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun
vt6421_set_dma_mode(struct ata_port * ap,struct ata_device * adev)410*4882a593Smuzhiyun static void vt6421_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(ap->host->dev);
413*4882a593Smuzhiyun static const u8 udma_bits[] = { 0xEE, 0xE8, 0xE6, 0xE4, 0xE2, 0xE1, 0xE0, 0xE0 };
414*4882a593Smuzhiyun pci_write_config_byte(pdev, PATA_UDMA_TIMING - adev->devno,
415*4882a593Smuzhiyun udma_bits[adev->dma_mode - XFER_UDMA_0]);
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun
/* Expected PCI BAR sizes, used by svia_init_one() to sanity-check hardware. */
static const unsigned int svia_bar_sizes[] = {
	8, 4, 8, 4, 16, 256
};

/* vt6421 uses different BAR layout than the other variants. */
static const unsigned int vt6421_bar_sizes[] = {
	16, 16, 16, 16, 32, 128
};
425*4882a593Smuzhiyun
/* vt6420/vt8251: SCR blocks in BAR 5 are 128 bytes apart per port. */
static void __iomem *svia_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 128);
}
430*4882a593Smuzhiyun
/* vt6421: SCR blocks in BAR 5 are 64 bytes apart per port. */
static void __iomem *vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
	return addr + (port * 64);
}
435*4882a593Smuzhiyun
/* Wire up taskfile/BMDMA/SCR iomem addresses for one vt6421 port. */
static void vt6421_init_addrs(struct ata_port *ap)
{
	void __iomem * const * iomap = ap->host->iomap;
	void __iomem *reg_addr = iomap[ap->port_no];	/* per-port BAR 0..2 */
	void __iomem *bmdma_addr = iomap[4] + (ap->port_no * 8); /* BAR 4, 8 bytes/port */
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = reg_addr;
	/* ctl/altstatus live at cmd+8; ATA_PCI_CTL_OFS tags the offset */
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = (void __iomem *)
		((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
	ioaddr->bmdma_addr = bmdma_addr;
	ioaddr->scr_addr = vt6421_scr_addr(iomap[5], ap->port_no);

	ata_sff_std_ports(ioaddr);

	ata_port_pbar_desc(ap, ap->port_no, -1, "port");
	ata_port_pbar_desc(ap, 4, ap->port_no * 8, "bmdma");
}
455*4882a593Smuzhiyun
/*
 * Allocate and set up an ata_host for vt6420.  SCR ops are patched in
 * only when the vt6420_hotplug module parameter is set, because generic
 * SCR access is unsafe on this chip (see vt6420_prereset()).
 */
static int vt6420_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt6420_port_info, NULL };
	struct ata_host *host;
	int rc;

	if (vt6420_hotplug) {
		/* hotplug detection needs SCR access; user opted in */
		ppi[0]->port_ops->scr_read = svia_scr_read;
		ppi[0]->port_ops->scr_write = svia_scr_write;
	}

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	/* BAR 5 holds the SCR register blocks */
	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	host->ports[0]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 0);
	host->ports[1]->ioaddr.scr_addr = svia_scr_addr(host->iomap[5], 1);

	return 0;
}
483*4882a593Smuzhiyun
/*
 * Allocate and set up an ata_host for vt6421: two SATA ports plus one
 * PATA port, each with its own BAR (BARs 0-2), BMDMA in BAR 4, SCRs in
 * BAR 5.
 */
static int vt6421_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] =
		{ &vt6421_sport_info, &vt6421_sport_info, &vt6421_pport_info };
	struct ata_host *host;
	int i, rc;

	*r_host = host = ata_host_alloc_pinfo(&pdev->dev, ppi, ARRAY_SIZE(ppi));
	if (!host) {
		dev_err(&pdev->dev, "failed to allocate host\n");
		return -ENOMEM;
	}

	/* map BARs 0-5 (0x3f == all six) */
	rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to request/iomap PCI BARs (errno=%d)\n",
			rc);
		return rc;
	}
	host->iomap = pcim_iomap_table(pdev);

	for (i = 0; i < host->n_ports; i++)
		vt6421_init_addrs(host->ports[i]);

	return dma_set_mask_and_coherent(&pdev->dev, ATA_DMA_MASK);
}
510*4882a593Smuzhiyun
/*
 * Allocate and set up an ata_host for vt8251.  Each channel carries a
 * Master and a Slave SATA device, modelled as slave links.
 */
static int vt8251_prepare_host(struct pci_dev *pdev, struct ata_host **r_host)
{
	const struct ata_port_info *ppi[] = { &vt8251_port_info, NULL };
	struct ata_host *host;
	int i, rc;

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	*r_host = host;

	rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
	if (rc) {
		dev_err(&pdev->dev, "failed to iomap PCI BAR 5\n");
		return rc;
	}

	/* 8251 hosts four sata ports as M/S of the two channels */
	for (i = 0; i < host->n_ports; i++)
		ata_slave_link_init(host->ports[i]);

	return 0;
}
534*4882a593Smuzhiyun
svia_wd_fix(struct pci_dev * pdev)535*4882a593Smuzhiyun static void svia_wd_fix(struct pci_dev *pdev)
536*4882a593Smuzhiyun {
537*4882a593Smuzhiyun u8 tmp8;
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun pci_read_config_byte(pdev, 0x52, &tmp8);
540*4882a593Smuzhiyun pci_write_config_byte(pdev, 0x52, tmp8 | BIT(2));
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
vt642x_interrupt(int irq,void * dev_instance)543*4882a593Smuzhiyun static irqreturn_t vt642x_interrupt(int irq, void *dev_instance)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun struct ata_host *host = dev_instance;
546*4882a593Smuzhiyun irqreturn_t rc = ata_bmdma_interrupt(irq, dev_instance);
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun /* if the IRQ was not handled, it might be a hotplug IRQ */
549*4882a593Smuzhiyun if (rc != IRQ_HANDLED) {
550*4882a593Smuzhiyun u32 serror;
551*4882a593Smuzhiyun unsigned long flags;
552*4882a593Smuzhiyun
553*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
554*4882a593Smuzhiyun /* check for hotplug on port 0 */
555*4882a593Smuzhiyun svia_scr_read(&host->ports[0]->link, SCR_ERROR, &serror);
556*4882a593Smuzhiyun if (serror & SERR_PHYRDY_CHG) {
557*4882a593Smuzhiyun ata_ehi_hotplugged(&host->ports[0]->link.eh_info);
558*4882a593Smuzhiyun ata_port_freeze(host->ports[0]);
559*4882a593Smuzhiyun rc = IRQ_HANDLED;
560*4882a593Smuzhiyun }
561*4882a593Smuzhiyun /* check for hotplug on port 1 */
562*4882a593Smuzhiyun svia_scr_read(&host->ports[1]->link, SCR_ERROR, &serror);
563*4882a593Smuzhiyun if (serror & SERR_PHYRDY_CHG) {
564*4882a593Smuzhiyun ata_ehi_hotplugged(&host->ports[1]->link.eh_info);
565*4882a593Smuzhiyun ata_port_freeze(host->ports[1]);
566*4882a593Smuzhiyun rc = IRQ_HANDLED;
567*4882a593Smuzhiyun }
568*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun
571*4882a593Smuzhiyun return rc;
572*4882a593Smuzhiyun }
573*4882a593Smuzhiyun
/*
 * vt6421 error handler: detect the WD FIFO-overflow signature in SError
 * and enable the watermark workaround lazily, then run the standard SFF
 * error handler.
 */
static void vt6421_error_handler(struct ata_port *ap)
{
	struct svia_priv *hpriv = ap->host->private_data;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	u32 serror;

	/* see svia_configure() for description */
	if (!hpriv->wd_workaround) {
		svia_scr_read(&ap->link, SCR_ERROR, &serror);
		/* 0x1000500 is the SError pattern this bug produces */
		if (serror == 0x1000500) {
			ata_port_warn(ap, "Incompatible drive: enabling workaround. This slows down transfer rate to ~60 MB/s");
			svia_wd_fix(pdev);
			hpriv->wd_workaround = true;
			/* suppress EH noise for this recovery pass */
			ap->link.eh_context.i.flags |= ATA_EHI_QUIET;
		}
	}

	ata_sff_error_handler(ap);
}
593*4882a593Smuzhiyun
/*
 * Common controller configuration: enable channels, route interrupts,
 * force native mode, optionally enable hotplug IRQs, and apply the WD
 * FIFO workaround where needed.
 */
static void svia_configure(struct pci_dev *pdev, int board_id,
			   struct svia_priv *hpriv)
{
	u8 tmp8;

	pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8);
	dev_info(&pdev->dev, "routed to hard irq line %d\n",
		 (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f);

	/* make sure SATA channels are enabled */
	pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n",
			(int)tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8);
	}

	/* make sure interrupts for each channel sent to us */
	pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8);
	if ((tmp8 & ALL_PORTS) != ALL_PORTS) {
		dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n",
			(int) tmp8);
		tmp8 |= ALL_PORTS;
		pci_write_config_byte(pdev, SATA_INT_GATE, tmp8);
	}

	/* make sure native mode is enabled */
	pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8);
	if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) {
		dev_dbg(&pdev->dev,
			"enabling SATA channel native mode (0x%x)\n",
			(int) tmp8);
		tmp8 |= NATIVE_MODE_ALL;
		pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8);
	}

	/* hotplug IRQs: always on vt6421, opt-in on vt6420 */
	if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421) {
		/* enable IRQ on hotplug */
		pci_read_config_byte(pdev, SVIA_MISC_3, &tmp8);
		if ((tmp8 & SATA_HOTPLUG) != SATA_HOTPLUG) {
			dev_dbg(&pdev->dev,
				"enabling SATA hotplug (0x%x)\n",
				(int) tmp8);
			tmp8 |= SATA_HOTPLUG;
			pci_write_config_byte(pdev, SVIA_MISC_3, tmp8);
		}
	}

	/*
	 * vt6420/1 has problems talking to some drives.  The following
	 * is the fix from Joseph Chan <JosephChan@via.com.tw>.
	 *
	 * When host issues HOLD, device may send up to 20DW of data
	 * before acknowledging it with HOLDA and the host should be
	 * able to buffer them in FIFO.  Unfortunately, some WD drives
	 * send up to 40DW before acknowledging HOLD and, in the
	 * default configuration, this ends up overflowing vt6421's
	 * FIFO, making the controller abort the transaction with
	 * R_ERR.
	 *
	 * Rx52[2] is the internal 128DW FIFO Flow control watermark
	 * adjusting mechanism enable bit and the default value 0
	 * means host will issue HOLD to device when the left FIFO
	 * size goes below 32DW.  Setting it to 1 makes the watermark
	 * 64DW.
	 *
	 * https://bugzilla.kernel.org/show_bug.cgi?id=15173
	 * http://article.gmane.org/gmane.linux.ide/46352
	 * http://thread.gmane.org/gmane.linux.kernel/1062139
	 *
	 * As the fix slows down data transfer, apply it only if the error
	 * actually appears - see vt6421_error_handler()
	 * Apply the fix always on vt6420 as we don't know if SCR_ERROR can be
	 * read safely.
	 */
	if (board_id == vt6420) {
		svia_wd_fix(pdev);
		hpriv->wd_workaround = true;
	}
}
675*4882a593Smuzhiyun
svia_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)676*4882a593Smuzhiyun static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun unsigned int i;
679*4882a593Smuzhiyun int rc;
680*4882a593Smuzhiyun struct ata_host *host = NULL;
681*4882a593Smuzhiyun int board_id = (int) ent->driver_data;
682*4882a593Smuzhiyun const unsigned *bar_sizes;
683*4882a593Smuzhiyun struct svia_priv *hpriv;
684*4882a593Smuzhiyun
685*4882a593Smuzhiyun ata_print_version_once(&pdev->dev, DRV_VERSION);
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun rc = pcim_enable_device(pdev);
688*4882a593Smuzhiyun if (rc)
689*4882a593Smuzhiyun return rc;
690*4882a593Smuzhiyun
691*4882a593Smuzhiyun if (board_id == vt6421)
692*4882a593Smuzhiyun bar_sizes = &vt6421_bar_sizes[0];
693*4882a593Smuzhiyun else
694*4882a593Smuzhiyun bar_sizes = &svia_bar_sizes[0];
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++)
697*4882a593Smuzhiyun if ((pci_resource_start(pdev, i) == 0) ||
698*4882a593Smuzhiyun (pci_resource_len(pdev, i) < bar_sizes[i])) {
699*4882a593Smuzhiyun dev_err(&pdev->dev,
700*4882a593Smuzhiyun "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n",
701*4882a593Smuzhiyun i,
702*4882a593Smuzhiyun (unsigned long long)pci_resource_start(pdev, i),
703*4882a593Smuzhiyun (unsigned long long)pci_resource_len(pdev, i));
704*4882a593Smuzhiyun return -ENODEV;
705*4882a593Smuzhiyun }
706*4882a593Smuzhiyun
707*4882a593Smuzhiyun switch (board_id) {
708*4882a593Smuzhiyun case vt6420:
709*4882a593Smuzhiyun rc = vt6420_prepare_host(pdev, &host);
710*4882a593Smuzhiyun break;
711*4882a593Smuzhiyun case vt6421:
712*4882a593Smuzhiyun rc = vt6421_prepare_host(pdev, &host);
713*4882a593Smuzhiyun break;
714*4882a593Smuzhiyun case vt8251:
715*4882a593Smuzhiyun rc = vt8251_prepare_host(pdev, &host);
716*4882a593Smuzhiyun break;
717*4882a593Smuzhiyun default:
718*4882a593Smuzhiyun rc = -EINVAL;
719*4882a593Smuzhiyun }
720*4882a593Smuzhiyun if (rc)
721*4882a593Smuzhiyun return rc;
722*4882a593Smuzhiyun
723*4882a593Smuzhiyun hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
724*4882a593Smuzhiyun if (!hpriv)
725*4882a593Smuzhiyun return -ENOMEM;
726*4882a593Smuzhiyun host->private_data = hpriv;
727*4882a593Smuzhiyun
728*4882a593Smuzhiyun svia_configure(pdev, board_id, hpriv);
729*4882a593Smuzhiyun
730*4882a593Smuzhiyun pci_set_master(pdev);
731*4882a593Smuzhiyun if ((board_id == vt6420 && vt6420_hotplug) || board_id == vt6421)
732*4882a593Smuzhiyun return ata_host_activate(host, pdev->irq, vt642x_interrupt,
733*4882a593Smuzhiyun IRQF_SHARED, &svia_sht);
734*4882a593Smuzhiyun else
735*4882a593Smuzhiyun return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
736*4882a593Smuzhiyun IRQF_SHARED, &svia_sht);
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
svia_pci_device_resume(struct pci_dev * pdev)740*4882a593Smuzhiyun static int svia_pci_device_resume(struct pci_dev *pdev)
741*4882a593Smuzhiyun {
742*4882a593Smuzhiyun struct ata_host *host = pci_get_drvdata(pdev);
743*4882a593Smuzhiyun struct svia_priv *hpriv = host->private_data;
744*4882a593Smuzhiyun int rc;
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun rc = ata_pci_device_do_resume(pdev);
747*4882a593Smuzhiyun if (rc)
748*4882a593Smuzhiyun return rc;
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun if (hpriv->wd_workaround)
751*4882a593Smuzhiyun svia_wd_fix(pdev);
752*4882a593Smuzhiyun ata_host_resume(host);
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun return 0;
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun #endif
757*4882a593Smuzhiyun
758*4882a593Smuzhiyun module_pci_driver(svia_pci_driver);
759