1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * pata_rdc - Driver for later RDC PATA controllers
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This is actually a driver for hardware meeting
6*4882a593Smuzhiyun * INCITS 370-2004 (1510D): ATA Host Adapter Standards
7*4882a593Smuzhiyun *
8*4882a593Smuzhiyun * Based on ata_piix.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/kernel.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/pci.h>
14*4882a593Smuzhiyun #include <linux/blkdev.h>
15*4882a593Smuzhiyun #include <linux/delay.h>
16*4882a593Smuzhiyun #include <linux/device.h>
17*4882a593Smuzhiyun #include <linux/gfp.h>
18*4882a593Smuzhiyun #include <scsi/scsi_host.h>
19*4882a593Smuzhiyun #include <linux/libata.h>
20*4882a593Smuzhiyun #include <linux/dmi.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #define DRV_NAME "pata_rdc"
23*4882a593Smuzhiyun #define DRV_VERSION "0.01"
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun struct rdc_host_priv {
26*4882a593Smuzhiyun u32 saved_iocfg;
27*4882a593Smuzhiyun };
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun /**
30*4882a593Smuzhiyun * rdc_pata_cable_detect - Probe host controller cable detect info
31*4882a593Smuzhiyun * @ap: Port for which cable detect info is desired
32*4882a593Smuzhiyun *
33*4882a593Smuzhiyun * Read 80c cable indicator from ATA PCI device's PCI config
34*4882a593Smuzhiyun * register. This register is normally set by firmware (BIOS).
35*4882a593Smuzhiyun *
36*4882a593Smuzhiyun * LOCKING:
37*4882a593Smuzhiyun * None (inherited from caller).
38*4882a593Smuzhiyun */
39*4882a593Smuzhiyun
rdc_pata_cable_detect(struct ata_port * ap)40*4882a593Smuzhiyun static int rdc_pata_cable_detect(struct ata_port *ap)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun struct rdc_host_priv *hpriv = ap->host->private_data;
43*4882a593Smuzhiyun u8 mask;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /* check BIOS cable detect results */
46*4882a593Smuzhiyun mask = 0x30 << (2 * ap->port_no);
47*4882a593Smuzhiyun if ((hpriv->saved_iocfg & mask) == 0)
48*4882a593Smuzhiyun return ATA_CBL_PATA40;
49*4882a593Smuzhiyun return ATA_CBL_PATA80;
50*4882a593Smuzhiyun }
51*4882a593Smuzhiyun
52*4882a593Smuzhiyun /**
53*4882a593Smuzhiyun * rdc_pata_prereset - prereset for PATA host controller
54*4882a593Smuzhiyun * @link: Target link
55*4882a593Smuzhiyun * @deadline: deadline jiffies for the operation
56*4882a593Smuzhiyun *
57*4882a593Smuzhiyun * LOCKING:
58*4882a593Smuzhiyun * None (inherited from caller).
59*4882a593Smuzhiyun */
rdc_pata_prereset(struct ata_link * link,unsigned long deadline)60*4882a593Smuzhiyun static int rdc_pata_prereset(struct ata_link *link, unsigned long deadline)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun struct ata_port *ap = link->ap;
63*4882a593Smuzhiyun struct pci_dev *pdev = to_pci_dev(ap->host->dev);
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun static const struct pci_bits rdc_enable_bits[] = {
66*4882a593Smuzhiyun { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
67*4882a593Smuzhiyun { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
68*4882a593Smuzhiyun };
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun if (!pci_test_config_bits(pdev, &rdc_enable_bits[ap->port_no]))
71*4882a593Smuzhiyun return -ENOENT;
72*4882a593Smuzhiyun return ata_sff_prereset(link, deadline);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun static DEFINE_SPINLOCK(rdc_lock);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun /**
78*4882a593Smuzhiyun * rdc_set_piomode - Initialize host controller PATA PIO timings
79*4882a593Smuzhiyun * @ap: Port whose timings we are configuring
 * @adev: Device whose PIO timings are being programmed
81*4882a593Smuzhiyun *
82*4882a593Smuzhiyun * Set PIO mode for device, in host controller PCI config space.
83*4882a593Smuzhiyun *
84*4882a593Smuzhiyun * LOCKING:
85*4882a593Smuzhiyun * None (inherited from caller).
86*4882a593Smuzhiyun */
87*4882a593Smuzhiyun
static void rdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	unsigned int pio = adev->pio_mode - XFER_PIO_0;
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	unsigned int is_slave = (adev->devno != 0);
	unsigned int master_port= ap->port_no ? 0x42 : 0x40;
	unsigned int slave_port = 0x44;
	u16 master_data;
	u8 slave_data;
	u8 udma_enable;
	int control = 0;

	/* Per-PIO-mode clock counts: ISP (setup) and RTC (recovery),
	 * indexed by PIO mode number 0-4. */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	/* Build the per-device control nibble. */
	if (pio >= 2)
		control |= 1;	/* TIME1 enable */
	if (ata_pio_need_iordy(adev))
		control |= 2;	/* IE enable */

	/* Intel specifies that the PPE functionality is for disk only */
	if (adev->class == ATA_DEV_ATA)
		control |= 4;	/* PPE enable */

	spin_lock_irqsave(&rdc_lock, flags);

	/* PIO configuration clears DTE unconditionally.  It will be
	 * programmed in set_dmamode which is guaranteed to be called
	 * after set_piomode if any DMA mode is available.
	 */
	pci_read_config_word(dev, master_port, &master_data);
	if (is_slave) {
		/* clear TIME1|IE1|PPE1|DTE1 */
		master_data &= 0xff0f;
		/* Enable SITRE (separate slave timing register) */
		master_data |= 0x4000;
		/* enable PPE1, IE1 and TIME1 as needed */
		master_data |= (control << 4);
		pci_read_config_byte(dev, slave_port, &slave_data);
		/* Keep the other port's nibble, replace ours. */
		slave_data &= (ap->port_no ? 0x0f : 0xf0);
		/* Load the timing nibble for this slave */
		slave_data |= ((timings[pio][0] << 2) | timings[pio][1])
						<< (ap->port_no ? 4 : 0);
	} else {
		/* clear ISP|RCT|TIME0|IE0|PPE0|DTE0 */
		master_data &= 0xccf0;
		/* Enable PPE, IE and TIME as appropriate */
		master_data |= control;
		/* load ISP and RCT */
		master_data |=
			(timings[pio][0] << 12) |
			(timings[pio][1] << 8);
	}
	pci_write_config_word(dev, master_port, master_data);
	if (is_slave)
		pci_write_config_byte(dev, slave_port, slave_data);

	/* Ensure the UDMA bit is off - it will be turned back on if
	   UDMA is selected */

	pci_read_config_byte(dev, 0x48, &udma_enable);
	udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
	pci_write_config_byte(dev, 0x48, udma_enable);

	spin_unlock_irqrestore(&rdc_lock, flags);
}
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun /**
 * rdc_set_dmamode - Initialize host controller PATA DMA timings
161*4882a593Smuzhiyun * @ap: Port whose timings we are configuring
162*4882a593Smuzhiyun * @adev: Drive in question
163*4882a593Smuzhiyun *
164*4882a593Smuzhiyun * Set UDMA mode for device, in host controller PCI config space.
165*4882a593Smuzhiyun *
166*4882a593Smuzhiyun * LOCKING:
167*4882a593Smuzhiyun * None (inherited from caller).
168*4882a593Smuzhiyun */
169*4882a593Smuzhiyun
static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *dev = to_pci_dev(ap->host->dev);
	unsigned long flags;
	u8 master_port = ap->port_no ? 0x42 : 0x40;
	u16 master_data;
	u8 speed = adev->dma_mode;
	/* One bit/nibble slot per device: devno 0/1 on port 0/1 -> 0..3. */
	int devid = adev->devno + 2 * ap->port_no;
	u8 udma_enable = 0;

	/* PIO-mode ISP (setup) / RTC (recovery) clock counts; MWDMA
	 * timing below is derived from these same PIO values. */
	static const	 /* ISP  RTC */
	u8 timings[][2]	= { { 0, 0 },
			    { 0, 0 },
			    { 1, 0 },
			    { 2, 1 },
			    { 2, 3 }, };

	spin_lock_irqsave(&rdc_lock, flags);

	pci_read_config_word(dev, master_port, &master_data);
	pci_read_config_byte(dev, 0x48, &udma_enable);

	if (speed >= XFER_UDMA_0) {
		unsigned int udma = adev->dma_mode - XFER_UDMA_0;
		u16 udma_timing;
		u16 ideconf;
		int u_clock, u_speed;

		/*
		 * UDMA is handled by a combination of clock switching and
		 * selection of dividers
		 *
		 * Handy rule: Odd modes are UDMATIMx 01, even are 02
		 * except UDMA0 which is 00
		 */
		u_speed = min(2 - (udma & 1), udma);
		if (udma == 5)
			u_clock = 0x1000;	/* 100Mhz */
		else if (udma > 2)
			u_clock = 1;	/* 66Mhz */
		else
			u_clock = 0;	/* 33Mhz */

		udma_enable |= (1 << devid);

		/* Load the CT/RP selection */
		pci_read_config_word(dev, 0x4A, &udma_timing);
		udma_timing &= ~(3 << (4 * devid));
		udma_timing |= u_speed << (4 * devid);
		pci_write_config_word(dev, 0x4A, udma_timing);

		/* Select a 33/66/100Mhz clock */
		pci_read_config_word(dev, 0x54, &ideconf);
		ideconf &= ~(0x1001 << devid);
		ideconf |= u_clock << devid;
		pci_write_config_word(dev, 0x54, ideconf);
	} else {
		/*
		 * MWDMA is driven by the PIO timings. We must also enable
		 * IORDY unconditionally along with TIME1. PPE has already
		 * been set when the PIO timing was set.
		 */
		unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
		unsigned int control;
		u8 slave_data;
		/* Minimum PIO mode whose timings satisfy each MWDMA mode. */
		const unsigned int needed_pio[3] = {
			XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
		};
		int pio = needed_pio[mwdma] - XFER_PIO_0;

		control = 3;	/* IORDY|TIME1 */

		/* If the drive MWDMA is faster than it can do PIO then
		   we must force PIO into PIO0 */

		if (adev->pio_mode < needed_pio[mwdma])
			/* Enable DMA timing only */
			control |= 8;	/* PIO cycles in PIO0 */

		if (adev->devno) {	/* Slave */
			master_data &= 0xFF4F;  /* Mask out IORDY|TIME1|DMAONLY */
			master_data |= control << 4;
			pci_read_config_byte(dev, 0x44, &slave_data);
			/* Keep the other port's nibble, replace ours. */
			slave_data &= (ap->port_no ? 0x0f : 0xf0);
			/* Load the matching timing */
			slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
			pci_write_config_byte(dev, 0x44, slave_data);
		} else { 	/* Master */
			master_data &= 0xCCF4;	/* Mask out IORDY|TIME1|DMAONLY
						   and master timing bits */
			master_data |= control;
			master_data |=
				(timings[pio][0] << 12) |
				(timings[pio][1] << 8);
		}

		/* Not UDMA: make sure this device's UDMA enable bit is off. */
		udma_enable &= ~(1 << devid);
		pci_write_config_word(dev, master_port, master_data);
	}
	pci_write_config_byte(dev, 0x48, udma_enable);

	spin_unlock_irqrestore(&rdc_lock, flags);
}
273*4882a593Smuzhiyun
/* Port operations: inherit the generic BMDMA (32-bit PIO) defaults and
 * override cable detection, timing programming and prereset with the
 * RDC-specific implementations above. */
static struct ata_port_operations rdc_pata_ops = {
	.inherits		= &ata_bmdma32_port_ops,
	.cable_detect		= rdc_pata_cable_detect,
	.set_piomode		= rdc_set_piomode,
	.set_dmamode		= rdc_set_dmamode,
	.prereset		= rdc_pata_prereset,
};
281*4882a593Smuzhiyun
/* Capabilities shared by both ports: master+slave, PIO0-4, MWDMA (modes
 * 1-2 only) and UDMA up to mode 5. */
static const struct ata_port_info rdc_port_info = {

	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.mwdma_mask	= ATA_MWDMA12_ONLY,
	.udma_mask	= ATA_UDMA5,
	.port_ops	= &rdc_pata_ops,
};
290*4882a593Smuzhiyun
/* SCSI host template: stock BMDMA defaults, no driver-specific hooks. */
static struct scsi_host_template rdc_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun /**
 * rdc_init_one - Register RDC ATA PCI device with kernel services
297*4882a593Smuzhiyun * @pdev: PCI device to register
298*4882a593Smuzhiyun * @ent: Entry in rdc_pci_tbl matching with @pdev
299*4882a593Smuzhiyun *
300*4882a593Smuzhiyun * Called from kernel PCI layer. We probe for combined mode (sigh),
301*4882a593Smuzhiyun * and then hand over control to libata, for it to do the rest.
302*4882a593Smuzhiyun *
303*4882a593Smuzhiyun * LOCKING:
304*4882a593Smuzhiyun * Inherited from PCI layer (may sleep).
305*4882a593Smuzhiyun *
306*4882a593Smuzhiyun * RETURNS:
307*4882a593Smuzhiyun * Zero on success, or -ERRNO value.
308*4882a593Smuzhiyun */
309*4882a593Smuzhiyun
static int rdc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct ata_port_info port_info[2];
	const struct ata_port_info *ppi[] = { &port_info[0], &port_info[1] };
	struct ata_host *host;
	struct rdc_host_priv *hpriv;
	int rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* Both ports advertise identical capabilities. */
	port_info[0] = rdc_port_info;
	port_info[1] = rdc_port_info;

	/* enable device and prepare host */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* devm allocation: freed automatically on detach/probe failure. */
	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;

	/* Save IOCFG, this will be used for cable detection, quirk
	 * detection and restoration on detach.
	 */
	pci_read_config_dword(pdev, 0x54, &hpriv->saved_iocfg);

	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;
	host->private_data = hpriv;

	/* Make sure legacy INTx interrupts are enabled. */
	pci_intx(pdev, 1);

	/* The two ports are independent; let libata scan them in parallel. */
	host->flags |= ATA_HOST_PARALLEL_SCAN;

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ata_bmdma_interrupt, &rdc_sht);
}
350*4882a593Smuzhiyun
rdc_remove_one(struct pci_dev * pdev)351*4882a593Smuzhiyun static void rdc_remove_one(struct pci_dev *pdev)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun struct ata_host *host = pci_get_drvdata(pdev);
354*4882a593Smuzhiyun struct rdc_host_priv *hpriv = host->private_data;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun pci_write_config_dword(pdev, 0x54, hpriv->saved_iocfg);
357*4882a593Smuzhiyun
358*4882a593Smuzhiyun ata_pci_remove_one(pdev);
359*4882a593Smuzhiyun }
360*4882a593Smuzhiyun
/* PCI IDs handled by this driver (vendor 0x17F3 = RDC). */
static const struct pci_device_id rdc_pci_tbl[] = {
	{ PCI_DEVICE(0x17F3, 0x1011), },
	{ PCI_DEVICE(0x17F3, 0x1012), },
	{ }	/* terminate list */
};
366*4882a593Smuzhiyun
/* PCI driver glue; suspend/resume use the generic libata PCI helpers. */
static struct pci_driver rdc_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= rdc_pci_tbl,
	.probe			= rdc_init_one,
	.remove			= rdc_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= ata_pci_device_resume,
#endif
};
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun
/* Standard module registration/unregistration boilerplate. */
module_pci_driver(rdc_pci_driver);

MODULE_AUTHOR("Alan Cox (based on ata_piix)");
MODULE_DESCRIPTION("SCSI low-level driver for RDC PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rdc_pci_tbl);
MODULE_VERSION(DRV_VERSION);
386