xref: /OK3568_Linux_fs/kernel/drivers/ata/pata_pdc202xx_old.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * pata_pdc202xx_old.c 	- Promise PDC202xx PATA for new ATA layer
4*4882a593Smuzhiyun  *			  (C) 2005 Red Hat Inc
5*4882a593Smuzhiyun  *			  Alan Cox <alan@lxorguk.ukuu.org.uk>
6*4882a593Smuzhiyun  *			  (C) 2007,2009,2010 Bartlomiej Zolnierkiewicz
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Based in part on linux/drivers/ide/pci/pdc202xx_old.c
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * First cut with LBA48/ATAPI
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * TODO:
13*4882a593Smuzhiyun  *	Channel interlock/reset on both required ?
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <linux/kernel.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/pci.h>
19*4882a593Smuzhiyun #include <linux/blkdev.h>
20*4882a593Smuzhiyun #include <linux/delay.h>
21*4882a593Smuzhiyun #include <scsi/scsi_host.h>
22*4882a593Smuzhiyun #include <linux/libata.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define DRV_NAME "pata_pdc202xx_old"
25*4882a593Smuzhiyun #define DRV_VERSION "0.4.3"
26*4882a593Smuzhiyun 
pdc2026x_cable_detect(struct ata_port * ap)27*4882a593Smuzhiyun static int pdc2026x_cable_detect(struct ata_port *ap)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
30*4882a593Smuzhiyun 	u16 cis;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	pci_read_config_word(pdev, 0x50, &cis);
33*4882a593Smuzhiyun 	if (cis & (1 << (10 + ap->port_no)))
34*4882a593Smuzhiyun 		return ATA_CBL_PATA40;
35*4882a593Smuzhiyun 	return ATA_CBL_PATA80;
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun 
/*
 *	pdc202xx_exec_command	-	issue a taskfile command
 *	@ap: port the command goes to
 *	@tf: taskfile holding the command byte
 *
 *	Write the command register, then wait the ATA-mandated 400ns
 *	before the caller may read status back.
 */
static void pdc202xx_exec_command(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);

	iowrite8(tf->command, ap->ioaddr.command_addr);
	ndelay(400);
}
46*4882a593Smuzhiyun 
pdc202xx_irq_check(struct ata_port * ap)47*4882a593Smuzhiyun static bool pdc202xx_irq_check(struct ata_port *ap)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	struct pci_dev *pdev	= to_pci_dev(ap->host->dev);
50*4882a593Smuzhiyun 	unsigned long master	= pci_resource_start(pdev, 4);
51*4882a593Smuzhiyun 	u8 sc1d			= inb(master + 0x1d);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	if (ap->port_no) {
54*4882a593Smuzhiyun 		/*
55*4882a593Smuzhiyun 		 * bit 7: error, bit 6: interrupting,
56*4882a593Smuzhiyun 		 * bit 5: FIFO full, bit 4: FIFO empty
57*4882a593Smuzhiyun 		 */
58*4882a593Smuzhiyun 		return sc1d & 0x40;
59*4882a593Smuzhiyun 	} else	{
60*4882a593Smuzhiyun 		/*
61*4882a593Smuzhiyun 		 * bit 3: error, bit 2: interrupting,
62*4882a593Smuzhiyun 		 * bit 1: FIFO full, bit 0: FIFO empty
63*4882a593Smuzhiyun 		 */
64*4882a593Smuzhiyun 		return sc1d & 0x04;
65*4882a593Smuzhiyun 	}
66*4882a593Smuzhiyun }
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /**
69*4882a593Smuzhiyun  *	pdc202xx_configure_piomode	-	set chip PIO timing
70*4882a593Smuzhiyun  *	@ap: ATA interface
71*4882a593Smuzhiyun  *	@adev: ATA device
72*4882a593Smuzhiyun  *	@pio: PIO mode
73*4882a593Smuzhiyun  *
74*4882a593Smuzhiyun  *	Called to do the PIO mode setup. Our timing registers are shared
75*4882a593Smuzhiyun  *	so a configure_dmamode call will undo any work we do here and vice
76*4882a593Smuzhiyun  *	versa
77*4882a593Smuzhiyun  */
78*4882a593Smuzhiyun 
static void pdc202xx_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Per-drive timing registers start at 0x60, 8 bytes per channel,
	   4 bytes per device */
	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
	/* Packed (reg A << 8 | reg B) timing values for PIO modes 0-4 */
	static u16 pio_timing[5] = {
		0x0913, 0x050C , 0x0308, 0x0206, 0x0104
	};
	u8 r_ap, r_bp;

	pci_read_config_byte(pdev, port, &r_ap);
	pci_read_config_byte(pdev, port + 1, &r_bp);
	/* Clear the timing fields but keep the control bits in the top of
	   each register */
	r_ap &= ~0x3F;	/* Preserve ERRDY_EN, SYNC_IN */
	r_bp &= ~0x1F;
	r_ap |= (pio_timing[pio] >> 8);
	r_bp |= (pio_timing[pio] & 0xFF);

	if (ata_pio_need_iordy(adev))
		r_ap |= 0x20;	/* IORDY enable */
	if (adev->class == ATA_DEV_ATA)
		r_ap |= 0x10;	/* FIFO enable */
	pci_write_config_byte(pdev, port, r_ap);
	pci_write_config_byte(pdev, port + 1, r_bp);
}
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /**
104*4882a593Smuzhiyun  *	pdc202xx_set_piomode	-	set initial PIO mode data
105*4882a593Smuzhiyun  *	@ap: ATA interface
106*4882a593Smuzhiyun  *	@adev: ATA device
107*4882a593Smuzhiyun  *
108*4882a593Smuzhiyun  *	Called to do the PIO mode setup. Our timing registers are shared
109*4882a593Smuzhiyun  *	but we want to set the PIO timing by default.
110*4882a593Smuzhiyun  */
111*4882a593Smuzhiyun 
static void pdc202xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* Translate the XFER_PIO_x mode number to a 0-4 table index */
	pdc202xx_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /**
118*4882a593Smuzhiyun  *	pdc202xx_configure_dmamode	-	set DMA mode in chip
119*4882a593Smuzhiyun  *	@ap: ATA interface
120*4882a593Smuzhiyun  *	@adev: ATA device
121*4882a593Smuzhiyun  *
122*4882a593Smuzhiyun  *	Load DMA cycle times into the chip ready for a DMA transfer
123*4882a593Smuzhiyun  *	to occur.
124*4882a593Smuzhiyun  */
125*4882a593Smuzhiyun 
static void pdc202xx_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	/* Same per-drive timing register block used for PIO setup */
	int port = 0x60 + 8 * ap->port_no + 4 * adev->devno;
	/* { reg B bits, reg C bits } per UDMA mode; modes 0-2 are encoded
	   against the 33MHz clock, modes 3-5 against the 66MHz clock */
	static u8 udma_timing[6][2] = {
		{ 0x60, 0x03 },	/* 33 Mhz Clock */
		{ 0x40, 0x02 },
		{ 0x20, 0x01 },
		{ 0x40, 0x02 },	/* 66 Mhz Clock */
		{ 0x20, 0x01 },
		{ 0x20, 0x01 }
	};
	/* { reg B bits, reg C bits } per MWDMA mode 0-2 */
	static u8 mdma_timing[3][2] = {
		{ 0xe0, 0x0f },
		{ 0x60, 0x04 },
		{ 0x60, 0x03 },
	};
	u8 r_bp, r_cp;

	pci_read_config_byte(pdev, port + 1, &r_bp);
	pci_read_config_byte(pdev, port + 2, &r_cp);

	/* Clear only the DMA timing fields; PIO bits share these registers */
	r_bp &= ~0xE0;
	r_cp &= ~0x0F;

	if (adev->dma_mode >= XFER_UDMA_0) {
		int speed = adev->dma_mode - XFER_UDMA_0;
		r_bp |= udma_timing[speed][0];
		r_cp |= udma_timing[speed][1];

	} else {
		int speed = adev->dma_mode - XFER_MW_DMA_0;
		r_bp |= mdma_timing[speed][0];
		r_cp |= mdma_timing[speed][1];
	}
	pci_write_config_byte(pdev, port + 1, r_bp);
	pci_write_config_byte(pdev, port + 2, r_cp);

}
165*4882a593Smuzhiyun 
166*4882a593Smuzhiyun /**
167*4882a593Smuzhiyun  *	pdc2026x_bmdma_start		-	DMA engine begin
168*4882a593Smuzhiyun  *	@qc: ATA command
169*4882a593Smuzhiyun  *
170*4882a593Smuzhiyun  *	In UDMA3 or higher we have to clock switch for the duration of the
171*4882a593Smuzhiyun  *	DMA transfer sequence.
172*4882a593Smuzhiyun  *
173*4882a593Smuzhiyun  *	Note: The host lock held by the libata layer protects
174*4882a593Smuzhiyun  *	us from two channels both trying to set DMA bits at once
175*4882a593Smuzhiyun  */
176*4882a593Smuzhiyun 
static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;
	/* 66MHz clock-select bit for this channel: 0x02 primary, 0x08 secondary */
	int sel66 = ap->port_no ? 0x08: 0x02;

	/* Clock and ATAPI-count registers live on port 0's BMDMA block,
	   shared by both channels */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	u32 len;

	/* Check we keep host level locking here */
	/* UDMA3+ needs the 66MHz clock; anything slower runs on 33MHz */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) | sel66, clock);
	else
		iowrite8(ioread8(clock) & ~sel66, clock);

	/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
	   and move to qc_issue ? */
	pdc202xx_set_dmamode(ap, qc->dev);

	/* Cases the state machine will not complete correctly without help */
	if ((tf->flags & ATA_TFLAG_LBA48) ||  tf->protocol == ATAPI_PROT_DMA) {
		/* Program the word count plus a direction opcode (0x06 write /
		   0x05 read in the top byte) into the channel's count register */
		len = qc->nbytes / 2;

		if (tf->flags & ATA_TFLAG_WRITE)
			len |= 0x06000000;
		else
			len |= 0x05000000;

		iowrite32(len, atapi_reg);
	}

	/* Activate DMA */
	ata_bmdma_start(qc);
}
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun /**
217*4882a593Smuzhiyun  *	pdc2026x_bmdma_end		-	DMA engine stop
218*4882a593Smuzhiyun  *	@qc: ATA command
219*4882a593Smuzhiyun  *
220*4882a593Smuzhiyun  *	After a DMA completes we need to put the clock back to 33MHz for
221*4882a593Smuzhiyun  *	PIO timings.
222*4882a593Smuzhiyun  *
223*4882a593Smuzhiyun  *	Note: The host lock held by the libata layer protects
224*4882a593Smuzhiyun  *	us from two channels both trying to set DMA bits at once
225*4882a593Smuzhiyun  */
226*4882a593Smuzhiyun 
static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_taskfile *tf = &qc->tf;

	/* 66MHz clock-select bit for this channel: 0x02 primary, 0x08 secondary */
	int sel66 = ap->port_no ? 0x08: 0x02;
	/* The clock bits are in the same register for both channels */
	void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
	void __iomem *clock = master + 0x11;
	void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

	/* Cases the state machine will not complete correctly */
	/* Undo the count-register programming done in bmdma_start and drop
	   back to the 33MHz clock */
	if (tf->protocol == ATAPI_PROT_DMA || (tf->flags & ATA_TFLAG_LBA48)) {
		iowrite32(0, atapi_reg);
		iowrite8(ioread8(clock) & ~sel66, clock);
	}
	/* Flip back to 33Mhz for PIO */
	if (adev->dma_mode > XFER_UDMA_2)
		iowrite8(ioread8(clock) & ~sel66, clock);
	ata_bmdma_stop(qc);
	/* Timing registers are shared with DMA; restore the PIO values */
	pdc202xx_set_piomode(ap, adev);
}
250*4882a593Smuzhiyun 
251*4882a593Smuzhiyun /**
252*4882a593Smuzhiyun  *	pdc2026x_dev_config	-	device setup hook
253*4882a593Smuzhiyun  *	@adev: newly found device
254*4882a593Smuzhiyun  *
255*4882a593Smuzhiyun  *	Perform chip specific early setup. We need to lock the transfer
256*4882a593Smuzhiyun  *	sizes to 8bit to avoid making the state engine on the 2026x cards
257*4882a593Smuzhiyun  *	barf.
258*4882a593Smuzhiyun  */
259*4882a593Smuzhiyun 
static void pdc2026x_dev_config(struct ata_device *adev)
{
	/* Cap transfers at 256 sectors so the 2026x state machine copes */
	adev->max_sectors = 256;
}
264*4882a593Smuzhiyun 
pdc2026x_port_start(struct ata_port * ap)265*4882a593Smuzhiyun static int pdc2026x_port_start(struct ata_port *ap)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	void __iomem *bmdma = ap->ioaddr.bmdma_addr;
268*4882a593Smuzhiyun 	if (bmdma) {
269*4882a593Smuzhiyun 		/* Enable burst mode */
270*4882a593Smuzhiyun 		u8 burst = ioread8(bmdma + 0x1f);
271*4882a593Smuzhiyun 		iowrite8(burst | 0x01, bmdma + 0x1f);
272*4882a593Smuzhiyun 	}
273*4882a593Smuzhiyun 	return ata_bmdma_port_start(ap);
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun /**
277*4882a593Smuzhiyun  *	pdc2026x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
278*4882a593Smuzhiyun  *	@qc: Metadata associated with taskfile to check
279*4882a593Smuzhiyun  *
280*4882a593Smuzhiyun  *	Just say no - not supported on older Promise.
281*4882a593Smuzhiyun  *
282*4882a593Smuzhiyun  *	LOCKING:
283*4882a593Smuzhiyun  *	None (inherited from caller).
284*4882a593Smuzhiyun  *
285*4882a593Smuzhiyun  *	RETURNS: 0 when ATAPI DMA can be used
286*4882a593Smuzhiyun  *		 1 otherwise
287*4882a593Smuzhiyun  */
288*4882a593Smuzhiyun 
static int pdc2026x_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* Always refuse: ATAPI DMA is not supported on these controllers */
	return 1;
}
293*4882a593Smuzhiyun 
/* Stock BMDMA SCSI host template, no driver-specific overrides */
static struct scsi_host_template pdc202xx_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
297*4882a593Smuzhiyun 
/* Operations for PDC20246: 40-wire only, with the shared PIO/DMA timing
   setters and the Promise command/IRQ-status quirks */
static struct ata_port_operations pdc2024x_port_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.cable_detect		= ata_cable_40wire,
	.set_piomode		= pdc202xx_set_piomode,
	.set_dmamode		= pdc202xx_set_dmamode,

	.sff_exec_command	= pdc202xx_exec_command,
	.sff_irq_check		= pdc202xx_irq_check,
};
308*4882a593Smuzhiyun 
/* Operations for PDC2026x: adds cable sense, clock-switching BMDMA hooks,
   the 256-sector cap and burst-mode port start on top of the 2024x ops */
static struct ata_port_operations pdc2026x_port_ops = {
	.inherits		= &pdc2024x_port_ops,

	.check_atapi_dma	= pdc2026x_check_atapi_dma,
	.bmdma_start		= pdc2026x_bmdma_start,
	.bmdma_stop		= pdc2026x_bmdma_stop,

	.cable_detect		= pdc2026x_cable_detect,
	.dev_config		= pdc2026x_dev_config,

	.port_start		= pdc2026x_port_start,

	/* NOTE(review): these two match the inherited pdc2024x hooks and
	   look redundant given .inherits above — confirm before removing */
	.sff_exec_command	= pdc202xx_exec_command,
	.sff_irq_check		= pdc202xx_irq_check,
};
324*4882a593Smuzhiyun 
/**
 *	pdc202xx_init_one	-	PCI probe
 *	@dev: PCI device found
 *	@id: matching table entry; driver_data indexes the port info below
 *
 *	Select the port info for the detected chip generation and hand off
 *	to the generic BMDMA init. Refuses 20265 devices sitting behind an
 *	Intel i960 bridge, which indicates a Promise I2O RAID setup that
 *	must not be claimed by this driver.
 */
static int pdc202xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	/* Indexed by id->driver_data: 0 = 20246 (UDMA2), 1 = 20262/20263
	   (UDMA4), 2 = 20265/20267 (UDMA5) */
	static const struct ata_port_info info[3] = {
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA2,
			.port_ops = &pdc2024x_port_ops
		},
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA4,
			.port_ops = &pdc2026x_port_ops
		},
		{
			.flags = ATA_FLAG_SLAVE_POSS,
			.pio_mask = ATA_PIO4,
			.mwdma_mask = ATA_MWDMA2,
			.udma_mask = ATA_UDMA5,
			.port_ops = &pdc2026x_port_ops
		}

	};
	const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL };

	if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
		struct pci_dev *bridge = dev->bus->self;
		/* Don't grab anything behind a Promise I2O RAID */
		if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
			if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
				return -ENODEV;
			if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
				return -ENODEV;
		}
	}
	return ata_pci_bmdma_init_one(dev, ppi, &pdc202xx_sht, NULL, 0);
}
365*4882a593Smuzhiyun 
/* Supported devices; the trailing value indexes the port info table in
   pdc202xx_init_one() */
static const struct pci_device_id pdc202xx[] = {
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2 },
	{ PCI_VDEVICE(PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2 },

	{ },
};
375*4882a593Smuzhiyun 
/* PCI driver glue; suspend/resume only when the kernel supports PM sleep */
static struct pci_driver pdc202xx_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= pdc202xx,
	.probe 		= pdc202xx_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};
386*4882a593Smuzhiyun 
/* Standard module registration and metadata */
module_pci_driver(pdc202xx_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc202xx);
MODULE_VERSION(DRV_VERSION);
394