xref: /OK3568_Linux_fs/kernel/drivers/ata/pata_atiixp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * pata_atiixp.c 	- ATI PATA for new ATA layer
4*4882a593Smuzhiyun  *			  (C) 2005 Red Hat Inc
5*4882a593Smuzhiyun  *			  (C) 2009-2010 Bartlomiej Zolnierkiewicz
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Based on
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *  linux/drivers/ide/pci/atiixp.c	Version 0.01-bart2	Feb. 26, 2004
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *  Copyright (C) 2003 ATI Inc. <hyu@ati.com>
12*4882a593Smuzhiyun  *  Copyright (C) 2004 Bartlomiej Zolnierkiewicz
13*4882a593Smuzhiyun  *
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <linux/kernel.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/pci.h>
19*4882a593Smuzhiyun #include <linux/blkdev.h>
20*4882a593Smuzhiyun #include <linux/delay.h>
21*4882a593Smuzhiyun #include <scsi/scsi_host.h>
22*4882a593Smuzhiyun #include <linux/libata.h>
23*4882a593Smuzhiyun #include <linux/dmi.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #define DRV_NAME "pata_atiixp"
26*4882a593Smuzhiyun #define DRV_VERSION "0.4.6"
27*4882a593Smuzhiyun 
/* PCI configuration-space offsets of the ATI IXP IDE timing/mode registers */
enum {
	ATIIXP_IDE_PIO_TIMING	= 0x40,	/* per-drive PIO timing bytes */
	ATIIXP_IDE_MWDMA_TIMING	= 0x44,	/* per-drive MWDMA timing bytes */
	ATIIXP_IDE_PIO_CONTROL	= 0x48,	/* channel enable bits (see prereset) */
	ATIIXP_IDE_PIO_MODE	= 0x4a,	/* PIO mode number, one nibble per drive */
	ATIIXP_IDE_UDMA_CONTROL	= 0x54,	/* UDMA enable, one bit per drive */
	ATIIXP_IDE_UDMA_MODE 	= 0x56	/* UDMA mode number, one nibble per drive */
};
36*4882a593Smuzhiyun 
/*
 * Boards whose cable detection must be overridden to short 40-wire.
 * NOTE(review): "attixp" looks like a typo for "atiixp", but renaming
 * the identifier would touch every user, so it is left as-is.
 */
static const struct dmi_system_id attixp_cable_override_dmi_table[] = {
	{
		/* Board has onboard PATA<->SATA converters */
		.ident = "MSI E350DM-E33",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
			DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"),
		},
	},
	{ }	/* terminator */
};
48*4882a593Smuzhiyun 
atiixp_cable_detect(struct ata_port * ap)49*4882a593Smuzhiyun static int atiixp_cable_detect(struct ata_port *ap)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
52*4882a593Smuzhiyun 	u8 udma;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	if (dmi_check_system(attixp_cable_override_dmi_table))
55*4882a593Smuzhiyun 		return ATA_CBL_PATA40_SHORT;
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	/* Hack from drivers/ide/pci. Really we want to know how to do the
58*4882a593Smuzhiyun 	   raw detection not play follow the bios mode guess */
59*4882a593Smuzhiyun 	pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma);
60*4882a593Smuzhiyun 	if ((udma & 0x07) >= 0x04 || (udma & 0x70) >= 0x40)
61*4882a593Smuzhiyun 		return  ATA_CBL_PATA80;
62*4882a593Smuzhiyun 	return ATA_CBL_PATA40;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
/* Serializes the PCI config read-modify-write timing updates */
static DEFINE_SPINLOCK(atiixp_lock);
66*4882a593Smuzhiyun 
/**
 *	atiixp_prereset	-	perform reset handling
 *	@link: ATA link
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset sequence checking enable bits to see which ports are
 *	active. Returns -ENOENT for a channel whose enable bit is clear,
 *	otherwise defers to the generic SFF prereset.
 */

static int atiixp_prereset(struct ata_link *link, unsigned long deadline)
{
	/* Per-channel enable bits in the PIO control register (0x48) */
	static const struct pci_bits atiixp_enable_bits[] = {
		{ 0x48, 1, 0x01, 0x00 },	/* primary channel */
		{ 0x48, 1, 0x08, 0x00 }	/* secondary channel */
	};

	struct ata_port *ap = link->ap;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no]))
		return -ENOENT;

	return ata_sff_prereset(link, deadline);
}
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /**
93*4882a593Smuzhiyun  *	atiixp_set_pio_timing	-	set initial PIO mode data
94*4882a593Smuzhiyun  *	@ap: ATA interface
95*4882a593Smuzhiyun  *	@adev: ATA device
96*4882a593Smuzhiyun  *
97*4882a593Smuzhiyun  *	Called by both the pio and dma setup functions to set the controller
98*4882a593Smuzhiyun  *	timings for PIO transfers. We must load both the mode number and
99*4882a593Smuzhiyun  *	timing values into the controller.
100*4882a593Smuzhiyun  */
101*4882a593Smuzhiyun 
atiixp_set_pio_timing(struct ata_port * ap,struct ata_device * adev,int pio)102*4882a593Smuzhiyun static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
107*4882a593Smuzhiyun 	int dn = 2 * ap->port_no + adev->devno;
108*4882a593Smuzhiyun 	int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
109*4882a593Smuzhiyun 	u32 pio_timing_data;
110*4882a593Smuzhiyun 	u16 pio_mode_data;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
113*4882a593Smuzhiyun 	pio_mode_data &= ~(0x7 << (4 * dn));
114*4882a593Smuzhiyun 	pio_mode_data |= pio << (4 * dn);
115*4882a593Smuzhiyun 	pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	pci_read_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
118*4882a593Smuzhiyun 	pio_timing_data &= ~(0xFF << timing_shift);
119*4882a593Smuzhiyun 	pio_timing_data |= (pio_timings[pio] << timing_shift);
120*4882a593Smuzhiyun 	pci_write_config_dword(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun /**
124*4882a593Smuzhiyun  *	atiixp_set_piomode	-	set initial PIO mode data
125*4882a593Smuzhiyun  *	@ap: ATA interface
126*4882a593Smuzhiyun  *	@adev: ATA device
127*4882a593Smuzhiyun  *
128*4882a593Smuzhiyun  *	Called to do the PIO mode setup. We use a shared helper for this
129*4882a593Smuzhiyun  *	as the DMA setup must also adjust the PIO timing information.
130*4882a593Smuzhiyun  */
131*4882a593Smuzhiyun 
atiixp_set_piomode(struct ata_port * ap,struct ata_device * adev)132*4882a593Smuzhiyun static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
133*4882a593Smuzhiyun {
134*4882a593Smuzhiyun 	unsigned long flags;
135*4882a593Smuzhiyun 	spin_lock_irqsave(&atiixp_lock, flags);
136*4882a593Smuzhiyun 	atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
137*4882a593Smuzhiyun 	spin_unlock_irqrestore(&atiixp_lock, flags);
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun 
/**
 *	atiixp_set_dmamode	-	set initial DMA mode data
 *	@ap: ATA interface
 *	@adev: ATA device
 *
 *	Called to do the DMA mode setup. We use timing tables for most
 *	modes but must tune an appropriate PIO mode to match.
 */

static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	/* MWDMA timing bytes; only indices 0-2 are ever used (MWDMA0-2) */
	static u8 mwdma_timings[5] = { 0x77, 0x21, 0x20 };

	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	int dma = adev->dma_mode;
	/* Drive number 0-3: two drives per channel */
	int dn = 2 * ap->port_no + adev->devno;
	int wanted_pio;
	unsigned long flags;

	spin_lock_irqsave(&atiixp_lock, flags);

	if (adev->dma_mode >= XFER_UDMA_0) {
		u16 udma_mode_data;

		/* Convert XFER_UDMA_n to the raw mode number n */
		dma -= XFER_UDMA_0;

		/* UDMA mode register: one nibble per drive */
		pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
		udma_mode_data &= ~(0x7 << (4 * dn));
		udma_mode_data |= dma << (4 * dn);
		pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
	} else {
		/* Slave timing sits in the low byte of each 16-bit half */
		int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
		u32 mwdma_timing_data;

		/* Convert XFER_MW_DMA_n to the raw mode number n */
		dma -= XFER_MW_DMA_0;

		pci_read_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				      &mwdma_timing_data);
		mwdma_timing_data &= ~(0xFF << timing_shift);
		mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
		pci_write_config_dword(pdev, ATIIXP_IDE_MWDMA_TIMING,
				       mwdma_timing_data);
	}
	/*
	 *	We must now look at the PIO mode situation. We may need to
	 *	adjust the PIO mode to keep the timings acceptable
	 */
	if (adev->dma_mode >= XFER_MW_DMA_2)
		wanted_pio = 4;
	else if (adev->dma_mode == XFER_MW_DMA_1)
		wanted_pio = 3;
	else if (adev->dma_mode == XFER_MW_DMA_0)
		wanted_pio = 0;
	else BUG();	/* libata should never hand us an unsupported mode */

	/*
	 * NOTE(review): adev->pio_mode is an XFER_PIO_* value while
	 * wanted_pio is a raw mode number (0-4), so this comparison is
	 * effectively always true and the PIO timings are reprogrammed
	 * on every call. Harmless, but confirm against upstream before
	 * changing.
	 */
	if (adev->pio_mode != wanted_pio)
		atiixp_set_pio_timing(ap, adev, wanted_pio);
	spin_unlock_irqrestore(&atiixp_lock, flags);
}
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun /**
201*4882a593Smuzhiyun  *	atiixp_bmdma_start	-	DMA start callback
202*4882a593Smuzhiyun  *	@qc: Command in progress
203*4882a593Smuzhiyun  *
204*4882a593Smuzhiyun  *	When DMA begins we need to ensure that the UDMA control
205*4882a593Smuzhiyun  *	register for the channel is correctly set.
206*4882a593Smuzhiyun  *
207*4882a593Smuzhiyun  *	Note: The host lock held by the libata layer protects
208*4882a593Smuzhiyun  *	us from two channels both trying to set DMA bits at once
209*4882a593Smuzhiyun  */
210*4882a593Smuzhiyun 
atiixp_bmdma_start(struct ata_queued_cmd * qc)211*4882a593Smuzhiyun static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
212*4882a593Smuzhiyun {
213*4882a593Smuzhiyun 	struct ata_port *ap = qc->ap;
214*4882a593Smuzhiyun 	struct ata_device *adev = qc->dev;
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
217*4882a593Smuzhiyun 	int dn = (2 * ap->port_no) + adev->devno;
218*4882a593Smuzhiyun 	u16 tmp16;
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
221*4882a593Smuzhiyun 	if (ata_using_udma(adev))
222*4882a593Smuzhiyun 		tmp16 |= (1 << dn);
223*4882a593Smuzhiyun 	else
224*4882a593Smuzhiyun 		tmp16 &= ~(1 << dn);
225*4882a593Smuzhiyun 	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
226*4882a593Smuzhiyun 	ata_bmdma_start(qc);
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun /**
230*4882a593Smuzhiyun  *	atiixp_dma_stop	-	DMA stop callback
231*4882a593Smuzhiyun  *	@qc: Command in progress
232*4882a593Smuzhiyun  *
233*4882a593Smuzhiyun  *	DMA has completed. Clear the UDMA flag as the next operations will
234*4882a593Smuzhiyun  *	be PIO ones not UDMA data transfer.
235*4882a593Smuzhiyun  *
236*4882a593Smuzhiyun  *	Note: The host lock held by the libata layer protects
237*4882a593Smuzhiyun  *	us from two channels both trying to set DMA bits at once
238*4882a593Smuzhiyun  */
239*4882a593Smuzhiyun 
atiixp_bmdma_stop(struct ata_queued_cmd * qc)240*4882a593Smuzhiyun static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun 	struct ata_port *ap = qc->ap;
243*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
244*4882a593Smuzhiyun 	int dn = (2 * ap->port_no) + qc->dev->devno;
245*4882a593Smuzhiyun 	u16 tmp16;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
248*4882a593Smuzhiyun 	tmp16 &= ~(1 << dn);
249*4882a593Smuzhiyun 	pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
250*4882a593Smuzhiyun 	ata_bmdma_stop(qc);
251*4882a593Smuzhiyun }
252*4882a593Smuzhiyun 
/* Stock BMDMA host template, limited to the dumb PRD table size */
static struct scsi_host_template atiixp_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	.sg_tablesize		= LIBATA_DUMB_MAX_PRD,
};
257*4882a593Smuzhiyun 
/* Port operations: generic BMDMA with ATIIXP-specific timing/cable hooks */
static struct ata_port_operations atiixp_port_ops = {
	.inherits	= &ata_bmdma_port_ops,

	.qc_prep 	= ata_bmdma_dumb_qc_prep,
	.bmdma_start 	= atiixp_bmdma_start,
	.bmdma_stop	= atiixp_bmdma_stop,

	.prereset	= atiixp_prereset,
	.cable_detect	= atiixp_cable_detect,
	.set_piomode	= atiixp_set_piomode,
	.set_dmamode	= atiixp_set_dmamode,
};
270*4882a593Smuzhiyun 
atiixp_init_one(struct pci_dev * pdev,const struct pci_device_id * id)271*4882a593Smuzhiyun static int atiixp_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
272*4882a593Smuzhiyun {
273*4882a593Smuzhiyun 	static const struct ata_port_info info = {
274*4882a593Smuzhiyun 		.flags = ATA_FLAG_SLAVE_POSS,
275*4882a593Smuzhiyun 		.pio_mask = ATA_PIO4,
276*4882a593Smuzhiyun 		.mwdma_mask = ATA_MWDMA12_ONLY,
277*4882a593Smuzhiyun 		.udma_mask = ATA_UDMA5,
278*4882a593Smuzhiyun 		.port_ops = &atiixp_port_ops
279*4882a593Smuzhiyun 	};
280*4882a593Smuzhiyun 	const struct ata_port_info *ppi[] = { &info, &info };
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	/* SB600 doesn't have secondary port wired */
283*4882a593Smuzhiyun 	if (pdev->device == PCI_DEVICE_ID_ATI_IXP600_IDE)
284*4882a593Smuzhiyun 		ppi[1] = &ata_dummy_port_info;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	return ata_pci_bmdma_init_one(pdev, ppi, &atiixp_sht, NULL,
287*4882a593Smuzhiyun 				      ATA_HOST_PARALLEL_SCAN);
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun 
/* PCI IDs of the supported ATI/AMD IDE bridges */
static const struct pci_device_id atiixp[] = {
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
	{ PCI_VDEVICE(ATI, PCI_DEVICE_ID_ATI_IXP700_IDE), },
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_HUDSON2_IDE), },

	{ },	/* terminator */
};
300*4882a593Smuzhiyun 
/* PCI driver glue; suspend/resume delegate to the generic libata helpers */
static struct pci_driver atiixp_pci_driver = {
	.name 		= DRV_NAME,
	.id_table	= atiixp,
	.probe 		= atiixp_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.resume		= ata_pci_device_resume,
	.suspend	= ata_pci_device_suspend,
#endif
};
311*4882a593Smuzhiyun 
module_pci_driver(atiixp_pci_driver);

/* Module metadata and hotplug device-table export */
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);
319