xref: /OK3568_Linux_fs/kernel/drivers/ide/setup-pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  *  Copyright (C) 1998-2000  Andre Hedrick <andre@linux-ide.org>
3*4882a593Smuzhiyun  *  Copyright (C) 1995-1998  Mark Lord
4*4882a593Smuzhiyun  *  Copyright (C) 2007-2009  Bartlomiej Zolnierkiewicz
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  *  May be copied or modified under the terms of the GNU General Public License
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/export.h>
12*4882a593Smuzhiyun #include <linux/pci.h>
13*4882a593Smuzhiyun #include <linux/init.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/ide.h>
16*4882a593Smuzhiyun #include <linux/dma-mapping.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <asm/io.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /**
21*4882a593Smuzhiyun  *	ide_setup_pci_baseregs	-	place a PCI IDE controller native
22*4882a593Smuzhiyun  *	@dev: PCI device of interface to switch native
23*4882a593Smuzhiyun  *	@name: Name of interface
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  *	We attempt to place the PCI interface into PCI native mode. If
26*4882a593Smuzhiyun  *	we succeed the BARs are ok and the controller is in PCI mode.
27*4882a593Smuzhiyun  *	Returns 0 on success or an errno code.
28*4882a593Smuzhiyun  *
29*4882a593Smuzhiyun  *	FIXME: if we program the interface and then fail to set the BARS
30*4882a593Smuzhiyun  *	we don't switch it back to legacy mode. Do we actually care ??
31*4882a593Smuzhiyun  */
32*4882a593Smuzhiyun 
ide_setup_pci_baseregs(struct pci_dev * dev,const char * name)33*4882a593Smuzhiyun static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
34*4882a593Smuzhiyun {
35*4882a593Smuzhiyun 	u8 progif = 0;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	/*
38*4882a593Smuzhiyun 	 * Place both IDE interfaces into PCI "native" mode:
39*4882a593Smuzhiyun 	 */
40*4882a593Smuzhiyun 	if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
41*4882a593Smuzhiyun 			 (progif & 5) != 5) {
42*4882a593Smuzhiyun 		if ((progif & 0xa) != 0xa) {
43*4882a593Smuzhiyun 			printk(KERN_INFO "%s %s: device not capable of full "
44*4882a593Smuzhiyun 				"native PCI mode\n", name, pci_name(dev));
45*4882a593Smuzhiyun 			return -EOPNOTSUPP;
46*4882a593Smuzhiyun 		}
47*4882a593Smuzhiyun 		printk(KERN_INFO "%s %s: placing both ports into native PCI "
48*4882a593Smuzhiyun 			"mode\n", name, pci_name(dev));
49*4882a593Smuzhiyun 		(void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
50*4882a593Smuzhiyun 		if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
51*4882a593Smuzhiyun 		    (progif & 5) != 5) {
52*4882a593Smuzhiyun 			printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
53*4882a593Smuzhiyun 				"wanted 0x%04x, got 0x%04x\n",
54*4882a593Smuzhiyun 				name, pci_name(dev), progif | 5, progif);
55*4882a593Smuzhiyun 			return -EOPNOTSUPP;
56*4882a593Smuzhiyun 		}
57*4882a593Smuzhiyun 	}
58*4882a593Smuzhiyun 	return 0;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
ide_pci_clear_simplex(unsigned long dma_base,const char * name)62*4882a593Smuzhiyun static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	u8 dma_stat = inb(dma_base + 2);
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	outb(dma_stat & 0x60, dma_base + 2);
67*4882a593Smuzhiyun 	dma_stat = inb(dma_base + 2);
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun 	return (dma_stat & 0x80) ? 1 : 0;
70*4882a593Smuzhiyun }
71*4882a593Smuzhiyun 
72*4882a593Smuzhiyun /**
73*4882a593Smuzhiyun  *	ide_pci_dma_base	-	setup BMIBA
74*4882a593Smuzhiyun  *	@hwif: IDE interface
75*4882a593Smuzhiyun  *	@d: IDE port info
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  *	Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
78*4882a593Smuzhiyun  */
79*4882a593Smuzhiyun 
unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	unsigned long base;

	/* MMIO interfaces already carry their base address. */
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		return hwif->dma_base;

	if (hwif->mate && hwif->mate->dma_base) {
		/* Derive our base from the mate's: the two channels are
		 * 8 bytes apart in the bus-master register block. */
		base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
	} else {
		/* CS5520 exposes the BM registers via BAR 2; everything
		 * else uses the standard BAR 4. */
		u8 bar = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;

		base = pci_resource_start(dev, bar);
		if (base == 0) {
			printk(KERN_ERR "%s %s: DMA base is invalid\n",
				d->name, pci_name(dev));
			return 0;
		}
	}

	/* The secondary channel's registers start 8 bytes in. */
	if (hwif->channel)
		base += 8;

	return base;
}
EXPORT_SYMBOL_GPL(ide_pci_dma_base);
108*4882a593Smuzhiyun 
/*
 * ide_pci_check_simplex - decide whether DMA may be used on this interface
 * @hwif: IDE interface being brought up
 * @d: IDE port info for the controller
 *
 * Returns 0 if DMA may be enabled on @hwif, -1 if the device reports
 * simplex operation and the mate interface has already claimed DMA.
 */
int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);
	u8 dma_stat;

	/* MMIO and CS5520-style hosts don't use the SFF status port. */
	if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
		goto out;

	if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
		/* Chipset is known to falsely claim simplex: try clearing
		 * the bit and use DMA regardless of the outcome. */
		if (ide_pci_clear_simplex(hwif->dma_base, d->name))
			printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
				d->name, pci_name(dev));
		goto out;
	}

	/*
	 * If the device claims "simplex" DMA, this means that only one of
	 * the two interfaces can be trusted with DMA at any point in time
	 * (so we should enable DMA only on one of the two interfaces).
	 *
	 * FIXME: At this point we haven't probed the drives so we can't make
	 * the appropriate decision.  Really we should defer this problem until
	 * we tune the drive then try to grab DMA ownership if we want to be
	 * the DMA end.  This has to be become dynamic to handle hot-plug.
	 */
	dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
	/* Simplex bit set and the mate already owns a DMA base: back off. */
	if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
		printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
			d->name, pci_name(dev));
		return -1;
	}
out:
	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun /*
146*4882a593Smuzhiyun  * Set up BM-DMA capability (PnP BIOS should have done this)
147*4882a593Smuzhiyun  */
ide_pci_set_master(struct pci_dev * dev,const char * name)148*4882a593Smuzhiyun int ide_pci_set_master(struct pci_dev *dev, const char *name)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun 	u16 pcicmd;
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun 	pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
155*4882a593Smuzhiyun 		pci_set_master(dev);
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 		if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
158*4882a593Smuzhiyun 		    (pcicmd & PCI_COMMAND_MASTER) == 0) {
159*4882a593Smuzhiyun 			printk(KERN_ERR "%s %s: error updating PCICMD\n",
160*4882a593Smuzhiyun 				name, pci_name(dev));
161*4882a593Smuzhiyun 			return -EIO;
162*4882a593Smuzhiyun 		}
163*4882a593Smuzhiyun 	}
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	return 0;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ide_pci_set_master);
168*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
169*4882a593Smuzhiyun 
ide_setup_pci_noise(struct pci_dev * dev,const struct ide_port_info * d)170*4882a593Smuzhiyun void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun 	printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
173*4882a593Smuzhiyun 		d->name, pci_name(dev),
174*4882a593Smuzhiyun 		dev->vendor, dev->device, dev->revision);
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun /**
180*4882a593Smuzhiyun  *	ide_pci_enable	-	do PCI enables
181*4882a593Smuzhiyun  *	@dev: PCI device
182*4882a593Smuzhiyun  *	@bars: PCI BARs mask
183*4882a593Smuzhiyun  *	@d: IDE port info
184*4882a593Smuzhiyun  *
185*4882a593Smuzhiyun  *	Enable the IDE PCI device. We attempt to enable the device in full
186*4882a593Smuzhiyun  *	but if that fails then we only need IO space. The PCI code should
187*4882a593Smuzhiyun  *	have setup the proper resources for us already for controllers in
188*4882a593Smuzhiyun  *	legacy mode.
189*4882a593Smuzhiyun  *
190*4882a593Smuzhiyun  *	Returns zero on success or an error code
191*4882a593Smuzhiyun  */
192*4882a593Smuzhiyun 
ide_pci_enable(struct pci_dev * dev,int bars,const struct ide_port_info * d)193*4882a593Smuzhiyun static int ide_pci_enable(struct pci_dev *dev, int bars,
194*4882a593Smuzhiyun 			  const struct ide_port_info *d)
195*4882a593Smuzhiyun {
196*4882a593Smuzhiyun 	int ret;
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	if (pci_enable_device(dev)) {
199*4882a593Smuzhiyun 		ret = pci_enable_device_io(dev);
200*4882a593Smuzhiyun 		if (ret < 0) {
201*4882a593Smuzhiyun 			printk(KERN_WARNING "%s %s: couldn't enable device\n",
202*4882a593Smuzhiyun 				d->name, pci_name(dev));
203*4882a593Smuzhiyun 			goto out;
204*4882a593Smuzhiyun 		}
205*4882a593Smuzhiyun 		printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
206*4882a593Smuzhiyun 			d->name, pci_name(dev));
207*4882a593Smuzhiyun 	}
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	/*
210*4882a593Smuzhiyun 	 * assume all devices can do 32-bit DMA for now, we can add
211*4882a593Smuzhiyun 	 * a DMA mask field to the struct ide_port_info if we need it
212*4882a593Smuzhiyun 	 * (or let lower level driver set the DMA mask)
213*4882a593Smuzhiyun 	 */
214*4882a593Smuzhiyun 	ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
215*4882a593Smuzhiyun 	if (ret < 0) {
216*4882a593Smuzhiyun 		printk(KERN_ERR "%s %s: can't set DMA mask\n",
217*4882a593Smuzhiyun 			d->name, pci_name(dev));
218*4882a593Smuzhiyun 		goto out;
219*4882a593Smuzhiyun 	}
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	ret = pci_request_selected_regions(dev, bars, d->name);
222*4882a593Smuzhiyun 	if (ret < 0)
223*4882a593Smuzhiyun 		printk(KERN_ERR "%s %s: can't reserve resources\n",
224*4882a593Smuzhiyun 			d->name, pci_name(dev));
225*4882a593Smuzhiyun out:
226*4882a593Smuzhiyun 	return ret;
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun /**
230*4882a593Smuzhiyun  *	ide_pci_configure	-	configure an unconfigured device
231*4882a593Smuzhiyun  *	@dev: PCI device
232*4882a593Smuzhiyun  *	@d: IDE port info
233*4882a593Smuzhiyun  *
234*4882a593Smuzhiyun  *	Enable and configure the PCI device we have been passed.
235*4882a593Smuzhiyun  *	Returns zero on success or an error code.
236*4882a593Smuzhiyun  */
237*4882a593Smuzhiyun 
static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
{
	u16 pcicmd = 0;
	/*
	 * PnP BIOS was *supposed* to have setup this device, but we
	 * can do it ourselves, so long as the BIOS has assigned an IRQ
	 * (or possibly the device is using a "legacy header" for IRQs).
	 * Maybe the user deliberately *disabled* the device,
	 * but we'll eventually ignore it again if no drives respond.
	 */
	/* Switch to native mode, then enable I/O decoding.  pcicmd is
	 * still 0 here, so only PCI_COMMAND_IO is written. */
	if (ide_setup_pci_baseregs(dev, d->name) ||
	    pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
		printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
			d->name, pci_name(dev));
		return -ENODEV;
	}
	/* Read PCI_COMMAND back to verify the enable actually took. */
	if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
			d->name, pci_name(dev));
		return -EIO;
	}
	if (!(pcicmd & PCI_COMMAND_IO)) {
		printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
			d->name, pci_name(dev));
		return -ENXIO;
	}
	return 0;
}
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun /**
268*4882a593Smuzhiyun  *	ide_pci_check_iomem	-	check a register is I/O
269*4882a593Smuzhiyun  *	@dev: PCI device
270*4882a593Smuzhiyun  *	@d: IDE port info
271*4882a593Smuzhiyun  *	@bar: BAR number
272*4882a593Smuzhiyun  *
273*4882a593Smuzhiyun  *	Checks if a BAR is configured and points to MMIO space. If so,
274*4882a593Smuzhiyun  *	return an error code. Otherwise return 0
275*4882a593Smuzhiyun  */
276*4882a593Smuzhiyun 
ide_pci_check_iomem(struct pci_dev * dev,const struct ide_port_info * d,int bar)277*4882a593Smuzhiyun static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
278*4882a593Smuzhiyun 			       int bar)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	ulong flags = pci_resource_flags(dev, bar);
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	/* Unconfigured ? */
283*4882a593Smuzhiyun 	if (!flags || pci_resource_len(dev, bar) == 0)
284*4882a593Smuzhiyun 		return 0;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	/* I/O space */
287*4882a593Smuzhiyun 	if (flags & IORESOURCE_IO)
288*4882a593Smuzhiyun 		return 0;
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun 	/* Bad */
291*4882a593Smuzhiyun 	return -EINVAL;
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /**
295*4882a593Smuzhiyun  *	ide_hw_configure	-	configure a struct ide_hw instance
296*4882a593Smuzhiyun  *	@dev: PCI device holding interface
297*4882a593Smuzhiyun  *	@d: IDE port info
298*4882a593Smuzhiyun  *	@port: port number
299*4882a593Smuzhiyun  *	@hw: struct ide_hw instance corresponding to this port
300*4882a593Smuzhiyun  *
301*4882a593Smuzhiyun  *	Perform the initial set up for the hardware interface structure. This
302*4882a593Smuzhiyun  *	is done per interface port rather than per PCI device. There may be
303*4882a593Smuzhiyun  *	more than one port per device.
304*4882a593Smuzhiyun  *
305*4882a593Smuzhiyun  *	Returns zero on success or an error code.
306*4882a593Smuzhiyun  */
307*4882a593Smuzhiyun 
ide_hw_configure(struct pci_dev * dev,const struct ide_port_info * d,unsigned int port,struct ide_hw * hw)308*4882a593Smuzhiyun static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
309*4882a593Smuzhiyun 			    unsigned int port, struct ide_hw *hw)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun 	unsigned long ctl = 0, base = 0;
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
314*4882a593Smuzhiyun 		if (ide_pci_check_iomem(dev, d, 2 * port) ||
315*4882a593Smuzhiyun 		    ide_pci_check_iomem(dev, d, 2 * port + 1)) {
316*4882a593Smuzhiyun 			printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
317*4882a593Smuzhiyun 				"reported as MEM for port %d!\n",
318*4882a593Smuzhiyun 				d->name, pci_name(dev), port);
319*4882a593Smuzhiyun 			return -EINVAL;
320*4882a593Smuzhiyun 		}
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 		ctl  = pci_resource_start(dev, 2*port+1);
323*4882a593Smuzhiyun 		base = pci_resource_start(dev, 2*port);
324*4882a593Smuzhiyun 	} else {
325*4882a593Smuzhiyun 		/* Use default values */
326*4882a593Smuzhiyun 		ctl = port ? 0x374 : 0x3f4;
327*4882a593Smuzhiyun 		base = port ? 0x170 : 0x1f0;
328*4882a593Smuzhiyun 	}
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	if (!base || !ctl) {
331*4882a593Smuzhiyun 		printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
332*4882a593Smuzhiyun 			d->name, pci_name(dev), port);
333*4882a593Smuzhiyun 		return -EINVAL;
334*4882a593Smuzhiyun 	}
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	memset(hw, 0, sizeof(*hw));
337*4882a593Smuzhiyun 	hw->dev = &dev->dev;
338*4882a593Smuzhiyun 	ide_std_init_ports(hw, base, ctl | 2);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	return 0;
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
344*4882a593Smuzhiyun /**
345*4882a593Smuzhiyun  *	ide_hwif_setup_dma	-	configure DMA interface
346*4882a593Smuzhiyun  *	@hwif: IDE interface
347*4882a593Smuzhiyun  *	@d: IDE port info
348*4882a593Smuzhiyun  *
349*4882a593Smuzhiyun  *	Set up the DMA base for the interface. Enable the master bits as
350*4882a593Smuzhiyun  *	necessary and attempt to bring the device DMA into a ready to use
351*4882a593Smuzhiyun  *	state
352*4882a593Smuzhiyun  */
353*4882a593Smuzhiyun 
int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	struct pci_dev *dev = to_pci_dev(hwif->dev);

	/* Set up DMA when auto-DMA is allowed for the port, or when the
	 * device is PCI IDE class and its class code advertises
	 * bus-master capability (bit 0x80 of the low class byte). */
	if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
	    ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
	     (dev->class & 0x80))) {
		unsigned long base = ide_pci_dma_base(hwif, d);

		if (base == 0)
			return -1;

		hwif->dma_base = base;

		/* Fall back to the generic SFF-8038i DMA ops when the
		 * host driver didn't install its own. */
		if (hwif->dma_ops == NULL)
			hwif->dma_ops = &sff_dma_ops;

		if (ide_pci_check_simplex(hwif, d) < 0)
			return -1;

		if (ide_pci_set_master(dev, d->name) < 0)
			return -1;

		if (hwif->host_flags & IDE_HFLAG_MMIO)
			printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
		else
			printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
					 hwif->name, base, base + 7);

		/* Extra registers sit past the per-channel pair: offset 16
		 * for channel 0, 8 for channel 1. */
		hwif->extra_base = base + (hwif->channel ? 8 : 16);

		if (ide_allocate_dma_engine(hwif))
			return -1;
	}

	return 0;
}
391*4882a593Smuzhiyun #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun /**
394*4882a593Smuzhiyun  *	ide_setup_pci_controller	-	set up IDE PCI
395*4882a593Smuzhiyun  *	@dev: PCI device
396*4882a593Smuzhiyun  *	@bars: PCI BARs mask
397*4882a593Smuzhiyun  *	@d: IDE port info
398*4882a593Smuzhiyun  *	@noisy: verbose flag
399*4882a593Smuzhiyun  *
400*4882a593Smuzhiyun  *	Set up the PCI and controller side of the IDE interface. This brings
401*4882a593Smuzhiyun  *	up the PCI side of the device, checks that the device is enabled
402*4882a593Smuzhiyun  *	and enables it if need be
403*4882a593Smuzhiyun  */
404*4882a593Smuzhiyun 
static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
				    const struct ide_port_info *d, int noisy)
{
	int ret;
	u16 pcicmd;

	if (noisy)
		ide_setup_pci_noise(dev, d);

	/* Enable the device and claim its BARs. */
	ret = ide_pci_enable(dev, bars, d);
	if (ret < 0)
		goto out;

	ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
	/* NOTE(review): PCI config accessors return positive PCIBIOS_*
	 * error codes, so this "< 0" check may never fire — confirm. */
	if (ret < 0) {
		printk(KERN_ERR "%s %s: error accessing PCI regs\n",
			d->name, pci_name(dev));
		goto out_free_bars;
	}
	if (!(pcicmd & PCI_COMMAND_IO)) {	/* is device disabled? */
		/* BIOS left the device disabled: configure it ourselves. */
		ret = ide_pci_configure(dev, d);
		if (ret < 0)
			goto out_free_bars;
		printk(KERN_INFO "%s %s: device enabled (Linux)\n",
			d->name, pci_name(dev));
	}

	goto out;

out_free_bars:
	pci_release_selected_regions(dev, bars);
out:
	return ret;
}
439*4882a593Smuzhiyun 
440*4882a593Smuzhiyun /**
441*4882a593Smuzhiyun  *	ide_pci_setup_ports	-	configure ports/devices on PCI IDE
442*4882a593Smuzhiyun  *	@dev: PCI device
443*4882a593Smuzhiyun  *	@d: IDE port info
444*4882a593Smuzhiyun  *	@hw: struct ide_hw instances corresponding to this PCI IDE device
445*4882a593Smuzhiyun  *	@hws: struct ide_hw pointers table to update
446*4882a593Smuzhiyun  *
447*4882a593Smuzhiyun  *	Scan the interfaces attached to this device and do any
448*4882a593Smuzhiyun  *	necessary per port setup. Attach the devices and ask the
449*4882a593Smuzhiyun  *	generic DMA layer to do its work for us.
450*4882a593Smuzhiyun  *
 *	Normally called automatically from do_ide_setup_pci_device(),
452*4882a593Smuzhiyun  *	but is also used directly as a helper function by some controllers
453*4882a593Smuzhiyun  *	where the chipset setup is not the default PCI IDE one.
454*4882a593Smuzhiyun  */
455*4882a593Smuzhiyun 
void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
			 struct ide_hw *hw, struct ide_hw **hws)
{
	int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
	u8 tmp;

	/*
	 * Set up the IDE ports
	 */

	for (port = 0; port < channels; ++port) {
		const struct ide_pci_enablebit *e = &d->enablebits[port];

		/* When the port info describes an "enable" bit, skip the
		 * port if that config byte can't be read or doesn't hold
		 * the expected value. */
		if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
		    (tmp & e->mask) != e->val)) {
			printk(KERN_INFO "%s %s: IDE port disabled\n",
				d->name, pci_name(dev));
			continue;	/* port not enabled */
		}

		if (ide_hw_configure(dev, d, port, hw + port))
			continue;

		/* Publish the configured entry into the caller's table. */
		*(hws + port) = hw + port;
	}
}
EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun /*
485*4882a593Smuzhiyun  * ide_setup_pci_device() looks at the primary/secondary interfaces
486*4882a593Smuzhiyun  * on a PCI IDE device and, if they are enabled, prepares the IDE driver
487*4882a593Smuzhiyun  * for use with them.  This generic code works for most PCI chipsets.
488*4882a593Smuzhiyun  *
489*4882a593Smuzhiyun  * One thing that is not standardized is the location of the
490*4882a593Smuzhiyun  * primary/secondary interface "enable/disable" bits.  For chipsets that
491*4882a593Smuzhiyun  * we "know" about, this information is in the struct ide_port_info;
492*4882a593Smuzhiyun  * for all other chipsets, we just assume both interfaces are enabled.
493*4882a593Smuzhiyun  */
static int do_ide_setup_pci_device(struct pci_dev *dev,
				   const struct ide_port_info *d,
				   u8 noisy)
{
	int pciirq, ret;

	/*
	 * Can we trust the reported IRQ?
	 */
	pciirq = dev->irq;

	/*
	 * This allows offboard ide-pci cards to enable a BIOS,
	 * verify interrupt settings of split-mirror pci-config
	 * space, place chipset into init-mode, and/or preserve
	 * an interrupt if the card is not native ide support.
	 */
	ret = d->init_chipset ? d->init_chipset(dev) : 0;
	if (ret < 0)
		goto out;

	if (ide_pci_is_in_compatibility_mode(dev)) {
		/* Legacy mode: the PCI IRQ is meaningless, probe later. */
		if (noisy)
			printk(KERN_INFO "%s %s: not 100%% native mode: will "
				"probe irqs later\n", d->name, pci_name(dev));
		pciirq = 0;
	} else if (!pciirq && noisy) {
		printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
			d->name, pci_name(dev), pciirq);
	} else if (noisy) {
		printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
			d->name, pci_name(dev), pciirq);
	}

	/* On success the return value is the IRQ to use (0 = probe later). */
	ret = pciirq;
out:
	return ret;
}
532*4882a593Smuzhiyun 
int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
		     const struct ide_port_info *d, void *priv)
{
	struct pci_dev *pdev[] = { dev1, dev2 };
	struct ide_host *host;
	int ret, i, n_ports = dev2 ? 4 : 2, bars;
	struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };

	/* Build the BAR mask: BARs 0-1 for a single-channel host,
	 * BARs 0-3 for a dual-channel one. */
	if (d->host_flags & IDE_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		/* Add the bus-master DMA BAR: 2 for CS5520, 4 otherwise. */
		if (d->host_flags & IDE_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	/* Enable each PCI device and gather its port descriptions;
	 * only the first device gets the noisy banner. */
	for (i = 0; i < n_ports / 2; i++) {
		ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
		if (ret < 0) {
			/* The second device failed: undo the first. */
			if (i == 1)
				pci_release_selected_regions(pdev[0], bars);
			goto out;
		}

		ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
	}

	host = ide_host_alloc(d, hws, n_ports);
	if (host == NULL) {
		ret = -ENOMEM;
		goto out_free_bars;
	}

	host->dev[0] = &dev1->dev;
	if (dev2)
		host->dev[1] = &dev2->dev;

	host->host_priv = priv;
	host->irq_flags = IRQF_SHARED;

	pci_set_drvdata(pdev[0], host);
	if (dev2)
		pci_set_drvdata(pdev[1], host);

	for (i = 0; i < n_ports / 2; i++) {
		ret = do_ide_setup_pci_device(pdev[i], d, !i);

		/*
		 * FIXME: Mom, mom, they stole me the helper function to undo
		 * do_ide_setup_pci_device() on the first device!
		 */
		if (ret < 0)
			goto out_free_bars;

		/* fixup IRQ: in compatibility mode use the legacy ISA IRQs,
		 * otherwise use the IRQ returned above for both channels. */
		if (ide_pci_is_in_compatibility_mode(pdev[i])) {
			hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0);
			hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1);
		} else
			hw[i*2 + 1].irq = hw[i*2].irq = ret;
	}

	ret = ide_host_register(host, d, hws);
	if (ret)
		ide_host_free(host);
	else
		goto out;	/* success: keep the claimed regions */

out_free_bars:
	i = n_ports / 2;
	while (i--)
		pci_release_selected_regions(pdev[i], bars);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ide_pci_init_two);
613*4882a593Smuzhiyun 
/* Single-device convenience wrapper around ide_pci_init_two(). */
int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
		     void *priv)
{
	return ide_pci_init_two(dev, NULL, d, priv);
}
EXPORT_SYMBOL_GPL(ide_pci_init_one);
620*4882a593Smuzhiyun 
/*
 * ide_pci_remove - tear down an IDE PCI host
 * @dev: primary PCI device of the host
 *
 * Unregisters the host, then releases the BARs and disables both PCI
 * devices (when a second one exists).  The BAR mask is recomputed the
 * same way ide_pci_init_two() built it.
 */
void ide_pci_remove(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
	int bars;

	if (host->host_flags & IDE_HFLAG_SINGLE)
		bars = (1 << 2) - 1;
	else
		bars = (1 << 4) - 1;

	if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
		/* Include the bus-master DMA BAR, as at init time. */
		if (host->host_flags & IDE_HFLAG_CS5520)
			bars |= (1 << 2);
		else
			bars |= (1 << 4);
	}

	/* Remove the host first so nothing touches the regions below. */
	ide_host_remove(host);

	if (dev2)
		pci_release_selected_regions(dev2, bars);
	pci_release_selected_regions(dev, bars);

	if (dev2)
		pci_disable_device(dev2);
	pci_disable_device(dev);
}
EXPORT_SYMBOL_GPL(ide_pci_remove);
650*4882a593Smuzhiyun 
651*4882a593Smuzhiyun #ifdef CONFIG_PM
/*
 * ide_pci_suspend - standard PCI suspend sequence for IDE hosts
 * @dev: PCI device
 * @state: target power state message
 *
 * Saves config space, disables the device, then drops it into the
 * chosen low-power state.  Always returns 0.
 */
int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	pci_save_state(dev);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_suspend);
661*4882a593Smuzhiyun 
/*
 * ide_pci_resume - standard PCI resume sequence for IDE hosts
 * @dev: PCI device
 *
 * Brings the device back to D0, re-enables it, restores config space
 * and bus mastering, then re-runs the host's chipset init hook (if
 * one was registered).  Returns 0 on success or the pci_enable_device()
 * error code.
 */
int ide_pci_resume(struct pci_dev *dev)
{
	struct ide_host *host = pci_get_drvdata(dev);
	int rc;

	pci_set_power_state(dev, PCI_D0);

	rc = pci_enable_device(dev);
	if (rc)
		return rc;

	pci_restore_state(dev);
	pci_set_master(dev);

	/* Re-apply chipset-specific setup lost across the power cycle. */
	if (host->init_chipset)
		host->init_chipset(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(ide_pci_resume);
682*4882a593Smuzhiyun #endif
683