// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_vsc.c - Vitesse VSC7174 4 port DPA SATA
 *
 *  Maintained by:  Jeremy Higdon @ SGI
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004 SGI
 *
 *  Bits from Jeff Garzik, Copyright RedHat, Inc.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  Vitesse hardware documentation presumably available under NDA.
 *  Intel 31244 (same hardware interface) documentation presumably
 *  available from http://developer.intel.com/
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_vsc"
#define DRV_VERSION	"2.3"

enum {
	VSC_MMIO_BAR			= 0,

	/* Interrupt register offsets (from chip base address) */
	VSC_SATA_INT_STAT_OFFSET	= 0x00,
	VSC_SATA_INT_MASK_OFFSET	= 0x04,

	/* Taskfile register offsets */
	VSC_SATA_TF_CMD_OFFSET		= 0x00,
	VSC_SATA_TF_DATA_OFFSET		= 0x00,
	VSC_SATA_TF_ERROR_OFFSET	= 0x04,
	VSC_SATA_TF_FEATURE_OFFSET	= 0x06,
	VSC_SATA_TF_NSECT_OFFSET	= 0x08,
	VSC_SATA_TF_LBAL_OFFSET		= 0x0c,
	VSC_SATA_TF_LBAM_OFFSET		= 0x10,
	VSC_SATA_TF_LBAH_OFFSET		= 0x14,
	VSC_SATA_TF_DEVICE_OFFSET	= 0x18,
	VSC_SATA_TF_STATUS_OFFSET	= 0x1c,
	VSC_SATA_TF_COMMAND_OFFSET	= 0x1d,
	VSC_SATA_TF_ALTSTATUS_OFFSET	= 0x28,
	VSC_SATA_TF_CTL_OFFSET		= 0x29,

	/* DMA base */
	VSC_SATA_UP_DESCRIPTOR_OFFSET	= 0x64,
	VSC_SATA_UP_DATA_BUFFER_OFFSET	= 0x6C,
	VSC_SATA_DMA_CMD_OFFSET		= 0x70,

	/* SCRs base */
	VSC_SATA_SCR_STATUS_OFFSET	= 0x100,
	VSC_SATA_SCR_ERROR_OFFSET	= 0x104,
	VSC_SATA_SCR_CONTROL_OFFSET	= 0x108,

	/* Port stride */
	VSC_SATA_PORT_OFFSET		= 0x200,

	/* Error interrupt status bit offsets */
	VSC_SATA_INT_ERROR_CRC		= 0x40,
	VSC_SATA_INT_ERROR_T		= 0x20,
	VSC_SATA_INT_ERROR_P		= 0x10,
	VSC_SATA_INT_ERROR_R		= 0x8,
	VSC_SATA_INT_ERROR_E		= 0x4,
	VSC_SATA_INT_ERROR_M		= 0x2,
	VSC_SATA_INT_PHY_CHANGE		= 0x1,
	VSC_SATA_INT_ERROR = (VSC_SATA_INT_ERROR_CRC  | VSC_SATA_INT_ERROR_T | \
			      VSC_SATA_INT_ERROR_P    | VSC_SATA_INT_ERROR_R | \
			      VSC_SATA_INT_ERROR_E    | VSC_SATA_INT_ERROR_M | \
			      VSC_SATA_INT_PHY_CHANGE),
};
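
/*
 * Register layout note (inferred from the offsets above and from
 * vsc_sata_init_one(); the hardware documentation itself is under NDA):
 * the chip-global registers occupy the first VSC_SATA_PORT_OFFSET bytes
 * of BAR 0 and port N's block starts at (N + 1) * VSC_SATA_PORT_OFFSET.
 * For example, port 1's SStatus register lands at 2 * 0x200 + 0x100 =
 * 0x500, with SError at 0x504 and SControl at 0x508, which is exactly
 * what the sc_reg * 4 indexing in vsc_sata_scr_read() and
 * vsc_sata_scr_write() relies on (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2).
 */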

static int vsc_sata_scr_read(struct ata_link *link,
			     unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	*val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}


static int vsc_sata_scr_write(struct ata_link *link,
			      unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;
	writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}

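/*
 * The per-port interrupt mask lives at VSC_SATA_INT_MASK_OFFSET, one byte
 * per port (hence the "+ ap->port_no" below).  Writing 0 masks every
 * interrupt source for the port (freeze), writing 0xff unmasks them all
 * (thaw), and vsc_intr_mask_update() toggles only bit 7 to mirror the
 * taskfile ATA_NIEN bit.  The meaning of the individual mask bits is not
 * documented here; this summary is inferred from the code below.
 */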
static void vsc_freeze(struct ata_port *ap)
{
	void __iomem *mask_addr;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;

	writeb(0, mask_addr);
}


static void vsc_thaw(struct ata_port *ap)
{
	void __iomem *mask_addr;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;

	writeb(0xff, mask_addr);
}


static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
{
	void __iomem *mask_addr;
	u8 mask;

	mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
		VSC_SATA_INT_MASK_OFFSET + ap->port_no;
	mask = readb(mask_addr);
	if (ctl & ATA_NIEN)
		mask |= 0x80;
	else
		mask &= 0x7F;
	writeb(mask, mask_addr);
}


static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/*
	 * The only thing the ctl register is used for is SRST.
	 * That is not enabled or disabled via tf_load.
	 * However, if ATA_NIEN is changed, then we need to change
	 * the interrupt register.
	 */
	if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) {
		ap->last_ctl = tf->ctl;
		vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN);
	}
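	/*
	 * Each taskfile register is accessed as a 16-bit quantity whose high
	 * byte carries the HOB ("previous") value, so a single writew() loads
	 * both halves of an LBA48 field at once and no separate HOB write
	 * pass is needed.  vsc_sata_tf_read() relies on the same layout and
	 * splits the value back out with a readw() and a shift.
	 */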
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writew(tf->feature | (((u16)tf->hob_feature) << 8),
		       ioaddr->feature_addr);
		writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
		       ioaddr->nsect_addr);
		writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
		       ioaddr->lbal_addr);
		writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
		       ioaddr->lbam_addr);
		writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
		       ioaddr->lbah_addr);
	} else if (is_addr) {
		writew(tf->feature, ioaddr->feature_addr);
		writew(tf->nsect, ioaddr->nsect_addr);
		writew(tf->lbal, ioaddr->lbal_addr);
		writew(tf->lbam, ioaddr->lbam_addr);
		writew(tf->lbah, ioaddr->lbah_addr);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		writeb(tf->device, ioaddr->device_addr);

	ata_wait_idle(ap);
}


static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u16 nsect, lbal, lbam, lbah, feature;

	tf->command = ata_sff_check_status(ap);
	tf->device = readw(ioaddr->device_addr);
	feature = readw(ioaddr->error_addr);
	nsect = readw(ioaddr->nsect_addr);
	lbal = readw(ioaddr->lbal_addr);
	lbam = readw(ioaddr->lbam_addr);
	lbah = readw(ioaddr->lbah_addr);

	tf->feature = feature;
	tf->nsect = nsect;
	tf->lbal = lbal;
	tf->lbam = lbam;
	tf->lbah = lbah;

	if (tf->flags & ATA_TFLAG_LBA48) {
		tf->hob_feature = feature >> 8;
		tf->hob_nsect = nsect >> 8;
		tf->hob_lbal = lbal >> 8;
		tf->hob_lbam = lbam >> 8;
		tf->hob_lbah = lbah >> 8;
	}
}

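/*
 * A PHY change or "M" error is treated as fatal: the port is frozen,
 * which forces a full libata error-handling cycle including a reset.
 * The remaining error bits only abort the commands in flight.
 */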
static inline void vsc_error_intr(u8 port_status, struct ata_port *ap)
{
	if (port_status & (VSC_SATA_INT_PHY_CHANGE | VSC_SATA_INT_ERROR_M))
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void vsc_port_intr(u8 port_status, struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	int handled = 0;

	if (unlikely(port_status & VSC_SATA_INT_ERROR)) {
		vsc_error_intr(port_status, ap);
		return;
	}

	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && likely(!(qc->tf.flags & ATA_TFLAG_POLLING)))
		handled = ata_bmdma_port_intr(ap, qc);

	/*
	 * We received an interrupt during a polled command, or some other
	 * spurious condition.  Interrupt reporting with this hardware is
	 * fairly reliable, so it is safe to simply clear the interrupt.
	 */
	if (unlikely(!handled))
		ap->ops->sff_check_status(ap);
}

/*
 * vsc_sata_interrupt
 *
 * Read the interrupt status register and service each port that has an
 * interrupt pending.
 */
static irqreturn_t vsc_sata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	u32 status;

	status = readl(host->iomap[VSC_MMIO_BAR] + VSC_SATA_INT_STAT_OFFSET);

	if (unlikely(status == 0xffffffff || status == 0)) {
		if (status)
			dev_err(host->dev,
				"IRQ status == 0xffffffff, PCI fault or device removal?\n");
		goto out;
	}

	spin_lock(&host->lock);

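	/*
	 * The interrupt status register packs one status byte per port;
	 * peel off each port's byte and hand it to vsc_port_intr().
	 */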
	for (i = 0; i < host->n_ports; i++) {
		u8 port_status = (status >> (8 * i)) & 0xff;
		if (port_status) {
			vsc_port_intr(port_status, host->ports[i]);
			handled++;
		}
	}

	spin_unlock(&host->lock);
out:
	return IRQ_RETVAL(handled);
}


static struct scsi_host_template vsc_sata_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};


static struct ata_port_operations vsc_sata_ops = {
	.inherits		= &ata_bmdma_port_ops,
	/* The IRQ handling is not quite standard SFF behaviour so we
	   cannot use the default lost interrupt handler */
	.lost_interrupt		= ATA_OP_NULL,
	.sff_tf_load		= vsc_sata_tf_load,
	.sff_tf_read		= vsc_sata_tf_read,
	.freeze			= vsc_freeze,
	.thaw			= vsc_thaw,
	.scr_read		= vsc_sata_scr_read,
	.scr_write		= vsc_sata_scr_write,
};

static void vsc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
	port->cmd_addr		= base + VSC_SATA_TF_CMD_OFFSET;
	port->data_addr		= base + VSC_SATA_TF_DATA_OFFSET;
	port->error_addr	= base + VSC_SATA_TF_ERROR_OFFSET;
	port->feature_addr	= base + VSC_SATA_TF_FEATURE_OFFSET;
	port->nsect_addr	= base + VSC_SATA_TF_NSECT_OFFSET;
	port->lbal_addr		= base + VSC_SATA_TF_LBAL_OFFSET;
	port->lbam_addr		= base + VSC_SATA_TF_LBAM_OFFSET;
	port->lbah_addr		= base + VSC_SATA_TF_LBAH_OFFSET;
	port->device_addr	= base + VSC_SATA_TF_DEVICE_OFFSET;
	port->status_addr	= base + VSC_SATA_TF_STATUS_OFFSET;
	port->command_addr	= base + VSC_SATA_TF_COMMAND_OFFSET;
	port->altstatus_addr	= base + VSC_SATA_TF_ALTSTATUS_OFFSET;
	port->ctl_addr		= base + VSC_SATA_TF_CTL_OFFSET;
	port->bmdma_addr	= base + VSC_SATA_DMA_CMD_OFFSET;
	port->scr_addr		= base + VSC_SATA_SCR_STATUS_OFFSET;
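	/*
	 * Presumably ("UP" = upper) the high halves of the DMA descriptor
	 * and data buffer addresses; they are cleared here, which matches
	 * the 32-bit DMA mask set in vsc_sata_init_one().
	 */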
	writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
	writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}


static int vsc_sata_init_one(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	static const struct ata_port_info pi = {
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= ATA_PIO4,
		.mwdma_mask	= ATA_MWDMA2,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &vsc_sata_ops,
	};
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct ata_host *host;
	void __iomem *mmio_base;
	int i, rc;
	u8 cls;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 4);
	if (!host)
		return -ENOMEM;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* check if we have the needed resources mapped */
	if (pci_resource_len(pdev, 0) == 0)
		return -ENODEV;

	/* map IO regions and initialize host accordingly */
	rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	mmio_base = host->iomap[VSC_MMIO_BAR];

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned int offset = (i + 1) * VSC_SATA_PORT_OFFSET;

		vsc_sata_setup_port(&ap->ioaddr, mmio_base + offset);

		ata_port_pbar_desc(ap, VSC_MMIO_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, VSC_MMIO_BAR, offset, "port");
	}

	/*
	 * Use 32 bit DMA mask, because 64 bit address support is poor.
	 */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/*
	 * Due to a bug in the chip, the default cache line size can't be
	 * used (unless the default is non-zero).
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
	if (cls == 0x00)
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);

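	/* Prefer MSI; if it can be enabled, mask the legacy INTx line. */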
	if (pci_enable_msi(pdev) == 0)
		pci_intx(pdev, 0);

	/*
	 * Config offset 0x98 is "Extended Control and Status Register 0"
	 * Default value is (1 << 28).  All bits except bit 28 are reserved in
	 * DPA mode.  If bit 28 is set, LED 0 reflects all ports' activity.
	 * If bit 28 is clear, each port has its own LED.
	 */
	pci_write_config_dword(pdev, 0x98, 0);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, vsc_sata_interrupt,
				 IRQF_SHARED, &vsc_sata_sht);
}

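/*
 * Both entries also match on PCI class code 0x010600 (mass storage, SATA,
 * prog-if 0) with a full 24-bit class mask, so the driver only binds when
 * the controller presents that class code.
 */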
static const struct pci_device_id vsc_sata_pci_tbl[] = {
	{ PCI_VENDOR_ID_VITESSE, 0x7174,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
	{ PCI_VENDOR_ID_INTEL, 0x3200,
	  PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },

	{ }	/* terminate list */
};

static struct pci_driver vsc_sata_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= vsc_sata_pci_tbl,
	.probe			= vsc_sata_init_one,
	.remove			= ata_pci_remove_one,
};

module_pci_driver(vsc_sata_pci_driver);

MODULE_AUTHOR("Jeremy Higdon");
MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl);
MODULE_VERSION(DRV_VERSION);