// SPDX-License-Identifier: GPL-2.0-only
/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gfp.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.100"
#define DRV_MODULE_RELDATE	"August 27, 2008"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

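/* Map the companion DVMA controller's registers and record its revision,
 * decoded from the device ID field of DMA_CSR.  esp->dmarev selects the
 * revision-specific handling in the reset/drain/invalidate paths below.
 */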
static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

static int esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &op->resource[1];
	else
		res = &op->resource[0];

	esp->regs = of_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

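/* Allocate the 16-byte DMA-coherent scratch block that the esp_scsi core
 * expects at esp->command_block / esp->command_block_dma.
 */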
static int esp_sbus_map_command_block(struct esp *esp)
{
	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

static int esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct platform_device *op = to_platform_device(esp->dev);

	host->irq = op->archdata.irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

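/* Determine our initiator ID: prefer "initiator-id" on the ESP node, then
 * "scsi-initiator-id" on the ESP node, then the same property on the DVMA
 * node, falling back to the conventional default of 7.
 */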
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(espdma->dev.of_node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

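/* Flag differential SCSI signalling if the firmware node carries a
 * "differential" property.
 */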
static void esp_get_differential(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;

	dp = op->dev.of_node;
	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

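/* The chip's input clock comes from the "clock-frequency" property of the
 * ESP node, or from the parent bus node if the ESP node lacks one.
 */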
static void esp_get_clock_params(struct esp *esp)
{
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *bus_dp, *dp;
	int fmhz;

	dp = op->dev.of_node;
	bus_dp = dp->parent;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

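/* Compute the usable burst sizes as the intersection of the "burst-sizes"
 * masks advertised by the ESP node, the DVMA node and the DVMA's parent
 * bus, falling back to a conservative default mask when the properties
 * are absent or do not allow both 16- and 32-byte bursts.
 */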
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
	struct device_node *dma_dp = dma_of->dev.of_node;
	struct platform_device *op = to_platform_device(esp->dev);
	struct device_node *dp;
	u8 bursts, val;

	dp = op->dev.of_node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	val = of_getintprop_default(dma_dp->parent, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
	esp_get_scsi_id(esp, espdma);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

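/* Register accessors: the byte-wide ESP registers are spaced four bytes
 * apart in the SBUS mapping, hence the reg * 4UL scaling.
 */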
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

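/* Bring the DVMA engine to a known state.  Apart from the common SCSI
 * reset pulse, each revision needs its own CSR setup: HME gets a full
 * FAS366 reset and a rebuilt cached CSR (including burst size and SBUS64
 * selection), rev2/rev3 get their clock settings adjusted, and ESC1 gets
 * address enable with the ESC burst bit reflecting the negotiated bursts.
 * Interrupts are re-enabled at the end in all cases.
 */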
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	struct platform_device *op = to_platform_device(esp->dev);
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit())
		can_do_sbus64 = 1;
	if (sbus_can_burst64())
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(&op->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

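/* If the DVMA FIFO still holds data, drain it to memory and wait (bounded
 * at 1ms) for completion.  HME is skipped entirely; rev3 and ESC1 drain
 * without an explicit kick, everything else is started via
 * DMA_FIFO_STDRAIN.
 */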
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

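/* Discard any state the DVMA still holds for the previous request.  HME
 * takes a SCSI-channel reset plus a rewrite of the cached CSR and a zero
 * DMA_ADDR; older revisions wait (bounded) for pending reads to finish
 * and then pulse DMA_FIFO_INV.
 */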
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

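/* Program a DMA transfer and issue the accompanying ESP command.  The
 * FASHME path loads the low 24 bits of the transfer counter, issues the
 * command first and then arms the DVMA from the cached CSR; the classic
 * path arms the DVMA first and issues the command last.  ESC1 additionally
 * wants DMA_COUNT padded out toward the next page boundary.
 */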
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	=	sbus_esp_write8,
	.esp_read8	=	sbus_esp_read8,
	.irq_pending	=	sbus_esp_irq_pending,
	.reset_dma	=	sbus_esp_reset_dma,
	.dma_drain	=	sbus_esp_dma_drain,
	.dma_invalidate	=	sbus_esp_dma_invalidate,
	.send_dma_cmd	=	sbus_esp_send_dma_cmd,
	.dma_error	=	sbus_esp_dma_error,
};

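/* Common probe path for both plain ESP and HME FAS366 instances: allocate
 * the Scsi_Host, map the DVMA and ESP registers, set up the command block
 * and IRQ, pull configuration from the firmware properties, and hand the
 * initialized esp to the core via scsi_esp_register().
 */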
static int esp_sbus_probe_one(struct platform_device *op,
			      struct platform_device *espdma, int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = &op->dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&op->dev, esp);

	err = scsi_esp_register(esp);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	of_iounmap(&op->resource[(hme ? 1 : 0)], esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

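/* Match the firmware topology: a plain "esp"/"SUNW,esp" node hangs under
 * an "espdma" or "dma" parent that provides the DVMA, while "SUNW,fas"
 * (HME) carries both register sets on a single node.  The reference taken
 * on the DVMA platform device is held for the lifetime of the host and
 * dropped on probe failure or removal.
 */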
static int esp_sbus_probe(struct platform_device *op)
{
	struct device_node *dma_node = NULL;
	struct device_node *dp = op->dev.of_node;
	struct platform_device *dma_of = NULL;
	int hme = 0;
	int ret;

	if (of_node_name_eq(dp->parent, "espdma") ||
	    of_node_name_eq(dp->parent, "dma"))
		dma_node = dp->parent;
	else if (of_node_name_eq(dp, "SUNW,fas")) {
		dma_node = op->dev.of_node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	ret = esp_sbus_probe_one(op, dma_of, hme);
	if (ret)
		put_device(&dma_of->dev);

	return ret;
}

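/* Tear down in reverse order of probe: unhook from the core, mask DVMA
 * interrupts, release the IRQ, free the command block, unmap both register
 * ranges and drop the reference on the DVMA device.
 */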
static int esp_sbus_remove(struct platform_device *op)
{
	struct esp *esp = dev_get_drvdata(&op->dev);
	struct platform_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	bool is_hme;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts.  */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);

	is_hme = (esp->dmarev == dvmahme);

	dma_free_coherent(&op->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	of_iounmap(&op->resource[(is_hme ? 1 : 0)], esp->regs,
		   SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	dev_set_drvdata(&op->dev, NULL);

	put_device(&dma_of->dev);

	return 0;
}

static const struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct platform_driver esp_sbus_driver = {
	.driver = {
		.name = "esp",
		.of_match_table = esp_match,
	},
	.probe		= esp_sbus_probe,
	.remove		= esp_sbus_remove,
};
module_platform_driver(esp_sbus_driver);

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);