// SPDX-License-Identifier: GPL-2.0
/*
 * ESP front-end for Amiga ZORRO SCSI systems.
 *
 * Copyright (C) 1996 Jesper Skov (jskov@cygnus.co.uk)
 *
 * Copyright (C) 2011,2018 Michael Schmitz (schmitz@debian.org) for
 *     migration to ESP SCSI core
 *
 * Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
 *     Blizzard 1230 DMA and probe function fixes
 */
/*
 * ZORRO bus code from:
 */
/*
 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
 *	Amiga MacroSystemUS WarpEngine SCSI controller.
 *	Amiga Technologies/DKB A4091 SCSI controller.
 *
 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
 * plus modifications of the 53c7xx.c driver to support the Amiga.
 *
 * Rewritten to use 53c700.c by Kars de Jong <jongk@linux-m68k.org>
 */
26*4882a593Smuzhiyun
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/zorro.h>
#include <linux/slab.h>
#include <linux/pgtable.h>

#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/amigahw.h>
#include <asm/amigaints.h>

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_spi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

#include "esp_scsi.h"
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun MODULE_AUTHOR("Michael Schmitz <schmitz@debian.org>");
52*4882a593Smuzhiyun MODULE_DESCRIPTION("Amiga Zorro NCR5C9x (ESP) driver");
53*4882a593Smuzhiyun MODULE_LICENSE("GPL");
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun /* per-board register layout definitions */
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun /* Blizzard 1230 DMA interface */
58*4882a593Smuzhiyun
/* Blizzard 1230 DMA interface register layout (byte offsets in comments). */
struct blz1230_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0x7fff];
	unsigned char dma_latch;	/* DMA latch        [0x8000] */
};
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /* Blizzard 1230II DMA interface */
66*4882a593Smuzhiyun
/* Blizzard 1230II DMA interface register layout. */
struct blz1230II_dma_registers {
	unsigned char dma_addr;		/* DMA address      [0x0000] */
	unsigned char dmapad2[0xf];
	unsigned char dma_latch;	/* DMA latch        [0x0010] */
};
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /* Blizzard 2060 DMA interface */
74*4882a593Smuzhiyun
/* Blizzard 2060 DMA interface register layout. */
struct blz2060_dma_registers {
	unsigned char dma_led_ctrl;	/* DMA led control   [0x000] */
	unsigned char dmapad1[0x0f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x010] */
	unsigned char dmapad2[0x03];
	unsigned char dma_addr1;	/* DMA address       [0x014] */
	unsigned char dmapad3[0x03];
	unsigned char dma_addr2;	/* DMA address       [0x018] */
	unsigned char dmapad4[0x03];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x01c] */
};

/* DMA control bits */
#define DMA_WRITE 0x80000000
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun /* Cyberstorm DMA interface */
91*4882a593Smuzhiyun
/* Cyberstorm DMA interface register layout. */
struct cyber_dma_registers {
	unsigned char dma_addr0;	/* DMA address (MSB) [0x000] */
	unsigned char dmapad1[1];
	unsigned char dma_addr1;	/* DMA address       [0x002] */
	unsigned char dmapad2[1];
	unsigned char dma_addr2;	/* DMA address       [0x004] */
	unsigned char dmapad3[1];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x006] */
	unsigned char dmapad4[0x3fb];
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x402] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x402] */
};

/* DMA control bits */
#define CYBER_DMA_WRITE  0x40	/* DMA direction. 1 = write */
#define CYBER_DMA_Z3     0x20	/* 16 (Z2) or 32 (CHIP/Z3) bit DMA transfer */

/* DMA status bits */
#define CYBER_DMA_HNDL_INTR 0x80	/* DMA IRQ pending? */
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /* The CyberStorm II DMA interface */
/* The CyberStorm II DMA interface register layout. */
struct cyberII_dma_registers {
	unsigned char cond_reg;		/* DMA cond    (ro)  [0x000] */
#define ctrl_reg  cond_reg		/* DMA control (wo)  [0x000] */
	unsigned char dmapad4[0x3f];
	unsigned char dma_addr0;	/* DMA address (MSB) [0x040] */
	unsigned char dmapad1[3];
	unsigned char dma_addr1;	/* DMA address       [0x044] */
	unsigned char dmapad2[3];
	unsigned char dma_addr2;	/* DMA address       [0x048] */
	unsigned char dmapad3[3];
	unsigned char dma_addr3;	/* DMA address (LSB) [0x04c] */
};
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /* Fastlane DMA interface */
127*4882a593Smuzhiyun
/* Fastlane DMA interface register layout. */
struct fastlane_dma_registers {
	unsigned char cond_reg;		/* DMA status  (ro) [0x0000] */
#define ctrl_reg  cond_reg		/* DMA control (wo) [0x0000] */
	char dmapad1[0x3f];
	unsigned char clear_strobe;	/* DMA clear   (wo) [0x0040] */
};

/*
 * The controller registers can be found in the Z2 config area at these
 * offsets:
 */
#define FASTLANE_ESP_ADDR	0x1000001

/* DMA status bits */
#define FASTLANE_DMA_MINT	0x80
#define FASTLANE_DMA_IACT	0x40
#define FASTLANE_DMA_CREQ	0x20

/* DMA control bits */
#define FASTLANE_DMA_FCODE	0xa0
#define FASTLANE_DMA_MASK	0xf3
#define FASTLANE_DMA_WRITE	0x08	/* 1 = write */
#define FASTLANE_DMA_ENABLE	0x04	/* Enable DMA */
#define FASTLANE_DMA_EDI	0x02	/* Enable DMA IRQ ? */
#define FASTLANE_DMA_ESI	0x01	/* Enable SCSI IRQ */
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun /*
155*4882a593Smuzhiyun * private data used for driver
156*4882a593Smuzhiyun */
157*4882a593Smuzhiyun struct zorro_esp_priv {
158*4882a593Smuzhiyun struct esp *esp; /* our ESP instance - for Scsi_host* */
159*4882a593Smuzhiyun void __iomem *board_base; /* virtual address (Zorro III board) */
160*4882a593Smuzhiyun int zorro3; /* board is Zorro III */
161*4882a593Smuzhiyun unsigned char ctrl_data; /* shadow copy of ctrl_reg */
162*4882a593Smuzhiyun };
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun * On all implementations except for the Oktagon, padding between ESP
166*4882a593Smuzhiyun * registers is three bytes.
167*4882a593Smuzhiyun * On Oktagon, it is one byte - use a different accessor there.
168*4882a593Smuzhiyun *
169*4882a593Smuzhiyun * Oktagon needs PDMA - currently unsupported!
170*4882a593Smuzhiyun */
171*4882a593Smuzhiyun
/*
 * Write an ESP register.  On these boards the ESP registers are spaced
 * four bytes apart, hence the (reg * 4) scaling.
 */
static void zorro_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}
176*4882a593Smuzhiyun
zorro_esp_read8(struct esp * esp,unsigned long reg)177*4882a593Smuzhiyun static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun return readb(esp->regs + (reg * 4UL));
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun
zorro_esp_irq_pending(struct esp * esp)182*4882a593Smuzhiyun static int zorro_esp_irq_pending(struct esp *esp)
183*4882a593Smuzhiyun {
184*4882a593Smuzhiyun /* check ESP status register; DMA has no status reg. */
185*4882a593Smuzhiyun if (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
186*4882a593Smuzhiyun return 1;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun return 0;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun
cyber_esp_irq_pending(struct esp * esp)191*4882a593Smuzhiyun static int cyber_esp_irq_pending(struct esp *esp)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
194*4882a593Smuzhiyun unsigned char dma_status = readb(&dregs->cond_reg);
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /* It's important to check the DMA IRQ bit in the correct way! */
197*4882a593Smuzhiyun return ((zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR) &&
198*4882a593Smuzhiyun (dma_status & CYBER_DMA_HNDL_INTR));
199*4882a593Smuzhiyun }
200*4882a593Smuzhiyun
fastlane_esp_irq_pending(struct esp * esp)201*4882a593Smuzhiyun static int fastlane_esp_irq_pending(struct esp *esp)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
204*4882a593Smuzhiyun unsigned char dma_status;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun dma_status = readb(&dregs->cond_reg);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun if (dma_status & FASTLANE_DMA_IACT)
209*4882a593Smuzhiyun return 0; /* not our IRQ */
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun /* Return non-zero if ESP requested IRQ */
212*4882a593Smuzhiyun return (
213*4882a593Smuzhiyun (dma_status & FASTLANE_DMA_CREQ) &&
214*4882a593Smuzhiyun (!(dma_status & FASTLANE_DMA_MINT)) &&
215*4882a593Smuzhiyun (zorro_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR));
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun
/* Cap a single DMA transfer at 64k (the ESP transfer counter width). */
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
}
223*4882a593Smuzhiyun
/* Fastlane transfer-length cap. */
static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
					u32 dma_len)
{
	/* The old driver used 0xfffc as limit, so do that here too */
	return dma_len > 0xfffc ? 0xfffc : dma_len;
}
230*4882a593Smuzhiyun
/* No board-specific DMA reset needed on these boards. */
static void zorro_esp_reset_dma(struct esp *esp)
{
	/* nothing to do here */
}
235*4882a593Smuzhiyun
/* No DMA FIFO drain needed on these boards. */
static void zorro_esp_dma_drain(struct esp *esp)
{
	/* nothing to do here */
}
240*4882a593Smuzhiyun
/* Default DMA invalidate hook - no action required. */
static void zorro_esp_dma_invalidate(struct esp *esp)
{
	/* nothing to do here */
}
245*4882a593Smuzhiyun
fastlane_esp_dma_invalidate(struct esp * esp)246*4882a593Smuzhiyun static void fastlane_esp_dma_invalidate(struct esp *esp)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
249*4882a593Smuzhiyun struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
250*4882a593Smuzhiyun unsigned char *ctrl_data = &zep->ctrl_data;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun *ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK);
253*4882a593Smuzhiyun writeb(0, &dregs->clear_strobe);
254*4882a593Smuzhiyun z_writel(0, zep->board_base);
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun /* Blizzard 1230/60 SCSI-IV DMA */
258*4882a593Smuzhiyun
static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/*
	 * Use PIO if transferring message bytes to esp->command_block_dma.
	 * PIO requires a virtual address, so substitute esp->command_block
	 * for addr.
	 */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	/*
	 * Address is loaded shifted right by one; the top bit (DMA_WRITE)
	 * is set for a send, cleared for a receive.
	 */
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	/* Load the DMA address one byte at a time, MSB first. */
	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	/* Program the ESP transfer counter, then start the SCSI command. */
	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun /* Blizzard 1230-II DMA */
309*4882a593Smuzhiyun
static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	/*
	 * Address is loaded shifted right by one; the top bit (DMA_WRITE)
	 * is set for a send, cleared for a receive.
	 */
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	/* MSB goes to the latch, the rest byte-wise into the address port. */
	writeb((addr >> 24) & 0xff, &dregs->dma_latch);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr);
	writeb(addr & 0xff, &dregs->dma_addr);

	/* Program the ESP transfer counter, then start the SCSI command. */
	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun /* Blizzard 2060 DMA */
354*4882a593Smuzhiyun
static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	if (write)
		/* DMA receive */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
	else
		/* DMA send */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);

	/*
	 * Address is loaded shifted right by one; the top bit (DMA_WRITE)
	 * is set for a send, cleared for a receive.
	 */
	addr >>= 1;
	if (write)
		addr &= ~(DMA_WRITE);
	else
		addr |= DMA_WRITE;

	/* Load the DMA address, LSB first on this board. */
	writeb(addr & 0xff, &dregs->dma_addr3);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);

	/* Program the ESP transfer counter, then start the SCSI command. */
	scsi_esp_cmd(esp, ESP_CMD_DMA);
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	scsi_esp_cmd(esp, cmd);
}
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* Cyberstorm I DMA */
399*4882a593Smuzhiyun
static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct cyber_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	/* Program the ESP transfer counter. */
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive: low address bit cleared */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send: low address bit set */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	/* Load the DMA address one byte at a time, MSB first. */
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	/* Mirror the direction in the shadow control register copy. */
	if (write)
		*ctrl_data &= ~(CYBER_DMA_WRITE);
	else
		*ctrl_data |= CYBER_DMA_WRITE;

	*ctrl_data &= ~(CYBER_DMA_Z3);	/* Z2, do 16 bit DMA */

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}
449*4882a593Smuzhiyun
450*4882a593Smuzhiyun /* Cyberstorm II DMA */
451*4882a593Smuzhiyun
static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	/* Program the ESP transfer counter. */
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive: low address bit cleared */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send: low address bit set */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	/* Load the DMA address one byte at a time, MSB first. */
	writeb((addr >> 24) & 0xff, &dregs->dma_addr0);
	writeb((addr >> 16) & 0xff, &dregs->dma_addr1);
	writeb((addr >> 8) & 0xff, &dregs->dma_addr2);
	writeb(addr & 0xff, &dregs->dma_addr3);

	scsi_esp_cmd(esp, cmd);
}
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun /* Fastlane DMA */
492*4882a593Smuzhiyun
static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
		u32 esp_count, u32 dma_count, int write, u8 cmd)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
	struct fastlane_dma_registers __iomem *dregs = esp->dma_regs;
	u8 phase = esp->sreg & ESP_STAT_PMASK;
	unsigned char *ctrl_data = &zep->ctrl_data;

	/* Use PIO if transferring message bytes to esp->command_block_dma */
	if (phase == ESP_MIP && addr == esp->command_block_dma) {
		esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
				 dma_count, write, cmd);
		return;
	}

	/* Clear the results of a possible prior esp->ops->send_dma_cmd() */
	esp->send_cmd_error = 0;
	esp->send_cmd_residual = 0;

	/* Program the ESP transfer counter. */
	zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);

	if (write) {
		/* DMA receive: low address bit cleared */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_FROM_DEVICE);
		addr &= ~(1);
	} else {
		/* DMA send: low address bit set */
		dma_sync_single_for_device(esp->dev, addr, esp_count,
					   DMA_TO_DEVICE);
		addr |= 1;
	}

	/*
	 * Strobe the clear register, then set up the DMA address by
	 * writing it as data into the board's address window (low 24
	 * address bits select the window offset).
	 */
	writeb(0, &dregs->clear_strobe);
	z_writel(addr, ((addr & 0x00ffffff) + zep->board_base));

	/* Enable DMA in the shadow control byte; add the write bit on send. */
	if (write) {
		*ctrl_data = (*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE;
	} else {
		*ctrl_data = ((*ctrl_data & FASTLANE_DMA_MASK) |
				FASTLANE_DMA_ENABLE |
				FASTLANE_DMA_WRITE);
	}

	writeb(*ctrl_data, &dregs->ctrl_reg);

	scsi_esp_cmd(esp, cmd);
}
542*4882a593Smuzhiyun
zorro_esp_dma_error(struct esp * esp)543*4882a593Smuzhiyun static int zorro_esp_dma_error(struct esp *esp)
544*4882a593Smuzhiyun {
545*4882a593Smuzhiyun return esp->send_cmd_error;
546*4882a593Smuzhiyun }
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun /* per-board ESP driver ops */
549*4882a593Smuzhiyun
550*4882a593Smuzhiyun static const struct esp_driver_ops blz1230_esp_ops = {
551*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
552*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
553*4882a593Smuzhiyun .irq_pending = zorro_esp_irq_pending,
554*4882a593Smuzhiyun .dma_length_limit = zorro_esp_dma_length_limit,
555*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
556*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
557*4882a593Smuzhiyun .dma_invalidate = zorro_esp_dma_invalidate,
558*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_blz1230_dma_cmd,
559*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
560*4882a593Smuzhiyun };
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun static const struct esp_driver_ops blz1230II_esp_ops = {
563*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
564*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
565*4882a593Smuzhiyun .irq_pending = zorro_esp_irq_pending,
566*4882a593Smuzhiyun .dma_length_limit = zorro_esp_dma_length_limit,
567*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
568*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
569*4882a593Smuzhiyun .dma_invalidate = zorro_esp_dma_invalidate,
570*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_blz1230II_dma_cmd,
571*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
572*4882a593Smuzhiyun };
573*4882a593Smuzhiyun
574*4882a593Smuzhiyun static const struct esp_driver_ops blz2060_esp_ops = {
575*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
576*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
577*4882a593Smuzhiyun .irq_pending = zorro_esp_irq_pending,
578*4882a593Smuzhiyun .dma_length_limit = zorro_esp_dma_length_limit,
579*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
580*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
581*4882a593Smuzhiyun .dma_invalidate = zorro_esp_dma_invalidate,
582*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_blz2060_dma_cmd,
583*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
584*4882a593Smuzhiyun };
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun static const struct esp_driver_ops cyber_esp_ops = {
587*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
588*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
589*4882a593Smuzhiyun .irq_pending = cyber_esp_irq_pending,
590*4882a593Smuzhiyun .dma_length_limit = zorro_esp_dma_length_limit,
591*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
592*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
593*4882a593Smuzhiyun .dma_invalidate = zorro_esp_dma_invalidate,
594*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_cyber_dma_cmd,
595*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
596*4882a593Smuzhiyun };
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun static const struct esp_driver_ops cyberII_esp_ops = {
599*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
600*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
601*4882a593Smuzhiyun .irq_pending = zorro_esp_irq_pending,
602*4882a593Smuzhiyun .dma_length_limit = zorro_esp_dma_length_limit,
603*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
604*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
605*4882a593Smuzhiyun .dma_invalidate = zorro_esp_dma_invalidate,
606*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_cyberII_dma_cmd,
607*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
608*4882a593Smuzhiyun };
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun static const struct esp_driver_ops fastlane_esp_ops = {
611*4882a593Smuzhiyun .esp_write8 = zorro_esp_write8,
612*4882a593Smuzhiyun .esp_read8 = zorro_esp_read8,
613*4882a593Smuzhiyun .irq_pending = fastlane_esp_irq_pending,
614*4882a593Smuzhiyun .dma_length_limit = fastlane_esp_dma_length_limit,
615*4882a593Smuzhiyun .reset_dma = zorro_esp_reset_dma,
616*4882a593Smuzhiyun .dma_drain = zorro_esp_dma_drain,
617*4882a593Smuzhiyun .dma_invalidate = fastlane_esp_dma_invalidate,
618*4882a593Smuzhiyun .send_dma_cmd = zorro_esp_send_fastlane_dma_cmd,
619*4882a593Smuzhiyun .dma_error = zorro_esp_dma_error,
620*4882a593Smuzhiyun };
621*4882a593Smuzhiyun
622*4882a593Smuzhiyun /* Zorro driver config data */
623*4882a593Smuzhiyun
/* Per-board configuration record matched against the Zorro device table. */
struct zorro_driver_data {
	const char *name;		/* board name for logging */
	unsigned long offset;		/* ESP register offset */
	unsigned long dma_offset;	/* DMA register offset */
	int absolute;			/* offset is absolute address */
	int scsi_option;
	const struct esp_driver_ops *esp_ops;
};
632*4882a593Smuzhiyun
633*4882a593Smuzhiyun /* board types */
634*4882a593Smuzhiyun
/* board types — indices into zorro_esp_boards[] */

enum {
	ZORRO_BLZ1230,		/* Blizzard 1230 IV */
	ZORRO_BLZ1230II,	/* Blizzard 1230 II (shares Zorro ID with Fastlane) */
	ZORRO_BLZ2060,		/* Blizzard 2060 */
	ZORRO_CYBER,		/* CyberStorm I */
	ZORRO_CYBERII,		/* CyberStorm II */
	ZORRO_FASTLANE,		/* Fastlane Z3 (selected by probe-time fixup) */
};
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun /* per-board config data */
645*4882a593Smuzhiyun
/* per-board config data */

static const struct zorro_driver_data zorro_esp_boards[] = {
	[ZORRO_BLZ1230] = {
		.name		= "Blizzard 1230",
		.offset		= 0x8000,
		.dma_offset	= 0x10000,
		.scsi_option	= 1,	/* SCSI module is optional on this board */
		.esp_ops	= &blz1230_esp_ops,
	},
	[ZORRO_BLZ1230II] = {
		.name		= "Blizzard 1230II",
		.offset		= 0x10000,
		.dma_offset	= 0x10021,
		.scsi_option	= 1,	/* SCSI module is optional on this board */
		.esp_ops	= &blz1230II_esp_ops,
	},
	[ZORRO_BLZ2060] = {
		.name		= "Blizzard 2060",
		.offset		= 0x1ff00,
		.dma_offset	= 0x1ffe0,
		.esp_ops	= &blz2060_esp_ops,
	},
	[ZORRO_CYBER] = {
		.name		= "CyberStormI",
		.offset		= 0xf400,
		.dma_offset	= 0xf800,
		.esp_ops	= &cyber_esp_ops,
	},
	[ZORRO_CYBERII] = {
		.name		= "CyberStormII",
		.offset		= 0x1ff03,
		.dma_offset	= 0x1ff43,
		.scsi_option	= 1,	/* SCSI module is optional on this board */
		.esp_ops	= &cyberII_esp_ops,
	},
	[ZORRO_FASTLANE] = {
		.name		= "Fastlane",
		.offset		= 0x1000001,
		.dma_offset	= 0x1000041,
		.esp_ops	= &fastlane_esp_ops,
	},
};
687*4882a593Smuzhiyun
/*
 * Zorro bus match table. driver_data indexes zorro_esp_boards[].
 * Note: Blizzard 1230 II and Fastlane share one Zorro ID; probe
 * disambiguates by bus type (Zorro II vs Zorro III).
 */
static const struct zorro_device_id zorro_esp_zorro_tbl[] = {
	{	/* Blizzard 1230 IV */
		.id = ZORRO_ID(PHASE5, 0x11, 0),
		.driver_data = ZORRO_BLZ1230,
	},
	{	/* Blizzard 1230 II (Zorro II) or Fastlane (Zorro III) */
		.id = ZORRO_ID(PHASE5, 0x0B, 0),
		.driver_data = ZORRO_BLZ1230II,
	},
	{	/* Blizzard 2060 */
		.id = ZORRO_ID(PHASE5, 0x18, 0),
		.driver_data = ZORRO_BLZ2060,
	},
	{	/* Cyberstorm */
		.id = ZORRO_ID(PHASE5, 0x0C, 0),
		.driver_data = ZORRO_CYBER,
	},
	{	/* Cyberstorm II */
		.id = ZORRO_ID(PHASE5, 0x19, 0),
		.driver_data = ZORRO_CYBERII,
	},
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, zorro_esp_zorro_tbl);
712*4882a593Smuzhiyun
zorro_esp_probe(struct zorro_dev * z,const struct zorro_device_id * ent)713*4882a593Smuzhiyun static int zorro_esp_probe(struct zorro_dev *z,
714*4882a593Smuzhiyun const struct zorro_device_id *ent)
715*4882a593Smuzhiyun {
716*4882a593Smuzhiyun struct scsi_host_template *tpnt = &scsi_esp_template;
717*4882a593Smuzhiyun struct Scsi_Host *host;
718*4882a593Smuzhiyun struct esp *esp;
719*4882a593Smuzhiyun const struct zorro_driver_data *zdd;
720*4882a593Smuzhiyun struct zorro_esp_priv *zep;
721*4882a593Smuzhiyun unsigned long board, ioaddr, dmaaddr;
722*4882a593Smuzhiyun int err;
723*4882a593Smuzhiyun
724*4882a593Smuzhiyun board = zorro_resource_start(z);
725*4882a593Smuzhiyun zdd = &zorro_esp_boards[ent->driver_data];
726*4882a593Smuzhiyun
727*4882a593Smuzhiyun pr_info("%s found at address 0x%lx.\n", zdd->name, board);
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun zep = kzalloc(sizeof(*zep), GFP_KERNEL);
730*4882a593Smuzhiyun if (!zep) {
731*4882a593Smuzhiyun pr_err("Can't allocate device private data!\n");
732*4882a593Smuzhiyun return -ENOMEM;
733*4882a593Smuzhiyun }
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun /* let's figure out whether we have a Zorro II or Zorro III board */
736*4882a593Smuzhiyun if ((z->rom.er_Type & ERT_TYPEMASK) == ERT_ZORROIII) {
737*4882a593Smuzhiyun if (board > 0xffffff)
738*4882a593Smuzhiyun zep->zorro3 = 1;
739*4882a593Smuzhiyun } else {
740*4882a593Smuzhiyun /*
741*4882a593Smuzhiyun * Even though most of these boards identify as Zorro II,
742*4882a593Smuzhiyun * they are in fact CPU expansion slot boards and have full
743*4882a593Smuzhiyun * access to all of memory. Fix up DMA bitmask here.
744*4882a593Smuzhiyun */
745*4882a593Smuzhiyun z->dev.coherent_dma_mask = DMA_BIT_MASK(32);
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun /*
749*4882a593Smuzhiyun * If Zorro III and ID matches Fastlane, our device table entry
750*4882a593Smuzhiyun * contains data for the Blizzard 1230 II board which does share the
751*4882a593Smuzhiyun * same ID. Fix up device table entry here.
752*4882a593Smuzhiyun * TODO: Some Cyberstom060 boards also share this ID but would need
753*4882a593Smuzhiyun * to use the Cyberstorm I driver data ... we catch this by checking
754*4882a593Smuzhiyun * for presence of ESP chip later, but don't try to fix up yet.
755*4882a593Smuzhiyun */
756*4882a593Smuzhiyun if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
757*4882a593Smuzhiyun pr_info("%s at address 0x%lx is Fastlane Z3, fixing data!\n",
758*4882a593Smuzhiyun zdd->name, board);
759*4882a593Smuzhiyun zdd = &zorro_esp_boards[ZORRO_FASTLANE];
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun
762*4882a593Smuzhiyun if (zdd->absolute) {
763*4882a593Smuzhiyun ioaddr = zdd->offset;
764*4882a593Smuzhiyun dmaaddr = zdd->dma_offset;
765*4882a593Smuzhiyun } else {
766*4882a593Smuzhiyun ioaddr = board + zdd->offset;
767*4882a593Smuzhiyun dmaaddr = board + zdd->dma_offset;
768*4882a593Smuzhiyun }
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun if (!zorro_request_device(z, zdd->name)) {
771*4882a593Smuzhiyun pr_err("cannot reserve region 0x%lx, abort\n",
772*4882a593Smuzhiyun board);
773*4882a593Smuzhiyun err = -EBUSY;
774*4882a593Smuzhiyun goto fail_free_zep;
775*4882a593Smuzhiyun }
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun host = scsi_host_alloc(tpnt, sizeof(struct esp));
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun if (!host) {
780*4882a593Smuzhiyun pr_err("No host detected; board configuration problem?\n");
781*4882a593Smuzhiyun err = -ENOMEM;
782*4882a593Smuzhiyun goto fail_release_device;
783*4882a593Smuzhiyun }
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun host->base = ioaddr;
786*4882a593Smuzhiyun host->this_id = 7;
787*4882a593Smuzhiyun
788*4882a593Smuzhiyun esp = shost_priv(host);
789*4882a593Smuzhiyun esp->host = host;
790*4882a593Smuzhiyun esp->dev = &z->dev;
791*4882a593Smuzhiyun
792*4882a593Smuzhiyun esp->scsi_id = host->this_id;
793*4882a593Smuzhiyun esp->scsi_id_mask = (1 << esp->scsi_id);
794*4882a593Smuzhiyun
795*4882a593Smuzhiyun esp->cfreq = 40000000;
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun zep->esp = esp;
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun dev_set_drvdata(esp->dev, zep);
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun /* additional setup required for Fastlane */
802*4882a593Smuzhiyun if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
803*4882a593Smuzhiyun /* map full address space up to ESP base for DMA */
804*4882a593Smuzhiyun zep->board_base = ioremap(board, FASTLANE_ESP_ADDR - 1);
805*4882a593Smuzhiyun if (!zep->board_base) {
806*4882a593Smuzhiyun pr_err("Cannot allocate board address space\n");
807*4882a593Smuzhiyun err = -ENOMEM;
808*4882a593Smuzhiyun goto fail_free_host;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun /* initialize DMA control shadow register */
811*4882a593Smuzhiyun zep->ctrl_data = (FASTLANE_DMA_FCODE |
812*4882a593Smuzhiyun FASTLANE_DMA_EDI | FASTLANE_DMA_ESI);
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun
815*4882a593Smuzhiyun esp->ops = zdd->esp_ops;
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun if (ioaddr > 0xffffff)
818*4882a593Smuzhiyun esp->regs = ioremap(ioaddr, 0x20);
819*4882a593Smuzhiyun else
820*4882a593Smuzhiyun /* ZorroII address space remapped nocache by early startup */
821*4882a593Smuzhiyun esp->regs = ZTWO_VADDR(ioaddr);
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun if (!esp->regs) {
824*4882a593Smuzhiyun err = -ENOMEM;
825*4882a593Smuzhiyun goto fail_unmap_fastlane;
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun esp->fifo_reg = esp->regs + ESP_FDATA * 4;
829*4882a593Smuzhiyun
830*4882a593Smuzhiyun /* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
831*4882a593Smuzhiyun if (zdd->scsi_option) {
832*4882a593Smuzhiyun zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
833*4882a593Smuzhiyun if (zorro_esp_read8(esp, ESP_CFG1) != (ESP_CONFIG1_PENABLE|7)) {
834*4882a593Smuzhiyun err = -ENODEV;
835*4882a593Smuzhiyun goto fail_unmap_regs;
836*4882a593Smuzhiyun }
837*4882a593Smuzhiyun }
838*4882a593Smuzhiyun
839*4882a593Smuzhiyun if (zep->zorro3) {
840*4882a593Smuzhiyun /*
841*4882a593Smuzhiyun * Only Fastlane Z3 for now - add switch for correct struct
842*4882a593Smuzhiyun * dma_registers size if adding any more
843*4882a593Smuzhiyun */
844*4882a593Smuzhiyun esp->dma_regs = ioremap(dmaaddr,
845*4882a593Smuzhiyun sizeof(struct fastlane_dma_registers));
846*4882a593Smuzhiyun } else
847*4882a593Smuzhiyun /* ZorroII address space remapped nocache by early startup */
848*4882a593Smuzhiyun esp->dma_regs = ZTWO_VADDR(dmaaddr);
849*4882a593Smuzhiyun
850*4882a593Smuzhiyun if (!esp->dma_regs) {
851*4882a593Smuzhiyun err = -ENOMEM;
852*4882a593Smuzhiyun goto fail_unmap_regs;
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun esp->command_block = dma_alloc_coherent(esp->dev, 16,
856*4882a593Smuzhiyun &esp->command_block_dma,
857*4882a593Smuzhiyun GFP_KERNEL);
858*4882a593Smuzhiyun
859*4882a593Smuzhiyun if (!esp->command_block) {
860*4882a593Smuzhiyun err = -ENOMEM;
861*4882a593Smuzhiyun goto fail_unmap_dma_regs;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun host->irq = IRQ_AMIGA_PORTS;
865*4882a593Smuzhiyun err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
866*4882a593Smuzhiyun "Amiga Zorro ESP", esp);
867*4882a593Smuzhiyun if (err < 0) {
868*4882a593Smuzhiyun err = -ENODEV;
869*4882a593Smuzhiyun goto fail_free_command_block;
870*4882a593Smuzhiyun }
871*4882a593Smuzhiyun
872*4882a593Smuzhiyun /* register the chip */
873*4882a593Smuzhiyun err = scsi_esp_register(esp);
874*4882a593Smuzhiyun
875*4882a593Smuzhiyun if (err) {
876*4882a593Smuzhiyun err = -ENOMEM;
877*4882a593Smuzhiyun goto fail_free_irq;
878*4882a593Smuzhiyun }
879*4882a593Smuzhiyun
880*4882a593Smuzhiyun return 0;
881*4882a593Smuzhiyun
882*4882a593Smuzhiyun fail_free_irq:
883*4882a593Smuzhiyun free_irq(host->irq, esp);
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun fail_free_command_block:
886*4882a593Smuzhiyun dma_free_coherent(esp->dev, 16,
887*4882a593Smuzhiyun esp->command_block,
888*4882a593Smuzhiyun esp->command_block_dma);
889*4882a593Smuzhiyun
890*4882a593Smuzhiyun fail_unmap_dma_regs:
891*4882a593Smuzhiyun if (zep->zorro3)
892*4882a593Smuzhiyun iounmap(esp->dma_regs);
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun fail_unmap_regs:
895*4882a593Smuzhiyun if (ioaddr > 0xffffff)
896*4882a593Smuzhiyun iounmap(esp->regs);
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun fail_unmap_fastlane:
899*4882a593Smuzhiyun if (zep->zorro3)
900*4882a593Smuzhiyun iounmap(zep->board_base);
901*4882a593Smuzhiyun
902*4882a593Smuzhiyun fail_free_host:
903*4882a593Smuzhiyun scsi_host_put(host);
904*4882a593Smuzhiyun
905*4882a593Smuzhiyun fail_release_device:
906*4882a593Smuzhiyun zorro_release_device(z);
907*4882a593Smuzhiyun
908*4882a593Smuzhiyun fail_free_zep:
909*4882a593Smuzhiyun kfree(zep);
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun return err;
912*4882a593Smuzhiyun }
913*4882a593Smuzhiyun
/*
 * zorro_esp_remove() - tear down a host set up by zorro_esp_probe().
 * @z: the Zorro device being removed
 *
 * Releases resources in strict reverse order of acquisition:
 * unregister from the ESP core first (stops new I/O), then free the
 * IRQ and DMA command block, unmap register windows, drop the host
 * reference, release the Zorro region and free the private data.
 */
static void zorro_esp_remove(struct zorro_dev *z)
{
	struct zorro_esp_priv *zep = dev_get_drvdata(&z->dev);
	struct esp *esp	= zep->esp;
	struct Scsi_Host *host = esp->host;

	scsi_esp_unregister(esp);

	free_irq(host->irq, esp);
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);

	/* Zorro III windows were ioremap()ed in probe; Zorro II were not */
	if (zep->zorro3) {
		iounmap(zep->board_base);
		iounmap(esp->dma_regs);
	}

	if (host->base > 0xffffff)
		iounmap(esp->regs);

	scsi_host_put(host);

	zorro_release_device(z);

	kfree(zep);
}
941*4882a593Smuzhiyun
/* Zorro bus driver glue: match table plus probe/remove entry points */
static struct zorro_driver zorro_esp_driver = {
	.name	  = KBUILD_MODNAME,
	.id_table = zorro_esp_zorro_tbl,
	.probe	  = zorro_esp_probe,
	.remove	  = zorro_esp_remove,
};
948*4882a593Smuzhiyun
zorro_esp_scsi_init(void)949*4882a593Smuzhiyun static int __init zorro_esp_scsi_init(void)
950*4882a593Smuzhiyun {
951*4882a593Smuzhiyun return zorro_register_driver(&zorro_esp_driver);
952*4882a593Smuzhiyun }
953*4882a593Smuzhiyun
zorro_esp_scsi_exit(void)954*4882a593Smuzhiyun static void __exit zorro_esp_scsi_exit(void)
955*4882a593Smuzhiyun {
956*4882a593Smuzhiyun zorro_unregister_driver(&zorro_esp_driver);
957*4882a593Smuzhiyun }
958*4882a593Smuzhiyun
/* Standard module entry/exit hookup */
module_init(zorro_esp_scsi_init);
module_exit(zorro_esp_scsi_exit);
961