1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 1996-2004 Russell King.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Please note that this platform does not support 32-bit IDE IO.
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/string.h>
9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/ioport.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun #include <linux/blkdev.h>
13*4882a593Smuzhiyun #include <linux/errno.h>
14*4882a593Smuzhiyun #include <linux/ide.h>
15*4882a593Smuzhiyun #include <linux/dma-mapping.h>
16*4882a593Smuzhiyun #include <linux/device.h>
17*4882a593Smuzhiyun #include <linux/init.h>
18*4882a593Smuzhiyun #include <linux/scatterlist.h>
19*4882a593Smuzhiyun #include <linux/io.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include <asm/dma.h>
22*4882a593Smuzhiyun #include <asm/ecard.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #define DRV_NAME "icside"
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #define ICS_IDENT_OFFSET 0x2280
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #define ICS_ARCIN_V5_INTRSTAT 0x0000
29*4882a593Smuzhiyun #define ICS_ARCIN_V5_INTROFFSET 0x0004
30*4882a593Smuzhiyun #define ICS_ARCIN_V5_IDEOFFSET 0x2800
31*4882a593Smuzhiyun #define ICS_ARCIN_V5_IDEALTOFFSET 0x2b80
32*4882a593Smuzhiyun #define ICS_ARCIN_V5_IDESTEPPING 6
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun #define ICS_ARCIN_V6_IDEOFFSET_1 0x2000
35*4882a593Smuzhiyun #define ICS_ARCIN_V6_INTROFFSET_1 0x2200
36*4882a593Smuzhiyun #define ICS_ARCIN_V6_INTRSTAT_1 0x2290
37*4882a593Smuzhiyun #define ICS_ARCIN_V6_IDEALTOFFSET_1 0x2380
38*4882a593Smuzhiyun #define ICS_ARCIN_V6_IDEOFFSET_2 0x3000
39*4882a593Smuzhiyun #define ICS_ARCIN_V6_INTROFFSET_2 0x3200
40*4882a593Smuzhiyun #define ICS_ARCIN_V6_INTRSTAT_2 0x3290
41*4882a593Smuzhiyun #define ICS_ARCIN_V6_IDEALTOFFSET_2 0x3380
42*4882a593Smuzhiyun #define ICS_ARCIN_V6_IDESTEPPING 6
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun struct cardinfo {
45*4882a593Smuzhiyun unsigned int dataoffset;
46*4882a593Smuzhiyun unsigned int ctrloffset;
47*4882a593Smuzhiyun unsigned int stepping;
48*4882a593Smuzhiyun };
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun static struct cardinfo icside_cardinfo_v5 = {
51*4882a593Smuzhiyun .dataoffset = ICS_ARCIN_V5_IDEOFFSET,
52*4882a593Smuzhiyun .ctrloffset = ICS_ARCIN_V5_IDEALTOFFSET,
53*4882a593Smuzhiyun .stepping = ICS_ARCIN_V5_IDESTEPPING,
54*4882a593Smuzhiyun };
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun static struct cardinfo icside_cardinfo_v6_1 = {
57*4882a593Smuzhiyun .dataoffset = ICS_ARCIN_V6_IDEOFFSET_1,
58*4882a593Smuzhiyun .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_1,
59*4882a593Smuzhiyun .stepping = ICS_ARCIN_V6_IDESTEPPING,
60*4882a593Smuzhiyun };
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun static struct cardinfo icside_cardinfo_v6_2 = {
63*4882a593Smuzhiyun .dataoffset = ICS_ARCIN_V6_IDEOFFSET_2,
64*4882a593Smuzhiyun .ctrloffset = ICS_ARCIN_V6_IDEALTOFFSET_2,
65*4882a593Smuzhiyun .stepping = ICS_ARCIN_V6_IDESTEPPING,
66*4882a593Smuzhiyun };
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun struct icside_state {
69*4882a593Smuzhiyun unsigned int channel;
70*4882a593Smuzhiyun unsigned int enabled;
71*4882a593Smuzhiyun void __iomem *irq_port;
72*4882a593Smuzhiyun void __iomem *ioc_base;
73*4882a593Smuzhiyun unsigned int sel;
74*4882a593Smuzhiyun unsigned int type;
75*4882a593Smuzhiyun struct ide_host *host;
76*4882a593Smuzhiyun };
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun #define ICS_TYPE_A3IN 0
79*4882a593Smuzhiyun #define ICS_TYPE_A3USER 1
80*4882a593Smuzhiyun #define ICS_TYPE_V6 3
81*4882a593Smuzhiyun #define ICS_TYPE_V5 15
82*4882a593Smuzhiyun #define ICS_TYPE_NOTYPE ((unsigned int)-1)
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun /* ---------------- Version 5 PCB Support Functions --------------------- */
85*4882a593Smuzhiyun /* Prototype: icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
86*4882a593Smuzhiyun * Purpose : enable interrupts from card
87*4882a593Smuzhiyun */
icside_irqenable_arcin_v5(struct expansion_card * ec,int irqnr)88*4882a593Smuzhiyun static void icside_irqenable_arcin_v5 (struct expansion_card *ec, int irqnr)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun struct icside_state *state = ec->irq_data;
91*4882a593Smuzhiyun
92*4882a593Smuzhiyun writeb(0, state->irq_port + ICS_ARCIN_V5_INTROFFSET);
93*4882a593Smuzhiyun }
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun /* Prototype: icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
96*4882a593Smuzhiyun * Purpose : disable interrupts from card
97*4882a593Smuzhiyun */
icside_irqdisable_arcin_v5(struct expansion_card * ec,int irqnr)98*4882a593Smuzhiyun static void icside_irqdisable_arcin_v5 (struct expansion_card *ec, int irqnr)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun struct icside_state *state = ec->irq_data;
101*4882a593Smuzhiyun
102*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V5_INTROFFSET);
103*4882a593Smuzhiyun }
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun static const expansioncard_ops_t icside_ops_arcin_v5 = {
106*4882a593Smuzhiyun .irqenable = icside_irqenable_arcin_v5,
107*4882a593Smuzhiyun .irqdisable = icside_irqdisable_arcin_v5,
108*4882a593Smuzhiyun };
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun /* ---------------- Version 6 PCB Support Functions --------------------- */
112*4882a593Smuzhiyun /* Prototype: icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
113*4882a593Smuzhiyun * Purpose : enable interrupts from card
114*4882a593Smuzhiyun */
icside_irqenable_arcin_v6(struct expansion_card * ec,int irqnr)115*4882a593Smuzhiyun static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun struct icside_state *state = ec->irq_data;
118*4882a593Smuzhiyun void __iomem *base = state->irq_port;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun state->enabled = 1;
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun switch (state->channel) {
123*4882a593Smuzhiyun case 0:
124*4882a593Smuzhiyun writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
125*4882a593Smuzhiyun readb(base + ICS_ARCIN_V6_INTROFFSET_2);
126*4882a593Smuzhiyun break;
127*4882a593Smuzhiyun case 1:
128*4882a593Smuzhiyun writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
129*4882a593Smuzhiyun readb(base + ICS_ARCIN_V6_INTROFFSET_1);
130*4882a593Smuzhiyun break;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun /* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
135*4882a593Smuzhiyun * Purpose : disable interrupts from card
136*4882a593Smuzhiyun */
icside_irqdisable_arcin_v6(struct expansion_card * ec,int irqnr)137*4882a593Smuzhiyun static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun struct icside_state *state = ec->irq_data;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun state->enabled = 0;
142*4882a593Smuzhiyun
143*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
144*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /* Prototype: icside_irqprobe(struct expansion_card *ec)
148*4882a593Smuzhiyun * Purpose : detect an active interrupt from card
149*4882a593Smuzhiyun */
icside_irqpending_arcin_v6(struct expansion_card * ec)150*4882a593Smuzhiyun static int icside_irqpending_arcin_v6(struct expansion_card *ec)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun struct icside_state *state = ec->irq_data;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun return readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_1) & 1 ||
155*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTRSTAT_2) & 1;
156*4882a593Smuzhiyun }
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun static const expansioncard_ops_t icside_ops_arcin_v6 = {
159*4882a593Smuzhiyun .irqenable = icside_irqenable_arcin_v6,
160*4882a593Smuzhiyun .irqdisable = icside_irqdisable_arcin_v6,
161*4882a593Smuzhiyun .irqpending = icside_irqpending_arcin_v6,
162*4882a593Smuzhiyun };
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun * Handle routing of interrupts. This is called before
166*4882a593Smuzhiyun * we write the command to the drive.
167*4882a593Smuzhiyun */
icside_maskproc(ide_drive_t * drive,int mask)168*4882a593Smuzhiyun static void icside_maskproc(ide_drive_t *drive, int mask)
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun ide_hwif_t *hwif = drive->hwif;
171*4882a593Smuzhiyun struct expansion_card *ec = ECARD_DEV(hwif->dev);
172*4882a593Smuzhiyun struct icside_state *state = ecard_get_drvdata(ec);
173*4882a593Smuzhiyun unsigned long flags;
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun local_irq_save(flags);
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun state->channel = hwif->channel;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun if (state->enabled && !mask) {
180*4882a593Smuzhiyun switch (hwif->channel) {
181*4882a593Smuzhiyun case 0:
182*4882a593Smuzhiyun writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
183*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
184*4882a593Smuzhiyun break;
185*4882a593Smuzhiyun case 1:
186*4882a593Smuzhiyun writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
187*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
188*4882a593Smuzhiyun break;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun } else {
191*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
192*4882a593Smuzhiyun readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
193*4882a593Smuzhiyun }
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun local_irq_restore(flags);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun static const struct ide_port_ops icside_v6_no_dma_port_ops = {
199*4882a593Smuzhiyun .maskproc = icside_maskproc,
200*4882a593Smuzhiyun };
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
203*4882a593Smuzhiyun /*
204*4882a593Smuzhiyun * SG-DMA support.
205*4882a593Smuzhiyun *
206*4882a593Smuzhiyun * Similar to the BM-DMA, but we use the RiscPCs IOMD DMA controllers.
207*4882a593Smuzhiyun * There is only one DMA controller per card, which means that only
208*4882a593Smuzhiyun * one drive can be accessed at one time. NOTE! We do not enforce that
209*4882a593Smuzhiyun * here, but we rely on the main IDE driver spotting that both
210*4882a593Smuzhiyun * interfaces use the same IRQ, which should guarantee this.
211*4882a593Smuzhiyun */
212*4882a593Smuzhiyun
213*4882a593Smuzhiyun /*
214*4882a593Smuzhiyun * Configure the IOMD to give the appropriate timings for the transfer
215*4882a593Smuzhiyun * mode being requested. We take the advice of the ATA standards, and
216*4882a593Smuzhiyun * calculate the cycle time based on the transfer mode, and the EIDE
217*4882a593Smuzhiyun * MW DMA specs that the drive provides in the IDENTIFY command.
218*4882a593Smuzhiyun *
219*4882a593Smuzhiyun * We have the following IOMD DMA modes to choose from:
220*4882a593Smuzhiyun *
221*4882a593Smuzhiyun * Type Active Recovery Cycle
222*4882a593Smuzhiyun * A 250 (250) 312 (550) 562 (800)
223*4882a593Smuzhiyun * B 187 250 437
224*4882a593Smuzhiyun * C 125 (125) 125 (375) 250 (500)
225*4882a593Smuzhiyun * D 62 125 187
226*4882a593Smuzhiyun *
227*4882a593Smuzhiyun * (figures in brackets are actual measured timings)
228*4882a593Smuzhiyun *
229*4882a593Smuzhiyun * However, we also need to take care of the read/write active and
230*4882a593Smuzhiyun * recovery timings:
231*4882a593Smuzhiyun *
232*4882a593Smuzhiyun * Read Write
233*4882a593Smuzhiyun * Mode Active -- Recovery -- Cycle IOMD type
234*4882a593Smuzhiyun * MW0 215 50 215 480 A
235*4882a593Smuzhiyun * MW1 80 50 50 150 C
236*4882a593Smuzhiyun * MW2 70 25 25 120 C
237*4882a593Smuzhiyun */
static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	const u8 xfer_mode = drive->dma_mode;
	unsigned long cycle_time = 0;
	int use_dma_info = 0;

	/*
	 * Base cycle time per transfer mode.  MW DMA 1 and 2 share the
	 * same 250ns starting point and may be tightened from the
	 * drive's IDENTIFY data; everything slower starts at 480ns.
	 */
	switch (xfer_mode) {
	case XFER_MW_DMA_2:
	case XFER_MW_DMA_1:
		cycle_time = 250;
		use_dma_info = 1;
		break;

	case XFER_MW_DMA_0:
	case XFER_SW_DMA_2:
	case XFER_SW_DMA_1:
	case XFER_SW_DMA_0:
		cycle_time = 480;
		break;
	}

	/*
	 * For MW_DMA_1/2, honour the drive's own minimum MW DMA cycle
	 * time from the IDENTIFY data if it is slower than our default.
	 */
	if (use_dma_info && drive->id[ATA_ID_EIDE_DMA_TIME] > cycle_time)
		cycle_time = drive->id[ATA_ID_EIDE_DMA_TIME];

	/* Stash the cycle time for icside_dma_setup() to program later. */
	ide_set_drivedata(drive, (void *)cycle_time);

	printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
	       drive->name, ide_xfer_verbose(xfer_mode),
	       2000 / (cycle_time ? cycle_time : (unsigned long) -1));
}
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun static const struct ide_port_ops icside_v6_port_ops = {
281*4882a593Smuzhiyun .set_dma_mode = icside_set_dma_mode,
282*4882a593Smuzhiyun .maskproc = icside_maskproc,
283*4882a593Smuzhiyun };
284*4882a593Smuzhiyun
icside_dma_host_set(ide_drive_t * drive,int on)285*4882a593Smuzhiyun static void icside_dma_host_set(ide_drive_t *drive, int on)
286*4882a593Smuzhiyun {
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun
static int icside_dma_end(ide_drive_t *drive)
{
	struct expansion_card *ec = ECARD_DEV(drive->hwif->dev);
	int residue;

	/* Stop the engine first, then see whether anything was left over. */
	disable_dma(ec->dma);
	residue = get_dma_residue(ec->dma);

	/* Non-zero residue means the transfer did not complete. */
	return residue != 0;
}
298*4882a593Smuzhiyun
icside_dma_start(ide_drive_t * drive)299*4882a593Smuzhiyun static void icside_dma_start(ide_drive_t *drive)
300*4882a593Smuzhiyun {
301*4882a593Smuzhiyun ide_hwif_t *hwif = drive->hwif;
302*4882a593Smuzhiyun struct expansion_card *ec = ECARD_DEV(hwif->dev);
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun /* We can not enable DMA on both channels simultaneously. */
305*4882a593Smuzhiyun BUG_ON(dma_channel_active(ec->dma));
306*4882a593Smuzhiyun enable_dma(ec->dma);
307*4882a593Smuzhiyun }
308*4882a593Smuzhiyun
icside_dma_setup(ide_drive_t * drive,struct ide_cmd * cmd)309*4882a593Smuzhiyun static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
310*4882a593Smuzhiyun {
311*4882a593Smuzhiyun ide_hwif_t *hwif = drive->hwif;
312*4882a593Smuzhiyun struct expansion_card *ec = ECARD_DEV(hwif->dev);
313*4882a593Smuzhiyun struct icside_state *state = ecard_get_drvdata(ec);
314*4882a593Smuzhiyun unsigned int dma_mode;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun if (cmd->tf_flags & IDE_TFLAG_WRITE)
317*4882a593Smuzhiyun dma_mode = DMA_MODE_WRITE;
318*4882a593Smuzhiyun else
319*4882a593Smuzhiyun dma_mode = DMA_MODE_READ;
320*4882a593Smuzhiyun
321*4882a593Smuzhiyun /*
322*4882a593Smuzhiyun * We can not enable DMA on both channels.
323*4882a593Smuzhiyun */
324*4882a593Smuzhiyun BUG_ON(dma_channel_active(ec->dma));
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun /*
327*4882a593Smuzhiyun * Ensure that we have the right interrupt routed.
328*4882a593Smuzhiyun */
329*4882a593Smuzhiyun icside_maskproc(drive, 0);
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun /*
332*4882a593Smuzhiyun * Route the DMA signals to the correct interface.
333*4882a593Smuzhiyun */
334*4882a593Smuzhiyun writeb(state->sel | hwif->channel, state->ioc_base);
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun /*
337*4882a593Smuzhiyun * Select the correct timing for this drive.
338*4882a593Smuzhiyun */
339*4882a593Smuzhiyun set_dma_speed(ec->dma, (unsigned long)ide_get_drivedata(drive));
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun /*
342*4882a593Smuzhiyun * Tell the DMA engine about the SG table and
343*4882a593Smuzhiyun * data direction.
344*4882a593Smuzhiyun */
345*4882a593Smuzhiyun set_dma_sg(ec->dma, hwif->sg_table, cmd->sg_nents);
346*4882a593Smuzhiyun set_dma_mode(ec->dma, dma_mode);
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun return 0;
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
static int icside_dma_test_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct expansion_card *ec = ECARD_DEV(hwif->dev);
	struct icside_state *state = ecard_get_drvdata(ec);
	unsigned int stat_offset;

	/* Pick the interrupt status register for this channel. */
	if (hwif->channel)
		stat_offset = ICS_ARCIN_V6_INTRSTAT_2;
	else
		stat_offset = ICS_ARCIN_V6_INTRSTAT_1;

	return readb(state->irq_port + stat_offset) & 1;
}
362*4882a593Smuzhiyun
icside_dma_init(ide_hwif_t * hwif,const struct ide_port_info * d)363*4882a593Smuzhiyun static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
364*4882a593Smuzhiyun {
365*4882a593Smuzhiyun hwif->dmatable_cpu = NULL;
366*4882a593Smuzhiyun hwif->dmatable_dma = 0;
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun return 0;
369*4882a593Smuzhiyun }
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun static const struct ide_dma_ops icside_v6_dma_ops = {
372*4882a593Smuzhiyun .dma_host_set = icside_dma_host_set,
373*4882a593Smuzhiyun .dma_setup = icside_dma_setup,
374*4882a593Smuzhiyun .dma_start = icside_dma_start,
375*4882a593Smuzhiyun .dma_end = icside_dma_end,
376*4882a593Smuzhiyun .dma_test_irq = icside_dma_test_irq,
377*4882a593Smuzhiyun .dma_lost_irq = ide_dma_lost_irq,
378*4882a593Smuzhiyun };
379*4882a593Smuzhiyun #endif
380*4882a593Smuzhiyun
icside_dma_off_init(ide_hwif_t * hwif,const struct ide_port_info * d)381*4882a593Smuzhiyun static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun return -EOPNOTSUPP;
384*4882a593Smuzhiyun }
385*4882a593Smuzhiyun
icside_setup_ports(struct ide_hw * hw,void __iomem * base,struct cardinfo * info,struct expansion_card * ec)386*4882a593Smuzhiyun static void icside_setup_ports(struct ide_hw *hw, void __iomem *base,
387*4882a593Smuzhiyun struct cardinfo *info, struct expansion_card *ec)
388*4882a593Smuzhiyun {
389*4882a593Smuzhiyun unsigned long port = (unsigned long)base + info->dataoffset;
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun hw->io_ports.data_addr = port;
392*4882a593Smuzhiyun hw->io_ports.error_addr = port + (1 << info->stepping);
393*4882a593Smuzhiyun hw->io_ports.nsect_addr = port + (2 << info->stepping);
394*4882a593Smuzhiyun hw->io_ports.lbal_addr = port + (3 << info->stepping);
395*4882a593Smuzhiyun hw->io_ports.lbam_addr = port + (4 << info->stepping);
396*4882a593Smuzhiyun hw->io_ports.lbah_addr = port + (5 << info->stepping);
397*4882a593Smuzhiyun hw->io_ports.device_addr = port + (6 << info->stepping);
398*4882a593Smuzhiyun hw->io_ports.status_addr = port + (7 << info->stepping);
399*4882a593Smuzhiyun hw->io_ports.ctl_addr = (unsigned long)base + info->ctrloffset;
400*4882a593Smuzhiyun
401*4882a593Smuzhiyun hw->irq = ec->irq;
402*4882a593Smuzhiyun hw->dev = &ec->dev;
403*4882a593Smuzhiyun }
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun static const struct ide_port_info icside_v5_port_info = {
406*4882a593Smuzhiyun .host_flags = IDE_HFLAG_NO_DMA,
407*4882a593Smuzhiyun .chipset = ide_acorn,
408*4882a593Smuzhiyun };
409*4882a593Smuzhiyun
icside_register_v5(struct icside_state * state,struct expansion_card * ec)410*4882a593Smuzhiyun static int icside_register_v5(struct icside_state *state,
411*4882a593Smuzhiyun struct expansion_card *ec)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun void __iomem *base;
414*4882a593Smuzhiyun struct ide_host *host;
415*4882a593Smuzhiyun struct ide_hw hw, *hws[] = { &hw };
416*4882a593Smuzhiyun int ret;
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun base = ecardm_iomap(ec, ECARD_RES_MEMC, 0, 0);
419*4882a593Smuzhiyun if (!base)
420*4882a593Smuzhiyun return -ENOMEM;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun state->irq_port = base;
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun ec->irqaddr = base + ICS_ARCIN_V5_INTRSTAT;
425*4882a593Smuzhiyun ec->irqmask = 1;
426*4882a593Smuzhiyun
427*4882a593Smuzhiyun ecard_setirq(ec, &icside_ops_arcin_v5, state);
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun /*
430*4882a593Smuzhiyun * Be on the safe side - disable interrupts
431*4882a593Smuzhiyun */
432*4882a593Smuzhiyun icside_irqdisable_arcin_v5(ec, 0);
433*4882a593Smuzhiyun
434*4882a593Smuzhiyun icside_setup_ports(&hw, base, &icside_cardinfo_v5, ec);
435*4882a593Smuzhiyun
436*4882a593Smuzhiyun host = ide_host_alloc(&icside_v5_port_info, hws, 1);
437*4882a593Smuzhiyun if (host == NULL)
438*4882a593Smuzhiyun return -ENODEV;
439*4882a593Smuzhiyun
440*4882a593Smuzhiyun state->host = host;
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun ecard_set_drvdata(ec, state);
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun ret = ide_host_register(host, &icside_v5_port_info, hws);
445*4882a593Smuzhiyun if (ret)
446*4882a593Smuzhiyun goto err_free;
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun return 0;
449*4882a593Smuzhiyun err_free:
450*4882a593Smuzhiyun ide_host_free(host);
451*4882a593Smuzhiyun ecard_set_drvdata(ec, NULL);
452*4882a593Smuzhiyun return ret;
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun static const struct ide_port_info icside_v6_port_info = {
456*4882a593Smuzhiyun .init_dma = icside_dma_off_init,
457*4882a593Smuzhiyun .port_ops = &icside_v6_no_dma_port_ops,
458*4882a593Smuzhiyun .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
459*4882a593Smuzhiyun .mwdma_mask = ATA_MWDMA2,
460*4882a593Smuzhiyun .swdma_mask = ATA_SWDMA2,
461*4882a593Smuzhiyun .chipset = ide_acorn,
462*4882a593Smuzhiyun };
463*4882a593Smuzhiyun
icside_register_v6(struct icside_state * state,struct expansion_card * ec)464*4882a593Smuzhiyun static int icside_register_v6(struct icside_state *state,
465*4882a593Smuzhiyun struct expansion_card *ec)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun void __iomem *ioc_base, *easi_base;
468*4882a593Smuzhiyun struct ide_host *host;
469*4882a593Smuzhiyun unsigned int sel = 0;
470*4882a593Smuzhiyun int ret;
471*4882a593Smuzhiyun struct ide_hw hw[2], *hws[] = { &hw[0], &hw[1] };
472*4882a593Smuzhiyun struct ide_port_info d = icside_v6_port_info;
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun ioc_base = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
475*4882a593Smuzhiyun if (!ioc_base) {
476*4882a593Smuzhiyun ret = -ENOMEM;
477*4882a593Smuzhiyun goto out;
478*4882a593Smuzhiyun }
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun easi_base = ioc_base;
481*4882a593Smuzhiyun
482*4882a593Smuzhiyun if (ecard_resource_flags(ec, ECARD_RES_EASI)) {
483*4882a593Smuzhiyun easi_base = ecardm_iomap(ec, ECARD_RES_EASI, 0, 0);
484*4882a593Smuzhiyun if (!easi_base) {
485*4882a593Smuzhiyun ret = -ENOMEM;
486*4882a593Smuzhiyun goto out;
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun /*
490*4882a593Smuzhiyun * Enable access to the EASI region.
491*4882a593Smuzhiyun */
492*4882a593Smuzhiyun sel = 1 << 5;
493*4882a593Smuzhiyun }
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun writeb(sel, ioc_base);
496*4882a593Smuzhiyun
497*4882a593Smuzhiyun ecard_setirq(ec, &icside_ops_arcin_v6, state);
498*4882a593Smuzhiyun
499*4882a593Smuzhiyun state->irq_port = easi_base;
500*4882a593Smuzhiyun state->ioc_base = ioc_base;
501*4882a593Smuzhiyun state->sel = sel;
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun /*
504*4882a593Smuzhiyun * Be on the safe side - disable interrupts
505*4882a593Smuzhiyun */
506*4882a593Smuzhiyun icside_irqdisable_arcin_v6(ec, 0);
507*4882a593Smuzhiyun
508*4882a593Smuzhiyun icside_setup_ports(&hw[0], easi_base, &icside_cardinfo_v6_1, ec);
509*4882a593Smuzhiyun icside_setup_ports(&hw[1], easi_base, &icside_cardinfo_v6_2, ec);
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun host = ide_host_alloc(&d, hws, 2);
512*4882a593Smuzhiyun if (host == NULL)
513*4882a593Smuzhiyun return -ENODEV;
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun state->host = host;
516*4882a593Smuzhiyun
517*4882a593Smuzhiyun ecard_set_drvdata(ec, state);
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
520*4882a593Smuzhiyun if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
521*4882a593Smuzhiyun d.init_dma = icside_dma_init;
522*4882a593Smuzhiyun d.port_ops = &icside_v6_port_ops;
523*4882a593Smuzhiyun d.dma_ops = &icside_v6_dma_ops;
524*4882a593Smuzhiyun }
525*4882a593Smuzhiyun #endif
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun ret = ide_host_register(host, &d, hws);
528*4882a593Smuzhiyun if (ret)
529*4882a593Smuzhiyun goto err_free;
530*4882a593Smuzhiyun
531*4882a593Smuzhiyun return 0;
532*4882a593Smuzhiyun err_free:
533*4882a593Smuzhiyun ide_host_free(host);
534*4882a593Smuzhiyun if (d.dma_ops)
535*4882a593Smuzhiyun free_dma(ec->dma);
536*4882a593Smuzhiyun ecard_set_drvdata(ec, NULL);
537*4882a593Smuzhiyun out:
538*4882a593Smuzhiyun return ret;
539*4882a593Smuzhiyun }
540*4882a593Smuzhiyun
icside_probe(struct expansion_card * ec,const struct ecard_id * id)541*4882a593Smuzhiyun static int icside_probe(struct expansion_card *ec, const struct ecard_id *id)
542*4882a593Smuzhiyun {
543*4882a593Smuzhiyun struct icside_state *state;
544*4882a593Smuzhiyun void __iomem *idmem;
545*4882a593Smuzhiyun int ret;
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun ret = ecard_request_resources(ec);
548*4882a593Smuzhiyun if (ret)
549*4882a593Smuzhiyun goto out;
550*4882a593Smuzhiyun
551*4882a593Smuzhiyun state = kzalloc(sizeof(struct icside_state), GFP_KERNEL);
552*4882a593Smuzhiyun if (!state) {
553*4882a593Smuzhiyun ret = -ENOMEM;
554*4882a593Smuzhiyun goto release;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun state->type = ICS_TYPE_NOTYPE;
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun idmem = ecardm_iomap(ec, ECARD_RES_IOCFAST, 0, 0);
560*4882a593Smuzhiyun if (idmem) {
561*4882a593Smuzhiyun unsigned int type;
562*4882a593Smuzhiyun
563*4882a593Smuzhiyun type = readb(idmem + ICS_IDENT_OFFSET) & 1;
564*4882a593Smuzhiyun type |= (readb(idmem + ICS_IDENT_OFFSET + 4) & 1) << 1;
565*4882a593Smuzhiyun type |= (readb(idmem + ICS_IDENT_OFFSET + 8) & 1) << 2;
566*4882a593Smuzhiyun type |= (readb(idmem + ICS_IDENT_OFFSET + 12) & 1) << 3;
567*4882a593Smuzhiyun ecardm_iounmap(ec, idmem);
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun state->type = type;
570*4882a593Smuzhiyun }
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun switch (state->type) {
573*4882a593Smuzhiyun case ICS_TYPE_A3IN:
574*4882a593Smuzhiyun dev_warn(&ec->dev, "A3IN unsupported\n");
575*4882a593Smuzhiyun ret = -ENODEV;
576*4882a593Smuzhiyun break;
577*4882a593Smuzhiyun
578*4882a593Smuzhiyun case ICS_TYPE_A3USER:
579*4882a593Smuzhiyun dev_warn(&ec->dev, "A3USER unsupported\n");
580*4882a593Smuzhiyun ret = -ENODEV;
581*4882a593Smuzhiyun break;
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun case ICS_TYPE_V5:
584*4882a593Smuzhiyun ret = icside_register_v5(state, ec);
585*4882a593Smuzhiyun break;
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun case ICS_TYPE_V6:
588*4882a593Smuzhiyun ret = icside_register_v6(state, ec);
589*4882a593Smuzhiyun break;
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun default:
592*4882a593Smuzhiyun dev_warn(&ec->dev, "unknown interface type\n");
593*4882a593Smuzhiyun ret = -ENODEV;
594*4882a593Smuzhiyun break;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun if (ret == 0)
598*4882a593Smuzhiyun goto out;
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun kfree(state);
601*4882a593Smuzhiyun release:
602*4882a593Smuzhiyun ecard_release_resources(ec);
603*4882a593Smuzhiyun out:
604*4882a593Smuzhiyun return ret;
605*4882a593Smuzhiyun }
606*4882a593Smuzhiyun
icside_remove(struct expansion_card * ec)607*4882a593Smuzhiyun static void icside_remove(struct expansion_card *ec)
608*4882a593Smuzhiyun {
609*4882a593Smuzhiyun struct icside_state *state = ecard_get_drvdata(ec);
610*4882a593Smuzhiyun
611*4882a593Smuzhiyun switch (state->type) {
612*4882a593Smuzhiyun case ICS_TYPE_V5:
613*4882a593Smuzhiyun /* FIXME: tell IDE to stop using the interface */
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun /* Disable interrupts */
616*4882a593Smuzhiyun icside_irqdisable_arcin_v5(ec, 0);
617*4882a593Smuzhiyun break;
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun case ICS_TYPE_V6:
620*4882a593Smuzhiyun /* FIXME: tell IDE to stop using the interface */
621*4882a593Smuzhiyun if (ec->dma != NO_DMA)
622*4882a593Smuzhiyun free_dma(ec->dma);
623*4882a593Smuzhiyun
624*4882a593Smuzhiyun /* Disable interrupts */
625*4882a593Smuzhiyun icside_irqdisable_arcin_v6(ec, 0);
626*4882a593Smuzhiyun
627*4882a593Smuzhiyun /* Reset the ROM pointer/EASI selection */
628*4882a593Smuzhiyun writeb(0, state->ioc_base);
629*4882a593Smuzhiyun break;
630*4882a593Smuzhiyun }
631*4882a593Smuzhiyun
632*4882a593Smuzhiyun ecard_set_drvdata(ec, NULL);
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun kfree(state);
635*4882a593Smuzhiyun ecard_release_resources(ec);
636*4882a593Smuzhiyun }
637*4882a593Smuzhiyun
icside_shutdown(struct expansion_card * ec)638*4882a593Smuzhiyun static void icside_shutdown(struct expansion_card *ec)
639*4882a593Smuzhiyun {
640*4882a593Smuzhiyun struct icside_state *state = ecard_get_drvdata(ec);
641*4882a593Smuzhiyun unsigned long flags;
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /*
644*4882a593Smuzhiyun * Disable interrupts from this card. We need to do
645*4882a593Smuzhiyun * this before disabling EASI since we may be accessing
646*4882a593Smuzhiyun * this register via that region.
647*4882a593Smuzhiyun */
648*4882a593Smuzhiyun local_irq_save(flags);
649*4882a593Smuzhiyun ec->ops->irqdisable(ec, 0);
650*4882a593Smuzhiyun local_irq_restore(flags);
651*4882a593Smuzhiyun
652*4882a593Smuzhiyun /*
653*4882a593Smuzhiyun * Reset the ROM pointer so that we can read the ROM
654*4882a593Smuzhiyun * after a soft reboot. This also disables access to
655*4882a593Smuzhiyun * the IDE taskfile via the EASI region.
656*4882a593Smuzhiyun */
657*4882a593Smuzhiyun if (state->ioc_base)
658*4882a593Smuzhiyun writeb(0, state->ioc_base);
659*4882a593Smuzhiyun }
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun static const struct ecard_id icside_ids[] = {
662*4882a593Smuzhiyun { MANU_ICS, PROD_ICS_IDE },
663*4882a593Smuzhiyun { MANU_ICS2, PROD_ICS2_IDE },
664*4882a593Smuzhiyun { 0xffff, 0xffff }
665*4882a593Smuzhiyun };
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun static struct ecard_driver icside_driver = {
668*4882a593Smuzhiyun .probe = icside_probe,
669*4882a593Smuzhiyun .remove = icside_remove,
670*4882a593Smuzhiyun .shutdown = icside_shutdown,
671*4882a593Smuzhiyun .id_table = icside_ids,
672*4882a593Smuzhiyun .drv = {
673*4882a593Smuzhiyun .name = "icside",
674*4882a593Smuzhiyun },
675*4882a593Smuzhiyun };
676*4882a593Smuzhiyun
icside_init(void)677*4882a593Smuzhiyun static int __init icside_init(void)
678*4882a593Smuzhiyun {
679*4882a593Smuzhiyun return ecard_register_driver(&icside_driver);
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
icside_exit(void)682*4882a593Smuzhiyun static void __exit icside_exit(void)
683*4882a593Smuzhiyun {
684*4882a593Smuzhiyun ecard_remove_driver(&icside_driver);
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
688*4882a593Smuzhiyun MODULE_LICENSE("GPL");
689*4882a593Smuzhiyun MODULE_DESCRIPTION("ICS IDE driver");
690*4882a593Smuzhiyun
691*4882a593Smuzhiyun module_init(icside_init);
692*4882a593Smuzhiyun module_exit(icside_exit);
693