/*
 * drivers/ata/pata_arasan_cf.c
 *
 * Arasan Compact Flash host controller source file
 *
 * Copyright (C) 2011 ST Microelectronics
 * Viresh Kumar <vireshk@kernel.org>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

/*
 * The Arasan CompactFlash Device Controller IP core has three basic modes of
 * operation: PC card ATA using I/O mode, PC card ATA using memory mode, and
 * PC card ATA using True IDE mode. Currently, this driver supports only True
 * IDE mode.
 *
 * The Arasan CF controller shares its global IRQ register with the Arasan XD
 * controller.
 *
 * Tested on arch/arm/mach-spear13xx.
 */
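
/*
 * Purely illustrative sketch (not taken from any board's actual device tree):
 * a node matching this driver's "arasan,cf-spear1340" compatible string could
 * look roughly like the following, where the register base, size, interrupt
 * specifier and DMA phandle are placeholder values; only the "data" DMA name
 * comes from this driver:
 *
 *	cf@0 {
 *		compatible = "arasan,cf-spear1340";
 *		reg = <0x0 0x1000>;
 *		interrupts = <0>;
 *		dmas = <&dma_controller 0>;
 *		dma-names = "data";
 *	};
 */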

#include <linux/ata.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/libata.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pata_arasan_cf_data.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define DRIVER_NAME	"arasan_cf"
#define TIMEOUT		msecs_to_jiffies(3000)

/* Registers */
/* CompactFlash Interface Status */
#define CFI_STS			0x000
	#define STS_CHG				(1)
	#define BIN_AUDIO_OUT			(1 << 1)
	#define CARD_DETECT1			(1 << 2)
	#define CARD_DETECT2			(1 << 3)
	#define INP_ACK				(1 << 4)
	#define CARD_READY			(1 << 5)
	#define IO_READY			(1 << 6)
	#define B16_IO_PORT_SEL			(1 << 7)
/* IRQ */
#define IRQ_STS			0x004
/* Interrupt Enable */
#define IRQ_EN			0x008
	#define CARD_DETECT_IRQ			(1)
	#define STATUS_CHNG_IRQ			(1 << 1)
	#define MEM_MODE_IRQ			(1 << 2)
	#define IO_MODE_IRQ			(1 << 3)
	#define TRUE_IDE_MODE_IRQ		(1 << 8)
	#define PIO_XFER_ERR_IRQ		(1 << 9)
	#define BUF_AVAIL_IRQ			(1 << 10)
	#define XFER_DONE_IRQ			(1 << 11)
	#define IGNORED_IRQS	(STATUS_CHNG_IRQ | MEM_MODE_IRQ | IO_MODE_IRQ |\
					TRUE_IDE_MODE_IRQ)
	#define TRUE_IDE_IRQS	(CARD_DETECT_IRQ | PIO_XFER_ERR_IRQ |\
					BUF_AVAIL_IRQ | XFER_DONE_IRQ)
/* Operation Mode */
#define OP_MODE			0x00C
	#define CARD_MODE_MASK			(0x3)
	#define MEM_MODE			(0x0)
	#define IO_MODE				(0x1)
	#define TRUE_IDE_MODE			(0x2)

	#define CARD_TYPE_MASK			(1 << 2)
	#define CF_CARD				(0)
	#define CF_PLUS_CARD			(1 << 2)

	#define CARD_RESET			(1 << 3)
	#define CFHOST_ENB			(1 << 4)
	#define OUTPUTS_TRISTATE		(1 << 5)
	#define ULTRA_DMA_ENB			(1 << 8)
	#define MULTI_WORD_DMA_ENB		(1 << 9)
	#define DRQ_BLOCK_SIZE_MASK		(0x3 << 11)
	#define DRQ_BLOCK_SIZE_512		(0)
	#define DRQ_BLOCK_SIZE_1024		(1 << 11)
	#define DRQ_BLOCK_SIZE_2048		(2 << 11)
	#define DRQ_BLOCK_SIZE_4096		(3 << 11)
/* CF Interface Clock Configuration */
#define CLK_CFG			0x010
	#define CF_IF_CLK_MASK			(0XF)
/* CF Timing Mode Configuration */
#define TM_CFG			0x014
	#define MEM_MODE_TIMING_MASK		(0x3)
	#define MEM_MODE_TIMING_250NS		(0x0)
	#define MEM_MODE_TIMING_120NS		(0x1)
	#define MEM_MODE_TIMING_100NS		(0x2)
	#define MEM_MODE_TIMING_80NS		(0x3)

	#define IO_MODE_TIMING_MASK		(0x3 << 2)
	#define IO_MODE_TIMING_250NS		(0x0 << 2)
	#define IO_MODE_TIMING_120NS		(0x1 << 2)
	#define IO_MODE_TIMING_100NS		(0x2 << 2)
	#define IO_MODE_TIMING_80NS		(0x3 << 2)

	#define TRUEIDE_PIO_TIMING_MASK		(0x7 << 4)
	#define TRUEIDE_PIO_TIMING_SHIFT	4

	#define TRUEIDE_MWORD_DMA_TIMING_MASK	(0x7 << 7)
	#define TRUEIDE_MWORD_DMA_TIMING_SHIFT	7

	#define ULTRA_DMA_TIMING_MASK		(0x7 << 10)
	#define ULTRA_DMA_TIMING_SHIFT		10
/* CF Transfer Address */
#define XFER_ADDR		0x014
	#define XFER_ADDR_MASK			(0x7FF)
	#define MAX_XFER_COUNT			0x20000u
/* Transfer Control */
#define XFER_CTR		0x01C
	#define XFER_COUNT_MASK			(0x3FFFF)
	#define ADDR_INC_DISABLE		(1 << 24)
	#define XFER_WIDTH_MASK			(1 << 25)
	#define XFER_WIDTH_8B			(0)
	#define XFER_WIDTH_16B			(1 << 25)

	#define MEM_TYPE_MASK			(1 << 26)
	#define MEM_TYPE_COMMON			(0)
	#define MEM_TYPE_ATTRIBUTE		(1 << 26)

	#define MEM_IO_XFER_MASK		(1 << 27)
	#define MEM_XFER			(0)
	#define IO_XFER				(1 << 27)

	#define DMA_XFER_MODE			(1 << 28)

	#define AHB_BUS_NORMAL_PIO_OPRTN	(~(1 << 29))
	#define XFER_DIR_MASK			(1 << 30)
	#define XFER_READ			(0)
	#define XFER_WRITE			(1 << 30)

	#define XFER_START			(1 << 31)
/* Write Data Port */
#define WRITE_PORT		0x024
/* Read Data Port */
#define READ_PORT		0x028
/* ATA Data Port */
#define ATA_DATA_PORT		0x030
	#define ATA_DATA_PORT_MASK		(0xFFFF)
/* ATA Error/Features */
#define ATA_ERR_FTR		0x034
/* ATA Sector Count */
#define ATA_SC			0x038
/* ATA Sector Number */
#define ATA_SN			0x03C
/* ATA Cylinder Low */
#define ATA_CL			0x040
/* ATA Cylinder High */
#define ATA_CH			0x044
/* ATA Select Card/Head */
#define ATA_SH			0x048
/* ATA Status-Command */
#define ATA_STS_CMD		0x04C
/* ATA Alternate Status/Device Control */
#define ATA_ASTS_DCTR		0x050
/* Extended Write Data Port 0x200-0x3FC */
#define EXT_WRITE_PORT		0x200
/* Extended Read Data Port 0x400-0x5FC */
#define EXT_READ_PORT		0x400
	#define FIFO_SIZE	0x200u
/* Global Interrupt Status */
#define GIRQ_STS		0x800
/* Global Interrupt Status enable */
#define GIRQ_STS_EN		0x804
/* Global Interrupt Signal enable */
#define GIRQ_SGN_EN		0x808
	#define GIRQ_CF		(1)
	#define GIRQ_XD		(1 << 1)

/* Compact Flash Controller Dev Structure */
struct arasan_cf_dev {
	/* pointer to ata_host structure */
	struct ata_host *host;
	/* clk structure */
	struct clk *clk;

	/* physical base address of controller */
	dma_addr_t pbase;
	/* virtual base address of controller */
	void __iomem *vbase;
	/* irq number */
	int irq;

	/* status to be updated to framework regarding DMA transfer */
	u8 dma_status;
	/* Card is present or Not */
	u8 card_present;

	/* dma specific */
	/* Completion for transfer complete interrupt from controller */
	struct completion cf_completion;
	/* Completion for DMA transfer complete. */
	struct completion dma_completion;
	/* Dma channel allocated */
	struct dma_chan *dma_chan;
	/* Mask for DMA transfers */
	dma_cap_mask_t mask;
	/* DMA transfer work */
	struct work_struct work;
	/* DMA delayed finish work */
	struct delayed_work dwork;
	/* qc to be transferred using DMA */
	struct ata_queued_cmd *qc;
};

static struct scsi_host_template arasan_cf_sht = {
	ATA_BASE_SHT(DRIVER_NAME),
	.dma_boundary = 0xFFFFFFFFUL,
};

static void cf_dumpregs(struct arasan_cf_dev *acdev)
{
	struct device *dev = acdev->host->dev;

	dev_dbg(dev, ": =========== REGISTER DUMP ===========");
	dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
	dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
	dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
	dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
	dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
	dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
	dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
	dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
	dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
	dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
	dev_dbg(dev, ": =====================================");
}

/* Enable/Disable global interrupts shared between CF and XD ctrlr. */
static void cf_ginterrupt_enable(struct arasan_cf_dev *acdev, bool enable)
{
	/* enable should be 0 or 1 */
	writel(enable, acdev->vbase + GIRQ_STS_EN);
	writel(enable, acdev->vbase + GIRQ_SGN_EN);
}

/* Enable/Disable CF interrupts */
static inline void
cf_interrupt_enable(struct arasan_cf_dev *acdev, u32 mask, bool enable)
{
	u32 val = readl(acdev->vbase + IRQ_EN);
	/* clear & enable/disable irqs */
	if (enable) {
		writel(mask, acdev->vbase + IRQ_STS);
		writel(val | mask, acdev->vbase + IRQ_EN);
	} else
		writel(val & ~mask, acdev->vbase + IRQ_EN);
}

static inline void cf_card_reset(struct arasan_cf_dev *acdev)
{
	u32 val = readl(acdev->vbase + OP_MODE);

	writel(val | CARD_RESET, acdev->vbase + OP_MODE);
	udelay(200);
	writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
}

static inline void cf_ctrl_reset(struct arasan_cf_dev *acdev)
{
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
			acdev->vbase + OP_MODE);
}

static void cf_card_detect(struct arasan_cf_dev *acdev, bool hotplugged)
{
	struct ata_port *ap = acdev->host->ports[0];
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 val = readl(acdev->vbase + CFI_STS);

	/* Both CD1 & CD2 should be low if card inserted completely */
	if (!(val & (CARD_DETECT1 | CARD_DETECT2))) {
		if (acdev->card_present)
			return;
		acdev->card_present = 1;
		cf_card_reset(acdev);
	} else {
		if (!acdev->card_present)
			return;
		acdev->card_present = 0;
	}

	if (hotplugged) {
		ata_ehi_hotplugged(ehi);
		ata_port_freeze(ap);
	}
}

static int cf_init(struct arasan_cf_dev *acdev)
{
	struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);
	unsigned int if_clk;
	unsigned long flags;
	int ret = 0;

	ret = clk_prepare_enable(acdev->clk);
	if (ret) {
		dev_dbg(acdev->host->dev, "clock enable failed");
		return ret;
	}

	ret = clk_set_rate(acdev->clk, 166000000);
	if (ret) {
		dev_warn(acdev->host->dev, "clock set rate failed");
		clk_disable_unprepare(acdev->clk);
		return ret;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	/* configure CF interface clock */
	/* TODO: read from device tree */
	if_clk = CF_IF_CLK_166M;
	if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
		if_clk = pdata->cf_if_clk;

	writel(if_clk, acdev->vbase + CLK_CFG);

	writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
	cf_interrupt_enable(acdev, CARD_DETECT_IRQ, 1);
	cf_ginterrupt_enable(acdev, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

static void cf_exit(struct arasan_cf_dev *acdev)
{
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	cf_ginterrupt_enable(acdev, 0);
	cf_interrupt_enable(acdev, TRUE_IDE_IRQS, 0);
	cf_card_reset(acdev);
	writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
			acdev->vbase + OP_MODE);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
	clk_disable_unprepare(acdev->clk);
}

static void dma_callback(void *dev)
{
	struct arasan_cf_dev *acdev = dev;

	complete(&acdev->dma_completion);
}

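/*
 * Hand a finished DMA command back to libata: clear acdev->qc, let the SFF
 * interrupt handler complete the qc, and note a timeout in the EH description
 * if the command failed.
 */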
static inline void dma_complete(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;

	acdev->qc = NULL;
	ata_sff_interrupt(acdev->irq, acdev->host);

	spin_lock_irqsave(&acdev->host->lock, flags);
	if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
		ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

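/*
 * Wait (with timeout) for the interrupt handler to signal that the controller
 * FIFO is ready for the next chunk, i.e. for a buffer available or transfer
 * done completion. Returns -EAGAIN if a PIO transfer error was flagged.
 */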
static inline int wait4buf(struct arasan_cf_dev *acdev)
{
	if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
		u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

		dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
		return -ETIMEDOUT;
	}

	/* Check if PIO Error interrupt has occurred */
	if (acdev->dma_status & ATA_DMA_ERR)
		return -EAGAIN;

	return 0;
}

static int
dma_xfer(struct arasan_cf_dev *acdev, dma_addr_t src, dma_addr_t dest, u32 len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan = acdev->dma_chan;
	dma_cookie_t cookie;
	unsigned long flags = DMA_PREP_INTERRUPT;
	int ret = 0;

	tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
	if (!tx) {
		dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
		return -EAGAIN;
	}

	tx->callback = dma_callback;
	tx->callback_param = acdev;
	cookie = tx->tx_submit(tx);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(acdev->host->dev, "dma_submit_error\n");
		return ret;
	}

	chan->device->device_issue_pending(chan);

	/* Wait for DMA to complete */
	if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
		dmaengine_terminate_all(chan);
		dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return ret;
}

static int sg_xfer(struct arasan_cf_dev *acdev, struct scatterlist *sg)
{
	dma_addr_t dest = 0, src = 0;
	u32 xfer_cnt, sglen, dma_len, xfer_ctr;
	u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;
	unsigned long flags;
	int ret = 0;

	sglen = sg_dma_len(sg);
	if (write) {
		src = sg_dma_address(sg);
		dest = acdev->pbase + EXT_WRITE_PORT;
	} else {
		dest = sg_dma_address(sg);
		src = acdev->pbase + EXT_READ_PORT;
	}

	/*
	 * For each sg:
	 * At most MAX_XFER_COUNT data is transferred before we get a transfer
	 * complete interrupt. In between, a buffer available interrupt is
	 * generated after every chunk of FIFO_SIZE data, at which point we
	 * refill the FIFO with at most FIFO_SIZE data again.
	 */
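	/*
	 * Illustrative numbers only: a 4 KiB sg entry is programmed as a
	 * single xfer_cnt of 4096 and is then serviced by eight
	 * wait4buf()/dma_xfer() iterations of FIFO_SIZE (512) bytes each.
	 */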
	while (sglen) {
		xfer_cnt = min(sglen, MAX_XFER_COUNT);
		spin_lock_irqsave(&acdev->host->lock, flags);
		xfer_ctr = readl(acdev->vbase + XFER_CTR) &
			~XFER_COUNT_MASK;
		writel(xfer_ctr | xfer_cnt | XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);

		/* continue dma xfers until current sg is completed */
		while (xfer_cnt) {
			/* wait for read to complete */
			if (!write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}

			/* read/write FIFO in chunk of FIFO_SIZE */
			dma_len = min(xfer_cnt, FIFO_SIZE);
			ret = dma_xfer(acdev, src, dest, dma_len);
			if (ret) {
				dev_err(acdev->host->dev, "dma failed");
				goto fail;
			}

			if (write)
				src += dma_len;
			else
				dest += dma_len;

			sglen -= dma_len;
			xfer_cnt -= dma_len;

			/* wait for write to complete */
			if (write) {
				ret = wait4buf(acdev);
				if (ret)
					goto fail;
			}
		}
	}

fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	return ret;
}

/*
 * This routine uses an external DMA controller to read/write data to/from the
 * FIFO of the CF controller. The CF controller supports two transfer-related
 * interrupts:
 * - buf_avail: generated as soon as a 512-byte buffer is available for
 *   reading, or an empty buffer is available for writing.
 * - xfer_done: generated once "xfer_size" amount of data has been transferred
 *   to/from the FIFO. xfer_size is programmed in the XFER_CTR register.
 *
 * Max buffer size = FIFO_SIZE = 512 Bytes.
 * Max xfer_size = MAX_XFER_COUNT = 256 KB.
 */
static void data_xfer(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			work);
	struct ata_queued_cmd *qc = acdev->qc;
	struct scatterlist *sg;
	unsigned long flags;
	u32 temp;
	int ret = 0;

	/* request dma channels */
	/* dma_request_channel may sleep, so calling from process context */
	acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
	if (IS_ERR(acdev->dma_chan)) {
		dev_err(acdev->host->dev, "Unable to get dma_chan\n");
		acdev->dma_chan = NULL;
		goto chan_request_fail;
	}

	for_each_sg(qc->sg, sg, qc->n_elem, temp) {
		ret = sg_xfer(acdev, sg);
		if (ret)
			break;
	}

	dma_release_channel(acdev->dma_chan);
	acdev->dma_chan = NULL;

	/* data xferred successfully */
	if (!ret) {
		u32 status;

		spin_lock_irqsave(&acdev->host->lock, flags);
		status = ioread8(qc->ap->ioaddr.altstatus_addr);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		if (status & (ATA_BUSY | ATA_DRQ)) {
			ata_sff_queue_delayed_work(&acdev->dwork, 1);
			return;
		}

		goto sff_intr;
	}

	cf_dumpregs(acdev);

chan_request_fail:
	spin_lock_irqsave(&acdev->host->lock, flags);
	/* error when transferring data to/from memory */
	qc->err_mask |= AC_ERR_HOST_BUS;
	qc->ap->hsm_task_state = HSM_ST_ERR;

	cf_ctrl_reset(acdev);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
sff_intr:
	dma_complete(acdev);
}

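/*
 * Re-check the device status some time after a successful DMA data transfer:
 * reschedule while the device still reports BSY/DRQ, complete the command
 * otherwise.
 */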
static void delayed_finish(struct work_struct *work)
{
	struct arasan_cf_dev *acdev = container_of(work, struct arasan_cf_dev,
			dwork.work);
	struct ata_queued_cmd *qc = acdev->qc;
	unsigned long flags;
	u8 status;

	spin_lock_irqsave(&acdev->host->lock, flags);
	status = ioread8(qc->ap->ioaddr.altstatus_addr);
	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (status & (ATA_BUSY | ATA_DRQ))
		ata_sff_queue_delayed_work(&acdev->dwork, 1);
	else
		dma_complete(acdev);
}

static irqreturn_t arasan_cf_interrupt(int irq, void *dev)
{
	struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;
	unsigned long flags;
	u32 irqsts;

	irqsts = readl(acdev->vbase + GIRQ_STS);
	if (!(irqsts & GIRQ_CF))
		return IRQ_NONE;

	spin_lock_irqsave(&acdev->host->lock, flags);
	irqsts = readl(acdev->vbase + IRQ_STS);
	writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
	writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */

	/* handle only relevant interrupts */
	irqsts &= ~IGNORED_IRQS;

	if (irqsts & CARD_DETECT_IRQ) {
		cf_card_detect(acdev, 1);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return IRQ_HANDLED;
	}

	if (irqsts & PIO_XFER_ERR_IRQ) {
		acdev->dma_status = ATA_DMA_ERR;
		writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
				acdev->vbase + XFER_CTR);
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		complete(&acdev->cf_completion);
		dev_err(acdev->host->dev, "pio xfer err irq\n");
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&acdev->host->lock, flags);

	if (irqsts & BUF_AVAIL_IRQ) {
		complete(&acdev->cf_completion);
		return IRQ_HANDLED;
	}

	if (irqsts & XFER_DONE_IRQ) {
		struct ata_queued_cmd *qc = acdev->qc;

		/* Send Complete only for write */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			complete(&acdev->cf_completion);
	}

	return IRQ_HANDLED;
}

static void arasan_cf_freeze(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* stop transfer and reset controller */
	writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
			acdev->vbase + XFER_CTR);
	cf_ctrl_reset(acdev);
	acdev->dma_status = ATA_DMA_ERR;

	ata_sff_dma_pause(ap);
	ata_sff_freeze(ap);
}

static void arasan_cf_error_handler(struct ata_port *ap)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/*
	 * DMA transfers using an external DMA controller may be scheduled.
	 * Abort them before handling error. Refer data_xfer() for further
	 * details.
	 */
	cancel_work_sync(&acdev->work);
	cancel_delayed_work_sync(&acdev->dwork);
	return ata_sff_error_handler(ap);
}

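/*
 * Program the transfer direction, issue the ATA command to the device and
 * schedule the data_xfer() worker, which performs the actual DMA to/from the
 * controller FIFO.
 */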
static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
{
	struct ata_queued_cmd *qc = acdev->qc;
	struct ata_port *ap = qc->ap;
	struct ata_taskfile *tf = &qc->tf;
	u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
	u32 write = tf->flags & ATA_TFLAG_WRITE;

	xfer_ctr |= write ? XFER_WRITE : XFER_READ;
	writel(xfer_ctr, acdev->vbase + XFER_CTR);

	ap->ops->sff_exec_command(ap, tf);
	ata_sff_queue_work(&acdev->work);
}

static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct arasan_cf_dev *acdev = ap->host->private_data;

	/* defer PIO handling to sff_qc_issue */
	if (!ata_is_dma(qc->tf.protocol))
		return ata_sff_qc_issue(qc);

	/* select the device */
	ata_wait_idle(ap);
	ata_sff_dev_select(ap, qc->dev->devno);
	ata_wait_idle(ap);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
		WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->sff_tf_load(ap, &qc->tf);
		acdev->dma_status = 0;
		acdev->qc = qc;
		arasan_cf_dma_start(acdev);
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

static void arasan_cf_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u8 pio = adev->pio_mode - XFER_PIO_0;
	unsigned long flags;
	u32 val;

	/* Arasan ctrl supports Mode0 -> Mode6 */
	if (pio > 6) {
		dev_err(ap->dev, "Unknown PIO mode\n");
		return;
	}

	spin_lock_irqsave(&acdev->host->lock, flags);
	val = readl(acdev->vbase + OP_MODE) &
		~(ULTRA_DMA_ENB | MULTI_WORD_DMA_ENB | DRQ_BLOCK_SIZE_MASK);
	writel(val, acdev->vbase + OP_MODE);
	val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
	val |= pio << TRUEIDE_PIO_TIMING_SHIFT;
	writel(val, acdev->vbase + TM_CFG);

	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 0);
	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static void arasan_cf_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	struct arasan_cf_dev *acdev = ap->host->private_data;
	u32 opmode, tmcfg, dma_mode = adev->dma_mode;
	unsigned long flags;

	spin_lock_irqsave(&acdev->host->lock, flags);
	opmode = readl(acdev->vbase + OP_MODE) &
		~(MULTI_WORD_DMA_ENB | ULTRA_DMA_ENB);
	tmcfg = readl(acdev->vbase + TM_CFG);

	if ((dma_mode >= XFER_UDMA_0) && (dma_mode <= XFER_UDMA_6)) {
		opmode |= ULTRA_DMA_ENB;
		tmcfg &= ~ULTRA_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
	} else if ((dma_mode >= XFER_MW_DMA_0) && (dma_mode <= XFER_MW_DMA_4)) {
		opmode |= MULTI_WORD_DMA_ENB;
		tmcfg &= ~TRUEIDE_MWORD_DMA_TIMING_MASK;
		tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
			TRUEIDE_MWORD_DMA_TIMING_SHIFT;
	} else {
		dev_err(ap->dev, "Unknown DMA mode\n");
		spin_unlock_irqrestore(&acdev->host->lock, flags);
		return;
	}

	writel(opmode, acdev->vbase + OP_MODE);
	writel(tmcfg, acdev->vbase + TM_CFG);
	writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);

	cf_interrupt_enable(acdev, PIO_XFER_ERR_IRQ, 0);
	cf_interrupt_enable(acdev, BUF_AVAIL_IRQ | XFER_DONE_IRQ, 1);
	spin_unlock_irqrestore(&acdev->host->lock, flags);
}

static struct ata_port_operations arasan_cf_ops = {
	.inherits = &ata_sff_port_ops,
	.freeze = arasan_cf_freeze,
	.error_handler = arasan_cf_error_handler,
	.qc_issue = arasan_cf_qc_issue,
	.set_piomode = arasan_cf_set_piomode,
	.set_dmamode = arasan_cf_set_dmamode,
};

static int arasan_cf_probe(struct platform_device *pdev)
{
	struct arasan_cf_dev *acdev;
	struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);
	struct ata_host *host;
	struct ata_port *ap;
	struct resource *res;
	u32 quirk;
	irq_handler_t irq_handler = NULL;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
				DRIVER_NAME)) {
		dev_warn(&pdev->dev, "Failed to get memory region resource\n");
		return -ENOENT;
	}

	acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
	if (!acdev)
		return -ENOMEM;

	if (pdata)
		quirk = pdata->quirk;
	else
		quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */

	/*
	 * If there's an error getting IRQ (or we do get IRQ0),
	 * support only PIO
	 */
	ret = platform_get_irq(pdev, 0);
	if (ret > 0) {
		acdev->irq = ret;
		irq_handler = arasan_cf_interrupt;
	} else if (ret == -EPROBE_DEFER) {
		return ret;
	} else {
		quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
	}

	acdev->pbase = res->start;
	acdev->vbase = devm_ioremap(&pdev->dev, res->start,
			resource_size(res));
	if (!acdev->vbase) {
		dev_warn(&pdev->dev, "ioremap fail\n");
		return -ENOMEM;
	}

	acdev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(acdev->clk)) {
		dev_warn(&pdev->dev, "Clock not found\n");
		return PTR_ERR(acdev->clk);
	}

	/* allocate host */
	host = ata_host_alloc(&pdev->dev, 1);
	if (!host) {
		dev_warn(&pdev->dev, "alloc host fail\n");
		return -ENOMEM;
	}

	ap = host->ports[0];
	host->private_data = acdev;
	acdev->host = host;
	ap->ops = &arasan_cf_ops;
	ap->pio_mask = ATA_PIO6;
	ap->mwdma_mask = ATA_MWDMA4;
	ap->udma_mask = ATA_UDMA6;

	init_completion(&acdev->cf_completion);
	init_completion(&acdev->dma_completion);
	INIT_WORK(&acdev->work, data_xfer);
	INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
	dma_cap_set(DMA_MEMCPY, acdev->mask);

	/* Handle platform specific quirks */
	if (quirk) {
		if (quirk & CF_BROKEN_PIO) {
			ap->ops->set_piomode = NULL;
			ap->pio_mask = 0;
		}
		if (quirk & CF_BROKEN_MWDMA)
			ap->mwdma_mask = 0;
		if (quirk & CF_BROKEN_UDMA)
			ap->udma_mask = 0;
	}
	ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

	ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
	ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
	ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
	ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
	ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
	ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
	ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
	ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
	ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
	ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

	ata_port_desc(ap, "phy_addr %llx virt_addr %p",
			(unsigned long long) res->start, acdev->vbase);

	ret = cf_init(acdev);
	if (ret)
		return ret;

	cf_card_detect(acdev, 0);

	ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
			&arasan_cf_sht);
	if (!ret)
		return 0;

	cf_exit(acdev);

	return ret;
}

static int arasan_cf_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	ata_host_detach(host);
	cf_exit(acdev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int arasan_cf_suspend(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	if (acdev->dma_chan)
		dmaengine_terminate_all(acdev->dma_chan);

	cf_exit(acdev);
	return ata_host_suspend(host, PMSG_SUSPEND);
}

static int arasan_cf_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);
	struct arasan_cf_dev *acdev = host->ports[0]->private_data;

	cf_init(acdev);
	ata_host_resume(host);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume);

#ifdef CONFIG_OF
static const struct of_device_id arasan_cf_id_table[] = {
	{ .compatible = "arasan,cf-spear1340" },
	{}
};
MODULE_DEVICE_TABLE(of, arasan_cf_id_table);
#endif

static struct platform_driver arasan_cf_driver = {
	.probe		= arasan_cf_probe,
	.remove		= arasan_cf_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &arasan_cf_pm_ops,
		.of_match_table = of_match_ptr(arasan_cf_id_table),
	},
};

module_platform_driver(arasan_cf_driver);

MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);