1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * linux/drivers/mmc/host/pxa.c - PXA MMCI driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2003 Russell King, All Rights Reserved.
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * This hardware is really sick:
8*4882a593Smuzhiyun * - No way to clear interrupts.
9*4882a593Smuzhiyun * - Have to turn off the clock whenever we touch the device.
10*4882a593Smuzhiyun * - Doesn't tell you how many data blocks were transferred.
11*4882a593Smuzhiyun * Yuck!
12*4882a593Smuzhiyun *
13*4882a593Smuzhiyun * 1 and 3 byte data transfers not supported
14*4882a593Smuzhiyun * max block length up to 1023
15*4882a593Smuzhiyun */
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/init.h>
18*4882a593Smuzhiyun #include <linux/ioport.h>
19*4882a593Smuzhiyun #include <linux/platform_device.h>
20*4882a593Smuzhiyun #include <linux/delay.h>
21*4882a593Smuzhiyun #include <linux/interrupt.h>
22*4882a593Smuzhiyun #include <linux/dmaengine.h>
23*4882a593Smuzhiyun #include <linux/dma-mapping.h>
24*4882a593Smuzhiyun #include <linux/clk.h>
25*4882a593Smuzhiyun #include <linux/err.h>
26*4882a593Smuzhiyun #include <linux/mmc/host.h>
27*4882a593Smuzhiyun #include <linux/mmc/slot-gpio.h>
28*4882a593Smuzhiyun #include <linux/io.h>
29*4882a593Smuzhiyun #include <linux/regulator/consumer.h>
30*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
31*4882a593Smuzhiyun #include <linux/gfp.h>
32*4882a593Smuzhiyun #include <linux/of.h>
33*4882a593Smuzhiyun #include <linux/of_device.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #include <linux/sizes.h>
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include <mach/hardware.h>
38*4882a593Smuzhiyun #include <linux/platform_data/mmc-pxamci.h>
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #include "pxamci.h"
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #define DRIVER_NAME "pxa2xx-mci"
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun #define NR_SG 1
45*4882a593Smuzhiyun #define CLKRT_OFF (~0)
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun #define mmc_has_26MHz() (cpu_is_pxa300() || cpu_is_pxa310() \
48*4882a593Smuzhiyun || cpu_is_pxa935())
49*4882a593Smuzhiyun
/* Per-controller driver state, stored in mmc_priv() of the mmc_host. */
struct pxamci_host {
	struct mmc_host *mmc;
	spinlock_t lock;		/* guards imask updates and the DMA completion path */
	struct resource *res;		/* MMIO resource; phys base used for DMA FIFO addresses */
	void __iomem *base;		/* ioremapped controller registers */
	struct clk *clk;
	unsigned long clkrate;		/* input clock rate in Hz */
	unsigned int clkrt;		/* MMC_CLKRT divisor; CLKRT_OFF while clock is gated */
	unsigned int cmdat;		/* CMDAT bits carried over to the next command */
	unsigned int imask;		/* shadow of MMC_I_MASK (set bit = masked) */
	unsigned int power_mode;	/* last ios->power_mode applied */
	unsigned long detect_delay_ms;	/* card-detect debounce delay */
	bool use_ro_gpio;		/* true when a write-protect GPIO is in use */
	struct gpio_desc *power;	/* optional power-control GPIO */
	struct pxamci_platform_data *pdata;

	/* In-flight request state; only one request is active at a time. */
	struct mmc_request *mrq;
	struct mmc_command *cmd;
	struct mmc_data *data;

	struct dma_chan *dma_chan_rx;
	struct dma_chan *dma_chan_tx;
	dma_cookie_t dma_cookie;	/* cookie of the last submitted DMA descriptor */
	unsigned int dma_len;		/* number of mapped scatterlist entries */
	unsigned int dma_dir;		/* DMA_FROM_DEVICE or DMA_TO_DEVICE */
};
76*4882a593Smuzhiyun
pxamci_init_ocr(struct pxamci_host * host)77*4882a593Smuzhiyun static int pxamci_init_ocr(struct pxamci_host *host)
78*4882a593Smuzhiyun {
79*4882a593Smuzhiyun struct mmc_host *mmc = host->mmc;
80*4882a593Smuzhiyun int ret;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun ret = mmc_regulator_get_supply(mmc);
83*4882a593Smuzhiyun if (ret < 0)
84*4882a593Smuzhiyun return ret;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun if (IS_ERR(mmc->supply.vmmc)) {
87*4882a593Smuzhiyun /* fall-back to platform data */
88*4882a593Smuzhiyun mmc->ocr_avail = host->pdata ?
89*4882a593Smuzhiyun host->pdata->ocr_mask :
90*4882a593Smuzhiyun MMC_VDD_32_33 | MMC_VDD_33_34;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun return 0;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
pxamci_set_power(struct pxamci_host * host,unsigned char power_mode,unsigned int vdd)96*4882a593Smuzhiyun static inline int pxamci_set_power(struct pxamci_host *host,
97*4882a593Smuzhiyun unsigned char power_mode,
98*4882a593Smuzhiyun unsigned int vdd)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun struct mmc_host *mmc = host->mmc;
101*4882a593Smuzhiyun struct regulator *supply = mmc->supply.vmmc;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun if (!IS_ERR(supply))
104*4882a593Smuzhiyun return mmc_regulator_set_ocr(mmc, supply, vdd);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun if (host->power) {
107*4882a593Smuzhiyun bool on = !!((1 << vdd) & host->pdata->ocr_mask);
108*4882a593Smuzhiyun gpiod_set_value(host->power, on);
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun if (host->pdata && host->pdata->setpower)
112*4882a593Smuzhiyun return host->pdata->setpower(mmc_dev(host->mmc), vdd);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun return 0;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
pxamci_stop_clock(struct pxamci_host * host)117*4882a593Smuzhiyun static void pxamci_stop_clock(struct pxamci_host *host)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun if (readl(host->base + MMC_STAT) & STAT_CLK_EN) {
120*4882a593Smuzhiyun unsigned long timeout = 10000;
121*4882a593Smuzhiyun unsigned int v;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun writel(STOP_CLOCK, host->base + MMC_STRPCL);
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun do {
126*4882a593Smuzhiyun v = readl(host->base + MMC_STAT);
127*4882a593Smuzhiyun if (!(v & STAT_CLK_EN))
128*4882a593Smuzhiyun break;
129*4882a593Smuzhiyun udelay(1);
130*4882a593Smuzhiyun } while (timeout--);
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun if (v & STAT_CLK_EN)
133*4882a593Smuzhiyun dev_err(mmc_dev(host->mmc), "unable to stop clock\n");
134*4882a593Smuzhiyun }
135*4882a593Smuzhiyun }
136*4882a593Smuzhiyun
pxamci_enable_irq(struct pxamci_host * host,unsigned int mask)137*4882a593Smuzhiyun static void pxamci_enable_irq(struct pxamci_host *host, unsigned int mask)
138*4882a593Smuzhiyun {
139*4882a593Smuzhiyun unsigned long flags;
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
142*4882a593Smuzhiyun host->imask &= ~mask;
143*4882a593Smuzhiyun writel(host->imask, host->base + MMC_I_MASK);
144*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun
pxamci_disable_irq(struct pxamci_host * host,unsigned int mask)147*4882a593Smuzhiyun static void pxamci_disable_irq(struct pxamci_host *host, unsigned int mask)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun unsigned long flags;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun spin_lock_irqsave(&host->lock, flags);
152*4882a593Smuzhiyun host->imask |= mask;
153*4882a593Smuzhiyun writel(host->imask, host->base + MMC_I_MASK);
154*4882a593Smuzhiyun spin_unlock_irqrestore(&host->lock, flags);
155*4882a593Smuzhiyun }
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun static void pxamci_dma_irq(void *param);
158*4882a593Smuzhiyun
/*
 * Program the controller's block count/size and read timeout for @data,
 * then configure the DMA channel and submit a descriptor covering the
 * scatterlist.  Per erratum #91 (PXA27x), the TX DMA is not started
 * here for writes; pxamci_cmd_done() kicks it after the command and
 * response phase has completed.
 */
static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
{
	struct dma_async_tx_descriptor *tx;
	enum dma_transfer_direction direction;
	struct dma_slave_config config;
	struct dma_chan *chan;
	unsigned int nob = data->blocks;
	unsigned long long clks;
	unsigned int timeout;
	int ret;

	host->data = data;

	writel(nob, host->base + MMC_NOB);
	writel(data->blksz, host->base + MMC_BLKLEN);

	/*
	 * Convert the requested timeout (ns plus extra clocks) into units
	 * of 256 MMC clocks, rounding up, for the read timeout register.
	 */
	clks = (unsigned long long)data->timeout_ns * host->clkrate;
	do_div(clks, 1000000000UL);
	timeout = (unsigned int)clks + (data->timeout_clks << host->clkrt);
	writel((timeout + 255) / 256, host->base + MMC_RDTO);

	/* Byte-wide FIFO accesses; both directions are configured up front. */
	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = host->res->start + MMC_RXFIFO;
	config.dst_addr = host->res->start + MMC_TXFIFO;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		direction = DMA_DEV_TO_MEM;
		chan = host->dma_chan_rx;
	} else {
		host->dma_dir = DMA_TO_DEVICE;
		direction = DMA_MEM_TO_DEV;
		chan = host->dma_chan_tx;
	}

	config.direction = direction;

	ret = dmaengine_slave_config(chan, &config);
	if (ret < 0) {
		dev_err(mmc_dev(host->mmc), "dma slave config failed\n");
		return;
	}

	/* NOTE(review): dma_map_sg() returning 0 is not handled — TODO confirm */
	host->dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len,
				   host->dma_dir);

	tx = dmaengine_prep_slave_sg(chan, data->sg, host->dma_len, direction,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n");
		return;
	}

	/*
	 * Only writes get the completion callback, which flushes the
	 * partial buffer to the card once the DMA transfer is done.
	 */
	if (!(data->flags & MMC_DATA_READ)) {
		tx->callback = pxamci_dma_irq;
		tx->callback_param = host;
	}

	host->dma_cookie = dmaengine_submit(tx);

	/*
	 * workaround for erratum #91:
	 * only start DMA now if we are doing a read,
	 * otherwise we wait until CMD/RESP has finished
	 * before starting DMA.
	 */
	if (!cpu_is_pxa27x() || data->flags & MMC_DATA_READ)
		dma_async_issue_pending(chan);
}
232*4882a593Smuzhiyun
/*
 * Latch @cmd into the controller and start the bus clock.  @cmdat holds
 * pre-computed CMDAT flags (data/DMA/write/init from the caller); the
 * busy and response-format bits are derived here from the command.
 */
static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMDAT_BUSY;

	/* Map the MMC core response type onto the controller's encoding. */
#define RSP_TYPE(x)	((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
	switch (RSP_TYPE(mmc_resp_type(cmd))) {
	case RSP_TYPE(MMC_RSP_R1): /* r1, r1b, r6, r7 */
		cmdat |= CMDAT_RESP_SHORT;
		break;
	case RSP_TYPE(MMC_RSP_R3):
		cmdat |= CMDAT_RESP_R3;
		break;
	case RSP_TYPE(MMC_RSP_R2):
		cmdat |= CMDAT_RESP_R2;
		break;
	default:
		break;
	}

	/* The 32-bit argument is split across two 16-bit registers. */
	writel(cmd->opcode, host->base + MMC_CMD);
	writel(cmd->arg >> 16, host->base + MMC_ARGH);
	writel(cmd->arg & 0xffff, host->base + MMC_ARGL);
	writel(cmdat, host->base + MMC_CMDAT);
	writel(host->clkrt, host->base + MMC_CLKRT);	/* divisor chosen in set_ios */

	writel(START_CLOCK, host->base + MMC_STRPCL);

	pxamci_enable_irq(host, END_CMD_RES);
}
266*4882a593Smuzhiyun
pxamci_finish_request(struct pxamci_host * host,struct mmc_request * mrq)267*4882a593Smuzhiyun static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun host->mrq = NULL;
270*4882a593Smuzhiyun host->cmd = NULL;
271*4882a593Smuzhiyun host->data = NULL;
272*4882a593Smuzhiyun mmc_request_done(host->mmc, mrq);
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
/*
 * Handle the END_CMD_RES interrupt: read back the response from the
 * 16-bit response FIFO, record any command error, then either start
 * the data phase or finish the request.  Returns 1 if an in-flight
 * command was processed, 0 otherwise.
 */
static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 v;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	/*
	 * Did I mention this is Sick. We always need to
	 * discard the upper 8 bits of the first 16-bit word.
	 */
	v = readl(host->base + MMC_RES) & 0xffff;
	for (i = 0; i < 4; i++) {
		/* Repack three successive 16-bit reads into one 32-bit word. */
		u32 w1 = readl(host->base + MMC_RES) & 0xffff;
		u32 w2 = readl(host->base + MMC_RES) & 0xffff;
		cmd->resp[i] = v << 24 | w1 << 8 | w2 >> 8;
		v = w2;
	}

	if (stat & STAT_TIME_OUT_RESPONSE) {
		cmd->error = -ETIMEDOUT;
	} else if (stat & STAT_RES_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		/*
		 * workaround for erratum #42:
		 * Intel PXA27x Family Processor Specification Update Rev 001
		 * A bogus CRC error can appear if the msb of a 136 bit
		 * response is a one.
		 */
		if (cpu_is_pxa27x() &&
		    (cmd->flags & MMC_RSP_136 && cmd->resp[0] & 0x80000000))
			pr_debug("ignoring CRC from command %d - *risky*\n", cmd->opcode);
		else
			cmd->error = -EILSEQ;
	}

	pxamci_disable_irq(host, END_CMD_RES);
	if (host->data && !cmd->error) {
		pxamci_enable_irq(host, DATA_TRAN_DONE);
		/*
		 * workaround for erratum #91, if doing write
		 * enable DMA late
		 */
		if (cpu_is_pxa27x() && host->data->flags & MMC_DATA_WRITE)
			dma_async_issue_pending(host->dma_chan_tx);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
329*4882a593Smuzhiyun
/*
 * Handle the DATA_TRAN_DONE interrupt (or a forced completion on DMA
 * error): unmap the scatterlist, record data errors, and either issue
 * the stop command or finish the request.  Returns 1 if a data phase
 * was completed, 0 when no data transfer was in flight.
 */
static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
{
	struct mmc_data *data = host->data;
	struct dma_chan *chan;

	if (!data)
		return 0;

	if (data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;
	dma_unmap_sg(chan->device->dev,
		     data->sg, data->sg_len, host->dma_dir);

	if (stat & STAT_READ_TIME_OUT)
		data->error = -ETIMEDOUT;
	else if (stat & (STAT_CRC_READ_ERROR|STAT_CRC_WRITE_ERROR))
		data->error = -EILSEQ;

	/*
	 * There appears to be a hardware design bug here. There seems to
	 * be no way to find out how much data was transferred to the card.
	 * This means that if there was an error on any block, we mark all
	 * data blocks as being in error.
	 */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pxamci_disable_irq(host, DATA_TRAN_DONE);

	host->data = NULL;
	if (host->mrq->stop) {
		/* Stop the clock cleanly before issuing the stop command. */
		pxamci_stop_clock(host);
		pxamci_start_cmd(host, host->mrq->stop, host->cmdat);
	} else {
		pxamci_finish_request(host, host->mrq);
	}

	return 1;
}
373*4882a593Smuzhiyun
pxamci_irq(int irq,void * devid)374*4882a593Smuzhiyun static irqreturn_t pxamci_irq(int irq, void *devid)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun struct pxamci_host *host = devid;
377*4882a593Smuzhiyun unsigned int ireg;
378*4882a593Smuzhiyun int handled = 0;
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun ireg = readl(host->base + MMC_I_REG) & ~readl(host->base + MMC_I_MASK);
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun if (ireg) {
383*4882a593Smuzhiyun unsigned stat = readl(host->base + MMC_STAT);
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun pr_debug("PXAMCI: irq %08x stat %08x\n", ireg, stat);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun if (ireg & END_CMD_RES)
388*4882a593Smuzhiyun handled |= pxamci_cmd_done(host, stat);
389*4882a593Smuzhiyun if (ireg & DATA_TRAN_DONE)
390*4882a593Smuzhiyun handled |= pxamci_data_done(host, stat);
391*4882a593Smuzhiyun if (ireg & SDIO_INT) {
392*4882a593Smuzhiyun mmc_signal_sdio_irq(host->mmc);
393*4882a593Smuzhiyun handled = 1;
394*4882a593Smuzhiyun }
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun return IRQ_RETVAL(handled);
398*4882a593Smuzhiyun }
399*4882a593Smuzhiyun
pxamci_request(struct mmc_host * mmc,struct mmc_request * mrq)400*4882a593Smuzhiyun static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
401*4882a593Smuzhiyun {
402*4882a593Smuzhiyun struct pxamci_host *host = mmc_priv(mmc);
403*4882a593Smuzhiyun unsigned int cmdat;
404*4882a593Smuzhiyun
405*4882a593Smuzhiyun WARN_ON(host->mrq != NULL);
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun host->mrq = mrq;
408*4882a593Smuzhiyun
409*4882a593Smuzhiyun pxamci_stop_clock(host);
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun cmdat = host->cmdat;
412*4882a593Smuzhiyun host->cmdat &= ~CMDAT_INIT;
413*4882a593Smuzhiyun
414*4882a593Smuzhiyun if (mrq->data) {
415*4882a593Smuzhiyun pxamci_setup_data(host, mrq->data);
416*4882a593Smuzhiyun
417*4882a593Smuzhiyun cmdat &= ~CMDAT_BUSY;
418*4882a593Smuzhiyun cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
419*4882a593Smuzhiyun if (mrq->data->flags & MMC_DATA_WRITE)
420*4882a593Smuzhiyun cmdat |= CMDAT_WRITE;
421*4882a593Smuzhiyun }
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun pxamci_start_cmd(host, mrq->cmd, cmdat);
424*4882a593Smuzhiyun }
425*4882a593Smuzhiyun
pxamci_get_ro(struct mmc_host * mmc)426*4882a593Smuzhiyun static int pxamci_get_ro(struct mmc_host *mmc)
427*4882a593Smuzhiyun {
428*4882a593Smuzhiyun struct pxamci_host *host = mmc_priv(mmc);
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun if (host->use_ro_gpio)
431*4882a593Smuzhiyun return mmc_gpio_get_ro(mmc);
432*4882a593Smuzhiyun if (host->pdata && host->pdata->get_ro)
433*4882a593Smuzhiyun return !!host->pdata->get_ro(mmc_dev(mmc));
434*4882a593Smuzhiyun /*
435*4882a593Smuzhiyun * Board doesn't support read only detection; let the mmc core
436*4882a593Smuzhiyun * decide what to do.
437*4882a593Smuzhiyun */
438*4882a593Smuzhiyun return -ENOSYS;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun
/*
 * mmc_host_ops.set_ios: apply clock rate, power state and bus width.
 * The divisor is only latched into hardware on the next command (see
 * pxamci_start_cmd()); CLKRT_OFF marks the functional clock as gated.
 */
static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct pxamci_host *host = mmc_priv(mmc);

	if (ios->clock) {
		unsigned long rate = host->clkrate;
		unsigned int clk = rate / ios->clock;

		/* First use after being gated: re-enable the clock source. */
		if (host->clkrt == CLKRT_OFF)
			clk_prepare_enable(host->clk);

		if (ios->clock == 26000000) {
			/* to support 26MHz */
			host->clkrt = 7;
		} else {
			/* to handle (19.5MHz, 26MHz) */
			if (!clk)
				clk = 1;

			/*
			 * clk might result in a lower divisor than we
			 * desire. check for that condition and adjust
			 * as appropriate.
			 */
			if (rate / clk > ios->clock)
				clk <<= 1;
			/* CLKRT takes the log2 of the divisor. */
			host->clkrt = fls(clk) - 1;
		}

		/*
		 * we write clkrt on the next command
		 */
	} else {
		pxamci_stop_clock(host);
		if (host->clkrt != CLKRT_OFF) {
			host->clkrt = CLKRT_OFF;
			clk_disable_unprepare(host->clk);
		}
	}

	if (host->power_mode != ios->power_mode) {
		int ret;

		host->power_mode = ios->power_mode;

		ret = pxamci_set_power(host, ios->power_mode, ios->vdd);
		if (ret) {
			dev_err(mmc_dev(mmc), "unable to set power\n");
			/*
			 * The .set_ios() function in the mmc_host_ops
			 * struct return void, and failing to set the
			 * power should be rare so we print an error and
			 * return here.
			 */
			return;
		}

		/* CMDAT_INIT is consumed by the next command (see pxamci_request()). */
		if (ios->power_mode == MMC_POWER_ON)
			host->cmdat |= CMDAT_INIT;
	}

	if (ios->bus_width == MMC_BUS_WIDTH_4)
		host->cmdat |= CMDAT_SD_4DAT;
	else
		host->cmdat &= ~CMDAT_SD_4DAT;

	dev_dbg(mmc_dev(mmc), "PXAMCI: clkrt = %x cmdat = %x\n",
		host->clkrt, host->cmdat);
}
510*4882a593Smuzhiyun
pxamci_enable_sdio_irq(struct mmc_host * host,int enable)511*4882a593Smuzhiyun static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun struct pxamci_host *pxa_host = mmc_priv(host);
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun if (enable)
516*4882a593Smuzhiyun pxamci_enable_irq(pxa_host, SDIO_INT);
517*4882a593Smuzhiyun else
518*4882a593Smuzhiyun pxamci_disable_irq(pxa_host, SDIO_INT);
519*4882a593Smuzhiyun }
520*4882a593Smuzhiyun
/* Host controller operations exported to the MMC core. */
static const struct mmc_host_ops pxamci_ops = {
	.request		= pxamci_request,
	.get_cd			= mmc_gpio_get_cd,
	.get_ro			= pxamci_get_ro,
	.set_ios		= pxamci_set_ios,
	.enable_sdio_irq	= pxamci_enable_sdio_irq,
};
528*4882a593Smuzhiyun
/*
 * DMA completion callback (installed for writes only, see
 * pxamci_setup_data()).  On success, flag the partial buffer so the
 * controller flushes it to the card; on DMA error, fail the data
 * phase immediately.
 */
static void pxamci_dma_irq(void *param)
{
	struct pxamci_host *host = param;
	struct dma_tx_state state;
	enum dma_status status;
	struct dma_chan *chan;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	/* The request may already have been completed or torn down. */
	if (!host->data)
		goto out_unlock;

	if (host->data->flags & MMC_DATA_READ)
		chan = host->dma_chan_rx;
	else
		chan = host->dma_chan_tx;

	status = dmaengine_tx_status(chan, host->dma_cookie, &state);

	if (likely(status == DMA_COMPLETE)) {
		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);
	} else {
		pr_err("%s: DMA error on %s channel\n", mmc_hostname(host->mmc),
		       host->data->flags & MMC_DATA_READ ? "rx" : "tx");
		host->data->error = -EIO;
		pxamci_data_done(host, 0);
	}

out_unlock:
	spin_unlock_irqrestore(&host->lock, flags);
}
561*4882a593Smuzhiyun
pxamci_detect_irq(int irq,void * devid)562*4882a593Smuzhiyun static irqreturn_t pxamci_detect_irq(int irq, void *devid)
563*4882a593Smuzhiyun {
564*4882a593Smuzhiyun struct pxamci_host *host = mmc_priv(devid);
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun mmc_detect_change(devid, msecs_to_jiffies(host->detect_delay_ms));
567*4882a593Smuzhiyun return IRQ_HANDLED;
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun
#ifdef CONFIG_OF
static const struct of_device_id pxa_mmc_dt_ids[] = {
	{ .compatible = "marvell,pxa-mmc" },
	{ }
};

MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids);

/*
 * Parse device-tree properties: the pxa-mmc specific detect delay,
 * then the generic MMC bindings via mmc_of_parse().  Returns 0 when
 * the device has no DT node, or on success; a negative errno from
 * mmc_of_parse() otherwise.
 */
static int pxamci_of_init(struct platform_device *pdev,
			  struct mmc_host *mmc)
{
	struct device_node *np = pdev->dev.of_node;
	struct pxamci_host *host = mmc_priv(mmc);
	u32 tmp;
	int ret;

	if (!np)
		return 0;

	/* pxa-mmc specific */
	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0)
		host->detect_delay_ms = tmp;

	ret = mmc_of_parse(mmc);
	if (ret < 0)
		return ret;

	return 0;
}
#else
/* No-op stub when the kernel is built without device-tree support. */
static int pxamci_of_init(struct platform_device *pdev,
			  struct mmc_host *mmc)
{
	return 0;
}
#endif
606*4882a593Smuzhiyun
pxamci_probe(struct platform_device * pdev)607*4882a593Smuzhiyun static int pxamci_probe(struct platform_device *pdev)
608*4882a593Smuzhiyun {
609*4882a593Smuzhiyun struct mmc_host *mmc;
610*4882a593Smuzhiyun struct pxamci_host *host = NULL;
611*4882a593Smuzhiyun struct device *dev = &pdev->dev;
612*4882a593Smuzhiyun struct resource *r;
613*4882a593Smuzhiyun int ret, irq;
614*4882a593Smuzhiyun
615*4882a593Smuzhiyun r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
616*4882a593Smuzhiyun irq = platform_get_irq(pdev, 0);
617*4882a593Smuzhiyun if (irq < 0)
618*4882a593Smuzhiyun return irq;
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun mmc = mmc_alloc_host(sizeof(struct pxamci_host), dev);
621*4882a593Smuzhiyun if (!mmc) {
622*4882a593Smuzhiyun ret = -ENOMEM;
623*4882a593Smuzhiyun goto out;
624*4882a593Smuzhiyun }
625*4882a593Smuzhiyun
626*4882a593Smuzhiyun mmc->ops = &pxamci_ops;
627*4882a593Smuzhiyun
628*4882a593Smuzhiyun /*
629*4882a593Smuzhiyun * We can do SG-DMA, but we don't because we never know how much
630*4882a593Smuzhiyun * data we successfully wrote to the card.
631*4882a593Smuzhiyun */
632*4882a593Smuzhiyun mmc->max_segs = NR_SG;
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun /*
635*4882a593Smuzhiyun * Our hardware DMA can handle a maximum of one page per SG entry.
636*4882a593Smuzhiyun */
637*4882a593Smuzhiyun mmc->max_seg_size = PAGE_SIZE;
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun /*
640*4882a593Smuzhiyun * Block length register is only 10 bits before PXA27x.
641*4882a593Smuzhiyun */
642*4882a593Smuzhiyun mmc->max_blk_size = cpu_is_pxa25x() ? 1023 : 2048;
643*4882a593Smuzhiyun
644*4882a593Smuzhiyun /*
645*4882a593Smuzhiyun * Block count register is 16 bits.
646*4882a593Smuzhiyun */
647*4882a593Smuzhiyun mmc->max_blk_count = 65535;
648*4882a593Smuzhiyun
649*4882a593Smuzhiyun ret = pxamci_of_init(pdev, mmc);
650*4882a593Smuzhiyun if (ret)
651*4882a593Smuzhiyun goto out;
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun host = mmc_priv(mmc);
654*4882a593Smuzhiyun host->mmc = mmc;
655*4882a593Smuzhiyun host->pdata = pdev->dev.platform_data;
656*4882a593Smuzhiyun host->clkrt = CLKRT_OFF;
657*4882a593Smuzhiyun
658*4882a593Smuzhiyun host->clk = devm_clk_get(dev, NULL);
659*4882a593Smuzhiyun if (IS_ERR(host->clk)) {
660*4882a593Smuzhiyun ret = PTR_ERR(host->clk);
661*4882a593Smuzhiyun host->clk = NULL;
662*4882a593Smuzhiyun goto out;
663*4882a593Smuzhiyun }
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun host->clkrate = clk_get_rate(host->clk);
666*4882a593Smuzhiyun
667*4882a593Smuzhiyun /*
668*4882a593Smuzhiyun * Calculate minimum clock rate, rounding up.
669*4882a593Smuzhiyun */
670*4882a593Smuzhiyun mmc->f_min = (host->clkrate + 63) / 64;
671*4882a593Smuzhiyun mmc->f_max = (mmc_has_26MHz()) ? 26000000 : host->clkrate;
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun ret = pxamci_init_ocr(host);
674*4882a593Smuzhiyun if (ret < 0)
675*4882a593Smuzhiyun goto out;
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun mmc->caps = 0;
678*4882a593Smuzhiyun host->cmdat = 0;
679*4882a593Smuzhiyun if (!cpu_is_pxa25x()) {
680*4882a593Smuzhiyun mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
681*4882a593Smuzhiyun host->cmdat |= CMDAT_SDIO_INT_EN;
682*4882a593Smuzhiyun if (mmc_has_26MHz())
683*4882a593Smuzhiyun mmc->caps |= MMC_CAP_MMC_HIGHSPEED |
684*4882a593Smuzhiyun MMC_CAP_SD_HIGHSPEED;
685*4882a593Smuzhiyun }
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun spin_lock_init(&host->lock);
688*4882a593Smuzhiyun host->res = r;
689*4882a593Smuzhiyun host->imask = MMC_I_MASK_ALL;
690*4882a593Smuzhiyun
691*4882a593Smuzhiyun host->base = devm_ioremap_resource(dev, r);
692*4882a593Smuzhiyun if (IS_ERR(host->base)) {
693*4882a593Smuzhiyun ret = PTR_ERR(host->base);
694*4882a593Smuzhiyun goto out;
695*4882a593Smuzhiyun }
696*4882a593Smuzhiyun
697*4882a593Smuzhiyun /*
698*4882a593Smuzhiyun * Ensure that the host controller is shut down, and setup
699*4882a593Smuzhiyun * with our defaults.
700*4882a593Smuzhiyun */
701*4882a593Smuzhiyun pxamci_stop_clock(host);
702*4882a593Smuzhiyun writel(0, host->base + MMC_SPI);
703*4882a593Smuzhiyun writel(64, host->base + MMC_RESTO);
704*4882a593Smuzhiyun writel(host->imask, host->base + MMC_I_MASK);
705*4882a593Smuzhiyun
706*4882a593Smuzhiyun ret = devm_request_irq(dev, irq, pxamci_irq, 0,
707*4882a593Smuzhiyun DRIVER_NAME, host);
708*4882a593Smuzhiyun if (ret)
709*4882a593Smuzhiyun goto out;
710*4882a593Smuzhiyun
711*4882a593Smuzhiyun platform_set_drvdata(pdev, mmc);
712*4882a593Smuzhiyun
713*4882a593Smuzhiyun host->dma_chan_rx = dma_request_chan(dev, "rx");
714*4882a593Smuzhiyun if (IS_ERR(host->dma_chan_rx)) {
715*4882a593Smuzhiyun dev_err(dev, "unable to request rx dma channel\n");
716*4882a593Smuzhiyun ret = PTR_ERR(host->dma_chan_rx);
717*4882a593Smuzhiyun host->dma_chan_rx = NULL;
718*4882a593Smuzhiyun goto out;
719*4882a593Smuzhiyun }
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun host->dma_chan_tx = dma_request_chan(dev, "tx");
722*4882a593Smuzhiyun if (IS_ERR(host->dma_chan_tx)) {
723*4882a593Smuzhiyun dev_err(dev, "unable to request tx dma channel\n");
724*4882a593Smuzhiyun ret = PTR_ERR(host->dma_chan_tx);
725*4882a593Smuzhiyun host->dma_chan_tx = NULL;
726*4882a593Smuzhiyun goto out;
727*4882a593Smuzhiyun }
728*4882a593Smuzhiyun
729*4882a593Smuzhiyun if (host->pdata) {
730*4882a593Smuzhiyun host->detect_delay_ms = host->pdata->detect_delay_ms;
731*4882a593Smuzhiyun
732*4882a593Smuzhiyun host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
733*4882a593Smuzhiyun if (IS_ERR(host->power)) {
734*4882a593Smuzhiyun ret = PTR_ERR(host->power);
735*4882a593Smuzhiyun dev_err(dev, "Failed requesting gpio_power\n");
736*4882a593Smuzhiyun goto out;
737*4882a593Smuzhiyun }
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun /* FIXME: should we pass detection delay to debounce? */
740*4882a593Smuzhiyun ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0);
741*4882a593Smuzhiyun if (ret && ret != -ENOENT) {
742*4882a593Smuzhiyun dev_err(dev, "Failed requesting gpio_cd\n");
743*4882a593Smuzhiyun goto out;
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun
746*4882a593Smuzhiyun if (!host->pdata->gpio_card_ro_invert)
747*4882a593Smuzhiyun mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
750*4882a593Smuzhiyun if (ret && ret != -ENOENT) {
751*4882a593Smuzhiyun dev_err(dev, "Failed requesting gpio_ro\n");
752*4882a593Smuzhiyun goto out;
753*4882a593Smuzhiyun }
754*4882a593Smuzhiyun if (!ret)
755*4882a593Smuzhiyun host->use_ro_gpio = true;
756*4882a593Smuzhiyun
757*4882a593Smuzhiyun if (host->pdata->init)
758*4882a593Smuzhiyun host->pdata->init(dev, pxamci_detect_irq, mmc);
759*4882a593Smuzhiyun
760*4882a593Smuzhiyun if (host->power && host->pdata->setpower)
761*4882a593Smuzhiyun dev_warn(dev, "gpio_power and setpower() both defined\n");
762*4882a593Smuzhiyun if (host->use_ro_gpio && host->pdata->get_ro)
763*4882a593Smuzhiyun dev_warn(dev, "gpio_ro and get_ro() both defined\n");
764*4882a593Smuzhiyun }
765*4882a593Smuzhiyun
766*4882a593Smuzhiyun mmc_add_host(mmc);
767*4882a593Smuzhiyun
768*4882a593Smuzhiyun return 0;
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun out:
771*4882a593Smuzhiyun if (host) {
772*4882a593Smuzhiyun if (host->dma_chan_rx)
773*4882a593Smuzhiyun dma_release_channel(host->dma_chan_rx);
774*4882a593Smuzhiyun if (host->dma_chan_tx)
775*4882a593Smuzhiyun dma_release_channel(host->dma_chan_tx);
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun if (mmc)
778*4882a593Smuzhiyun mmc_free_host(mmc);
779*4882a593Smuzhiyun return ret;
780*4882a593Smuzhiyun }
781*4882a593Smuzhiyun
pxamci_remove(struct platform_device * pdev)782*4882a593Smuzhiyun static int pxamci_remove(struct platform_device *pdev)
783*4882a593Smuzhiyun {
784*4882a593Smuzhiyun struct mmc_host *mmc = platform_get_drvdata(pdev);
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun if (mmc) {
787*4882a593Smuzhiyun struct pxamci_host *host = mmc_priv(mmc);
788*4882a593Smuzhiyun
789*4882a593Smuzhiyun mmc_remove_host(mmc);
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun if (host->pdata && host->pdata->exit)
792*4882a593Smuzhiyun host->pdata->exit(&pdev->dev, mmc);
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun pxamci_stop_clock(host);
795*4882a593Smuzhiyun writel(TXFIFO_WR_REQ|RXFIFO_RD_REQ|CLK_IS_OFF|STOP_CMD|
796*4882a593Smuzhiyun END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
797*4882a593Smuzhiyun host->base + MMC_I_MASK);
798*4882a593Smuzhiyun
799*4882a593Smuzhiyun dmaengine_terminate_all(host->dma_chan_rx);
800*4882a593Smuzhiyun dmaengine_terminate_all(host->dma_chan_tx);
801*4882a593Smuzhiyun dma_release_channel(host->dma_chan_rx);
802*4882a593Smuzhiyun dma_release_channel(host->dma_chan_tx);
803*4882a593Smuzhiyun
804*4882a593Smuzhiyun mmc_free_host(mmc);
805*4882a593Smuzhiyun }
806*4882a593Smuzhiyun
807*4882a593Smuzhiyun return 0;
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun
/*
 * Platform driver glue: matches "pxa2xx-mci" platform devices (and the DT
 * compatibles in pxa_mmc_dt_ids when CONFIG_OF is enabled).
 */
static struct platform_driver pxamci_driver = {
	.probe = pxamci_probe,
	.remove = pxamci_remove,
	.driver = {
		.name = DRIVER_NAME,
		/* Probing may sleep on clk/DMA setup; allow async probe. */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
		/* of_match_ptr() compiles the table out when !CONFIG_OF. */
		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),
	},
};

/* Expands to module init/exit that register/unregister the driver. */
module_platform_driver(pxamci_driver);

MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
/* Keep legacy platform-bus module autoloading working. */
MODULE_ALIAS("platform:pxa2xx-mci");
825