1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Synopsys DesignWare Multimedia Card Interface driver
4 * (Based on NXP driver for lpc 31xx)
5 *
6 * Copyright (C) 2009 NXP Semiconductors
7 * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
8 */
9
10 #include <linux/blkdev.h>
11 #include <linux/clk.h>
12 #include <linux/debugfs.h>
13 #include <linux/device.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/err.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/iopoll.h>
19 #include <linux/ioport.h>
20 #include <linux/module.h>
21 #include <linux/of_address.h>
22 #include <linux/platform_device.h>
23 #include <linux/pm_runtime.h>
24 #include <linux/seq_file.h>
25 #include <linux/slab.h>
26 #include <linux/stat.h>
27 #include <linux/delay.h>
28 #include <linux/irq.h>
29 #include <linux/mmc/card.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/mmc.h>
32 #include <linux/mmc/sd.h>
33 #include <linux/mmc/sdio.h>
34 #include <linux/bitops.h>
35 #include <linux/regulator/consumer.h>
36 #include <linux/of.h>
37 #include <linux/of_gpio.h>
38 #include <linux/mmc/slot-gpio.h>
39 #include <linux/soc/rockchip/rk_sdmmc.h>
40 #include <linux/soc/rockchip/rockchip_decompress.h>
41
42 #include "dw_mmc.h"
43
44 /* Common flag combinations */
45 #define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \
46 SDMMC_INT_HTO | SDMMC_INT_SBE | \
47 SDMMC_INT_EBE | SDMMC_INT_HLE)
48 #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \
49 SDMMC_INT_RESP_ERR | SDMMC_INT_HLE)
50 #define DW_MCI_ERROR_FLAGS (DW_MCI_DATA_ERROR_FLAGS | \
51 DW_MCI_CMD_ERROR_FLAGS)
52 #define DW_MCI_SEND_STATUS 1
53 #define DW_MCI_RECV_STATUS 2
54 #define DW_MCI_DMA_THRESHOLD 16
55
56 #define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */
57 #define DW_MCI_FREQ_MIN 100000 /* unit: HZ */
58
59 #define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \
60 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \
61 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \
62 SDMMC_IDMAC_INT_TI)
63
64 #define DESC_RING_BUF_SZ PAGE_SIZE
65
66 struct idmac_desc_64addr {
67 u32 des0; /* Control Descriptor */
68 #define IDMAC_OWN_CLR64(x) \
69 !((x) & cpu_to_le32(IDMAC_DES0_OWN))
70
71 u32 des1; /* Reserved */
72
73 u32 des2; /* Buffer sizes */
74 #define IDMAC_64ADDR_SET_BUFFER1_SIZE(d, s) \
75 ((d)->des2 = ((d)->des2 & cpu_to_le32(0x03ffe000)) | \
76 ((cpu_to_le32(s)) & cpu_to_le32(0x1fff)))
77
78 u32 des3; /* Reserved */
79
80 u32 des4; /* Lower 32-bits of Buffer Address Pointer 1*/
81 u32 des5; /* Upper 32-bits of Buffer Address Pointer 1*/
82
83 u32 des6; /* Lower 32-bits of Next Descriptor Address */
84 u32 des7; /* Upper 32-bits of Next Descriptor Address */
85 };
86
87 struct idmac_desc {
88 __le32 des0; /* Control Descriptor */
89 #define IDMAC_DES0_DIC BIT(1)
90 #define IDMAC_DES0_LD BIT(2)
91 #define IDMAC_DES0_FD BIT(3)
92 #define IDMAC_DES0_CH BIT(4)
93 #define IDMAC_DES0_ER BIT(5)
94 #define IDMAC_DES0_CES BIT(30)
95 #define IDMAC_DES0_OWN BIT(31)
96
97 __le32 des1; /* Buffer sizes */
98 #define IDMAC_SET_BUFFER1_SIZE(d, s) \
99 ((d)->des1 = ((d)->des1 & cpu_to_le32(0x03ffe000)) | (cpu_to_le32((s) & 0x1fff)))
100
101 __le32 des2; /* buffer 1 physical address */
102
103 __le32 des3; /* buffer 2 physical address */
104 };
105
106 /* Each descriptor can transfer up to 4KB of data in chained mode */
107 #define DW_MCI_DESC_DATA_LENGTH 0x1000
108
109 #if IS_ENABLED(CONFIG_CPU_RV1106)
110 static spinlock_t *g_sdmmc_ispvicap_lock;
111
112 void rv1106_sdmmc_get_lock(void)
113 {
114 if (g_sdmmc_ispvicap_lock)
115 spin_lock(g_sdmmc_ispvicap_lock);
116 }
117 EXPORT_SYMBOL(rv1106_sdmmc_get_lock);
118
119 void rv1106_sdmmc_put_lock(void)
120 {
121 if (g_sdmmc_ispvicap_lock)
122 spin_unlock(g_sdmmc_ispvicap_lock);
123 }
124 EXPORT_SYMBOL(rv1106_sdmmc_put_lock);
125 #endif
126
127 #if defined(CONFIG_DEBUG_FS)
128 static int dw_mci_req_show(struct seq_file *s, void *v)
129 {
130 struct dw_mci_slot *slot = s->private;
131 struct mmc_request *mrq;
132 struct mmc_command *cmd;
133 struct mmc_command *stop;
134 struct mmc_data *data;
135
136 /* Make sure we get a consistent snapshot */
137 spin_lock_bh(&slot->host->lock);
138 mrq = slot->mrq;
139
140 if (mrq) {
141 cmd = mrq->cmd;
142 data = mrq->data;
143 stop = mrq->stop;
144
145 if (cmd)
146 seq_printf(s,
147 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
148 cmd->opcode, cmd->arg, cmd->flags,
149 cmd->resp[0], cmd->resp[1], cmd->resp[2],
150 cmd->resp[3], cmd->error);
151 if (data)
152 seq_printf(s, "DATA %u / %u * %u flg %x err %d\n",
153 data->bytes_xfered, data->blocks,
154 data->blksz, data->flags, data->error);
155 if (stop)
156 seq_printf(s,
157 "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n",
158 stop->opcode, stop->arg, stop->flags,
159 stop->resp[0], stop->resp[1], stop->resp[2],
160 stop->resp[3], stop->error);
161 }
162
163 spin_unlock_bh(&slot->host->lock);
164
165 return 0;
166 }
167 DEFINE_SHOW_ATTRIBUTE(dw_mci_req);
168
169 static int dw_mci_regs_show(struct seq_file *s, void *v)
170 {
171 struct dw_mci *host = s->private;
172
173 pm_runtime_get_sync(host->dev);
174
175 seq_printf(s, "STATUS:\t0x%08x\n", mci_readl(host, STATUS));
176 seq_printf(s, "RINTSTS:\t0x%08x\n", mci_readl(host, RINTSTS));
177 seq_printf(s, "CMD:\t0x%08x\n", mci_readl(host, CMD));
178 seq_printf(s, "CTRL:\t0x%08x\n", mci_readl(host, CTRL));
179 seq_printf(s, "INTMASK:\t0x%08x\n", mci_readl(host, INTMASK));
180 seq_printf(s, "CLKENA:\t0x%08x\n", mci_readl(host, CLKENA));
181
182 pm_runtime_put_autosuspend(host->dev);
183
184 return 0;
185 }
186 DEFINE_SHOW_ATTRIBUTE(dw_mci_regs);
187
188 static void dw_mci_init_debugfs(struct dw_mci_slot *slot)
189 {
190 struct mmc_host *mmc = slot->mmc;
191 struct dw_mci *host = slot->host;
192 struct dentry *root;
193
194 root = mmc->debugfs_root;
195 if (!root)
196 return;
197
198 debugfs_create_file("regs", S_IRUSR, root, host, &dw_mci_regs_fops);
199 debugfs_create_file("req", S_IRUSR, root, slot, &dw_mci_req_fops);
200 debugfs_create_u32("state", S_IRUSR, root, &host->state);
201 debugfs_create_xul("pending_events", S_IRUSR, root,
202 &host->pending_events);
203 debugfs_create_xul("completed_events", S_IRUSR, root,
204 &host->completed_events);
205 }
206 #endif /* defined(CONFIG_DEBUG_FS) */
207
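/* Assert the given reset bits in CTRL and poll until the controller clears them. */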
208 static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
209 {
210 u32 ctrl;
211
212 ctrl = mci_readl(host, CTRL);
213 ctrl |= reset;
214 mci_writel(host, CTRL, ctrl);
215
216 /* wait till resets clear */
217 if (readl_poll_timeout_atomic(host->regs + SDMMC_CTRL, ctrl,
218 !(ctrl & reset),
219 1, 500 * USEC_PER_MSEC)) {
220 dev_err(host->dev,
221 "Timeout resetting block (ctrl reset %#x)\n",
222 ctrl & reset);
223 return false;
224 }
225
226 return true;
227 }
228
229 static void dw_mci_wait_while_busy(struct dw_mci *host, u32 cmd_flags)
230 {
231 u32 status;
232 u32 delay = 10;
233
234 /*
235 * Databook says that before issuing a new data transfer command
236 * we need to check to see if the card is busy. Data transfer commands
237 * all have SDMMC_CMD_PRV_DAT_WAIT set, so we'll key off that.
238 *
239 * ...also allow sending for SDMMC_CMD_VOLT_SWITCH where busy is
240 * expected.
241 */
242 #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC
243 if (host->slot->mmc->caps2 & MMC_CAP2_NO_SD &&
244 host->slot->mmc->caps2 & MMC_CAP2_NO_SDIO)
245 delay = 0;
246 #endif
247 if ((cmd_flags & SDMMC_CMD_PRV_DAT_WAIT) &&
248 !(cmd_flags & SDMMC_CMD_VOLT_SWITCH)) {
249 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
250 status,
251 !(status & SDMMC_STATUS_BUSY),
252 delay, 500 * USEC_PER_MSEC))
253 dev_err(host->dev, "Busy; trying anyway\n");
254 }
255 }
256
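/*
 * Issue a register-programming command (e.g. a clock update) and poll
 * until the controller accepts it (SDMMC_CMD_START self-clears).
 */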
257 static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg)
258 {
259 struct dw_mci *host = slot->host;
260 unsigned int cmd_status = 0;
261
262 mci_writel(host, CMDARG, arg);
263 wmb(); /* drain writebuffer */
264 dw_mci_wait_while_busy(host, cmd);
265 mci_writel(host, CMD, SDMMC_CMD_START | cmd);
266
267 if (readl_poll_timeout_atomic(host->regs + SDMMC_CMD, cmd_status,
268 !(cmd_status & SDMMC_CMD_START),
269 1, 500 * USEC_PER_MSEC))
270 dev_err(&slot->mmc->class_dev,
271 "Timeout sending command (cmd %#x arg %#x status %#x)\n",
272 cmd, arg, cmd_status);
273 }
274
275 static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
276 {
277 struct dw_mci_slot *slot = mmc_priv(mmc);
278 struct dw_mci *host = slot->host;
279 u32 cmdr;
280
281 cmd->error = -EINPROGRESS;
282 cmdr = cmd->opcode;
283
284 if (cmd->opcode == MMC_STOP_TRANSMISSION ||
285 cmd->opcode == MMC_GO_IDLE_STATE ||
286 cmd->opcode == MMC_GO_INACTIVE_STATE ||
287 (cmd->opcode == SD_IO_RW_DIRECT &&
288 ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
289 cmdr |= SDMMC_CMD_STOP;
290 else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
291 cmdr |= SDMMC_CMD_PRV_DAT_WAIT;
292
293 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
294 u32 clk_en_a;
295
296 /* Special bit makes CMD11 not die */
297 cmdr |= SDMMC_CMD_VOLT_SWITCH;
298
299 /* Change state to continue to handle CMD11 weirdness */
300 WARN_ON(slot->host->state != STATE_SENDING_CMD);
301 slot->host->state = STATE_SENDING_CMD11;
302
303 /*
304 * We need to disable low power mode (automatic clock stop)
305 * while doing voltage switch so we don't confuse the card,
306 * since stopping the clock is a specific part of the UHS
307 * voltage change dance.
308 *
309 * Note that low power mode (SDMMC_CLKEN_LOW_PWR) will be
310 * unconditionally turned back on in dw_mci_setup_bus() if it's
311 * ever called with a non-zero clock. That shouldn't happen
312 * until the voltage change is all done.
313 */
314 clk_en_a = mci_readl(host, CLKENA);
315 clk_en_a &= ~(SDMMC_CLKEN_LOW_PWR << slot->id);
316 mci_writel(host, CLKENA, clk_en_a);
317 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
318 SDMMC_CMD_PRV_DAT_WAIT, 0);
319 }
320
321 if (cmd->flags & MMC_RSP_PRESENT) {
322 /* We expect a response, so set this bit */
323 cmdr |= SDMMC_CMD_RESP_EXP;
324 if (cmd->flags & MMC_RSP_136)
325 cmdr |= SDMMC_CMD_RESP_LONG;
326 }
327
328 if (cmd->flags & MMC_RSP_CRC)
329 cmdr |= SDMMC_CMD_RESP_CRC;
330
331 if (cmd->data) {
332 cmdr |= SDMMC_CMD_DAT_EXP;
333 if (cmd->data->flags & MMC_DATA_WRITE)
334 cmdr |= SDMMC_CMD_DAT_WR;
335 }
336
337 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
338 cmdr |= SDMMC_CMD_USE_HOLD_REG;
339
340 return cmdr;
341 }
342
343 static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd)
344 {
345 struct mmc_command *stop;
346 u32 cmdr;
347
348 if (!cmd->data)
349 return 0;
350
351 stop = &host->stop_abort;
352 cmdr = cmd->opcode;
353 memset(stop, 0, sizeof(struct mmc_command));
354
355 if (cmdr == MMC_READ_SINGLE_BLOCK ||
356 cmdr == MMC_READ_MULTIPLE_BLOCK ||
357 cmdr == MMC_WRITE_BLOCK ||
358 cmdr == MMC_WRITE_MULTIPLE_BLOCK ||
359 cmdr == MMC_SEND_TUNING_BLOCK ||
360 cmdr == MMC_SEND_TUNING_BLOCK_HS200) {
361 stop->opcode = MMC_STOP_TRANSMISSION;
362 stop->arg = 0;
363 stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
364 } else if (cmdr == SD_IO_RW_EXTENDED) {
365 stop->opcode = SD_IO_RW_DIRECT;
366 stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) |
367 ((cmd->arg >> 28) & 0x7);
368 stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC;
369 } else {
370 return 0;
371 }
372
373 cmdr = stop->opcode | SDMMC_CMD_STOP |
374 SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP;
375
376 if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &host->slot->flags))
377 cmdr |= SDMMC_CMD_USE_HOLD_REG;
378
379 return cmdr;
380 }
381
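/*
 * Arm the software command-timeout timer, derived from the response timeout
 * field in TMOUT and the current clock divider, plus a small margin.
 */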
382 static inline void dw_mci_set_cto(struct dw_mci *host)
383 {
384 unsigned int cto_clks;
385 unsigned int cto_div;
386 unsigned int cto_ms;
387 unsigned long irqflags;
388
389 cto_clks = mci_readl(host, TMOUT) & 0xff;
390 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
391 if (cto_div == 0)
392 cto_div = 1;
393
394 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
395 host->bus_hz);
396
397 /* add a bit of spare time */
398 cto_ms += 10;
399
400 /*
401 * The durations we're working with are fairly short so we have to be
402 * extra careful about synchronization here. Specifically in hardware a
403 * command timeout is _at most_ 5.1 ms, so that means we expect an
404 * interrupt (either command done or timeout) to come rather quickly
405 * after the mci_writel. ...but just in case we have a long interrupt
406 * latency let's add a bit of paranoia.
407 *
408 * In general we'll assume that at least an interrupt will be asserted
409 * in hardware by the time the cto_timer runs. ...and if it hasn't
410 * been asserted in hardware by that time then we'll assume it'll never
411 * come.
412 */
413 spin_lock_irqsave(&host->irq_lock, irqflags);
414 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
415 mod_timer(&host->cto_timer,
416 jiffies + msecs_to_jiffies(cto_ms) + 1);
417 spin_unlock_irqrestore(&host->irq_lock, irqflags);
418 }
419
420 static void dw_mci_start_command(struct dw_mci *host,
421 struct mmc_command *cmd, u32 cmd_flags)
422 {
423 host->cmd = cmd;
424 dev_vdbg(host->dev,
425 "start command: ARGR=0x%08x CMDR=0x%08x\n",
426 cmd->arg, cmd_flags);
427
428 mci_writel(host, CMDARG, cmd->arg);
429 wmb(); /* drain writebuffer */
430 dw_mci_wait_while_busy(host, cmd_flags);
431
432 mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
433
434 /* arm the command-timeout timer only for commands expecting a response */
435 if (cmd_flags & SDMMC_CMD_RESP_EXP)
436 dw_mci_set_cto(host);
437 }
438
439 static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data)
440 {
441 struct mmc_command *stop = &host->stop_abort;
442
443 dw_mci_start_command(host, stop, host->stop_cmdr);
444 }
445
446 /* DMA interface functions */
447 static void dw_mci_stop_dma(struct dw_mci *host)
448 {
449 if (host->using_dma) {
450 host->dma_ops->stop(host);
451 host->dma_ops->cleanup(host);
452 }
453
454 /* Data transfer was stopped by the interrupt handler */
455 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
456 }
457
458 static void dw_mci_dma_cleanup(struct dw_mci *host)
459 {
460 struct mmc_data *data = host->data;
461
462 if (data && data->host_cookie == COOKIE_MAPPED) {
463 dma_unmap_sg(host->dev,
464 data->sg,
465 data->sg_len,
466 mmc_get_dma_dir(data));
467 data->host_cookie = COOKIE_UNMAPPED;
468 }
469 }
470
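/* Issue a software reset of the internal DMAC via the BMOD register. */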
471 static void dw_mci_idmac_reset(struct dw_mci *host)
472 {
473 u32 bmod = mci_readl(host, BMOD);
474 /* Software reset of DMA */
475 bmod |= SDMMC_IDMAC_SWRESET;
476 mci_writel(host, BMOD, bmod);
477 }
478
479 static void dw_mci_idmac_stop_dma(struct dw_mci *host)
480 {
481 u32 temp;
482
483 /* Disable and reset the IDMAC interface */
484 temp = mci_readl(host, CTRL);
485 if (!host->is_rv1106_sd)
486 temp &= ~SDMMC_CTRL_USE_IDMAC;
487
488 temp |= SDMMC_CTRL_DMA_RESET;
489 mci_writel(host, CTRL, temp);
490
491 /* Stop the IDMAC running */
492 temp = mci_readl(host, BMOD);
493 if (host->is_rv1106_sd) {
494 temp |= SDMMC_IDMAC_SWRESET;
495 } else {
496 temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB);
497 temp |= SDMMC_IDMAC_SWRESET;
498 }
499 mci_writel(host, BMOD, temp);
500 }
501
502 static void dw_mci_dmac_complete_dma(void *arg)
503 {
504 struct dw_mci *host = arg;
505 struct mmc_data *data = host->data;
506
507 dev_vdbg(host->dev, "DMA complete\n");
508
509 if ((host->use_dma == TRANS_MODE_EDMAC) &&
510 data && (data->flags & MMC_DATA_READ))
511 /* Invalidate cache after read */
512 dma_sync_sg_for_cpu(mmc_dev(host->slot->mmc),
513 data->sg,
514 data->sg_len,
515 DMA_FROM_DEVICE);
516
517 host->dma_ops->cleanup(host);
518
519 /*
520 * If the card was removed, data will be NULL. No point in trying to
521 * send the stop command or waiting for NBUSY in this case.
522 */
523 if (data) {
524 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
525 tasklet_schedule(&host->tasklet);
526 }
527
528 if (host->need_xfer_timer &&
529 host->dir_status == DW_MCI_RECV_STATUS)
530 del_timer(&host->xfer_timer);
531 }
532
533 static int dw_mci_idmac_init(struct dw_mci *host)
534 {
535 int i;
536
537 if (host->dma_64bit_address == 1) {
538 struct idmac_desc_64addr *p;
539 /* Number of descriptors in the ring buffer */
540 host->ring_size =
541 DESC_RING_BUF_SZ / sizeof(struct idmac_desc_64addr);
542
543 /* Forward link the descriptor list */
544 for (i = 0, p = host->sg_cpu; i < host->ring_size - 1;
545 i++, p++) {
546 p->des6 = (host->sg_dma +
547 (sizeof(struct idmac_desc_64addr) *
548 (i + 1))) & 0xffffffff;
549
550 p->des7 = (u64)(host->sg_dma +
551 (sizeof(struct idmac_desc_64addr) *
552 (i + 1))) >> 32;
553 /* Initialize reserved and buffer size fields to "0" */
554 p->des0 = 0;
555 p->des1 = 0;
556 p->des2 = 0;
557 p->des3 = 0;
558 }
559
560 /* Set the last descriptor as the end-of-ring descriptor */
561 p->des6 = host->sg_dma & 0xffffffff;
562 p->des7 = (u64)host->sg_dma >> 32;
563 p->des0 = IDMAC_DES0_ER;
564
565 } else {
566 struct idmac_desc *p;
567 /* Number of descriptors in the ring buffer */
568 host->ring_size =
569 DESC_RING_BUF_SZ / sizeof(struct idmac_desc);
570
571 /* Forward link the descriptor list */
572 for (i = 0, p = host->sg_cpu;
573 i < host->ring_size - 1;
574 i++, p++) {
575 p->des3 = cpu_to_le32(host->sg_dma +
576 (sizeof(struct idmac_desc) * (i + 1)));
577 p->des0 = 0;
578 p->des1 = 0;
579 }
580
581 /* Set the last descriptor as the end-of-ring descriptor */
582 p->des3 = cpu_to_le32(host->sg_dma);
583 p->des0 = cpu_to_le32(IDMAC_DES0_ER);
584 }
585
586 dw_mci_idmac_reset(host);
587
588 if (host->dma_64bit_address == 1) {
589 /* Mask out interrupts - get Tx & Rx complete only */
590 mci_writel(host, IDSTS64, IDMAC_INT_CLR);
591 mci_writel(host, IDINTEN64, SDMMC_IDMAC_INT_NI |
592 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
593
594 /* Set the descriptor base address */
595 mci_writel(host, DBADDRL, host->sg_dma & 0xffffffff);
596 mci_writel(host, DBADDRU, (u64)host->sg_dma >> 32);
597
598 } else {
599 /* Mask out interrupts - get Tx & Rx complete only */
600 mci_writel(host, IDSTS, IDMAC_INT_CLR);
601 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI |
602 SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI);
603
604 /* Set the descriptor base address */
605 mci_writel(host, DBADDR, host->sg_dma);
606 }
607
608 return 0;
609 }
610
611 static inline int dw_mci_prepare_desc64(struct dw_mci *host,
612 struct mmc_data *data,
613 unsigned int sg_len)
614 {
615 unsigned int desc_len;
616 struct idmac_desc_64addr *desc_first, *desc_last, *desc;
617 u32 val;
618 int i;
619
620 desc_first = desc_last = desc = host->sg_cpu;
621
622 for (i = 0; i < sg_len; i++) {
623 unsigned int length = sg_dma_len(&data->sg[i]);
624
625 u64 mem_addr = sg_dma_address(&data->sg[i]);
626
627 for ( ; length ; desc++) {
628 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
629 length : DW_MCI_DESC_DATA_LENGTH;
630
631 length -= desc_len;
632
633 /*
634 * Wait for the former clear OWN bit operation
635 * of IDMAC to make sure that this descriptor
636 * isn't still owned by IDMAC as IDMAC's write
637 * ops and CPU's read ops are asynchronous.
638 */
639 if (readl_poll_timeout_atomic(&desc->des0, val,
640 !(val & IDMAC_DES0_OWN),
641 10, 100 * USEC_PER_MSEC))
642 goto err_own_bit;
643
644 /*
645 * Set the OWN bit and disable interrupts
646 * for this descriptor
647 */
648 desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC |
649 IDMAC_DES0_CH;
650
651 /* Buffer length */
652 IDMAC_64ADDR_SET_BUFFER1_SIZE(desc, desc_len);
653
654 /* Physical address to DMA to/from */
655 desc->des4 = mem_addr & 0xffffffff;
656 desc->des5 = mem_addr >> 32;
657
658 /* Update physical address for the next desc */
659 mem_addr += desc_len;
660
661 /* Save pointer to the last descriptor */
662 desc_last = desc;
663 }
664 }
665
666 /* Set first descriptor */
667 desc_first->des0 |= IDMAC_DES0_FD;
668
669 /* Set last descriptor */
670 desc_last->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC);
671 desc_last->des0 |= IDMAC_DES0_LD;
672
673 return 0;
674 err_own_bit:
675 /* restore the descriptor chain as it's polluted */
676 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
677 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
678 dw_mci_idmac_init(host);
679 return -EINVAL;
680 }
681
682
683 static inline int dw_mci_prepare_desc32(struct dw_mci *host,
684 struct mmc_data *data,
685 unsigned int sg_len)
686 {
687 unsigned int desc_len;
688 struct idmac_desc *desc_first, *desc_last, *desc;
689 u32 val;
690 int i;
691
692 desc_first = desc_last = desc = host->sg_cpu;
693
694 for (i = 0; i < sg_len; i++) {
695 unsigned int length = sg_dma_len(&data->sg[i]);
696
697 u32 mem_addr = sg_dma_address(&data->sg[i]);
698
699 for ( ; length ; desc++) {
700 desc_len = (length <= DW_MCI_DESC_DATA_LENGTH) ?
701 length : DW_MCI_DESC_DATA_LENGTH;
702
703 length -= desc_len;
704
705 /*
706 * Wait for the former clear OWN bit operation
707 * of IDMAC to make sure that this descriptor
708 * isn't still owned by IDMAC as IDMAC's write
709 * ops and CPU's read ops are asynchronous.
710 */
711 if (readl_poll_timeout_atomic(&desc->des0, val,
712 IDMAC_OWN_CLR64(val),
713 10,
714 100 * USEC_PER_MSEC))
715 goto err_own_bit;
716
717 /*
718 * Set the OWN bit and disable interrupts
719 * for this descriptor
720 */
721 desc->des0 = cpu_to_le32(IDMAC_DES0_OWN |
722 IDMAC_DES0_DIC |
723 IDMAC_DES0_CH);
724
725 /* Buffer length */
726 IDMAC_SET_BUFFER1_SIZE(desc, desc_len);
727
728 /* Physical address to DMA to/from */
729 desc->des2 = cpu_to_le32(mem_addr);
730
731 /* Update physical address for the next desc */
732 mem_addr += desc_len;
733
734 /* Save pointer to the last descriptor */
735 desc_last = desc;
736 }
737 }
738
739 if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE)) {
740 desc->des0 = desc_last->des0;
741 desc->des2 = desc_last->des2;
742 desc->des1 = 0x8; /* Dummy size value for the extra trailing descriptor */
743 desc_last = desc;
744 }
745
746 /* Set first descriptor */
747 desc_first->des0 |= cpu_to_le32(IDMAC_DES0_FD);
748
749 /* Set last descriptor */
750 desc_last->des0 &= cpu_to_le32(~(IDMAC_DES0_CH |
751 IDMAC_DES0_DIC));
752 desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD);
753
754 return 0;
755 err_own_bit:
756 /* restore the descriptor chain as it's polluted */
757 dev_dbg(host->dev, "descriptor is still owned by IDMAC.\n");
758 memset(host->sg_cpu, 0, DESC_RING_BUF_SZ);
759 dw_mci_idmac_init(host);
760 return -EINVAL;
761 }
762
763 static int dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len)
764 {
765 u32 temp;
766 int ret;
767
768 if (host->dma_64bit_address == 1)
769 ret = dw_mci_prepare_desc64(host, host->data, sg_len);
770 else
771 ret = dw_mci_prepare_desc32(host, host->data, sg_len);
772
773 if (ret)
774 goto out;
775
776 /* drain writebuffer */
777 wmb();
778
779 /* Make sure to reset DMA in case we did PIO before this */
780 dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET);
781 dw_mci_idmac_reset(host);
782
783 /* Select IDMAC interface */
784 temp = mci_readl(host, CTRL);
785 temp |= SDMMC_CTRL_USE_IDMAC;
786 mci_writel(host, CTRL, temp);
787
788 /* drain writebuffer */
789 wmb();
790
791 /* Enable the IDMAC */
792 temp = mci_readl(host, BMOD);
793 temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB;
794 mci_writel(host, BMOD, temp);
795
796 /* Start it running */
797 mci_writel(host, PLDMND, 1);
798
799 out:
800 return ret;
801 }
802
803 static const struct dw_mci_dma_ops dw_mci_idmac_ops = {
804 .init = dw_mci_idmac_init,
805 .start = dw_mci_idmac_start_dma,
806 .stop = dw_mci_idmac_stop_dma,
807 .complete = dw_mci_dmac_complete_dma,
808 .cleanup = dw_mci_dma_cleanup,
809 };
810
811 static void dw_mci_edmac_stop_dma(struct dw_mci *host)
812 {
813 dmaengine_terminate_async(host->dms->ch);
814 }
815
816 static int dw_mci_edmac_start_dma(struct dw_mci *host,
817 unsigned int sg_len)
818 {
819 struct dma_slave_config cfg;
820 struct dma_async_tx_descriptor *desc = NULL;
821 struct scatterlist *sgl = host->data->sg;
822 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
823 u32 sg_elems = host->data->sg_len;
824 u32 fifoth_val;
825 u32 fifo_offset = host->fifo_reg - host->regs;
826 int ret = 0;
827
828 /* Set external dma config: burst size, burst width */
829 memset(&cfg, 0, sizeof(cfg));
830 cfg.dst_addr = host->phy_regs + fifo_offset;
831 cfg.src_addr = cfg.dst_addr;
832 cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
833 cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
834
835 /* Match burst msize with external dma config */
836 fifoth_val = mci_readl(host, FIFOTH);
837 cfg.dst_maxburst = mszs[(fifoth_val >> 28) & 0x7];
838 cfg.src_maxburst = cfg.dst_maxburst;
839
840 if (host->data->flags & MMC_DATA_WRITE)
841 cfg.direction = DMA_MEM_TO_DEV;
842 else
843 cfg.direction = DMA_DEV_TO_MEM;
844
845 ret = dmaengine_slave_config(host->dms->ch, &cfg);
846 if (ret) {
847 dev_err(host->dev, "Failed to config edmac.\n");
848 return -EBUSY;
849 }
850
851 desc = dmaengine_prep_slave_sg(host->dms->ch, sgl,
852 sg_len, cfg.direction,
853 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
854 if (!desc) {
855 dev_err(host->dev, "Can't prepare slave sg.\n");
856 return -EBUSY;
857 }
858
859 /* Set dw_mci_dmac_complete_dma as callback */
860 desc->callback = dw_mci_dmac_complete_dma;
861 desc->callback_param = (void *)host;
862 dmaengine_submit(desc);
863
864 /* Flush cache before write */
865 if (host->data->flags & MMC_DATA_WRITE)
866 dma_sync_sg_for_device(mmc_dev(host->slot->mmc), sgl,
867 sg_elems, DMA_TO_DEVICE);
868
869 dma_async_issue_pending(host->dms->ch);
870
871 return 0;
872 }
873
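/* Request the external "rx-tx" DMA channel used by the eDMAC interface. */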
874 static int dw_mci_edmac_init(struct dw_mci *host)
875 {
876 /* Request external dma channel */
877 host->dms = kzalloc(sizeof(struct dw_mci_dma_slave), GFP_KERNEL);
878 if (!host->dms)
879 return -ENOMEM;
880
881 host->dms->ch = dma_request_chan(host->dev, "rx-tx");
882 if (IS_ERR(host->dms->ch)) {
883 int ret = PTR_ERR(host->dms->ch);
884
885 dev_err(host->dev, "Failed to get external DMA channel.\n");
886 kfree(host->dms);
887 host->dms = NULL;
888 return ret;
889 }
890
891 return 0;
892 }
893
894 static void dw_mci_edmac_exit(struct dw_mci *host)
895 {
896 if (host->dms) {
897 if (host->dms->ch) {
898 dma_release_channel(host->dms->ch);
899 host->dms->ch = NULL;
900 }
901 kfree(host->dms);
902 host->dms = NULL;
903 }
904 }
905
906 static const struct dw_mci_dma_ops dw_mci_edmac_ops = {
907 .init = dw_mci_edmac_init,
908 .exit = dw_mci_edmac_exit,
909 .start = dw_mci_edmac_start_dma,
910 .stop = dw_mci_edmac_stop_dma,
911 .complete = dw_mci_dmac_complete_dma,
912 .cleanup = dw_mci_dma_cleanup,
913 };
914
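/*
 * Map the request's scatterlist for DMA. Returns the number of mapped
 * entries, or a negative error if the transfer is too short or not
 * 32-bit aligned and must be done in PIO mode instead.
 */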
915 static int dw_mci_pre_dma_transfer(struct dw_mci *host,
916 struct mmc_data *data,
917 int cookie)
918 {
919 struct scatterlist *sg;
920 unsigned int i, sg_len;
921
922 if (data->host_cookie == COOKIE_PRE_MAPPED)
923 return data->sg_len;
924
925 /*
926 * We don't do DMA on "complex" transfers, i.e. with
927 * non-word-aligned buffers or lengths. Also, we don't bother
928 * with all the DMA setup overhead for short transfers.
929 */
930 if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD && !host->is_rv1106_sd)
931 return -EINVAL;
932
933 if (data->blksz & 3)
934 return -EINVAL;
935
936 for_each_sg(data->sg, sg, data->sg_len, i) {
937 if (sg->offset & 3 || sg->length & 3)
938 return -EINVAL;
939 }
940
941 sg_len = dma_map_sg(host->dev,
942 data->sg,
943 data->sg_len,
944 mmc_get_dma_dir(data));
945 if (sg_len == 0)
946 return -EINVAL;
947
948 data->host_cookie = cookie;
949
950 return sg_len;
951 }
952
953 static void dw_mci_pre_req(struct mmc_host *mmc,
954 struct mmc_request *mrq)
955 {
956 struct dw_mci_slot *slot = mmc_priv(mmc);
957 struct mmc_data *data = mrq->data;
958
959 if (!slot->host->use_dma || !data)
960 return;
961
962 /* This data might be unmapped at this time */
963 data->host_cookie = COOKIE_UNMAPPED;
964
965 if (dw_mci_pre_dma_transfer(slot->host, mrq->data,
966 COOKIE_PRE_MAPPED) < 0)
967 data->host_cookie = COOKIE_UNMAPPED;
968 }
969
970 static void dw_mci_post_req(struct mmc_host *mmc,
971 struct mmc_request *mrq,
972 int err)
973 {
974 struct dw_mci_slot *slot = mmc_priv(mmc);
975 struct mmc_data *data = mrq->data;
976
977 if (!slot->host->use_dma || !data)
978 return;
979
980 if (data->host_cookie != COOKIE_UNMAPPED)
981 dma_unmap_sg(slot->host->dev,
982 data->sg,
983 data->sg_len,
984 mmc_get_dma_dir(data));
985 data->host_cookie = COOKIE_UNMAPPED;
986 }
987
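/*
 * Report card presence: assume present for polled or non-removable hosts,
 * otherwise use the card-detect GPIO or the controller's CDETECT register.
 */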
988 static int dw_mci_get_cd(struct mmc_host *mmc)
989 {
990 int present;
991 struct dw_mci_slot *slot = mmc_priv(mmc);
992 struct dw_mci *host = slot->host;
993 int gpio_cd = mmc_gpio_get_cd(mmc);
994
995 /* Use platform get_cd function, else try onboard card detect */
996 if (((mmc->caps & MMC_CAP_NEEDS_POLL)
997 || !mmc_card_is_removable(mmc))) {
998 present = 1;
999
1000 if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
1001 if (mmc->caps & MMC_CAP_NEEDS_POLL) {
1002 dev_info(&mmc->class_dev,
1003 "card is polling.\n");
1004 } else {
1005 dev_info(&mmc->class_dev,
1006 "card is non-removable.\n");
1007 }
1008 set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
1009 }
1010
1011 return present;
1012 } else if (gpio_cd >= 0)
1013 present = gpio_cd;
1014 else
1015 present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
1016 == 0 ? 1 : 0;
1017
1018 spin_lock_bh(&host->lock);
1019 if (present && !test_and_set_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1020 dev_dbg(&mmc->class_dev, "card is present\n");
1021 else if (!present &&
1022 !test_and_clear_bit(DW_MMC_CARD_PRESENT, &slot->flags))
1023 dev_dbg(&mmc->class_dev, "card is not present\n");
1024 spin_unlock_bh(&host->lock);
1025
1026 return present;
1027 }
1028
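/*
 * Pick the largest burst size (MSIZE) that evenly divides both the block
 * size (in FIFO words) and the space above the TX watermark, then program
 * FIFOTH accordingly.
 */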
1029 static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
1030 {
1031 unsigned int blksz = data->blksz;
1032 static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
1033 u32 fifo_width = 1 << host->data_shift;
1034 u32 blksz_depth = blksz / fifo_width, fifoth_val;
1035 u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
1036 int idx = ARRAY_SIZE(mszs) - 1;
1037
1038 /* PIO should skip this scenario */
1039 if (!host->use_dma)
1040 return;
1041
1042 tx_wmark = (host->fifo_depth) / 2;
1043 tx_wmark_invers = host->fifo_depth - tx_wmark;
1044
1045 /*
1046 * MSIZE is '1',
1047 * if blksz is not a multiple of the FIFO width
1048 */
1049 if (blksz % fifo_width)
1050 goto done;
1051
1052 do {
1053 if (!((blksz_depth % mszs[idx]) ||
1054 (tx_wmark_invers % mszs[idx]))) {
1055 msize = idx;
1056 rx_wmark = mszs[idx] - 1;
1057 break;
1058 }
1059 } while (--idx > 0);
1060 /*
1061 * If idx is '0', it won't be tried.
1062 * Thus, the initial values are used.
1063 */
1064 done:
1065 fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
1066 mci_writel(host, FIFOTH, fifoth_val);
1067 }
1068
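/*
 * Program the card read/write threshold (CDTHRCTL): reads for
 * HS200/SDR104/HS400, writes for HS400 only (and only on >= 2.80a);
 * otherwise the feature is disabled.
 */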
1069 static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1070 {
1071 unsigned int blksz = data->blksz;
1072 u32 blksz_depth, fifo_depth;
1073 u16 thld_size;
1074 u8 enable;
1075
1076 /*
1077 * CDTHRCTL doesn't exist prior to 240A (in fact that register offset is
1078 * in the FIFO region, so we really shouldn't access it).
1079 */
1080 if (host->verid < DW_MMC_240A ||
1081 (host->verid < DW_MMC_280A && data->flags & MMC_DATA_WRITE))
1082 return;
1083
1084 /*
1085 * Card write Threshold is introduced since 2.80a
1086 * It's used when HS400 mode is enabled.
1087 */
1088 if (data->flags & MMC_DATA_WRITE &&
1089 host->timing != MMC_TIMING_MMC_HS400)
1090 goto disable;
1091
1092 if (data->flags & MMC_DATA_WRITE)
1093 enable = SDMMC_CARD_WR_THR_EN;
1094 else
1095 enable = SDMMC_CARD_RD_THR_EN;
1096
1097 if (host->timing != MMC_TIMING_MMC_HS200 &&
1098 host->timing != MMC_TIMING_UHS_SDR104 &&
1099 host->timing != MMC_TIMING_MMC_HS400)
1100 goto disable;
1101
1102 blksz_depth = blksz / (1 << host->data_shift);
1103 fifo_depth = host->fifo_depth;
1104
1105 if (blksz_depth > fifo_depth)
1106 goto disable;
1107
1108 /*
1109 * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz'
1110 * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz
1111 * Currently just choose blksz.
1112 */
1113 thld_size = blksz;
1114 mci_writel(host, CDTHRCTL, SDMMC_SET_THLD(thld_size, enable));
1115 return;
1116
1117 disable:
1118 mci_writel(host, CDTHRCTL, 0);
1119 }
1120
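/*
 * Try to set up the transfer for DMA. Returns 0 on success or a negative
 * error, in which case the caller falls back to PIO.
 */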
1121 static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
1122 {
1123 unsigned long irqflags;
1124 int sg_len;
1125 u32 temp;
1126
1127 host->using_dma = 0;
1128
1129 /* If we don't have a channel, we can't do DMA */
1130 if (!host->use_dma)
1131 return -ENODEV;
1132
1133 sg_len = dw_mci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1134 if (sg_len < 0) {
1135 host->dma_ops->stop(host);
1136 return sg_len;
1137 }
1138
1139 host->using_dma = 1;
1140
1141 if (host->use_dma == TRANS_MODE_IDMAC)
1142 dev_vdbg(host->dev,
1143 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
1144 (unsigned long)host->sg_cpu,
1145 (unsigned long)host->sg_dma,
1146 sg_len);
1147
1148 /*
1149 * Decide the MSIZE and RX/TX Watermark.
1150 * If the current block size is the same as the previous one,
1151 * there is no need to update FIFOTH.
1152 */
1153 if (host->prev_blksz != data->blksz)
1154 dw_mci_adjust_fifoth(host, data);
1155
1156 /* Enable the DMA interface */
1157 temp = mci_readl(host, CTRL);
1158 temp |= SDMMC_CTRL_DMA_ENABLE;
1159 mci_writel(host, CTRL, temp);
1160
1161 /* Disable RX/TX IRQs, let DMA handle it */
1162 spin_lock_irqsave(&host->irq_lock, irqflags);
1163 temp = mci_readl(host, INTMASK);
1164 temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
1165 mci_writel(host, INTMASK, temp);
1166 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1167
1168 if (host->dma_ops->start(host, sg_len)) {
1169 host->dma_ops->stop(host);
1170 /* We can't do DMA, try PIO for this one */
1171 dev_dbg(host->dev,
1172 "%s: fall back to PIO mode for current transfer\n",
1173 __func__);
1174 return -ENODEV;
1175 }
1176
1177 return 0;
1178 }
1179
1180 static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
1181 {
1182 unsigned long irqflags;
1183 int flags = SG_MITER_ATOMIC;
1184 u32 temp;
1185
1186 data->error = -EINPROGRESS;
1187
1188 WARN_ON(host->data);
1189 host->sg = NULL;
1190 host->data = data;
1191
1192 if (data->flags & MMC_DATA_READ)
1193 host->dir_status = DW_MCI_RECV_STATUS;
1194 else
1195 host->dir_status = DW_MCI_SEND_STATUS;
1196
1197 dw_mci_ctrl_thld(host, data);
1198
1199 if (dw_mci_submit_data_dma(host, data)) {
1200 if (host->data->flags & MMC_DATA_READ)
1201 flags |= SG_MITER_TO_SG;
1202 else
1203 flags |= SG_MITER_FROM_SG;
1204
1205 sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1206 host->sg = data->sg;
1207 host->part_buf_start = 0;
1208 host->part_buf_count = 0;
1209
1210 mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
1211
1212 spin_lock_irqsave(&host->irq_lock, irqflags);
1213 temp = mci_readl(host, INTMASK);
1214 temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
1215 mci_writel(host, INTMASK, temp);
1216 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1217
1218 temp = mci_readl(host, CTRL);
1219 temp &= ~SDMMC_CTRL_DMA_ENABLE;
1220 mci_writel(host, CTRL, temp);
1221
1222 /*
1223 * Use the initial fifoth_val for PIO mode. If wm_aligned
1224 * is set, we set the watermark to match the data size.
1225 * If the next transfer may be done in DMA mode,
1226 * prev_blksz should be invalidated.
1227 */
1228 if (host->wm_aligned)
1229 dw_mci_adjust_fifoth(host, data);
1230 else
1231 mci_writel(host, FIFOTH, host->fifoth_val);
1232 host->prev_blksz = 0;
1233 } else {
1234 /*
1235 * Keep the current block size.
1236 * It will be used to decide whether to update
1237 * fifoth register next time.
1238 */
1239 host->prev_blksz = data->blksz;
1240 }
1241 }
1242
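/*
 * Program the card clock (CLKDIV/CLKENA) and bus width for this slot,
 * informing the CIU of each change with an update-clock command.
 */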
1243 static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
1244 {
1245 struct dw_mci *host = slot->host;
1246 unsigned int clock = slot->clock;
1247 u32 div;
1248 u32 clk_en_a;
1249 u32 sdmmc_cmd_bits = SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT;
1250
1251 /* We must continue to set bit 28 in CMD until the change is complete */
1252 if (host->state == STATE_WAITING_CMD11_DONE)
1253 sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH;
1254
1255 slot->mmc->actual_clock = 0;
1256
1257 if (!clock) {
1258 mci_writel(host, CLKENA, 0);
1259 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1260 } else if (clock != host->current_speed || force_clkinit) {
1261 div = host->bus_hz / clock;
1262 if (host->bus_hz % clock && host->bus_hz > clock)
1263 /*
1264 * move the + 1 after the divide to prevent
1265 * over-clocking the card.
1266 */
1267 div += 1;
1268
1269 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
1270
1271 if ((clock != slot->__clk_old &&
1272 !test_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags)) ||
1273 force_clkinit) {
1274 /* Silence the verbose log if called from a PM context */
1275 if (!force_clkinit)
1276 dev_info(&slot->mmc->class_dev,
1277 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
1278 slot->id, host->bus_hz, clock,
1279 div ? ((host->bus_hz / div) >> 1) :
1280 host->bus_hz, div);
1281
1282 /*
1283 * If the card is being polled, display the message
1284 * only once, at boot time.
1285 */
1286 if (slot->mmc->caps & MMC_CAP_NEEDS_POLL &&
1287 slot->mmc->f_min == clock)
1288 set_bit(DW_MMC_CARD_NEEDS_POLL, &slot->flags);
1289 }
1290
1291 /* disable clock */
1292 mci_writel(host, CLKENA, 0);
1293 mci_writel(host, CLKSRC, 0);
1294
1295 /* inform CIU */
1296 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1297
1298 /* set clock to desired speed */
1299 mci_writel(host, CLKDIV, div);
1300
1301 /* inform CIU */
1302 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1303
1304 /* enable clock; only low power if no SDIO */
1305 clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
1306 if (!test_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags))
1307 clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
1308 mci_writel(host, CLKENA, clk_en_a);
1309
1310 /* inform CIU */
1311 mci_send_cmd(slot, sdmmc_cmd_bits, 0);
1312
1313 /* keep the last clock value that was requested from core */
1314 slot->__clk_old = clock;
1315 slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) :
1316 host->bus_hz;
1317 }
1318
1319 host->current_speed = clock;
1320
1321 /* Set the current slot bus width */
1322 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1323 }
1324
1325 static void __dw_mci_start_request(struct dw_mci *host,
1326 struct dw_mci_slot *slot,
1327 struct mmc_command *cmd)
1328 {
1329 struct mmc_request *mrq;
1330 struct mmc_data *data;
1331 u32 cmdflags;
1332
1333 mrq = slot->mrq;
1334
1335 host->mrq = mrq;
1336
1337 host->pending_events = 0;
1338 host->completed_events = 0;
1339 host->cmd_status = 0;
1340 host->data_status = 0;
1341 host->dir_status = 0;
1342
1343 if (host->is_rv1106_sd)
1344 mci_writel(host, CTYPE, (slot->ctype << slot->id));
1345
1346 data = cmd->data;
1347 if (data) {
1348 mci_writel(host, TMOUT, 0xFFFFFFFF);
1349 if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE))
1350 mci_writel(host, BYTCNT, 0);
1351 else
1352 mci_writel(host, BYTCNT, data->blksz*data->blocks);
1353 mci_writel(host, BLKSIZ, data->blksz);
1354 }
1355
1356 cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
1357
1358 /* this is the first command, send the initialization clock */
1359 if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, &slot->flags))
1360 cmdflags |= SDMMC_CMD_INIT;
1361
1362 if (data) {
1363 dw_mci_submit_data(host, data);
1364 wmb(); /* drain writebuffer */
1365 }
1366
1367 dw_mci_start_command(host, cmd, cmdflags);
1368
1369 if (cmd->opcode == SD_SWITCH_VOLTAGE) {
1370 unsigned long irqflags;
1371
1372 /*
1373 * Databook says to fail after 2ms w/ no response, but evidence
1374 * shows that sometimes the cmd11 interrupt takes over 130ms.
1375 * We'll set to 500ms, plus an extra jiffy just in case jiffies
1376 * is just about to roll over.
1377 *
1378 * We do this whole thing under spinlock and only if the
1379 * command hasn't already completed (indicating the irq
1380 * already ran so we don't want the timeout).
1381 */
1382 spin_lock_irqsave(&host->irq_lock, irqflags);
1383 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
1384 mod_timer(&host->cmd11_timer,
1385 jiffies + msecs_to_jiffies(500) + 1);
1386 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1387 }
1388
1389 host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd);
1390 }
1391
1392 static void dw_mci_start_request(struct dw_mci *host,
1393 struct dw_mci_slot *slot)
1394 {
1395 struct mmc_request *mrq = slot->mrq;
1396 struct mmc_command *cmd;
1397
1398 cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
1399 __dw_mci_start_request(host, slot, cmd);
1400 }
1401
1402 /* must be called with host->lock held */
1403 static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
1404 struct mmc_request *mrq)
1405 {
1406 dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n",
1407 host->state);
1408
1409 slot->mrq = mrq;
1410
1411 if (host->state == STATE_WAITING_CMD11_DONE) {
1412 dev_warn(&slot->mmc->class_dev,
1413 "Voltage change didn't complete\n");
1414 /*
1415 * this case isn't expected to happen, so we can
1416 * either crash here or just try to continue on
1417 * in the closest possible state
1418 */
1419 host->state = STATE_IDLE;
1420 }
1421
1422 if (host->state == STATE_IDLE) {
1423 host->state = STATE_SENDING_CMD;
1424 dw_mci_start_request(host, slot);
1425 } else {
1426 list_add_tail(&slot->queue_node, &host->queue);
1427 }
1428 }
1429
1430 static bool dw_mci_reset(struct dw_mci *host);
1431 static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1432 {
1433 struct dw_mci_slot *slot = mmc_priv(mmc);
1434 struct dw_mci *host = slot->host;
1435
1436 WARN_ON(slot->mrq);
1437
1438 /*
1439 * The check for card presence and queueing of the request must be
1440 * atomic, otherwise the card could be removed in between and the
1441 * request wouldn't fail until another card was inserted.
1442 */
1443
1444 if (!dw_mci_get_cd(mmc)) {
1445 mrq->cmd->error = -ENOMEDIUM;
1446 mmc_request_done(mmc, mrq);
1447 return;
1448 }
1449
1450 if (host->is_rv1106_sd) {
1451 u32 reg;
1452
1453 readl_poll_timeout(host->regs + SDMMC_STATUS, reg,
1454 reg & BIT(2), USEC_PER_MSEC, 500 * USEC_PER_MSEC);
1455 }
1456
1457 spin_lock_bh(&host->lock);
1458
1459 if (host->is_rv1106_sd)
1460 dw_mci_reset(host);
1461
1462 dw_mci_queue_request(host, slot, mrq);
1463
1464 spin_unlock_bh(&host->lock);
1465 }
1466
1467 static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1468 {
1469 struct dw_mci_slot *slot = mmc_priv(mmc);
1470 const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
1471 u32 regs;
1472 int ret;
1473
1474 switch (ios->bus_width) {
1475 case MMC_BUS_WIDTH_4:
1476 slot->ctype = SDMMC_CTYPE_4BIT;
1477 break;
1478 case MMC_BUS_WIDTH_8:
1479 slot->ctype = SDMMC_CTYPE_8BIT;
1480 break;
1481 default:
1482 /* set default 1 bit mode */
1483 slot->ctype = SDMMC_CTYPE_1BIT;
1484 }
1485
1486 regs = mci_readl(slot->host, UHS_REG);
1487
1488 /* DDR mode set */
1489 if (ios->timing == MMC_TIMING_MMC_DDR52 ||
1490 ios->timing == MMC_TIMING_UHS_DDR50 ||
1491 ios->timing == MMC_TIMING_MMC_HS400)
1492 regs |= ((0x1 << slot->id) << 16);
1493 else
1494 regs &= ~((0x1 << slot->id) << 16);
1495
1496 mci_writel(slot->host, UHS_REG, regs);
1497 slot->host->timing = ios->timing;
1498
1499 /*
1500 * Use mirror of ios->clock to prevent race with mmc
1501 * core ios update when finding the minimum.
1502 */
1503 slot->clock = ios->clock;
1504
1505 if (drv_data && drv_data->set_ios)
1506 drv_data->set_ios(slot->host, ios);
1507
1508 switch (ios->power_mode) {
1509 case MMC_POWER_UP:
1510 if (!IS_ERR_OR_NULL(slot->host->pinctrl))
1511 pinctrl_select_state(slot->host->pinctrl, slot->host->idle_state);
1512
1513 if (!IS_ERR(mmc->supply.vmmc)) {
1514 ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
1515 ios->vdd);
1516 if (ret) {
1517 dev_err(slot->host->dev,
1518 "failed to enable vmmc regulator\n");
1519 /* return if we failed to turn on vmmc */
1520 return;
1521 }
1522 }
1523 set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags);
1524 regs = mci_readl(slot->host, PWREN);
1525 regs |= (1 << slot->id);
1526 mci_writel(slot->host, PWREN, regs);
1527 break;
1528 case MMC_POWER_ON:
1529 if (!IS_ERR_OR_NULL(slot->host->pinctrl))
1530 pinctrl_select_state(slot->host->pinctrl, slot->host->normal_state);
1531
1532 if (!slot->host->vqmmc_enabled) {
1533 if (!IS_ERR(mmc->supply.vqmmc)) {
1534 ret = regulator_enable(mmc->supply.vqmmc);
1535 if (ret < 0)
1536 dev_err(slot->host->dev,
1537 "failed to enable vqmmc\n");
1538 else
1539 slot->host->vqmmc_enabled = true;
1540
1541 } else {
1542 /* Keep track so we don't reset again */
1543 slot->host->vqmmc_enabled = true;
1544 }
1545
1546 /* Reset our state machine after powering on */
1547 dw_mci_ctrl_reset(slot->host,
1548 SDMMC_CTRL_ALL_RESET_FLAGS);
1549 }
1550
1551 /* Adjust clock / bus width after power is up */
1552 dw_mci_setup_bus(slot, false);
1553
1554 break;
1555 case MMC_POWER_OFF:
1556 if (!IS_ERR_OR_NULL(slot->host->pinctrl))
1557 pinctrl_select_state(slot->host->pinctrl, slot->host->idle_state);
1558
1559 /* Turn clock off before power goes down */
1560 dw_mci_setup_bus(slot, false);
1561
1562 if (!IS_ERR(mmc->supply.vmmc))
1563 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
1564
1565 if (!IS_ERR(mmc->supply.vqmmc) && slot->host->vqmmc_enabled)
1566 regulator_disable(mmc->supply.vqmmc);
1567 slot->host->vqmmc_enabled = false;
1568
1569 regs = mci_readl(slot->host, PWREN);
1570 regs &= ~(1 << slot->id);
1571 mci_writel(slot->host, PWREN, regs);
1572 break;
1573 default:
1574 break;
1575 }
1576
1577 if (slot->host->state == STATE_WAITING_CMD11_DONE && ios->clock != 0)
1578 slot->host->state = STATE_IDLE;
1579 }
1580
1581 static int dw_mci_card_busy(struct mmc_host *mmc)
1582 {
1583 struct dw_mci_slot *slot = mmc_priv(mmc);
1584 u32 status;
1585
1586 /*
1587 * Check the busy bit which is low when DAT[3:0]
1588 * (the data lines) are 0000
1589 */
1590 status = mci_readl(slot->host, STATUS);
1591
1592 return !!(status & SDMMC_STATUS_BUSY);
1593 }
1594
1595 static int dw_mci_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios)
1596 {
1597 struct dw_mci_slot *slot = mmc_priv(mmc);
1598 struct dw_mci *host = slot->host;
1599 const struct dw_mci_drv_data *drv_data = host->drv_data;
1600 u32 uhs;
1601 u32 v18 = SDMMC_UHS_18V << slot->id;
1602 int ret;
1603
1604 if (drv_data && drv_data->switch_voltage)
1605 return drv_data->switch_voltage(mmc, ios);
1606
1607 /*
1608 * Program the voltage. Note that some instances of dw_mmc may use
1609 * the UHS_REG for this. For other instances (like exynos) the UHS_REG
1610 * does no harm but you need to set the regulator directly. Try both.
1611 */
1612 uhs = mci_readl(host, UHS_REG);
1613 if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330)
1614 uhs &= ~v18;
1615 else
1616 uhs |= v18;
1617
1618 if (!IS_ERR(mmc->supply.vqmmc)) {
1619 ret = mmc_regulator_set_vqmmc(mmc, ios);
1620 if (ret < 0) {
1621 dev_dbg(&mmc->class_dev,
1622 "Regulator set error %d - %s V\n",
1623 ret, uhs & v18 ? "1.8" : "3.3");
1624 return ret;
1625 }
1626 }
1627 mci_writel(host, UHS_REG, uhs);
1628
1629 return 0;
1630 }
1631
1632 static int dw_mci_get_ro(struct mmc_host *mmc)
1633 {
1634 int read_only;
1635 struct dw_mci_slot *slot = mmc_priv(mmc);
1636 int gpio_ro = mmc_gpio_get_ro(mmc);
1637
1638 /* Use platform get_ro function, else try on board write protect */
1639 if (gpio_ro >= 0)
1640 read_only = gpio_ro;
1641 else
1642 read_only =
1643 mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0;
1644
1645 dev_dbg(&mmc->class_dev, "card is %s\n",
1646 read_only ? "read-only" : "read-write");
1647
1648 return read_only;
1649 }
1650
1651 static void dw_mci_hw_reset(struct mmc_host *mmc)
1652 {
1653 struct dw_mci_slot *slot = mmc_priv(mmc);
1654 struct dw_mci *host = slot->host;
1655 int reset;
1656
1657 if (host->use_dma == TRANS_MODE_IDMAC)
1658 dw_mci_idmac_reset(host);
1659
1660 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
1661 SDMMC_CTRL_FIFO_RESET))
1662 return;
1663
1664 /*
1665 * According to eMMC spec, card reset procedure:
1666 * tRstW >= 1us: RST_n pulse width
1667 * tRSCA >= 200us: RST_n to Command time
1668 * tRSTH >= 1us: RST_n high period
1669 */
1670 reset = mci_readl(host, RST_N);
1671 reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
1672 mci_writel(host, RST_N, reset);
1673 usleep_range(1, 2);
1674 reset |= SDMMC_RST_HWACTIVE << slot->id;
1675 mci_writel(host, RST_N, reset);
1676 usleep_range(200, 300);
1677 }
1678
1679 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
1680 {
1681 struct dw_mci_slot *slot = mmc_priv(mmc);
1682 struct dw_mci *host = slot->host;
1683
1684 /*
1685 * Low power mode will stop the card clock when idle. According to the
1686 * description of the CLKENA register we should disable low power mode
1687 * for SDIO cards if we need SDIO interrupts to work.
1688 */
1689 if (mmc->caps & MMC_CAP_SDIO_IRQ) {
1690 const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
1691 u32 clk_en_a_old;
1692 u32 clk_en_a;
1693
1694 clk_en_a_old = mci_readl(host, CLKENA);
1695
1696 if (card->type == MMC_TYPE_SDIO ||
1697 card->type == MMC_TYPE_SD_COMBO) {
1698 set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1699 clk_en_a = clk_en_a_old & ~clken_low_pwr;
1700 } else {
1701 clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
1702 clk_en_a = clk_en_a_old | clken_low_pwr;
1703 }
1704
1705 if (clk_en_a != clk_en_a_old) {
1706 mci_writel(host, CLKENA, clk_en_a);
1707 mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
1708 SDMMC_CMD_PRV_DAT_WAIT, 0);
1709 }
1710 }
1711 }
1712
1713 static void __dw_mci_enable_sdio_irq(struct dw_mci_slot *slot, int enb)
1714 {
1715 struct dw_mci *host = slot->host;
1716 unsigned long irqflags;
1717 u32 int_mask;
1718
1719 spin_lock_irqsave(&host->irq_lock, irqflags);
1720
1721 /* Enable/disable Slot Specific SDIO interrupt */
1722 int_mask = mci_readl(host, INTMASK);
1723 if (enb)
1724 int_mask |= SDMMC_INT_SDIO(slot->sdio_id);
1725 else
1726 int_mask &= ~SDMMC_INT_SDIO(slot->sdio_id);
1727 mci_writel(host, INTMASK, int_mask);
1728
1729 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1730 }
1731
1732 static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
1733 {
1734 struct dw_mci_slot *slot = mmc_priv(mmc);
1735 struct dw_mci *host = slot->host;
1736
1737 __dw_mci_enable_sdio_irq(slot, enb);
1738
1739 /* Avoid runtime suspending the device when SDIO IRQ is enabled */
1740 if (enb)
1741 pm_runtime_get_noresume(host->dev);
1742 else
1743 pm_runtime_put_noidle(host->dev);
1744 }
1745
1746 static void dw_mci_ack_sdio_irq(struct mmc_host *mmc)
1747 {
1748 struct dw_mci_slot *slot = mmc_priv(mmc);
1749
1750 __dw_mci_enable_sdio_irq(slot, 1);
1751 }
1752
1753 static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
1754 {
1755 struct dw_mci_slot *slot = mmc_priv(mmc);
1756 struct dw_mci *host = slot->host;
1757 const struct dw_mci_drv_data *drv_data = host->drv_data;
1758 int err = -EINVAL;
1759
1760 if (drv_data && drv_data->execute_tuning)
1761 err = drv_data->execute_tuning(slot, opcode);
1762 return err;
1763 }
1764
1765 static int dw_mci_prepare_hs400_tuning(struct mmc_host *mmc,
1766 struct mmc_ios *ios)
1767 {
1768 struct dw_mci_slot *slot = mmc_priv(mmc);
1769 struct dw_mci *host = slot->host;
1770 const struct dw_mci_drv_data *drv_data = host->drv_data;
1771
1772 if (drv_data && drv_data->prepare_hs400_tuning)
1773 return drv_data->prepare_hs400_tuning(host, ios);
1774
1775 return 0;
1776 }
1777
1778 static bool dw_mci_reset(struct dw_mci *host)
1779 {
1780 u32 flags = SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET;
1781 bool ret = false;
1782 u32 status = 0;
1783
1784 /*
1785 * Resetting generates a block interrupt, hence setting
1786 * the scatter-gather pointer to NULL.
1787 */
1788 if (host->sg) {
1789 sg_miter_stop(&host->sg_miter);
1790 host->sg = NULL;
1791 }
1792
1793 if (host->use_dma)
1794 flags |= SDMMC_CTRL_DMA_RESET;
1795
1796 if (dw_mci_ctrl_reset(host, flags)) {
1797 /*
1798 * In all cases we clear the RAWINTS
1799 * register to clear any interrupts.
1800 */
1801 mci_writel(host, RINTSTS, 0xFFFFFFFF);
1802
1803 if (!host->use_dma) {
1804 ret = true;
1805 goto ciu_out;
1806 }
1807
1808 /* Wait for dma_req to be cleared */
1809 if (readl_poll_timeout_atomic(host->regs + SDMMC_STATUS,
1810 status,
1811 !(status & SDMMC_STATUS_DMA_REQ),
1812 1, 500 * USEC_PER_MSEC)) {
1813 dev_err(host->dev,
1814 "%s: Timeout waiting for dma_req to be cleared\n",
1815 __func__);
1816 goto ciu_out;
1817 }
1818
1819 /* when using DMA next we reset the fifo again */
1820 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET))
1821 goto ciu_out;
1822 } else {
1823 /* if the controller reset bit did clear, then set clock regs */
1824 if (!(mci_readl(host, CTRL) & SDMMC_CTRL_RESET)) {
1825 dev_err(host->dev,
1826 "%s: fifo/dma reset bits didn't clear but ciu was reset, doing clock update\n",
1827 __func__);
1828 goto ciu_out;
1829 }
1830 }
1831
1832 if (host->use_dma == TRANS_MODE_IDMAC)
1833 /* It is also required that we reinit idmac */
1834 dw_mci_idmac_init(host);
1835
1836 ret = true;
1837
1838 ciu_out:
1839 /* After a CTRL reset we need to have CIU set clock registers */
1840 mci_send_cmd(host->slot, SDMMC_CMD_UPD_CLK, 0);
1841
1842 return ret;
1843 }
1844
1845 static const struct mmc_host_ops dw_mci_ops = {
1846 .request = dw_mci_request,
1847 .pre_req = dw_mci_pre_req,
1848 .post_req = dw_mci_post_req,
1849 .set_ios = dw_mci_set_ios,
1850 .get_ro = dw_mci_get_ro,
1851 .get_cd = dw_mci_get_cd,
1852 .hw_reset = dw_mci_hw_reset,
1853 .enable_sdio_irq = dw_mci_enable_sdio_irq,
1854 .ack_sdio_irq = dw_mci_ack_sdio_irq,
1855 .execute_tuning = dw_mci_execute_tuning,
1856 .card_busy = dw_mci_card_busy,
1857 .start_signal_voltage_switch = dw_mci_switch_voltage,
1858 .init_card = dw_mci_init_card,
1859 .prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
1860 };
1861
1862 static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
1863 __releases(&host->lock)
1864 __acquires(&host->lock)
1865 {
1866 struct dw_mci_slot *slot;
1867 struct mmc_host *prev_mmc = host->slot->mmc;
1868
1869 WARN_ON(host->cmd || host->data);
1870
1871 host->slot->mrq = NULL;
1872 host->mrq = NULL;
1873 if (!list_empty(&host->queue)) {
1874 slot = list_entry(host->queue.next,
1875 struct dw_mci_slot, queue_node);
1876 list_del(&slot->queue_node);
1877 dev_vdbg(host->dev, "list not empty: %s is next\n",
1878 mmc_hostname(slot->mmc));
1879 host->state = STATE_SENDING_CMD;
1880 dw_mci_start_request(host, slot);
1881 } else {
1882 dev_vdbg(host->dev, "list empty\n");
1883
1884 if (host->state == STATE_SENDING_CMD11)
1885 host->state = STATE_WAITING_CMD11_DONE;
1886 else
1887 host->state = STATE_IDLE;
1888 }
1889
1890 spin_unlock(&host->lock);
1891
1892 mmc_request_done(prev_mmc, mrq);
1893 spin_lock(&host->lock);
1894 }
1895
1896 static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
1897 {
1898 u32 status = host->cmd_status;
1899
1900 host->cmd_status = 0;
1901
1902 /* Read the response from the card (up to 16 bytes) */
1903 if (cmd->flags & MMC_RSP_PRESENT) {
1904 if (cmd->flags & MMC_RSP_136) {
1905 cmd->resp[3] = mci_readl(host, RESP0);
1906 cmd->resp[2] = mci_readl(host, RESP1);
1907 cmd->resp[1] = mci_readl(host, RESP2);
1908 cmd->resp[0] = mci_readl(host, RESP3);
1909 } else {
1910 cmd->resp[0] = mci_readl(host, RESP0);
1911 cmd->resp[1] = 0;
1912 cmd->resp[2] = 0;
1913 cmd->resp[3] = 0;
1914 }
1915 }
1916
1917 if (status & SDMMC_INT_RTO)
1918 cmd->error = -ETIMEDOUT;
1919 else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
1920 cmd->error = -EILSEQ;
1921 else if (status & SDMMC_INT_RESP_ERR)
1922 cmd->error = -EIO;
1923 else
1924 cmd->error = 0;
1925
1926 return cmd->error;
1927 }
1928
1929 static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
1930 {
1931 u32 status = host->data_status;
1932
1933 if (host->is_rv1106_sd && (data->flags & MMC_DATA_WRITE) && (status & SDMMC_INT_DATA_OVER))
1934 goto finish;
1935
1936 if (status & DW_MCI_DATA_ERROR_FLAGS) {
1937 if (status & SDMMC_INT_DRTO) {
1938 data->error = -ETIMEDOUT;
1939 } else if (status & SDMMC_INT_DCRC) {
1940 data->error = -EILSEQ;
1941 } else if (status & SDMMC_INT_EBE) {
1942 if (host->dir_status ==
1943 DW_MCI_SEND_STATUS) {
1944 /*
1945 * No data CRC status was returned.
1946 * The number of bytes transferred
1947 * will be exaggerated in PIO mode.
1948 */
1949 data->bytes_xfered = 0;
1950 data->error = -ETIMEDOUT;
1951 } else if (host->dir_status ==
1952 DW_MCI_RECV_STATUS) {
1953 data->error = -EILSEQ;
1954 }
1955 } else {
1956 /* SDMMC_INT_SBE is included */
1957 data->error = -EILSEQ;
1958 }
1959
1960 dev_dbg(host->dev, "data error, status 0x%08x\n", status);
1961
1962 /*
1963 * After an error, there may be data lingering
1964 * in the FIFO
1965 */
1966 dw_mci_reset(host);
1967 } else {
1968 finish:
1969 data->bytes_xfered = data->blocks * data->blksz;
1970 data->error = 0;
1971 }
1972
1973 return data->error;
1974 }
1975
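/*
 * Arm the software data-read timeout (DRTO) timer. The timeout is derived
 * from the hardware TMOUT and CLKDIV settings plus a small margin, so the
 * software timer only fires if the hardware interrupt never shows up.
 */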
1976 static void dw_mci_set_drto(struct dw_mci *host)
1977 {
1978 unsigned int drto_clks;
1979 unsigned int drto_div;
1980 unsigned int drto_ms;
1981 unsigned long irqflags;
1982
1983 drto_clks = mci_readl(host, TMOUT) >> 8;
1984 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1985 if (drto_div == 0)
1986 drto_div = 1;
1987
1988 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1989 host->bus_hz);
1990
1991 /* add a bit of spare time */
1992 drto_ms += 10;
1993
1994 spin_lock_irqsave(&host->irq_lock, irqflags);
1995 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
1996 mod_timer(&host->dto_timer,
1997 jiffies + msecs_to_jiffies(drto_ms));
1998 spin_unlock_irqrestore(&host->irq_lock, irqflags);
1999 }
2000
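/*
 * Backstop timer for the transfer-complete (XFER_COMPLETE) event, only
 * armed when host->need_xfer_timer is set. Uses the same TMOUT/CLKDIV
 * derived timeout as dw_mci_set_drto() but with a larger margin.
 */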
2001 static void dw_mci_set_xfer_timeout(struct dw_mci *host)
2002 {
2003 unsigned int xfer_clks;
2004 unsigned int xfer_div;
2005 unsigned int xfer_ms;
2006 unsigned long irqflags;
2007
2008 xfer_clks = mci_readl(host, TMOUT) >> 8;
2009 xfer_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
2010 if (xfer_div == 0)
2011 xfer_div = 1;
2012 xfer_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * xfer_clks * xfer_div,
2013 host->bus_hz);
2014
2015 /* add a bit of spare time */
2016 xfer_ms += 100;
2017
2018 spin_lock_irqsave(&host->irq_lock, irqflags);
2019 if (!test_bit(EVENT_XFER_COMPLETE, &host->pending_events))
2020 mod_timer(&host->xfer_timer,
2021 jiffies + msecs_to_jiffies(xfer_ms));
2022 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2023 }
2024
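/*
 * Consume a pending command-complete event, if any. Returns true when the
 * event was pending; the command timeout (cto) timer is stopped as a side
 * effect so it cannot fire for a command that has already finished.
 */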
2025 static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host)
2026 {
2027 if (!test_bit(EVENT_CMD_COMPLETE, &host->pending_events))
2028 return false;
2029
2030 /*
2031 * Really be certain that the timer has stopped. This is a bit of
2032 * paranoia and could only really happen if we had really bad
2033 * interrupt latency and the interrupt routine and timeout were
2034 * running concurrently so that the del_timer() in the interrupt
2035 * handler couldn't run.
2036 */
2037 WARN_ON(del_timer_sync(&host->cto_timer));
2038 clear_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2039
2040 return true;
2041 }
2042
2043 static bool dw_mci_clear_pending_data_complete(struct dw_mci *host)
2044 {
2045 if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events))
2046 return false;
2047
2048 /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */
2049 WARN_ON(del_timer_sync(&host->dto_timer));
2050 clear_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2051
2052 return true;
2053 }
2054
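/*
 * Request state machine, run in tasklet context. It is kicked from the
 * interrupt handler and the timeout timers, and walks a request through
 * the command, data, busy and stop phases until it can be completed.
 */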
2055 static void dw_mci_tasklet_func(unsigned long priv)
2056 {
2057 struct dw_mci *host = (struct dw_mci *)priv;
2058 struct mmc_data *data;
2059 struct mmc_command *cmd;
2060 struct mmc_request *mrq;
2061 enum dw_mci_state state;
2062 enum dw_mci_state prev_state;
2063 unsigned int err;
2064
2065 spin_lock(&host->lock);
2066
2067 state = host->state;
2068 data = host->data;
2069 mrq = host->mrq;
2070
2071 do {
2072 prev_state = state;
2073
2074 switch (state) {
2075 case STATE_IDLE:
2076 case STATE_WAITING_CMD11_DONE:
2077 break;
2078
2079 case STATE_SENDING_CMD11:
2080 case STATE_SENDING_CMD:
2081 if (!dw_mci_clear_pending_cmd_complete(host))
2082 break;
2083
2084 cmd = host->cmd;
2085 host->cmd = NULL;
2086 set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
2087 err = dw_mci_command_complete(host, cmd);
2088 if (cmd == mrq->sbc && !err) {
2089 __dw_mci_start_request(host, host->slot,
2090 mrq->cmd);
2091 goto unlock;
2092 }
2093
2094 if (cmd->data && err) {
2095 /*
2096 * During UHS tuning sequence, sending the stop
2097 * command after the response CRC error would
2098 * throw the system into a confused state
2099 * causing all future tuning phases to report
2100 * failure.
2101 *
2102 * In such a case, the controller will move into a data
2103 * transfer state after a response error or
2104 * response CRC error. Let's let that finish
2105 * before trying to send a stop, so we'll go to
2106 * STATE_SENDING_DATA.
2107 *
2108 * Although letting the data transfer take place
2109 * will waste a bit of time (we already know
2110 * the command was bad), it can't cause any
2111 * errors since it's possible it would have
2112 * taken place anyway if this tasklet got
2113 * delayed. Allowing the transfer to take place
2114 * avoids races and keeps things simple.
2115 */
2116 if (err != -ETIMEDOUT &&
2117 host->dir_status == DW_MCI_RECV_STATUS) {
2118 state = STATE_SENDING_DATA;
2119 continue;
2120 }
2121
2122 send_stop_abort(host, data);
2123 dw_mci_stop_dma(host);
2124 state = STATE_SENDING_STOP;
2125 break;
2126 }
2127
2128 if (!cmd->data || err) {
2129 dw_mci_request_end(host, mrq);
2130 goto unlock;
2131 }
2132
2133 prev_state = state = STATE_SENDING_DATA;
2134 fallthrough;
2135
2136 case STATE_SENDING_DATA:
2137 /*
2138 * We could get a data error and never a transfer
2139 * complete so we'd better check for it here.
2140 *
2141 * Note that we don't really care if we also got a
2142 * transfer complete; stopping the DMA and sending an
2143 * abort won't hurt.
2144 */
2145 if (test_and_clear_bit(EVENT_DATA_ERROR,
2146 &host->pending_events)) {
2147 if (!(host->data_status & (SDMMC_INT_DRTO |
2148 SDMMC_INT_EBE)))
2149 send_stop_abort(host, data);
2150 dw_mci_stop_dma(host);
2151 state = STATE_DATA_ERROR;
2152 break;
2153 }
2154
2155 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2156 &host->pending_events)) {
2157 /*
2158 * If no data-related interrupt arrives within the
2159 * given time while in the data-read state, arm a timeout.
2160 */
2161 if (host->dir_status == DW_MCI_RECV_STATUS)
2162 dw_mci_set_drto(host);
2163 if (host->need_xfer_timer &&
2164 host->dir_status == DW_MCI_RECV_STATUS)
2165 dw_mci_set_xfer_timeout(host);
2166 break;
2167 }
2168
2169 set_bit(EVENT_XFER_COMPLETE, &host->completed_events);
2170
2171 /*
2172 * Handle an EVENT_DATA_ERROR that might have shown up
2173 * before the transfer completed. This might not have
2174 * been caught by the check above because the interrupt
2175 * could have gone off between the previous check and
2176 * the check for transfer complete.
2177 *
2178 * Technically this ought not be needed assuming we
2179 * get a DATA_COMPLETE eventually (we'll notice the
2180 * error and end the request), but it shouldn't hurt.
2181 *
2182 * This has the advantage of sending the stop command.
2183 */
2184 if (test_and_clear_bit(EVENT_DATA_ERROR,
2185 &host->pending_events)) {
2186 if (!(host->data_status & (SDMMC_INT_DRTO |
2187 SDMMC_INT_EBE)))
2188 send_stop_abort(host, data);
2189 dw_mci_stop_dma(host);
2190 state = STATE_DATA_ERROR;
2191 break;
2192 }
2193 prev_state = state = STATE_DATA_BUSY;
2194
2195 fallthrough;
2196
2197 case STATE_DATA_BUSY:
2198 if (!dw_mci_clear_pending_data_complete(host)) {
2199 /*
2200 * If a data error interrupt came but the data-over
2201 * interrupt doesn't arrive within the given time
2202 * while reading data, arm a timeout.
2203 */
2204 if (host->dir_status == DW_MCI_RECV_STATUS)
2205 dw_mci_set_drto(host);
2206 break;
2207 }
2208
2209 host->data = NULL;
2210 set_bit(EVENT_DATA_COMPLETE, &host->completed_events);
2211 err = dw_mci_data_complete(host, data);
2212
2213 if (!err) {
2214 if (!data->stop || mrq->sbc) {
2215 if (mrq->sbc && data->stop)
2216 data->stop->error = 0;
2217 dw_mci_request_end(host, mrq);
2218 goto unlock;
2219 }
2220
2221 /* stop command for open-ended transfer */
2222 if (data->stop)
2223 send_stop_abort(host, data);
2224 } else {
2225 /*
2226 * If we don't have a command complete now we'll
2227 * never get one since we just reset everything;
2228 * better end the request.
2229 *
2230 * If we do have a command complete we'll fall
2231 * through to the SENDING_STOP command and
2232 * everything will be peachy keen.
2233 */
2234 if (!test_bit(EVENT_CMD_COMPLETE,
2235 &host->pending_events)) {
2236 host->cmd = NULL;
2237 dw_mci_request_end(host, mrq);
2238 goto unlock;
2239 }
2240 }
2241
2242 /*
2243 * If err is non-zero, a stop/abort command
2244 * has already been issued.
2245 */
2246 prev_state = state = STATE_SENDING_STOP;
2247
2248 fallthrough;
2249
2250 case STATE_SENDING_STOP:
2251 if (!dw_mci_clear_pending_cmd_complete(host))
2252 break;
2253
2254 /* CMD error in data command */
2255 if (mrq->cmd->error && mrq->data)
2256 dw_mci_reset(host);
2257
2258 host->cmd = NULL;
2259 host->data = NULL;
2260
2261 if (!mrq->sbc && mrq->stop)
2262 dw_mci_command_complete(host, mrq->stop);
2263 else
2264 host->cmd_status = 0;
2265
2266 dw_mci_request_end(host, mrq);
2267 goto unlock;
2268
2269 case STATE_DATA_ERROR:
2270 if (!test_and_clear_bit(EVENT_XFER_COMPLETE,
2271 &host->pending_events))
2272 break;
2273
2274 state = STATE_DATA_BUSY;
2275 break;
2276 }
2277 } while (state != prev_state);
2278
2279 host->state = state;
2280 unlock:
2281 spin_unlock(&host->lock);
2282
2283 }
2284
2285 /* push final bytes to part_buf, only use during push */
2286 static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt)
2287 {
2288 memcpy((void *)&host->part_buf, buf, cnt);
2289 host->part_buf_count = cnt;
2290 }
2291
2292 /* append bytes to part_buf, only use during push */
2293 static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt)
2294 {
2295 cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count);
2296 memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt);
2297 host->part_buf_count += cnt;
2298 return cnt;
2299 }
2300
2301 /* pull first bytes from part_buf, only use during pull */
2302 static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt)
2303 {
2304 cnt = min_t(int, cnt, host->part_buf_count);
2305 if (cnt) {
2306 memcpy(buf, (void *)&host->part_buf + host->part_buf_start,
2307 cnt);
2308 host->part_buf_count -= cnt;
2309 host->part_buf_start += cnt;
2310 }
2311 return cnt;
2312 }
2313
2314 /* pull final bytes from the part_buf, assuming it's just been filled */
2315 static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt)
2316 {
2317 memcpy(buf, &host->part_buf, cnt);
2318 host->part_buf_start = cnt;
2319 host->part_buf_count = (1 << host->data_shift) - cnt;
2320 }
2321
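/*
 * PIO push for a 16-bit host data width. Whole 16-bit words are written
 * straight to the FIFO (via a bounce buffer if the source is unaligned);
 * a trailing odd byte is parked in part_buf until more data arrives or
 * the expected transfer length has been reached.
 */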
2322 static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
2323 {
2324 struct mmc_data *data = host->data;
2325 int init_cnt = cnt;
2326
2327 /* try and push anything in the part_buf */
2328 if (unlikely(host->part_buf_count)) {
2329 int len = dw_mci_push_part_bytes(host, buf, cnt);
2330
2331 buf += len;
2332 cnt -= len;
2333 if (host->part_buf_count == 2) {
2334 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2335 host->part_buf_count = 0;
2336 }
2337 }
2338 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2339 if (unlikely((unsigned long)buf & 0x1)) {
2340 while (cnt >= 2) {
2341 u16 aligned_buf[64];
2342 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2343 int items = len >> 1;
2344 int i;
2345 /* memcpy from input buffer into aligned buffer */
2346 memcpy(aligned_buf, buf, len);
2347 buf += len;
2348 cnt -= len;
2349 /* push data from aligned buffer into fifo */
2350 for (i = 0; i < items; ++i)
2351 mci_fifo_writew(host->fifo_reg, aligned_buf[i]);
2352 }
2353 } else
2354 #endif
2355 {
2356 u16 *pdata = buf;
2357
2358 for (; cnt >= 2; cnt -= 2)
2359 mci_fifo_writew(host->fifo_reg, *pdata++);
2360 buf = pdata;
2361 }
2362 /* put anything remaining in the part_buf */
2363 if (cnt) {
2364 dw_mci_set_part_bytes(host, buf, cnt);
2365 /* Push data if we have reached the expected data length */
2366 if ((data->bytes_xfered + init_cnt) ==
2367 (data->blksz * data->blocks))
2368 mci_fifo_writew(host->fifo_reg, host->part_buf16);
2369 }
2370 }
2371
2372 static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
2373 {
2374 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2375 if (unlikely((unsigned long)buf & 0x1)) {
2376 while (cnt >= 2) {
2377 /* pull data from fifo into aligned buffer */
2378 u16 aligned_buf[64];
2379 int len = min(cnt & -2, (int)sizeof(aligned_buf));
2380 int items = len >> 1;
2381 int i;
2382
2383 for (i = 0; i < items; ++i)
2384 aligned_buf[i] = mci_fifo_readw(host->fifo_reg);
2385 /* memcpy from aligned buffer into output buffer */
2386 memcpy(buf, aligned_buf, len);
2387 buf += len;
2388 cnt -= len;
2389 }
2390 } else
2391 #endif
2392 {
2393 u16 *pdata = buf;
2394
2395 for (; cnt >= 2; cnt -= 2)
2396 *pdata++ = mci_fifo_readw(host->fifo_reg);
2397 buf = pdata;
2398 }
2399 if (cnt) {
2400 host->part_buf16 = mci_fifo_readw(host->fifo_reg);
2401 dw_mci_pull_final_bytes(host, buf, cnt);
2402 }
2403 }
2404
2405 static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
2406 {
2407 struct mmc_data *data = host->data;
2408 int init_cnt = cnt;
2409
2410 /* try and push anything in the part_buf */
2411 if (unlikely(host->part_buf_count)) {
2412 int len = dw_mci_push_part_bytes(host, buf, cnt);
2413
2414 buf += len;
2415 cnt -= len;
2416 if (host->part_buf_count == 4) {
2417 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2418 host->part_buf_count = 0;
2419 }
2420 }
2421 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2422 if (unlikely((unsigned long)buf & 0x3)) {
2423 while (cnt >= 4) {
2424 u32 aligned_buf[32];
2425 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2426 int items = len >> 2;
2427 int i;
2428 /* memcpy from input buffer into aligned buffer */
2429 memcpy(aligned_buf, buf, len);
2430 buf += len;
2431 cnt -= len;
2432 /* push data from aligned buffer into fifo */
2433 for (i = 0; i < items; ++i)
2434 mci_fifo_writel(host->fifo_reg, aligned_buf[i]);
2435 }
2436 } else
2437 #endif
2438 {
2439 u32 *pdata = buf;
2440
2441 for (; cnt >= 4; cnt -= 4)
2442 mci_fifo_writel(host->fifo_reg, *pdata++);
2443 buf = pdata;
2444 }
2445 /* put anything remaining in the part_buf */
2446 if (cnt) {
2447 dw_mci_set_part_bytes(host, buf, cnt);
2448 /* Push data if we have reached the expected data length */
2449 if ((data->bytes_xfered + init_cnt) ==
2450 (data->blksz * data->blocks))
2451 mci_fifo_writel(host->fifo_reg, host->part_buf32);
2452 }
2453 }
2454
2455 static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
2456 {
2457 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2458 if (unlikely((unsigned long)buf & 0x3)) {
2459 while (cnt >= 4) {
2460 /* pull data from fifo into aligned buffer */
2461 u32 aligned_buf[32];
2462 int len = min(cnt & -4, (int)sizeof(aligned_buf));
2463 int items = len >> 2;
2464 int i;
2465
2466 for (i = 0; i < items; ++i)
2467 aligned_buf[i] = mci_fifo_readl(host->fifo_reg);
2468 /* memcpy from aligned buffer into output buffer */
2469 memcpy(buf, aligned_buf, len);
2470 buf += len;
2471 cnt -= len;
2472 }
2473 } else
2474 #endif
2475 {
2476 u32 *pdata = buf;
2477
2478 for (; cnt >= 4; cnt -= 4)
2479 *pdata++ = mci_fifo_readl(host->fifo_reg);
2480 buf = pdata;
2481 }
2482 if (cnt) {
2483 host->part_buf32 = mci_fifo_readl(host->fifo_reg);
2484 dw_mci_pull_final_bytes(host, buf, cnt);
2485 }
2486 }
2487
2488 static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
2489 {
2490 struct mmc_data *data = host->data;
2491 int init_cnt = cnt;
2492
2493 /* try and push anything in the part_buf */
2494 if (unlikely(host->part_buf_count)) {
2495 int len = dw_mci_push_part_bytes(host, buf, cnt);
2496
2497 buf += len;
2498 cnt -= len;
2499
2500 if (host->part_buf_count == 8) {
2501 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2502 host->part_buf_count = 0;
2503 }
2504 }
2505 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2506 if (unlikely((unsigned long)buf & 0x7)) {
2507 while (cnt >= 8) {
2508 u64 aligned_buf[16];
2509 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2510 int items = len >> 3;
2511 int i;
2512 /* memcpy from input buffer into aligned buffer */
2513 memcpy(aligned_buf, buf, len);
2514 buf += len;
2515 cnt -= len;
2516 /* push data from aligned buffer into fifo */
2517 for (i = 0; i < items; ++i)
2518 mci_fifo_writeq(host->fifo_reg, aligned_buf[i]);
2519 }
2520 } else
2521 #endif
2522 {
2523 u64 *pdata = buf;
2524
2525 for (; cnt >= 8; cnt -= 8)
2526 mci_fifo_writeq(host->fifo_reg, *pdata++);
2527 buf = pdata;
2528 }
2529 /* put anything remaining in the part_buf */
2530 if (cnt) {
2531 dw_mci_set_part_bytes(host, buf, cnt);
2532 /* Push data if we have reached the expected data length */
2533 if ((data->bytes_xfered + init_cnt) ==
2534 (data->blksz * data->blocks))
2535 mci_fifo_writeq(host->fifo_reg, host->part_buf);
2536 }
2537 }
2538
2539 static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
2540 {
2541 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2542 if (unlikely((unsigned long)buf & 0x7)) {
2543 while (cnt >= 8) {
2544 /* pull data from fifo into aligned buffer */
2545 u64 aligned_buf[16];
2546 int len = min(cnt & -8, (int)sizeof(aligned_buf));
2547 int items = len >> 3;
2548 int i;
2549
2550 for (i = 0; i < items; ++i)
2551 aligned_buf[i] = mci_fifo_readq(host->fifo_reg);
2552
2553 /* memcpy from aligned buffer into output buffer */
2554 memcpy(buf, aligned_buf, len);
2555 buf += len;
2556 cnt -= len;
2557 }
2558 } else
2559 #endif
2560 {
2561 u64 *pdata = buf;
2562
2563 for (; cnt >= 8; cnt -= 8)
2564 *pdata++ = mci_fifo_readq(host->fifo_reg);
2565 buf = pdata;
2566 }
2567 if (cnt) {
2568 host->part_buf = mci_fifo_readq(host->fifo_reg);
2569 dw_mci_pull_final_bytes(host, buf, cnt);
2570 }
2571 }
2572
2573 static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
2574 {
2575 int len;
2576
2577 /* get remaining partial bytes */
2578 len = dw_mci_pull_part_bytes(host, buf, cnt);
2579 if (unlikely(len == cnt))
2580 return;
2581 buf += len;
2582 cnt -= len;
2583
2584 /* get the rest of the data */
2585 host->pull_data(host, buf, cnt);
2586 }
2587
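/*
 * Drain the receive FIFO into the scatter-gather buffers in PIO mode.
 * Called from the RXDR interrupt and, with @dto set, from the data-over
 * interrupt to pick up any bytes still left in the FIFO.
 */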
2588 static void dw_mci_read_data_pio(struct dw_mci *host, bool dto)
2589 {
2590 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2591 void *buf;
2592 unsigned int offset;
2593 struct mmc_data *data = host->data;
2594 int shift = host->data_shift;
2595 u32 status;
2596 unsigned int len;
2597 unsigned int remain, fcnt;
2598
2599 do {
2600 if (!sg_miter_next(sg_miter))
2601 goto done;
2602
2603 host->sg = sg_miter->piter.sg;
2604 buf = sg_miter->addr;
2605 remain = sg_miter->length;
2606 offset = 0;
2607
2608 do {
2609 fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
2610 << shift) + host->part_buf_count;
2611 len = min(remain, fcnt);
2612 if (!len)
2613 break;
2614 dw_mci_pull_data(host, (void *)(buf + offset), len);
2615 data->bytes_xfered += len;
2616 offset += len;
2617 remain -= len;
2618 } while (remain);
2619
2620 sg_miter->consumed = offset;
2621 status = mci_readl(host, MINTSTS);
2622 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2623 /* if the RXDR is ready, read again */
2624 } while ((status & SDMMC_INT_RXDR) ||
2625 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS))));
2626
2627 if (!remain) {
2628 if (!sg_miter_next(sg_miter))
2629 goto done;
2630 sg_miter->consumed = 0;
2631 }
2632 sg_miter_stop(sg_miter);
2633 return;
2634
2635 done:
2636 sg_miter_stop(sg_miter);
2637 host->sg = NULL;
2638 smp_wmb(); /* drain writebuffer */
2639 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2640 if (host->need_xfer_timer)
2641 del_timer(&host->xfer_timer);
2642 }
2643
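/*
 * Fill the transmit FIFO from the scatter-gather buffers in PIO mode,
 * writing as much as the current FIFO free space allows. Called from the
 * TXDR interrupt until the whole request has been pushed.
 */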
2644 static void dw_mci_write_data_pio(struct dw_mci *host)
2645 {
2646 struct sg_mapping_iter *sg_miter = &host->sg_miter;
2647 void *buf;
2648 unsigned int offset;
2649 struct mmc_data *data = host->data;
2650 int shift = host->data_shift;
2651 u32 status;
2652 unsigned int len;
2653 unsigned int fifo_depth = host->fifo_depth;
2654 unsigned int remain, fcnt;
2655
2656 do {
2657 if (!sg_miter_next(sg_miter))
2658 goto done;
2659
2660 host->sg = sg_miter->piter.sg;
2661 buf = sg_miter->addr;
2662 remain = sg_miter->length;
2663 offset = 0;
2664
2665 do {
2666 fcnt = ((fifo_depth -
2667 SDMMC_GET_FCNT(mci_readl(host, STATUS)))
2668 << shift) - host->part_buf_count;
2669 len = min(remain, fcnt);
2670 if (!len)
2671 break;
2672 host->push_data(host, (void *)(buf + offset), len);
2673 data->bytes_xfered += len;
2674 offset += len;
2675 remain -= len;
2676 } while (remain);
2677
2678 sg_miter->consumed = offset;
2679 status = mci_readl(host, MINTSTS);
2680 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2681 } while (status & SDMMC_INT_TXDR); /* if TXDR write again */
2682
2683 if (!remain) {
2684 if (!sg_miter_next(sg_miter))
2685 goto done;
2686 sg_miter->consumed = 0;
2687 }
2688 sg_miter_stop(sg_miter);
2689 return;
2690
2691 done:
2692 sg_miter_stop(sg_miter);
2693 host->sg = NULL;
2694 smp_wmb(); /* drain writebuffer */
2695 set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
2696 }
2697
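/*
 * Record the raw status of a completed command and schedule the tasklet.
 * Both callers in dw_mci_interrupt() take host->irq_lock around this call.
 */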
2698 static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
2699 {
2700 del_timer(&host->cto_timer);
2701
2702 if (!host->cmd_status)
2703 host->cmd_status = status;
2704
2705 smp_wmb(); /* drain writebuffer */
2706
2707 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2708 tasklet_schedule(&host->tasklet);
2709 }
2710
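/* Card-detect change: notify the core after the configured debounce delay. */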
2711 static void dw_mci_handle_cd(struct dw_mci *host)
2712 {
2713 struct dw_mci_slot *slot = host->slot;
2714
2715 if (slot->mmc->ops->card_event)
2716 slot->mmc->ops->card_event(slot->mmc);
2717 mmc_detect_change(slot->mmc,
2718 msecs_to_jiffies(host->pdata->detect_delay_ms));
2719 }
2720
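/*
 * Top-level interrupt handler. SDMMC interrupts (voltage switch, command,
 * data, card detect, SDIO) are handled first, then the IDMAC status
 * registers are checked when the internal DMA controller is in use.
 */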
2721 static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
2722 {
2723 struct dw_mci *host = dev_id;
2724 u32 pending;
2725 struct dw_mci_slot *slot = host->slot;
2726 unsigned long irqflags;
2727
2728 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
2729
2730 if (pending) {
2731 /* Check volt switch first, since it can look like an error */
2732 if ((host->state == STATE_SENDING_CMD11) &&
2733 (pending & SDMMC_INT_VOLT_SWITCH)) {
2734 mci_writel(host, RINTSTS, SDMMC_INT_VOLT_SWITCH);
2735 pending &= ~SDMMC_INT_VOLT_SWITCH;
2736
2737 /*
2738 * Hold the lock; we know cmd11_timer can't be kicked
2739 * off after the lock is released, so safe to delete.
2740 */
2741 spin_lock_irqsave(&host->irq_lock, irqflags);
2742 dw_mci_cmd_interrupt(host, pending);
2743 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2744
2745 del_timer(&host->cmd11_timer);
2746 }
2747
2748 if (pending & DW_MCI_CMD_ERROR_FLAGS) {
2749 spin_lock_irqsave(&host->irq_lock, irqflags);
2750
2751 del_timer(&host->cto_timer);
2752 mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
2753 host->cmd_status = pending;
2754 if ((host->need_xfer_timer) &&
2755 host->dir_status == DW_MCI_RECV_STATUS)
2756 del_timer(&host->xfer_timer);
2757 smp_wmb(); /* drain writebuffer */
2758 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
2759
2760 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2761 }
2762
2763 if (pending & DW_MCI_DATA_ERROR_FLAGS) {
2764 /* if there is an error, report DATA_ERROR */
2765 mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
2766 host->data_status = pending;
2767 smp_wmb(); /* drain writebuffer */
2768 set_bit(EVENT_DATA_ERROR, &host->pending_events);
2769 tasklet_schedule(&host->tasklet);
2770 }
2771
2772 if (pending & SDMMC_INT_DATA_OVER) {
2773 rv1106_sd:
2774 spin_lock_irqsave(&host->irq_lock, irqflags);
2775
2776 del_timer(&host->dto_timer);
2777
2778 mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
2779 if (host->is_rv1106_sd)
2780 pending |= SDMMC_INT_DATA_OVER;
2781 if (!host->data_status)
2782 host->data_status = pending;
2783 smp_wmb(); /* drain writebuffer */
2784 if (host->dir_status == DW_MCI_RECV_STATUS) {
2785 if (host->sg != NULL)
2786 dw_mci_read_data_pio(host, true);
2787 }
2788 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
2789 tasklet_schedule(&host->tasklet);
2790
2791 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2792 }
2793
2794 if (pending & SDMMC_INT_RXDR) {
2795 mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
2796 if (host->dir_status == DW_MCI_RECV_STATUS && host->sg)
2797 dw_mci_read_data_pio(host, false);
2798 }
2799
2800 if (pending & SDMMC_INT_TXDR) {
2801 mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
2802 if (host->dir_status == DW_MCI_SEND_STATUS && host->sg)
2803 dw_mci_write_data_pio(host);
2804 }
2805
2806 if (pending & SDMMC_INT_CMD_DONE) {
2807 spin_lock_irqsave(&host->irq_lock, irqflags);
2808
2809 mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
2810 dw_mci_cmd_interrupt(host, pending);
2811
2812 spin_unlock_irqrestore(&host->irq_lock, irqflags);
2813 }
2814
2815 if (pending & SDMMC_INT_CD) {
2816 mci_writel(host, RINTSTS, SDMMC_INT_CD);
2817 dw_mci_handle_cd(host);
2818 }
2819
2820 if (pending & SDMMC_INT_SDIO(slot->sdio_id)) {
2821 mci_writel(host, RINTSTS,
2822 SDMMC_INT_SDIO(slot->sdio_id));
2823 __dw_mci_enable_sdio_irq(slot, 0);
2824 sdio_signal_irq(slot->mmc);
2825 }
2826
2827 }
2828
2829 if (host->use_dma != TRANS_MODE_IDMAC)
2830 return IRQ_HANDLED;
2831
2832 /* Handle IDMA interrupts */
2833 if (host->dma_64bit_address == 1) {
2834 pending = mci_readl(host, IDSTS64);
2835 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2836 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_TI |
2837 SDMMC_IDMAC_INT_RI);
2838 mci_writel(host, IDSTS64, SDMMC_IDMAC_INT_NI);
2839 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2840 host->dma_ops->complete((void *)host);
2841 }
2842 } else {
2843 pending = mci_readl(host, IDSTS);
2844 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
2845 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI |
2846 SDMMC_IDMAC_INT_RI);
2847 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
2848 if (!test_bit(EVENT_DATA_ERROR, &host->pending_events))
2849 host->dma_ops->complete((void *)host);
2850
2851 if (host->is_rv1106_sd && (pending & SDMMC_IDMAC_INT_TI))
2852 goto rv1106_sd;
2853 }
2854 }
2855
2856 return IRQ_HANDLED;
2857 }
2858
2859 static int dw_mci_init_slot_caps(struct dw_mci_slot *slot)
2860 {
2861 struct dw_mci *host = slot->host;
2862 const struct dw_mci_drv_data *drv_data = host->drv_data;
2863 struct mmc_host *mmc = slot->mmc;
2864 int ctrl_id;
2865
2866 if (host->pdata->caps)
2867 mmc->caps = host->pdata->caps;
2868
2869 if (host->pdata->pm_caps)
2870 mmc->pm_caps = host->pdata->pm_caps;
2871
2872 if (host->dev->of_node) {
2873 ctrl_id = of_alias_get_id(host->dev->of_node, "mshc");
2874 if (ctrl_id < 0)
2875 ctrl_id = 0;
2876 } else {
2877 ctrl_id = to_platform_device(host->dev)->id;
2878 }
2879
2880 if (drv_data && drv_data->caps) {
2881 if (ctrl_id >= drv_data->num_caps) {
2882 dev_err(host->dev, "invalid controller id %d\n",
2883 ctrl_id);
2884 return -EINVAL;
2885 }
2886 mmc->caps |= drv_data->caps[ctrl_id];
2887 }
2888
2889 if (host->pdata->caps2)
2890 mmc->caps2 = host->pdata->caps2;
2891
2892 mmc->f_min = DW_MCI_FREQ_MIN;
2893 if (!mmc->f_max)
2894 mmc->f_max = DW_MCI_FREQ_MAX;
2895
2896 /* Process SDIO IRQs through the sdio_irq_work. */
2897 if (mmc->caps & MMC_CAP_SDIO_IRQ)
2898 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
2899
2900 return 0;
2901 }
2902
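/*
 * Allocate and register the single mmc_host for this controller, applying
 * the platform/DT capabilities and the transfer-size limits that match the
 * selected transfer mode (IDMAC, external DMA or PIO).
 */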
2903 static int dw_mci_init_slot(struct dw_mci *host)
2904 {
2905 struct mmc_host *mmc;
2906 struct dw_mci_slot *slot;
2907 int ret;
2908
2909 mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev);
2910 if (!mmc)
2911 return -ENOMEM;
2912
2913 slot = mmc_priv(mmc);
2914 slot->id = 0;
2915 slot->sdio_id = host->sdio_id0 + slot->id;
2916 slot->mmc = mmc;
2917 slot->host = host;
2918 host->slot = slot;
2919
2920 mmc->ops = &dw_mci_ops;
2921
2922 /* if there are external regulators, get them */
2923 ret = mmc_regulator_get_supply(mmc);
2924 if (ret)
2925 goto err_host_allocated;
2926
2927 if (!mmc->ocr_avail)
2928 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
2929
2930 ret = mmc_of_parse(mmc);
2931 if (ret)
2932 goto err_host_allocated;
2933
2934 ret = dw_mci_init_slot_caps(slot);
2935 if (ret)
2936 goto err_host_allocated;
2937
2938 /* Useful defaults if platform data is unset. */
2939 if (host->use_dma == TRANS_MODE_IDMAC) {
2940 /* Reserve last desc for dirty data */
2941 if (host->is_rv1106_sd)
2942 host->ring_size--;
2943
2944 mmc->max_segs = host->ring_size;
2945 mmc->max_blk_size = 65535;
2946 mmc->max_seg_size = 0x1000;
2947 mmc->max_req_size = mmc->max_seg_size * host->ring_size;
2948 mmc->max_blk_count = mmc->max_req_size / 512;
2949 } else if (host->use_dma == TRANS_MODE_EDMAC) {
2950 mmc->max_segs = 64;
2951 mmc->max_blk_size = 65535;
2952 mmc->max_blk_count = 65535;
2953 mmc->max_req_size =
2954 mmc->max_blk_size * mmc->max_blk_count;
2955 mmc->max_seg_size = mmc->max_req_size;
2956 } else {
2957 /* TRANS_MODE_PIO */
2958 mmc->max_segs = 64;
2959 mmc->max_blk_size = 65535; /* BLKSIZ is 16 bits */
2960 mmc->max_blk_count = 512;
2961 mmc->max_req_size = mmc->max_blk_size *
2962 mmc->max_blk_count;
2963 mmc->max_seg_size = mmc->max_req_size;
2964 }
2965
2966 dw_mci_get_cd(mmc);
2967
2968 ret = mmc_add_host(mmc);
2969 if (ret)
2970 goto err_host_allocated;
2971
2972 #if defined(CONFIG_DEBUG_FS)
2973 dw_mci_init_debugfs(slot);
2974 #endif
2975
2976 return 0;
2977
2978 err_host_allocated:
2979 mmc_free_host(mmc);
2980 return ret;
2981 }
2982
2983 static void dw_mci_cleanup_slot(struct dw_mci_slot *slot)
2984 {
2985 /* Debugfs stuff is cleaned up by mmc core */
2986 mmc_remove_host(slot->mmc);
2987 slot->host->slot = NULL;
2988 mmc_free_host(slot->mmc);
2989 }
2990
2991 static void dw_mci_init_dma(struct dw_mci *host)
2992 {
2993 int addr_config;
2994 struct device *dev = host->dev;
2995
2996 /*
2997 * Check transfer mode from HCON[17:16]
2998 * Clear the ambiguous description of dw_mmc databook:
2999 * 2b'00: No DMA Interface -> Actually means using Internal DMA block
3000 * 2b'01: DesignWare DMA Interface -> Synopsys DW-DMA block
3001 * 2b'10: Generic DMA Interface -> non-Synopsys generic DMA block
3002 * 2b'11: Non DW DMA Interface -> pio only
3003 * Compared to DesignWare DMA Interface, Generic DMA Interface has a
3004 * simpler request/acknowledge handshake mechanism and both of them
3005 * are regarded as external dma master for dw_mmc.
3006 */
3007 host->use_dma = SDMMC_GET_TRANS_MODE(mci_readl(host, HCON));
3008 if (host->use_dma == DMA_INTERFACE_IDMA) {
3009 host->use_dma = TRANS_MODE_IDMAC;
3010 } else if (host->use_dma == DMA_INTERFACE_DWDMA ||
3011 host->use_dma == DMA_INTERFACE_GDMA) {
3012 host->use_dma = TRANS_MODE_EDMAC;
3013 } else {
3014 goto no_dma;
3015 }
3016
3017 /* Determine which DMA interface to use */
3018 if (host->use_dma == TRANS_MODE_IDMAC) {
3019 /*
3020 * Check ADDR_CONFIG bit in HCON to find
3021 * IDMAC address bus width
3022 */
3023 addr_config = SDMMC_GET_ADDR_CONFIG(mci_readl(host, HCON));
3024
3025 if (addr_config == 1) {
3026 /* host supports IDMAC in 64-bit address mode */
3027 host->dma_64bit_address = 1;
3028 dev_info(host->dev,
3029 "IDMAC supports 64-bit address mode.\n");
3030 if (!dma_set_mask(host->dev, DMA_BIT_MASK(64)))
3031 dma_set_coherent_mask(host->dev,
3032 DMA_BIT_MASK(64));
3033 } else {
3034 /* host supports IDMAC in 32-bit address mode */
3035 host->dma_64bit_address = 0;
3036 dev_info(host->dev,
3037 "IDMAC supports 32-bit address mode.\n");
3038 }
3039
3040 /* Alloc memory for sg translation */
3041 host->sg_cpu = dmam_alloc_coherent(host->dev,
3042 DESC_RING_BUF_SZ,
3043 &host->sg_dma, GFP_KERNEL);
3044 if (!host->sg_cpu) {
3045 dev_err(host->dev,
3046 "%s: could not alloc DMA memory\n",
3047 __func__);
3048 goto no_dma;
3049 }
3050
3051 host->dma_ops = &dw_mci_idmac_ops;
3052 dev_info(host->dev, "Using internal DMA controller.\n");
3053 } else {
3054 /* TRANS_MODE_EDMAC: check dma bindings again */
3055 if ((device_property_read_string_array(dev, "dma-names",
3056 NULL, 0) < 0) ||
3057 !device_property_present(dev, "dmas")) {
3058 goto no_dma;
3059 }
3060 host->dma_ops = &dw_mci_edmac_ops;
3061 dev_info(host->dev, "Using external DMA controller.\n");
3062 }
3063
3064 if (host->dma_ops->init && host->dma_ops->start &&
3065 host->dma_ops->stop && host->dma_ops->cleanup) {
3066 if (host->dma_ops->init(host)) {
3067 dev_err(host->dev, "%s: Unable to initialize DMA Controller.\n",
3068 __func__);
3069 goto no_dma;
3070 }
3071 } else {
3072 dev_err(host->dev, "DMA initialization not found.\n");
3073 goto no_dma;
3074 }
3075
3076 return;
3077
3078 no_dma:
3079 dev_info(host->dev, "Using PIO mode.\n");
3080 host->use_dma = TRANS_MODE_PIO;
3081 }
3082
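/*
 * CMD11 (voltage switch) timeout: if the switch never completes, fake a
 * response timeout so the state machine can finish the request.
 */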
3083 static void dw_mci_cmd11_timer(struct timer_list *t)
3084 {
3085 struct dw_mci *host = from_timer(host, t, cmd11_timer);
3086
3087 if (host->state != STATE_SENDING_CMD11) {
3088 dev_warn(host->dev, "Unexpected CMD11 timeout\n");
3089 return;
3090 }
3091
3092 host->cmd_status = SDMMC_INT_RTO;
3093 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3094 tasklet_schedule(&host->tasklet);
3095 }
3096
3097 static void dw_mci_cto_timer(struct timer_list *t)
3098 {
3099 struct dw_mci *host = from_timer(host, t, cto_timer);
3100 unsigned long irqflags;
3101 u32 pending;
3102
3103 spin_lock_irqsave(&host->irq_lock, irqflags);
3104
3105 /*
3106 * If somehow we have very bad interrupt latency it's remotely possible
3107 * that the timer could fire while the interrupt is still pending or
3108 * while the interrupt is midway through running. Let's be paranoid
3109 * and detect those two cases. Note that this paranoia is somewhat
3110 * justified because in this function we don't actually cancel the
3111 * pending command in the controller--we just assume it will never come.
3112 */
3113 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3114 if (pending & (DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_CMD_DONE)) {
3115 /* The interrupt should fire; no need to act but we can warn */
3116 dev_warn(host->dev, "Unexpected interrupt latency\n");
3117 goto exit;
3118 }
3119 if (test_bit(EVENT_CMD_COMPLETE, &host->pending_events)) {
3120 /* Presumably interrupt handler couldn't delete the timer */
3121 dev_warn(host->dev, "CTO timeout when already completed\n");
3122 goto exit;
3123 }
3124
3125 /*
3126 * Continued paranoia to make sure we're in the state we expect.
3127 * This paranoia isn't really justified but it seems good to be safe.
3128 */
3129 switch (host->state) {
3130 case STATE_SENDING_CMD11:
3131 case STATE_SENDING_CMD:
3132 case STATE_SENDING_STOP:
3133 /*
3134 * If CMD_DONE interrupt does NOT come in sending command
3135 * state, we should notify the driver to terminate current
3136 * transfer and report a command timeout to the core.
3137 */
3138 host->cmd_status = SDMMC_INT_RTO;
3139 set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
3140 tasklet_schedule(&host->tasklet);
3141 break;
3142 default:
3143 dev_warn(host->dev, "Unexpected command timeout, state %d\n",
3144 host->state);
3145 break;
3146 }
3147
3148 exit:
3149 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3150 }
3151
3152 static void dw_mci_xfer_timer(struct timer_list *t)
3153 {
3154 struct dw_mci *host = from_timer(host, t, xfer_timer);
3155 unsigned long irqflags;
3156
3157 spin_lock_irqsave(&host->irq_lock, irqflags);
3158
3159 if (test_bit(EVENT_XFER_COMPLETE, &host->pending_events)) {
3160 /* Presumably interrupt handler couldn't delete the timer */
3161 dev_warn(host->dev, "xfer timeout when already completed\n");
3162 goto exit;
3163 }
3164
3165 switch (host->state) {
3166 case STATE_SENDING_DATA:
3167 host->data_status = SDMMC_INT_DRTO;
3168 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3169 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3170 tasklet_schedule(&host->tasklet);
3171 break;
3172 default:
3173 dev_warn(host->dev, "Unexpected xfer timeout, state %d\n",
3174 host->state);
3175 break;
3176 }
3177
3178 exit:
3179 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3180 }
3181
3182 static void dw_mci_dto_timer(struct timer_list *t)
3183 {
3184 struct dw_mci *host = from_timer(host, t, dto_timer);
3185 unsigned long irqflags;
3186 u32 pending;
3187
3188 spin_lock_irqsave(&host->irq_lock, irqflags);
3189
3190 /*
3191 * The DTO timer is much longer than the CTO timer, so it's even less
3192 * likely that we'll hit these cases, but it pays to be paranoid.
3193 */
3194 pending = mci_readl(host, MINTSTS); /* read-only mask reg */
3195 if (pending & SDMMC_INT_DATA_OVER) {
3196 /* The interrupt should fire; no need to act but we can warn */
3197 dev_warn(host->dev, "Unexpected data interrupt latency\n");
3198 goto exit;
3199 }
3200 if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) {
3201 /* Presumably interrupt handler couldn't delete the timer */
3202 dev_warn(host->dev, "DTO timeout when already completed\n");
3203 goto exit;
3204 }
3205
3206 /*
3207 * Continued paranoia to make sure we're in the state we expect.
3208 * This paranoia isn't really justified but it seems good to be safe.
3209 */
3210 switch (host->state) {
3211 case STATE_SENDING_DATA:
3212 case STATE_DATA_BUSY:
3213 /*
3214 * If DTO interrupt does NOT come in sending data state,
3215 * we should notify the driver to terminate current transfer
3216 * and report a data timeout to the core.
3217 */
3218 host->data_status = SDMMC_INT_DRTO;
3219 set_bit(EVENT_DATA_ERROR, &host->pending_events);
3220 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
3221 tasklet_schedule(&host->tasklet);
3222 break;
3223 default:
3224 dev_warn(host->dev, "Unexpected data timeout, state %d\n",
3225 host->state);
3226 break;
3227 }
3228
3229 exit:
3230 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3231 }
3232
3233 #ifdef CONFIG_OF
3234 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3235 {
3236 struct dw_mci_board *pdata;
3237 struct device *dev = host->dev;
3238 const struct dw_mci_drv_data *drv_data = host->drv_data;
3239 int ret;
3240 u32 clock_frequency;
3241
3242 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
3243 if (!pdata)
3244 return ERR_PTR(-ENOMEM);
3245
3246 /* find the optional reset controller, if one is present */
3247 pdata->rstc = devm_reset_control_get_optional_exclusive(dev, "reset");
3248 if (IS_ERR(pdata->rstc)) {
3249 if (PTR_ERR(pdata->rstc) == -EPROBE_DEFER)
3250 return ERR_PTR(-EPROBE_DEFER);
3251 }
3252
3253 if (device_property_read_u32(dev, "fifo-depth", &pdata->fifo_depth))
3254 dev_info(dev,
3255 "fifo-depth property not found, using value of FIFOTH register as default\n");
3256
3257 device_property_read_u32(dev, "card-detect-delay",
3258 &pdata->detect_delay_ms);
3259
3260 device_property_read_u32(dev, "data-addr", &host->data_addr_override);
3261
3262 if (device_property_present(dev, "fifo-watermark-aligned"))
3263 host->wm_aligned = true;
3264
3265 if (!device_property_read_u32(dev, "clock-frequency", &clock_frequency))
3266 pdata->bus_hz = clock_frequency;
3267
3268 if (drv_data && drv_data->parse_dt) {
3269 ret = drv_data->parse_dt(host);
3270 if (ret)
3271 return ERR_PTR(ret);
3272 }
3273
3274 host->pinctrl = devm_pinctrl_get(host->dev);
3275 if (!IS_ERR(host->pinctrl)) {
3276 host->normal_state = pinctrl_lookup_state(host->pinctrl, "normal");
3277 if (IS_ERR(host->normal_state))
3278 dev_warn(dev, "No normal pinctrl state\n");
3279
3280 host->idle_state = pinctrl_lookup_state(host->pinctrl, "idle");
3281 if (IS_ERR(host->idle_state))
3282 dev_warn(dev, "No idle pinctrl state\n");
3283
3284 if (!IS_ERR(host->normal_state) && !IS_ERR(host->idle_state))
3285 pinctrl_select_state(host->pinctrl, host->idle_state);
3286 else
3287 host->pinctrl = NULL;
3288 }
3289
3290 return pdata;
3291 }
3292
3293 #else /* CONFIG_OF */
3294 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
3295 {
3296 return ERR_PTR(-EINVAL);
3297 }
3298 #endif /* CONFIG_OF */
3299
3300 static void dw_mci_enable_cd(struct dw_mci *host)
3301 {
3302 unsigned long irqflags;
3303 u32 temp;
3304
3305 /*
3306 * No need for the CD interrupt if the slot has a usable
3307 * CD GPIO or relies on polling (broken card detection).
3308 */
3309 if (host->slot->mmc->caps & MMC_CAP_NEEDS_POLL)
3310 return;
3311
3312 if (mmc_gpio_get_cd(host->slot->mmc) < 0) {
3313 spin_lock_irqsave(&host->irq_lock, irqflags);
3314 temp = mci_readl(host, INTMASK);
3315 temp |= SDMMC_INT_CD;
3316 mci_writel(host, INTMASK, temp);
3317 spin_unlock_irqrestore(&host->irq_lock, irqflags);
3318 }
3319 }
3320
3321 int dw_mci_probe(struct dw_mci *host)
3322 {
3323 const struct dw_mci_drv_data *drv_data = host->drv_data;
3324 int width, i, ret = 0;
3325 u32 fifo_size;
3326
3327 if (!host->pdata) {
3328 host->pdata = dw_mci_parse_dt(host);
3329 if (IS_ERR(host->pdata))
3330 return dev_err_probe(host->dev, PTR_ERR(host->pdata),
3331 "platform data not available\n");
3332 }
3333
3334 host->biu_clk = devm_clk_get(host->dev, "biu");
3335 if (IS_ERR(host->biu_clk)) {
3336 dev_dbg(host->dev, "biu clock not available\n");
3337 } else {
3338 ret = clk_prepare_enable(host->biu_clk);
3339 if (ret) {
3340 dev_err(host->dev, "failed to enable biu clock\n");
3341 return ret;
3342 }
3343 }
3344
3345 #ifdef CONFIG_ROCKCHIP_THUNDER_BOOT_MMC
3346 if (device_property_read_bool(host->dev, "no-sd") &&
3347 device_property_read_bool(host->dev, "no-sdio")) {
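		/*
		 * Presumably a transfer was started before the kernel took
		 * over (Thunder Boot); wait for the controller and IDMAC to
		 * go idle before reprogramming them.
		 */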
3348 if (readl_poll_timeout(host->regs + SDMMC_STATUS,
3349 fifo_size,
3350 !(fifo_size & (BIT(10) | GENMASK(7, 4))),
3351 0, 500 * USEC_PER_MSEC))
3352 dev_err(host->dev, "Controller is occupied!\n");
3353
3354 if (readl_poll_timeout(host->regs + SDMMC_IDSTS,
3355 fifo_size, !(fifo_size & GENMASK(16, 13)),
3356 0, 500 * USEC_PER_MSEC))
3357 dev_err(host->dev, "DMA is still running!\n");
3358
3359 BUG_ON(mci_readl(host, RINTSTS) & DW_MCI_ERROR_FLAGS);
3360 }
3361 #endif
3362
3363 host->ciu_clk = devm_clk_get(host->dev, "ciu");
3364 if (IS_ERR(host->ciu_clk)) {
3365 dev_dbg(host->dev, "ciu clock not available\n");
3366 host->bus_hz = host->pdata->bus_hz;
3367 } else {
3368 ret = clk_prepare_enable(host->ciu_clk);
3369 if (ret) {
3370 dev_err(host->dev, "failed to enable ciu clock\n");
3371 goto err_clk_biu;
3372 }
3373
3374 if (host->pdata->bus_hz) {
3375 ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz);
3376 if (ret)
3377 dev_warn(host->dev,
3378 "Unable to set bus rate to %uHz\n",
3379 host->pdata->bus_hz);
3380 }
3381 host->bus_hz = clk_get_rate(host->ciu_clk);
3382 }
3383
3384 if (!host->bus_hz) {
3385 dev_err(host->dev,
3386 "Platform data must supply bus speed\n");
3387 ret = -ENODEV;
3388 goto err_clk_ciu;
3389 }
3390
3391 if (!IS_ERR(host->pdata->rstc)) {
3392 reset_control_assert(host->pdata->rstc);
3393 usleep_range(10, 50);
3394 reset_control_deassert(host->pdata->rstc);
3395 }
3396
3397 if (drv_data && drv_data->init) {
3398 ret = drv_data->init(host);
3399 if (ret) {
3400 dev_err(host->dev,
3401 "implementation specific init failed\n");
3402 goto err_clk_ciu;
3403 }
3404 }
3405
3406 timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0);
3407 timer_setup(&host->cto_timer, dw_mci_cto_timer, 0);
3408 timer_setup(&host->dto_timer, dw_mci_dto_timer, 0);
3409 if (host->need_xfer_timer)
3410 timer_setup(&host->xfer_timer, dw_mci_xfer_timer, 0);
3411
3412 spin_lock_init(&host->lock);
3413 spin_lock_init(&host->irq_lock);
3414 INIT_LIST_HEAD(&host->queue);
3415
3416 /*
3417 * Get the host data width - this assumes that HCON has been set with
3418 * the correct values.
3419 */
3420 i = SDMMC_GET_HDATA_WIDTH(mci_readl(host, HCON));
3421 if (!i) {
3422 host->push_data = dw_mci_push_data16;
3423 host->pull_data = dw_mci_pull_data16;
3424 width = 16;
3425 host->data_shift = 1;
3426 } else if (i == 2) {
3427 host->push_data = dw_mci_push_data64;
3428 host->pull_data = dw_mci_pull_data64;
3429 width = 64;
3430 host->data_shift = 3;
3431 } else {
3432 /* Check for a reserved value, and warn if one is found */
3433 WARN((i != 1),
3434 "HCON reports a reserved host data width!\n"
3435 "Defaulting to 32-bit access.\n");
3436 host->push_data = dw_mci_push_data32;
3437 host->pull_data = dw_mci_pull_data32;
3438 width = 32;
3439 host->data_shift = 2;
3440 }
3441
3442 /* Reset all blocks */
3443 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3444 ret = -ENODEV;
3445 goto err_clk_ciu;
3446 }
3447
3448 host->dma_ops = host->pdata->dma_ops;
3449 dw_mci_init_dma(host);
3450
3451 /* Clear the interrupts for the host controller */
3452 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3453 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3454
3455 /* Put in max timeout */
3456 mci_writel(host, TMOUT, 0xFFFFFFFF);
3457
3458 /*
3459 * FIFO threshold settings: RxMark = fifo_size / 2 - 1,
3460 * TxMark = fifo_size / 2, DMA Size = 8
3461 */
3462 if (!host->pdata->fifo_depth) {
3463 /*
3464 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
3465 * have been overwritten by the bootloader, just like we're
3466 * about to do, so if you know the value for your hardware, you
3467 * should put it in the platform data.
3468 */
3469 fifo_size = mci_readl(host, FIFOTH);
3470 fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
3471 } else {
3472 fifo_size = host->pdata->fifo_depth;
3473 }
3474 host->fifo_depth = fifo_size;
3475 host->fifoth_val =
3476 SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2);
3477 mci_writel(host, FIFOTH, host->fifoth_val);
3478
3479 /* disable clock to CIU */
3480 mci_writel(host, CLKENA, 0);
3481 mci_writel(host, CLKSRC, 0);
3482
3483 /*
3484 * In 2.40a spec, Data offset is changed.
3485 * Need to check the version-id and set data-offset for DATA register.
3486 */
3487 host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
3488 dev_info(host->dev, "Version ID is %04x\n", host->verid);
3489
3490 if (host->data_addr_override)
3491 host->fifo_reg = host->regs + host->data_addr_override;
3492 else if (host->verid < DW_MMC_240A)
3493 host->fifo_reg = host->regs + DATA_OFFSET;
3494 else
3495 host->fifo_reg = host->regs + DATA_240A_OFFSET;
3496
3497 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
3498 ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt,
3499 host->irq_flags, "dw-mci", host);
3500 if (ret)
3501 goto err_dmaunmap;
3502
3503 /*
3504 * Enable interrupts for command done, data over, data empty,
3505 * receive ready, and errors such as transmit/receive timeout and CRC error
3506 */
3507 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
3508 SDMMC_INT_TXDR | SDMMC_INT_RXDR |
3509 DW_MCI_ERROR_FLAGS);
3510 /* Enable mci interrupt */
3511 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3512
3513 dev_info(host->dev,
3514 "DW MMC controller at irq %d,%d bit host data width,%u deep fifo\n",
3515 host->irq, width, fifo_size);
3516
3517 /* We need at least one slot to succeed */
3518 ret = dw_mci_init_slot(host);
3519 if (ret) {
3520 dev_dbg(host->dev, "slot %d init failed\n", i);
3521 goto err_dmaunmap;
3522 }
3523
3524 if (host->is_rv1106_sd) {
3525 #if IS_ENABLED(CONFIG_CPU_RV1106)
3526 g_sdmmc_ispvicap_lock = &host->lock;
3527 #endif
3528 /* Select IDMAC interface */
3529 fifo_size = mci_readl(host, CTRL);
3530 fifo_size |= SDMMC_CTRL_USE_IDMAC;
3531 mci_writel(host, CTRL, fifo_size);
3532
3533 fifo_size = mci_readl(host, INTMASK);
3534 fifo_size &= ~SDMMC_INT_HTO;
3535 mci_writel(host, INTMASK, fifo_size);
3536
3537 host->slot->mmc->caps &= ~(MMC_CAP_UHS_DDR50 | MMC_CAP_UHS_SDR104 |
3538 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
3539 MMC_CAP_UHS_SDR12);
3540 }
3541
3542 /* Now that slots are all setup, we can enable card detect */
3543 dw_mci_enable_cd(host);
3544
3545 return 0;
3546
3547 err_dmaunmap:
3548 if (host->use_dma && host->dma_ops->exit)
3549 host->dma_ops->exit(host);
3550
3551 if (!IS_ERR(host->pdata->rstc))
3552 reset_control_assert(host->pdata->rstc);
3553
3554 err_clk_ciu:
3555 clk_disable_unprepare(host->ciu_clk);
3556
3557 err_clk_biu:
3558 clk_disable_unprepare(host->biu_clk);
3559
3560 return ret;
3561 }
3562 EXPORT_SYMBOL(dw_mci_probe);
3563
3564 void dw_mci_remove(struct dw_mci *host)
3565 {
3566 dev_dbg(host->dev, "remove slot\n");
3567 if (host->slot)
3568 dw_mci_cleanup_slot(host->slot);
3569
3570 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3571 mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
3572
3573 /* disable clock to CIU */
3574 mci_writel(host, CLKENA, 0);
3575 mci_writel(host, CLKSRC, 0);
3576
3577 if (host->use_dma && host->dma_ops->exit)
3578 host->dma_ops->exit(host);
3579
3580 if (!IS_ERR(host->pdata->rstc))
3581 reset_control_assert(host->pdata->rstc);
3582
3583 clk_disable_unprepare(host->ciu_clk);
3584 clk_disable_unprepare(host->biu_clk);
3585 }
3586 EXPORT_SYMBOL(dw_mci_remove);
3587
3588
3589
3590 #ifdef CONFIG_PM
3591 int dw_mci_runtime_suspend(struct device *dev)
3592 {
3593 struct dw_mci *host = dev_get_drvdata(dev);
3594
3595 if (host->use_dma && host->dma_ops->exit)
3596 host->dma_ops->exit(host);
3597
3598 clk_disable_unprepare(host->ciu_clk);
3599
3600 if (host->slot &&
3601 (mmc_can_gpio_cd(host->slot->mmc) ||
3602 !mmc_card_is_removable(host->slot->mmc)))
3603 clk_disable_unprepare(host->biu_clk);
3604
3605 return 0;
3606 }
3607 EXPORT_SYMBOL(dw_mci_runtime_suspend);
3608
3609 int dw_mci_runtime_resume(struct device *dev)
3610 {
3611 int ret = 0;
3612 struct dw_mci *host = dev_get_drvdata(dev);
3613
3614 if (host->slot &&
3615 (mmc_can_gpio_cd(host->slot->mmc) ||
3616 !mmc_card_is_removable(host->slot->mmc))) {
3617 ret = clk_prepare_enable(host->biu_clk);
3618 if (ret)
3619 return ret;
3620 }
3621
3622 ret = clk_prepare_enable(host->ciu_clk);
3623 if (ret)
3624 goto err;
3625
3626 if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
3627 clk_disable_unprepare(host->ciu_clk);
3628 ret = -ENODEV;
3629 goto err;
3630 }
3631
3632 if (host->use_dma && host->dma_ops->init)
3633 host->dma_ops->init(host);
3634
3635 /*
3636 * Restore the initial value of the FIFOTH register
3637 * and invalidate prev_blksz by clearing it to zero.
3638 */
3639 mci_writel(host, FIFOTH, host->fifoth_val);
3640 host->prev_blksz = 0;
3641
3642 /* Put in max timeout */
3643 mci_writel(host, TMOUT, 0xFFFFFFFF);
3644
3645 mci_writel(host, RINTSTS, 0xFFFFFFFF);
3646 mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | DW_MCI_ERROR_FLAGS);
3647 mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE);
3648
3649 if (host->is_rv1106_sd) {
3650 /* Select IDMAC interface */
3651 ret = mci_readl(host, CTRL);
3652 ret |= SDMMC_CTRL_USE_IDMAC;
3653 mci_writel(host, CTRL, ret);
3654
3655 ret = mci_readl(host, INTMASK);
3656 ret &= ~SDMMC_INT_HTO;
3657 mci_writel(host, INTMASK, ret);
3658 }
3659
3660 if (host->slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
3661 dw_mci_set_ios(host->slot->mmc, &host->slot->mmc->ios);
3662
3663 /* Force a bus setup to guarantee the clock output is available */
3664 dw_mci_setup_bus(host->slot, true);
3665
3666 /* Re-enable SDIO interrupts. */
3667 if (sdio_irq_claimed(host->slot->mmc))
3668 __dw_mci_enable_sdio_irq(host->slot, 1);
3669
3670 /* Now that slots are all setup, we can enable card detect */
3671 dw_mci_enable_cd(host);
3672
3673 return 0;
3674
3675 err:
3676 if (host->slot &&
3677 (mmc_can_gpio_cd(host->slot->mmc) ||
3678 !mmc_card_is_removable(host->slot->mmc)))
3679 clk_disable_unprepare(host->biu_clk);
3680
3681 return ret;
3682 }
3683 EXPORT_SYMBOL(dw_mci_runtime_resume);
3684 #endif /* CONFIG_PM */
3685
3686 static int __init dw_mci_init(void)
3687 {
3688 pr_info("Synopsys Designware Multimedia Card Interface Driver\n");
3689 return 0;
3690 }
3691
3692 static void __exit dw_mci_exit(void)
3693 {
3694 }
3695
3696 module_init(dw_mci_init);
3697 module_exit(dw_mci_exit);
3698
3699 MODULE_DESCRIPTION("DW Multimedia Card Interface driver");
3700 MODULE_AUTHOR("NXP Semiconductor VietNam");
3701 MODULE_AUTHOR("Imagination Technologies Ltd");
3702 MODULE_LICENSE("GPL v2");
3703