/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <div64.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#ifdef CONFIG_DM_GPIO
#include <asm/gpio.h>
#include <asm-generic/gpio.h>
#endif

#define PAGE_SIZE 4096
#define MSEC_PER_SEC 1000ULL

/*
 * Currently reads/writes are done in bursts of up to 8*8*4 bytes per
 * stride. Please note that if you change MAX_STRIDE, you must also
 * update dwmci_memcpy_fromio() and dwmci_memcpy_toio() to adjust the
 * number of {ldm, stm} pairs accordingly.
 */
#define MAX_STRIDE 64
#if (CONFIG_ARM && CONFIG_CPU_V7 && !defined(CONFIG_MMC_SIMPLE))
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		::: "memory"
	);
}

void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"stm r1, {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		::: "memory"
	);
}
#else
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
#endif

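/*
 * Write @value to the CTRL register and busy-wait for the reset bit(s)
 * to self-clear. Returns 1 on success, 0 if the controller did not come
 * out of reset in time.
 */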
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

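/* Fill in one internal DMA (IDMAC) descriptor and link it to the next one. */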
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

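/*
 * Build the IDMAC descriptor chain for @data (up to 8 blocks per
 * descriptor), flush it to memory and enable the internal DMA engine.
 */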
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

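/*
 * Compute the data read timeout in milliseconds. The SPL variant derives
 * it from the TMOUT and CLKDIV registers, while the full build estimates
 * it from the transfer size and the current bus clock and width.
 */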
#ifdef CONFIG_SPL_BUILD
static unsigned int dwmci_get_drto(struct dwmci_host *host,
				   const unsigned int size)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;

	drto_clks = dwmci_readl(host, DWMCI_TMOUT) >> 8;
	drto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->mmc->clock);

	/* add a bit of spare time */
	drto_ms += 10;

	return drto_ms;
}
#else
static unsigned int dwmci_get_drto(struct dwmci_host *host,
				   const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= host->mmc->clock;
	timeout /= host->mmc->bus_width;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 10000) ? 10000 : timeout;

	return timeout;
}
#endif

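/* Compute the command response timeout in milliseconds from TMOUT and CLKDIV. */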
static unsigned int dwmci_get_cto(struct dwmci_host *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;

	cto_clks = dwmci_readl(host, DWMCI_TMOUT) & 0xff;
	cto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->mmc->clock);

	/* add a bit of spare time */
	cto_ms += 10;

	return cto_ms;
}

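/*
 * Wait for a data transfer to finish. In FIFO (PIO) mode the data FIFO is
 * drained/filled here as well, either word by word or, when stride PIO is
 * enabled, in MAX_STRIDE-word bursts via dwmci_memcpy_fromio()/_toio().
 */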
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout, status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			   RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks;
	/* Only use stride (burst) PIO when the transfer is larger than 128 bytes */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_drto(host, size);
	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy PIO mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio() bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
				start = get_timer(0);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
							DWMCI_FIFO_SHIFT) &
							DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio() bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
				start = get_timer(0);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

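/* Translate the mmc_data direction into CMD register transfer-mode flags. */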
static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

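/*
 * Send a command and run any associated data transfer. DMA transfers go
 * through a bounce buffer and the IDMAC descriptor chain; FIFO-mode
 * transfers are handled by dwmci_data_transfer().
 */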
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only DMA mode needs this */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	return ret;
}

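/*
 * SPL-only variant of dwmci_send_cmd(): it issues the command and sets up
 * the data transfer, but returns without waiting for the data phase to
 * complete.
 */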
#ifdef CONFIG_SPL_BLK_READ_PREPARE
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	struct dwmci_idmac *cur_idmac;
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
				 sizeof(struct dwmci_idmac),
				 ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
	if (!cur_idmac)
		return -ENOMEM;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	return ret;
}
#endif

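/*
 * Program the card clock: derive the divider from the controller source
 * clock and latch it with CMD_UPD_CLK update commands before re-enabling
 * the clock output.
 */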
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if (freq == 0)
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is source clock value.
	 * host->bus_hz should be set by user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == 0)
		return -EINVAL;

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
		     DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
		     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

#ifdef CONFIG_DM_MMC
static bool dwmci_card_busy(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static bool dwmci_card_busy(struct mmc *mmc)
{
#endif
	u32 status;
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = dwmci_readl(host, DWMCI_STATUS);

	return !!(status & DWMCI_BUSY);
}

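/* Hand tuning requests over to the platform-specific hook, if one exists. */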
#ifdef CONFIG_DM_MMC
static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	if (!host->execute_tuning)
		return -EIO;

	return host->execute_tuning(host, opcode);
}

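/* Apply the bus width, clock and DDR-mode settings requested by the MMC core. */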
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc_card_ddr(mmc))
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

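/*
 * One-time controller initialisation: power the card, reset the controller,
 * probe the DMA capability and program FIFO thresholds and interrupt masks.
 */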
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;
	uint32_t use_dma;
	uint32_t verid;

	if (host->board_init)
		host->board_init(host);
#ifdef CONFIG_ARCH_ROCKCHIP
	if (host->dev_index == 0)
		dwmci_writel(host, DWMCI_PWREN, 1);
	else if (host->dev_index == 1)
		dwmci_writel(host, DWMCI_PWREN, 0);
	else
		dwmci_writel(host, DWMCI_PWREN, 1);
#else
	dwmci_writel(host, DWMCI_PWREN, 1);
#endif

	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
	if (verid >= DW_MMC_240A)
		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
	if (use_dma == DMA_INTERFACE_IDMA) {
		host->fifo_mode = 0;
	} else {
		host->fifo_mode = 1;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
				   RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

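/*
 * Read the optional "cd-gpios" card-detect GPIO (active low). Returns 1 if
 * a card is present, 0 if not, or a negative value when no usable GPIO is
 * available.
 */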
static int dwmci_get_cd(struct udevice *dev)
{
	int ret = -1;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc detect;

	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
	if (ret)
		return ret;

	ret = !dm_gpio_get_value(&detect);
	dm_gpio_free(dev, &detect);
#endif
	return ret;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy = dwmci_card_busy,
	.send_cmd = dwmci_send_cmd,
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	.send_cmd_prepare = dwmci_send_cmd_prepare,
#endif
	.set_ios = dwmci_set_ios,
	.get_cd = dwmci_get_cd,
	.execute_tuning = dwmci_execute_tuning,
};

#else
static const struct mmc_ops dwmci_ops = {
	.card_busy = dwmci_card_busy,
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.get_cd = dwmci_get_cd,
	.init = dwmci_init,
	.execute_tuning = dwmci_execute_tuning,
};
#endif

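/* Fill in the mmc_config from the host's capabilities: clocks, voltages and bus width. */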
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	switch (host->buswidth) {
	case 8:
		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
		break;
	case 4:
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	case 1:
		cfg->host_caps &= ~MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	default:
		printf("Unsupported bus width: %d\n", host->buswidth);
		break;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

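/* Register the controller with the MMC core (driver model or legacy path). */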
#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif