xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision 2c6a058b7ea25398013cb25b4e3bb96fe40da1a5)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <bouncebuf.h>
10 #include <common.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <mmc.h>
15 #include <dwmmc.h>
16 
17 #define PAGE_SIZE 4096
18 
19 /*
20  * Currently it supports read/write up to 8*8*4 Bytes per
21  * stride as a burst mode. Please note that if you change
22  * MAX_STRIDE, you should also update dwmci_memcpy_fromio
23  * to augment the groups of {ldm, stm}.
24  */
25 #define MAX_STRIDE 64
#if CONFIG_ARM && CONFIG_CPU_V7
/*
 * Burst-copy MAX_STRIDE (64) 32-bit words (256 bytes) from the FIFO
 * data register to @buffer.
 *
 * r1 (fifo_addr) is deliberately NOT post-incremented: every ldm
 * re-reads the same FIFO window, while r0 (buffer) advances via
 * "stm r0!".  r2-r9 are saved/restored by the surrounding push/pop,
 * so only "memory" needs to be in the clobber list.
 *
 * NOTE(review): r9 can be a reserved (PIC/platform) register with some
 * toolchains -- confirm the build flags permit clobbering it here.
 */
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
		:::"memory"
	);
}

/*
 * Mirror image of dwmci_memcpy_fromio: burst-copy MAX_STRIDE (64)
 * 32-bit words (256 bytes) from @buffer into the FIFO data register.
 * Here r0 (buffer) is post-incremented and r1 (fifo_addr) is not.
 */
void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
		:::"memory"
	);
}
#else
/*
 * Empty stubs for non-ARMv7 builds.  These copy nothing, so this
 * assumes host->stride_pio is never set on such platforms -- otherwise
 * data would be silently dropped; verify the platform configuration.
 */
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {};
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {};
#endif
80 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
81 {
82 	unsigned long timeout = 1000;
83 	u32 ctrl;
84 
85 	dwmci_writel(host, DWMCI_CTRL, value);
86 
87 	while (timeout--) {
88 		ctrl = dwmci_readl(host, DWMCI_CTRL);
89 		if (!(ctrl & DWMCI_RESET_ALL))
90 			return 1;
91 	}
92 	return 0;
93 }
94 
95 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
96 		u32 desc0, u32 desc1, u32 desc2)
97 {
98 	struct dwmci_idmac *desc = idmac;
99 
100 	desc->flags = desc0;
101 	desc->cnt = desc1;
102 	desc->addr = desc2;
103 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
104 }
105 
/*
 * Build the internal-DMAC (iDMAC) descriptor chain for @data in
 * @cur_idmac and arm the controller for a DMA transfer via the
 * @bounce_buffer.
 *
 * Each descriptor covers up to 8 blocks; the last one is marked with
 * the LD flag.  The chain is flushed from the data cache before the
 * DMA engine is enabled.
 *
 * NOTE(review): each descriptor's buffer address advances by PAGE_SIZE
 * (4096), which equals 8 blocks only for a 512-byte blocksize --
 * confirm DMA callers never use a different blocksize.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;


	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Point the controller at the start of the descriptor chain. */
	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		/* OWN: hardware owns the descriptor; CH: chained mode. */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
		/* FS marks the first descriptor of the transfer. */
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			/* Final descriptor: mark it last, take the remainder. */
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while(1);

	data_end = (ulong)cur_idmac;
	/* Make the descriptor chain visible to the DMA engine. */
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	/* Enable DMA and the internal DMAC. */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	/* Program the transfer geometry. */
	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
156 
/*
 * Poll for completion of a data transfer.
 *
 * In FIFO (PIO) mode the data is moved here as well: word-by-word in
 * "legacy" PIO, or -- when host->stride_pio is set and the transfer is
 * larger than 512 bytes -- in MAX_STRIDE-word bursts through
 * dwmci_memcpy_{from,to}io.  In DMA mode this only waits for the DTO
 * interrupt or an error.
 *
 * Returns 0 on success, -EINVAL on a data error (after resetting the
 * controller), -ETIMEDOUT if the transfer does not finish in time.
 *
 * NOTE(review): the stride path drains only multiples of MAX_STRIDE
 * words per FIFO poll ("if (len < MAX_STRIDE) continue;"), so it
 * assumes the total word count is a multiple of 64 -- true for the
 * usual 512-byte blocks, but confirm for other blocksizes.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout = 240000;
	u32 status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	/* FIFO depth in words, derived from the programmed RX watermark. */
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks / 4;
	/* Still use legacy PIO mode if size < 512(128 * 4) Bytes */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);

			/* Full reset, then re-latch the clock with UPD_CLK. */
			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			/* DMA mode also needs the internal DMAC reset. */
			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					/* Words currently available in the FIFO. */
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy pio mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio now bursts 256 Bytes once */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					/* Only whole strides were consumed. */
					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					/* Free word slots in the FIFO. */
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio now bursts 256 Bytes once */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					/* Only whole strides were written. */
					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Acknowledge whatever interrupt status ended the loop. */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
286 
287 static int dwmci_set_transfer_mode(struct dwmci_host *host,
288 		struct mmc_data *data)
289 {
290 	unsigned long mode;
291 
292 	mode = DWMCI_CMD_DATA_EXP;
293 	if (data->flags & MMC_DATA_WRITE)
294 		mode |= DWMCI_CMD_RW;
295 
296 	return mode;
297 }
298 
/*
 * Send one MMC command, optionally with a data phase.
 *
 * Waits for the card to go non-busy, programs the argument and flags,
 * polls for command-done, reads back the response, and then runs the
 * data phase (PIO via dwmci_data_transfer, or iDMAC DMA through a
 * bounce buffer).
 *
 * Returns 0 on success, -ETIMEDOUT on busy/command/response timeout,
 * -EIO on a response error, -1 for an unsupported response type, or
 * the data-phase result.
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One iDMAC descriptor per 8 blocks (DMA path only). */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait (up to 500 ms) for the card to release the data lines. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear any stale raw interrupt status before issuing. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: program geometry and reset the FIFO only. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage through a cache-aligned bounce buffer. */
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* 136-bit responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Busy-poll (no delay) for command-done, bounded by 'retry'. */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}


	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			/* Long response is read back RESP3..RESP0. */
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
435 
436 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
437 {
438 	u32 div, status;
439 	int timeout = 10000;
440 	unsigned long sclk;
441 
442 	if (freq == 0)
443 		return 0;
444 	/*
445 	 * If host->get_mmc_clk isn't defined,
446 	 * then assume that host->bus_hz is source clock value.
447 	 * host->bus_hz should be set by user.
448 	 */
449 	if (host->get_mmc_clk)
450 		sclk = host->get_mmc_clk(host, freq);
451 	else if (host->bus_hz)
452 		sclk = host->bus_hz;
453 	else {
454 		debug("%s: Didn't get source clock value.\n", __func__);
455 		return -EINVAL;
456 	}
457 
458 	if (sclk == freq)
459 		div = 0;	/* bypass mode */
460 	else
461 		div = DIV_ROUND_UP(sclk, 2 * freq);
462 
463 	dwmci_writel(host, DWMCI_CLKENA, 0);
464 	dwmci_writel(host, DWMCI_CLKSRC, 0);
465 
466 	dwmci_writel(host, DWMCI_CLKDIV, div);
467 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
468 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
469 
470 	do {
471 		status = dwmci_readl(host, DWMCI_CMD);
472 		if (timeout-- < 0) {
473 			debug("%s: Timeout!\n", __func__);
474 			return -ETIMEDOUT;
475 		}
476 	} while (status & DWMCI_CMD_START);
477 
478 	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
479 			DWMCI_CLKEN_LOW_PWR);
480 
481 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
482 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
483 
484 	timeout = 10000;
485 	do {
486 		status = dwmci_readl(host, DWMCI_CMD);
487 		if (timeout-- < 0) {
488 			debug("%s: Timeout!\n", __func__);
489 			return -ETIMEDOUT;
490 		}
491 	} while (status & DWMCI_CMD_START);
492 
493 	host->clock = freq;
494 
495 	return 0;
496 }
497 
498 #ifdef CONFIG_DM_MMC
499 static bool dwmci_card_busy(struct udevice *dev)
500 {
501 	struct mmc *mmc = mmc_get_mmc_dev(dev);
502 #else
503 static bool dwmci_card_busy(struct mmc *mmc)
504 {
505 #endif
506 	u32 status;
507 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
508 
509 	/*
510 	 * Check the busy bit which is low when DAT[3:0]
511 	 * (the data lines) are 0000
512 	 */
513 	status = dwmci_readl(host, DWMCI_STATUS);
514 
515 	return !!(status & DWMCI_BUSY);
516 }
517 
518 #ifdef CONFIG_DM_MMC
519 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
520 {
521 	struct mmc *mmc = mmc_get_mmc_dev(dev);
522 #else
523 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
524 {
525 #endif
526 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
527 
528 	if (!host->execute_tuning)
529 		return -EIO;
530 
531 	return host->execute_tuning(host, opcode);
532 }
533 
534 #ifdef CONFIG_DM_MMC
535 static int dwmci_set_ios(struct udevice *dev)
536 {
537 	struct mmc *mmc = mmc_get_mmc_dev(dev);
538 #else
539 static int dwmci_set_ios(struct mmc *mmc)
540 {
541 #endif
542 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
543 	u32 ctype, regs;
544 
545 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
546 
547 	dwmci_setup_bus(host, mmc->clock);
548 	switch (mmc->bus_width) {
549 	case 8:
550 		ctype = DWMCI_CTYPE_8BIT;
551 		break;
552 	case 4:
553 		ctype = DWMCI_CTYPE_4BIT;
554 		break;
555 	default:
556 		ctype = DWMCI_CTYPE_1BIT;
557 		break;
558 	}
559 
560 	dwmci_writel(host, DWMCI_CTYPE, ctype);
561 
562 	regs = dwmci_readl(host, DWMCI_UHS_REG);
563 	if (mmc_card_ddr(mmc))
564 		regs |= DWMCI_DDR_MODE;
565 	else
566 		regs &= ~DWMCI_DDR_MODE;
567 
568 	dwmci_writel(host, DWMCI_UHS_REG, regs);
569 
570 	if (host->clksel)
571 		host->clksel(host);
572 
573 	return 0;
574 }
575 
576 static int dwmci_init(struct mmc *mmc)
577 {
578 	struct dwmci_host *host = mmc->priv;
579 	uint32_t use_dma;
580 
581 	if (host->board_init)
582 		host->board_init(host);
583 
584 	dwmci_writel(host, DWMCI_PWREN, 1);
585 
586 	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
587 		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
588 		return -EIO;
589 	}
590 
591 	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
592 	if (use_dma == DMA_INTERFACE_IDMA) {
593 		host->fifo_mode = 0;
594 	} else {
595 		host->fifo_mode = 1;
596 	}
597 
598 	/* Enumerate at 400KHz */
599 	dwmci_setup_bus(host, mmc->cfg->f_min);
600 
601 	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
602 	dwmci_writel(host, DWMCI_INTMASK, 0);
603 
604 	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
605 
606 	dwmci_writel(host, DWMCI_IDINTEN, 0);
607 	dwmci_writel(host, DWMCI_BMOD, 1);
608 
609 	if (!host->fifoth_val) {
610 		uint32_t fifo_size;
611 
612 		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
613 		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
614 		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
615 				RX_WMARK(fifo_size / 2 - 1) |
616 				TX_WMARK(fifo_size / 2);
617 	}
618 	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
619 
620 	dwmci_writel(host, DWMCI_CLKENA, 0);
621 	dwmci_writel(host, DWMCI_CLKSRC, 0);
622 
623 	return 0;
624 }
625 
#ifdef CONFIG_DM_MMC
/* Driver-model probe: initialize the controller for the bound device. */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model MMC operations; init happens in dwmci_probe(). */
const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
/* Legacy (non-DM) MMC operations, including an explicit .init hook. */
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif
650 
651 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
652 		u32 max_clk, u32 min_clk)
653 {
654 	cfg->name = host->name;
655 #ifndef CONFIG_DM_MMC
656 	cfg->ops = &dwmci_ops;
657 #endif
658 	cfg->f_min = min_clk;
659 	cfg->f_max = max_clk;
660 
661 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
662 
663 	cfg->host_caps = host->caps;
664 
665 	if (host->buswidth == 8) {
666 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
667 	} else {
668 		cfg->host_caps |= MMC_MODE_4BIT;
669 		cfg->host_caps &= ~MMC_MODE_8BIT;
670 	}
671 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
672 
673 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
674 }
675 
#ifdef CONFIG_BLK
/* Driver-model path: bind the MMC device to the block layer. */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/*
 * Legacy registration: fill in the host config and create the mmc
 * device.  Returns 0 on success, -1 if mmc_create() fails.
 */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
693