xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision 9ac6f4797ca1f9a9c5c1f4bdb8414fefa85f8bd3)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <bouncebuf.h>
10 #include <common.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <mmc.h>
15 #include <dwmmc.h>
16 
17 #define PAGE_SIZE 4096
18 
19 /*
20  * Currently it supports read/write up to 8*8*4 Bytes per
21  * stride as a burst mode. Please note that if you change
22  * MAX_STRIDE, you should also update dwmci_memcpy_fromio
23  * to augment the groups of {ldm, stm}.
24  */
25 #define MAX_STRIDE 64
#if CONFIG_ARM && CONFIG_CPU_V7
/*
 * Burst-read MAX_STRIDE (64) words = 256 bytes from the data FIFO into
 * buffer.
 *
 * The asm body relies on the AAPCS argument registers: buffer arrives in
 * r0 and fifo_addr in r1, which is why the function is marked noinline —
 * inlining would break that assumption.  The destination pointer (r0)
 * advances via stm writeback; the FIFO side (r1) is deliberately re-read
 * in place, each ldm popping the next 8 words from the FIFO window.
 */
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}

/*
 * Burst-write MAX_STRIDE (64) words = 256 bytes from buffer into the data
 * FIFO.
 *
 * This was previously implemented as dwmci_memcpy_fromio(fifo_addr, buffer),
 * but that reuses an asm body whose load side ("ldm r1" with no writeback)
 * never advances: with the memory buffer in r1 it re-read the same first
 * 32 bytes on every group and pushed them to the card eight times over.
 * The dedicated body below streams through the buffer (ldm r0 with
 * writeback) and stores each group into the fixed FIFO window (stm r1).
 */
void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}
#else
/* Stride copy is only implemented for ARMv7; other builds use word PIO. */
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
#endif
60 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
61 {
62 	unsigned long timeout = 1000;
63 	u32 ctrl;
64 
65 	dwmci_writel(host, DWMCI_CTRL, value);
66 
67 	while (timeout--) {
68 		ctrl = dwmci_readl(host, DWMCI_CTRL);
69 		if (!(ctrl & DWMCI_RESET_ALL))
70 			return 1;
71 	}
72 	return 0;
73 }
74 
75 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
76 		u32 desc0, u32 desc1, u32 desc2)
77 {
78 	struct dwmci_idmac *desc = idmac;
79 
80 	desc->flags = desc0;
81 	desc->cnt = desc1;
82 	desc->addr = desc2;
83 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
84 }
85 
/*
 * Build the internal-DMAC descriptor chain for a transfer and arm the DMA
 * engine.  Each descriptor covers up to 8 blocks; the bounce buffer is
 * stepped in PAGE_SIZE (4096-byte) increments per descriptor.
 * NOTE(review): the PAGE_SIZE step only matches the descriptor's byte
 * count when blocksize == 512 (8 * 512 == 4096) — presumably the only
 * blocksize used on this path; confirm against callers.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;


	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Tell the controller where the descriptor chain starts. */
	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		/* Every descriptor is owned by the DMAC and chained (CH). */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
		/* First descriptor carries the "first segment" marker. */
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			/* Final descriptor: mark "last" and take the rest. */
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while(1);

	/* Flush the chain to memory so the DMAC sees the descriptors. */
	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	/* Enable DMA in the host controller ... */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	/* ... and enable the internal DMAC itself (fixed burst). */
	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	/* Program transfer geometry last. */
	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
136 
137 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
138 {
139 	int ret = 0;
140 	u32 timeout = 240000;
141 	u32 status, ctrl, mask, size, i, len = 0;
142 	u32 *buf = NULL;
143 	ulong start = get_timer(0);
144 	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
145 			    RX_WMARK_SHIFT) + 1) * 2;
146 	bool stride;
147 
148 	size = data->blocksize * data->blocks / 4;
149 	/* Still use legacy PIO mode if size < 512(128 * 4) Bytes */
150 	stride = host->stride_pio && size > 128;
151 	if (data->flags == MMC_DATA_READ)
152 		buf = (unsigned int *)data->dest;
153 	else
154 		buf = (unsigned int *)data->src;
155 
156 	for (;;) {
157 		mask = dwmci_readl(host, DWMCI_RINTSTS);
158 		/* Error during data transfer. */
159 		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
160 			debug("%s: DATA ERROR!\n", __func__);
161 
162 			dwmci_wait_reset(host, DWMCI_RESET_ALL);
163 			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
164 				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
165 
166 			do {
167 				status = dwmci_readl(host, DWMCI_CMD);
168 				if (timeout-- < 0)
169 					ret = -ETIMEDOUT;
170 			} while (status & DWMCI_CMD_START);
171 
172 			if (!host->fifo_mode) {
173 				ctrl = dwmci_readl(host, DWMCI_BMOD);
174 				ctrl |= DWMCI_BMOD_IDMAC_RESET;
175 				dwmci_writel(host, DWMCI_BMOD, ctrl);
176 			}
177 
178 			ret = -EINVAL;
179 			break;
180 		}
181 
182 		if (host->fifo_mode && size) {
183 			len = 0;
184 			if (data->flags == MMC_DATA_READ &&
185 			    (mask & DWMCI_INTMSK_RXDR)) {
186 				while (size) {
187 					len = dwmci_readl(host, DWMCI_STATUS);
188 					len = (len >> DWMCI_FIFO_SHIFT) &
189 						    DWMCI_FIFO_MASK;
190 					len = min(size, len);
191 					if (!stride) {
192 						/* Legacy pio mode */
193 						for (i = 0; i < len; i++)
194 							*buf++ = dwmci_readl(host, DWMCI_DATA);
195 						goto read_again;
196 					}
197 
198 					/* dwmci_memcpy_fromio now bursts 256 Bytes once */
199 					if (len < MAX_STRIDE)
200 						continue;
201 
202 					for (i = 0; i < len / MAX_STRIDE; i++) {
203 						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
204 						buf += MAX_STRIDE;
205 					}
206 
207 					len = i * MAX_STRIDE;
208 read_again:
209 					size = size > len ? (size - len) : 0;
210 				}
211 				dwmci_writel(host, DWMCI_RINTSTS,
212 					     DWMCI_INTMSK_RXDR);
213 			} else if (data->flags == MMC_DATA_WRITE &&
214 				   (mask & DWMCI_INTMSK_TXDR)) {
215 				while (size) {
216 					len = dwmci_readl(host, DWMCI_STATUS);
217 					len = fifo_depth - ((len >>
218 						   DWMCI_FIFO_SHIFT) &
219 						   DWMCI_FIFO_MASK);
220 					len = min(size, len);
221 					if (!stride) {
222 						for (i = 0; i < len; i++)
223 							dwmci_writel(host, DWMCI_DATA,
224 								     *buf++);
225 						goto write_again;
226 					}
227 					/* dwmci_memcpy_toio now bursts 256 Bytes once */
228 					if (len < MAX_STRIDE)
229 						continue;
230 
231 					for (i = 0; i < len / MAX_STRIDE; i++) {
232 						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
233 						buf += MAX_STRIDE;
234 					}
235 
236 					len = i * MAX_STRIDE;
237 write_again:
238 					size = size > len ? (size - len) : 0;
239 				}
240 				dwmci_writel(host, DWMCI_RINTSTS,
241 					     DWMCI_INTMSK_TXDR);
242 			}
243 		}
244 
245 		/* Data arrived correctly. */
246 		if (mask & DWMCI_INTMSK_DTO) {
247 			ret = 0;
248 			break;
249 		}
250 
251 		/* Check for timeout. */
252 		if (get_timer(start) > timeout) {
253 			debug("%s: Timeout waiting for data!\n",
254 			      __func__);
255 			ret = -ETIMEDOUT;
256 			break;
257 		}
258 	}
259 
260 	dwmci_writel(host, DWMCI_RINTSTS, mask);
261 
262 	return ret;
263 }
264 
265 static int dwmci_set_transfer_mode(struct dwmci_host *host,
266 		struct mmc_data *data)
267 {
268 	unsigned long mode;
269 
270 	mode = DWMCI_CMD_DATA_EXP;
271 	if (data->flags & MMC_DATA_WRITE)
272 		mode |= DWMCI_CMD_RW;
273 
274 	return mode;
275 }
276 
/*
 * Issue a single MMC command, with an optional data phase (DMA via the
 * internal DMAC by default, PIO when host->fifo_mode is set).
 * Returns 0 on success or a negative errno.
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One iDMAC descriptor per 8 blocks (see dwmci_prepare_data). */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;	/* ms budget for the card-busy wait */
	u32 retry = 100000;		/* poll count for command-done */
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait for the card to release the data lines. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear any stale interrupt status before starting. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: just program geometry and reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage user memory through a bounce buffer. */
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* R2 (136-bit) responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Poll for command-done; status is acked here only when no data
	 * phase follows (the data path acks in dwmci_data_transfer). */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}


	/* Long responses are read most-significant word first (RESP3..0). */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			/* Copies bounce data back to the caller's buffer. */
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
413 
/*
 * Program the card clock to (at most) freq Hz following the DW MMC clock
 * update protocol: disable the clock, write the divider, latch it with a
 * CMD_UPD_CLK command, re-enable the clock (with low-power gating), then
 * latch again.  Returns 0 on success or a negative errno.
 */
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	/* freq == 0 means "leave the clock alone". */
	if (freq == 0)
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is source clock value.
	 * host->bus_hz should be set by user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	/* CLKDIV divides by 2*div; round up so we never exceed freq. */
	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	/* Disable the clock before touching the divider. */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	/* Latch the new divider into the card clock domain. */
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	/* Re-enable the clock, letting it gate when the card is idle. */
	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
			DWMCI_CLKEN_LOW_PWR);

	/* Latch the enable the same way. */
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}
475 
476 #ifdef CONFIG_DM_MMC
477 static bool dwmci_card_busy(struct udevice *dev)
478 {
479 	struct mmc *mmc = mmc_get_mmc_dev(dev);
480 #else
481 static bool dwmci_card_busy(struct mmc *mmc)
482 {
483 #endif
484 	u32 status;
485 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
486 
487 	/*
488 	 * Check the busy bit which is low when DAT[3:0]
489 	 * (the data lines) are 0000
490 	 */
491 	status = dwmci_readl(host, DWMCI_STATUS);
492 
493 	return !!(status & DWMCI_BUSY);
494 }
495 
496 #ifdef CONFIG_DM_MMC
497 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
498 {
499 	struct mmc *mmc = mmc_get_mmc_dev(dev);
500 #else
501 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
502 {
503 #endif
504 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
505 
506 	if (!host->execute_tuning)
507 		return -EIO;
508 
509 	return host->execute_tuning(host, opcode);
510 }
511 
512 #ifdef CONFIG_DM_MMC
513 static int dwmci_set_ios(struct udevice *dev)
514 {
515 	struct mmc *mmc = mmc_get_mmc_dev(dev);
516 #else
517 static int dwmci_set_ios(struct mmc *mmc)
518 {
519 #endif
520 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
521 	u32 ctype, regs;
522 
523 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
524 
525 	dwmci_setup_bus(host, mmc->clock);
526 	switch (mmc->bus_width) {
527 	case 8:
528 		ctype = DWMCI_CTYPE_8BIT;
529 		break;
530 	case 4:
531 		ctype = DWMCI_CTYPE_4BIT;
532 		break;
533 	default:
534 		ctype = DWMCI_CTYPE_1BIT;
535 		break;
536 	}
537 
538 	dwmci_writel(host, DWMCI_CTYPE, ctype);
539 
540 	regs = dwmci_readl(host, DWMCI_UHS_REG);
541 	if (mmc_card_ddr(mmc))
542 		regs |= DWMCI_DDR_MODE;
543 	else
544 		regs &= ~DWMCI_DDR_MODE;
545 
546 	dwmci_writel(host, DWMCI_UHS_REG, regs);
547 
548 	if (host->clksel)
549 		host->clksel(host);
550 
551 	return 0;
552 }
553 
/*
 * One-time controller initialisation: power on, full reset, slow
 * enumeration clock, interrupt/timeout setup and FIFO watermarks.
 * Returns 0 on success, -EIO if the controller fails to reset.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	/* Platform hook runs first (pinmux, clocks, etc.). */
	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear all pending status and mask all interrupts (polled mode). */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum data/response timeout. */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	/* No DMA interrupts; software-reset the internal DMAC. */
	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		/* Derive watermarks from the hardware FIFO depth. */
		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	/* Leave the card clock disabled until set_ios programs it. */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
594 
#ifdef CONFIG_DM_MMC
/* Driver-model probe: resolve the mmc device and run the common init. */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model operations table (init happens in dwmci_probe). */
const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
/* Legacy (non-DM) operations table; init is called through mmc_ops. */
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif
619 
620 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
621 		u32 max_clk, u32 min_clk)
622 {
623 	cfg->name = host->name;
624 #ifndef CONFIG_DM_MMC
625 	cfg->ops = &dwmci_ops;
626 #endif
627 	cfg->f_min = min_clk;
628 	cfg->f_max = max_clk;
629 
630 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
631 
632 	cfg->host_caps = host->caps;
633 
634 	if (host->buswidth == 8) {
635 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
636 	} else {
637 		cfg->host_caps |= MMC_MODE_4BIT;
638 		cfg->host_caps &= ~MMC_MODE_8BIT;
639 	}
640 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
641 
642 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
643 }
644 
#ifdef CONFIG_BLK
/* CONFIG_BLK: registration is a plain mmc_bind of the prepared config. */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/*
 * Legacy registration: fill the embedded config and create the mmc
 * device.  Returns 0 on success, -1 if mmc_create fails.
 */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif
662