xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision 299fe3b66139014360978fce3f4aac261fd7416f)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <common.h>
10 #include <bouncebuf.h>
11 #include <div64.h>
12 #include <errno.h>
13 #include <malloc.h>
14 #include <memalign.h>
15 #include <mmc.h>
16 #include <dwmmc.h>
17 #include <dm/pinctrl.h>
18 #include <dm.h>
19 #ifdef CONFIG_DM_GPIO
20 #include <asm/gpio.h>
21 #include <asm-generic/gpio.h>
22 #endif
23 
24 #define PAGE_SIZE 4096
25 #define MSEC_PER_SEC	1000ULL
26 
27 /*
28  * Currently it supports read/write up to 8*8*4 Bytes per
29  * stride as a burst mode. Please note that if you change
30  * MAX_STRIDE, you should also update dwmci_memcpy_fromio
31  * to augment the groups of {ldm, stm}.
32  */
33 #define MAX_STRIDE 64
34 #if (CONFIG_ARM && CONFIG_CPU_V7 && !defined(CONFIG_MMC_SIMPLE))
/*
 * Burst-copy MAX_STRIDE (64) words -- 256 bytes -- from the FIFO at
 * @fifo_addr into @buffer, as eight ldm/stm pairs of eight registers.
 *
 * Relies on the ARM AAPCS: @buffer arrives in r0, @fifo_addr in r1.
 * r1 is read without writeback on purpose (the FIFO is one fixed
 * address), while r0! advances through the destination.  `noinline`
 * keeps the r0/r1 argument assumption valid at every call site.
 */
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
		:::"memory"
	);
}
59 
/*
 * Mirror of dwmci_memcpy_fromio(): burst-copy MAX_STRIDE (64) words --
 * 256 bytes -- from @buffer into the FIFO at @fifo_addr.
 *
 * Per the AAPCS @buffer is in r0 (advanced with r0!), @fifo_addr in r1
 * (fixed FIFO address, no writeback).  `noinline` preserves the
 * register-argument assumption.
 */
void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
		:::"memory"
	);
}
84 #else
/* No-op fallbacks when the ARMv7 burst copies are unavailable; callers
 * must then stay on the legacy word-at-a-time PIO path. */
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {};
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {};
87 #endif
88 
89 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
90 {
91 	unsigned long timeout = 1000;
92 	u32 ctrl;
93 
94 	dwmci_writel(host, DWMCI_CTRL, value);
95 
96 	while (timeout--) {
97 		ctrl = dwmci_readl(host, DWMCI_CTRL);
98 		if (!(ctrl & DWMCI_RESET_ALL))
99 			return 1;
100 	}
101 	return 0;
102 }
103 
104 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
105 		u32 desc0, u32 desc1, u32 desc2)
106 {
107 	struct dwmci_idmac *desc = idmac;
108 
109 	desc->flags = desc0;
110 	desc->cnt = desc1;
111 	desc->addr = desc2;
112 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
113 }
114 
/*
 * Build the IDMAC descriptor chain for @data over @bounce_buffer and
 * program the controller for an internal-DMA transfer.
 *
 * Each descriptor covers up to 8 blocks; the first gets FS, the last
 * LD.  The chain is flushed from the D-cache so the IDMAC sees it.
 *
 * NOTE(review): the per-descriptor buffer offset is i * PAGE_SIZE
 * (4096 bytes), which equals 8 blocks only when blocksize == 512 --
 * confirm callers never use a different block size on this path.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;


	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Point the controller at the first descriptor. */
	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		/* Descriptor owned by DMA, chained to the next one. */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;	/* first segment */
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;	/* last segment */
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while(1);

	/* Make the descriptor chain visible to the IDMAC. */
	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	/* Enable DMA in the host controller... */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	/* ...and start the internal DMAC with fixed-burst mode. */
	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
165 
166 #ifdef CONFIG_SPL_BUILD
167 static unsigned int dwmci_get_drto(struct dwmci_host *host,
168 				   const unsigned int size)
169 {
170 	unsigned int drto_clks;
171 	unsigned int drto_div;
172 	unsigned int drto_ms;
173 
174 	drto_clks = dwmci_readl(host, DWMCI_TMOUT) >> 8;
175 	drto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
176 	if (drto_div == 0)
177 		drto_div = 1;
178 
179 	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
180 				   host->mmc->clock);
181 
182 	/* add a bit spare time */
183 	drto_ms += 50;
184 
185 	return drto_ms;
186 }
187 #else
188 static unsigned int dwmci_get_drto(struct dwmci_host *host,
189 				   const unsigned int size)
190 {
191 	unsigned int timeout;
192 
193 	timeout = size * 8;	/* counting in bits */
194 	timeout *= 10;		/* wait 10 times as long */
195 	timeout /= host->mmc->clock;
196 	timeout /= host->mmc->bus_width;
197 	timeout *= 1000;	/* counting in msec */
198 	timeout = (timeout < 10000) ? 10000 : timeout;
199 
200 	return timeout;
201 }
202 #endif
203 
204 static unsigned int dwmci_get_cto(struct dwmci_host *host)
205 {
206 	unsigned int cto_clks;
207 	unsigned int cto_div;
208 	unsigned int cto_ms;
209 
210 	cto_clks = dwmci_readl(host, DWMCI_TMOUT) & 0xff;
211 	cto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
212 	if (cto_div == 0)
213 		cto_div = 1;
214 
215 	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
216 				  host->mmc->clock);
217 
218 	/* add a bit spare time */
219 	cto_ms += 10;
220 
221 	return cto_ms;
222 }
223 
/*
 * Wait out / drive the data phase of a transfer.
 *
 * DMA mode: poll RINTSTS until DTO (done) or a data error/timeout.
 * FIFO mode: additionally shuttle data through the FIFO by PIO, either
 * word-at-a-time or -- when host->stride_pio is set and the transfer
 * is large enough -- in MAX_STRIDE-word bursts via dwmci_memcpy_*io.
 *
 * Returns 0 on success, -EINVAL on a data error (after resetting the
 * controller), -ETIMEDOUT on timeout.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout, status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	/* FIFO depth in words, recovered from the RX watermark setting. */
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks;
	/* Still use legacy PIO mode if size < 512(128 * 4) Bytes */
	/*
	 * NOTE(review): `size` is still in bytes at this point, so the
	 * actual stride-mode threshold is 128 bytes, not the 512 bytes
	 * the comment above implies -- confirm which is intended.
	 */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_drto(host, size);
	/* The tuning data is 128bytes, a timeout of 1ms is sufficient.*/
	if ((dwmci_readl(host, DWMCI_CMD) & 0x1F) == MMC_SEND_TUNING_BLOCK_HS200)
		timeout = 1;

	size /= 4;	/* bytes -> 32-bit words */

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			/*
			 * It is necessary to wait for several cycles before
			 * resetting the controller while data timeout or error.
			 */
			udelay(1);
			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			/* Re-latch the clock after the full reset. */
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			/* In DMA mode also reset the internal DMAC. */
			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO))) {
				while (size) {
					/* Words currently held in the FIFO. */
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy pio mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio now bursts 256 Bytes once */
					/*
					 * NOTE(review): with fewer than
					 * MAX_STRIDE words available this
					 * spins until the FIFO refills --
					 * confirm stride transfers are always
					 * a multiple of 256 bytes.
					 */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					/* Only whole strides were consumed. */
					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}

				dwmci_writel(host, DWMCI_RINTSTS,
					     mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO));
				start = get_timer(0);	/* progress made: restart timeout */
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					/* Free word slots left in the FIFO. */
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio now bursts 256 Bytes once */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
				start = get_timer(0);	/* progress made: restart timeout */
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Acknowledge the last status snapshot we acted on. */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
366 
367 static int dwmci_set_transfer_mode(struct dwmci_host *host,
368 		struct mmc_data *data)
369 {
370 	unsigned long mode;
371 
372 	mode = DWMCI_CMD_DATA_EXP;
373 	if (data->flags & MMC_DATA_WRITE)
374 		mode |= DWMCI_CMD_RW;
375 
376 	return mode;
377 }
378 
#ifdef CONFIG_DM_MMC
/*
 * Issue one MMC command, with optional data phase.
 *
 * Waits for the controller to go idle, programs the transfer (PIO or
 * IDMAC+bounce buffer), starts the command, waits for command-done,
 * reads back the response and then runs/waits the data phase.
 *
 * Returns 0 on success, -ETIMEDOUT/-EIO/-EINVAL/-1 on failure.
 *
 * NOTE(review): the early error returns after a DMA data setup leave
 * the bounce buffer mapped (no bounce_buffer_stop()) -- confirm
 * whether cleanup is required on those paths.
 */
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One IDMAC descriptor per 8 blocks (used in DMA mode only). */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait (up to 500ms) for any previous data activity to finish. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear all stale interrupt status bits. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: program sizes and reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage the user buffer via a bounce buffer. */
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* 136-bit responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else if (cmd->cmdidx == MMC_CMD_GO_IDLE_STATE)
		flags |= SDMMC_CMD_INIT | DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Wait for command-done, bounded by the hardware response timeout. */
	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}


	/* Latch the response; long responses arrive MSW-first (RESP3..0). */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	return ret;
}
522 
523 #ifdef CONFIG_SPL_BLK_READ_PREPARE
#ifdef CONFIG_DM_MMC
/*
 * SPL "prepare" variant of dwmci_send_cmd(): issues the command and,
 * in DMA mode, sets up descriptors and bounce buffer, but does NOT
 * wait for the data phase (no dwmci_data_transfer() call) -- the
 * transfer is left running for the caller to complete later.
 *
 * NOTE(review): cur_idmac is heap-allocated and never freed.  Since
 * the IDMAC may still be walking these descriptors after we return,
 * they presumably must outlive this call -- confirm who (if anyone)
 * reclaims them.  The bounce_buffer_start() return value is also
 * ignored here, unlike in dwmci_send_cmd().
 */
static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	struct dwmci_idmac *cur_idmac;
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* One descriptor per 8 blocks, over-allocated for alignment. */
	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
			   sizeof(struct dwmci_idmac),
			   ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
	if (!cur_idmac)
		return -ENODATA;

	/* Wait (up to 500ms) for any previous data activity to finish. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear all stale interrupt status bits. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: program sizes and reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage the user buffer via a bounce buffer. */
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* 136-bit responses with busy signalling are not supported. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Wait for command-done, bounded by the hardware response timeout. */
	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	/* Latch the response; long responses arrive MSW-first (RESP3..0). */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	return ret;
}
651 #endif
652 
653 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
654 {
655 	u32 div, status;
656 	int timeout = 10000;
657 	unsigned long sclk;
658 
659 	if (freq == 0)
660 		return 0;
661 	/*
662 	 * If host->get_mmc_clk isn't defined,
663 	 * then assume that host->bus_hz is source clock value.
664 	 * host->bus_hz should be set by user.
665 	 */
666 	if (host->get_mmc_clk)
667 		sclk = host->get_mmc_clk(host, freq);
668 	else if (host->bus_hz)
669 		sclk = host->bus_hz;
670 	else {
671 		debug("%s: Didn't get source clock value.\n", __func__);
672 		return -EINVAL;
673 	}
674 
675 	if (sclk == 0)
676 		return -EINVAL;
677 
678 	if (sclk == freq)
679 		div = 0;	/* bypass mode */
680 	else
681 		div = DIV_ROUND_UP(sclk, 2 * freq);
682 
683 	dwmci_writel(host, DWMCI_CLKENA, 0);
684 	dwmci_writel(host, DWMCI_CLKSRC, 0);
685 
686 	dwmci_writel(host, DWMCI_CLKDIV, div);
687 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
688 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
689 
690 	do {
691 		status = dwmci_readl(host, DWMCI_CMD);
692 		if (timeout-- < 0) {
693 			debug("%s: Timeout!\n", __func__);
694 			return -ETIMEDOUT;
695 		}
696 	} while (status & DWMCI_CMD_START);
697 
698 	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
699 			DWMCI_CLKEN_LOW_PWR);
700 
701 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
702 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
703 
704 	timeout = 10000;
705 	do {
706 		status = dwmci_readl(host, DWMCI_CMD);
707 		if (timeout-- < 0) {
708 			debug("%s: Timeout!\n", __func__);
709 			return -ETIMEDOUT;
710 		}
711 	} while (status & DWMCI_CMD_START);
712 
713 	host->clock = freq;
714 
715 	return 0;
716 }
717 
718 #ifdef CONFIG_DM_MMC
719 static bool dwmci_card_busy(struct udevice *dev)
720 {
721 	struct mmc *mmc = mmc_get_mmc_dev(dev);
722 #else
723 static bool dwmci_card_busy(struct mmc *mmc)
724 {
725 #endif
726 	u32 status;
727 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
728 
729 	/*
730 	 * Check the busy bit which is low when DAT[3:0]
731 	 * (the data lines) are 0000
732 	 */
733 	status = dwmci_readl(host, DWMCI_STATUS);
734 
735 	return !!(status & DWMCI_BUSY);
736 }
737 
738 #ifdef CONFIG_DM_MMC
739 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
740 {
741 	struct mmc *mmc = mmc_get_mmc_dev(dev);
742 #else
743 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
744 {
745 #endif
746 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
747 
748 	if (!host->execute_tuning)
749 		return -EIO;
750 
751 	return host->execute_tuning(host, opcode);
752 }
753 
754 #ifdef CONFIG_DM_MMC
755 static int dwmci_set_ios(struct udevice *dev)
756 {
757 	struct mmc *mmc = mmc_get_mmc_dev(dev);
758 #else
759 static int dwmci_set_ios(struct mmc *mmc)
760 {
761 #endif
762 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
763 	u32 ctype, regs;
764 
765 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
766 
767 	dwmci_setup_bus(host, mmc->clock);
768 	switch (mmc->bus_width) {
769 	case 8:
770 		ctype = DWMCI_CTYPE_8BIT;
771 		break;
772 	case 4:
773 		ctype = DWMCI_CTYPE_4BIT;
774 		break;
775 	default:
776 		ctype = DWMCI_CTYPE_1BIT;
777 		break;
778 	}
779 
780 	dwmci_writel(host, DWMCI_CTYPE, ctype);
781 
782 	regs = dwmci_readl(host, DWMCI_UHS_REG);
783 	if (mmc_card_ddr(mmc))
784 		regs |= DWMCI_DDR_MODE;
785 	else
786 		regs &= ~DWMCI_DDR_MODE;
787 
788 	dwmci_writel(host, DWMCI_UHS_REG, regs);
789 
790 	if (host->clksel)
791 		host->clksel(host);
792 
793 	return 0;
794 }
795 
/*
 * One-time controller bring-up: optional card power cycle, board hook,
 * power enable, full reset, DMA-vs-FIFO mode selection from HCON,
 * initial 400KHz clock, interrupt masking and FIFO watermark setup.
 *
 * Returns 0 on success, -EIO if the controller reset never completes.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;
	uint32_t use_dma;
	uint32_t verid;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc pwr_en_gpio;
	u32 delay_ms;

	/*
	 * If a card is present and a "pwr-en-gpios" property exists,
	 * power-cycle the card: drop the enable line with pins idled,
	 * wait "power-off-delay-ms" (default 200ms), then re-enable.
	 */
	if (mmc_getcd(mmc) == 1 &&
	    !gpio_request_by_name(mmc->dev, "pwr-en-gpios", 0, &pwr_en_gpio, GPIOD_IS_OUT)) {
		dm_gpio_set_value(&pwr_en_gpio, 0);
		pinctrl_select_state(mmc->dev, "idle");
		delay_ms = dev_read_u32_default(mmc->dev, "power-off-delay-ms", 200);
		mdelay(delay_ms);
		dm_gpio_set_value(&pwr_en_gpio, 1);
		pinctrl_select_state(mmc->dev, "default");
		dm_gpio_free(mmc->dev, &pwr_en_gpio);
	}
#endif

	if (host->board_init)
		host->board_init(host);
#ifdef CONFIG_ARCH_ROCKCHIP
	/*
	 * On Rockchip only dev_index 1 uses a Kconfig-selectable PWREN
	 * value; every other index writes 1 (same as the generic path).
	 */
	if (host->dev_index == 0)
		dwmci_writel(host, DWMCI_PWREN, 1);
	else if (host->dev_index == 1)
		dwmci_writel(host, DWMCI_PWREN, CONFIG_MMC_DW_PWREN_VALUE);
	else
		dwmci_writel(host, DWMCI_PWREN, 1);
#else
	dwmci_writel(host, DWMCI_PWREN, 1);
#endif

	/* Card-threshold control is configured from IP version 2.40a on. */
	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
	if (verid >= DW_MMC_240A)
		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Pick internal-DMA vs. FIFO(PIO) from the synthesized HCON. */
	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
	if (use_dma == DMA_INTERFACE_IDMA) {
		host->fifo_mode = 0;
	} else {
		host->fifo_mode = 1;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear and mask all interrupts; maximize hardware timeouts. */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	/* Derive default FIFO watermarks from the probed FIFO depth. */
	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
				RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	/* Leave the card clock gated; set_ios will enable it. */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
874 
/*
 * Card-detect: prefer an optional "cd-gpios" GPIO (present when the
 * line reads 0, hence the negation); fall back to the controller's
 * CDETECT register, where bit 0 low means "card present".
 *
 * Returns 1 when a card is present, 0 when not.
 */
static int dwmci_get_cd(struct udevice *dev)
{
	int ret = -1;
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct dwmci_host *host = mmc->priv;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc detect;

	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
	if (ret) {
		/* No usable GPIO: use the controller's native detect. */
		goto dw_mmc_cdetect;
	}

	ret = !dm_gpio_get_value(&detect);
	dm_gpio_free(dev, &detect);
	return ret;
dw_mmc_cdetect:
#endif
	ret = (dwmci_readl(host, DWMCI_CDETECT) & (1 << 0)) == 0 ? 1 : 0;

	return ret;
}
898 
899 #ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	/* Run the common controller bring-up on this device's mmc. */
	return dwmci_init(mmc_get_mmc_dev(dev));
}
906 
/* Driver-model operations shared by all dw_mmc platform glue drivers. */
const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	.send_cmd_prepare = dwmci_send_cmd_prepare,
#endif
	.set_ios	= dwmci_set_ios,
	.get_cd         = dwmci_get_cd,
	.execute_tuning	= dwmci_execute_tuning,
};
917 
918 #else
/* Legacy (non-driver-model) operations table; note .init is only
 * needed here, since DM probes via dwmci_probe() instead. */
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.get_cd         = dwmci_get_cd,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
927 #endif
928 
929 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
930 		u32 max_clk, u32 min_clk)
931 {
932 	cfg->name = host->name;
933 #ifndef CONFIG_DM_MMC
934 	cfg->ops = &dwmci_ops;
935 #endif
936 	cfg->f_min = min_clk;
937 	cfg->f_max = max_clk;
938 
939 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
940 
941 	cfg->host_caps = host->caps;
942 
943 	switch (host->buswidth) {
944 	case 8:
945 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
946 		break;
947 	case 4:
948 		cfg->host_caps |= MMC_MODE_4BIT;
949 		cfg->host_caps &= ~MMC_MODE_8BIT;
950 		break;
951 	case 1:
952 		cfg->host_caps &= ~MMC_MODE_4BIT;
953 		cfg->host_caps &= ~MMC_MODE_8BIT;
954 		break;
955 	default:
956 		printf("Unsupported bus width: %d\n", host->buswidth);
957 		break;
958 	}
959 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
960 
961 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
962 }
963 
964 #ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	/* Thin wrapper: register the mmc device with the block layer. */
	int ret = mmc_bind(dev, mmc, cfg);

	return ret;
}
969 #else
970 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
971 {
972 	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
973 
974 	host->mmc = mmc_create(&host->cfg, host);
975 	if (host->mmc == NULL)
976 		return -1;
977 
978 	return 0;
979 }
980 #endif
981