xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision a28094e54c767e8fda1cdae560819a38f3465d5c)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <common.h>
10 #include <bouncebuf.h>
11 #include <div64.h>
12 #include <errno.h>
13 #include <malloc.h>
14 #include <memalign.h>
15 #include <mmc.h>
16 #include <dwmmc.h>
17 #include <dm/pinctrl.h>
18 #include <dm.h>
19 #ifdef CONFIG_DM_GPIO
20 #include <asm/gpio.h>
21 #include <asm-generic/gpio.h>
22 #endif
23 
24 #define PAGE_SIZE 4096
25 #define MSEC_PER_SEC	1000ULL
26 
27 /*
28  * Reads and writes are done in bursts of up to 8*8*4 bytes
29  * (one stride) at a time. Note that if you change MAX_STRIDE,
30  * you must also update dwmci_memcpy_fromio/dwmci_memcpy_toio to
31  * adjust the number of {ldm, stm} groups accordingly.
32  */
33 #define MAX_STRIDE 64
34 #if (CONFIG_ARM && CONFIG_CPU_V7 && !defined(CONFIG_MMC_SIMPLE))
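/*
 * Burst-copy helpers for stride PIO: each call moves MAX_STRIDE (64)
 * 32-bit words, i.e. 256 bytes, between the buffer and the FIFO data
 * register using eight ldm/stm pairs of eight registers each. The
 * buffer pointer is expected in r0 and the FIFO address in r1 per the
 * AAPCS; noinline is relied on so the arguments stay in those registers.
 */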
35 void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
36 {
37 	__asm__ __volatile__ (
38 		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
39 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
40 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
41 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
42 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
43 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
44 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
45 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
46 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
47 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
48 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
49 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
50 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
51 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
52 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
53 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
54 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
55 		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
56 		:::"memory"
57 	);
58 }
59 
60 void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
61 {
62 	__asm__ __volatile__ (
63 		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
64 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
65 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
66 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
67 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
68 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
69 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
70 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
71 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
72 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
73 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
74 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
75 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
76 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
77 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
78 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
79 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
80 		"pop {r2, r3, r4, r5, r6,r7,r8,r9}\n"
81 		:::"memory"
82 	);
83 }
84 #else
85 void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
86 void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
87 #endif
88 
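/*
 * Write the requested reset bits to CTRL and poll until the controller
 * clears them. Returns 1 on success and 0 if the bits never clear.
 */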
89 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
90 {
91 	unsigned long timeout = 1000;
92 	u32 ctrl;
93 
94 	dwmci_writel(host, DWMCI_CTRL, value);
95 
96 	while (timeout--) {
97 		ctrl = dwmci_readl(host, DWMCI_CTRL);
98 		if (!(ctrl & DWMCI_RESET_ALL))
99 			return 1;
100 	}
101 	return 0;
102 }
103 
104 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
105 		u32 desc0, u32 desc1, u32 desc2)
106 {
107 	struct dwmci_idmac *desc = idmac;
108 
109 	desc->flags = desc0;
110 	desc->cnt = desc1;
111 	desc->addr = desc2;
112 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
113 }
114 
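/*
 * Build the internal DMA (IDMAC) descriptor chain for a transfer out of
 * the bounce buffer: each descriptor covers up to 8 blocks (one PAGE_SIZE
 * chunk of the bounce buffer), the first descriptor is flagged FS and the
 * last LD. The IDMAC and DMA engine are then enabled and the block size
 * and byte count programmed.
 */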
115 static void dwmci_prepare_data(struct dwmci_host *host,
116 			       struct mmc_data *data,
117 			       struct dwmci_idmac *cur_idmac,
118 			       void *bounce_buffer)
119 {
120 	unsigned long ctrl;
121 	unsigned int i = 0, flags, cnt, blk_cnt;
122 	ulong data_start, data_end;
123 
124 
125 	blk_cnt = data->blocks;
126 
127 	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
128 
129 	data_start = (ulong)cur_idmac;
130 	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
131 
132 	do {
133 		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
134 		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
135 		if (blk_cnt <= 8) {
136 			flags |= DWMCI_IDMAC_LD;
137 			cnt = data->blocksize * blk_cnt;
138 		} else
139 			cnt = data->blocksize * 8;
140 
141 		dwmci_set_idma_desc(cur_idmac, flags, cnt,
142 				    (ulong)bounce_buffer + (i * PAGE_SIZE));
143 
144 		if (blk_cnt <= 8)
145 			break;
146 		blk_cnt -= 8;
147 		cur_idmac++;
148 		i++;
149 	} while(1);
150 
151 	data_end = (ulong)cur_idmac;
152 	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);
153 
154 	ctrl = dwmci_readl(host, DWMCI_CTRL);
155 	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
156 	dwmci_writel(host, DWMCI_CTRL, ctrl);
157 
158 	ctrl = dwmci_readl(host, DWMCI_BMOD);
159 	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
160 	dwmci_writel(host, DWMCI_BMOD, ctrl);
161 
162 	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
163 	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
164 }
165 
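/*
 * Estimate a data read timeout in milliseconds: the transfer size in bits
 * divided by the bus width gives clock cycles, scaled by 10 for margin and
 * converted using the current card clock, with a 1000 ms floor.
 */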
166 static unsigned int dwmci_get_drto(struct dwmci_host *host,
167 				   const unsigned int size)
168 {
169 	unsigned int timeout;
170 
171 	timeout = size * 8;	/* counting in bits */
172 	timeout /= host->mmc->bus_width;
173 	timeout *= 10;		/* wait 10 times as long */
174 	timeout /= (host->mmc->clock / 1000); /* counting in msec */
175 	timeout = (timeout < 1000) ? 1000 : timeout;
176 
177 	return timeout;
178 }
179 
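/*
 * Derive a command (response) timeout in milliseconds from the
 * response-timeout field of TMOUT and the current clock divider,
 * plus a little slack.
 */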
180 static unsigned int dwmci_get_cto(struct dwmci_host *host)
181 {
182 	unsigned int cto_clks;
183 	unsigned int cto_div;
184 	unsigned int cto_ms;
185 
186 	cto_clks = dwmci_readl(host, DWMCI_TMOUT) & 0xff;
187 	cto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
188 	if (cto_div == 0)
189 		cto_div = 1;
190 
191 	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
192 				  host->mmc->clock);
193 
194 	/* add a bit of spare time */
195 	cto_ms += 10;
196 
197 	return cto_ms;
198 }
199 
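/*
 * Poll RINTSTS until the transfer completes (DTO), servicing the FIFO in
 * PIO mode on RXDR/TXDR using either the legacy word-by-word loop or the
 * stride burst copies above, and resetting the controller on a data error
 * or timeout.
 */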
200 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
201 {
202 	int ret = 0;
203 	int reset_timeout = 100;
204 	u32 timeout, status, ctrl, mask, size, i, len = 0;
205 	u32 *buf = NULL;
206 	ulong start = get_timer(0);
207 	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
208 			    RX_WMARK_SHIFT) + 1) * 2;
209 	bool stride;
210 
211 	size = data->blocksize * data->blocks;
212 	/* Still use legacy PIO mode if size < 512(128 * 4) Bytes */
213 	stride = host->stride_pio && size > 128;
214 	if (data->flags == MMC_DATA_READ)
215 		buf = (unsigned int *)data->dest;
216 	else
217 		buf = (unsigned int *)data->src;
218 
219 	timeout = dwmci_get_drto(host, size);
220 	/* The tuning data is 128 bytes, so a timeout of 1 ms is sufficient. */
221 	if ((dwmci_readl(host, DWMCI_CMD) & 0x1F) == MMC_SEND_TUNING_BLOCK_HS200)
222 		timeout = 1;
223 
224 	size /= 4;
225 
226 	for (;;) {
227 		mask = dwmci_readl(host, DWMCI_RINTSTS);
228 		/* Error during data transfer. */
229 		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
230 			debug("%s: DATA ERROR!\n", __func__);
231 			/*
232 			 * It is necessary to wait for several cycles before
233 			 * resetting the controller after a data timeout or error.
234 			 */
235 			udelay(1);
236 			dwmci_wait_reset(host, DWMCI_RESET_ALL);
237 			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
238 				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
239 
240 			do {
241 				status = dwmci_readl(host, DWMCI_CMD);
242 				if (reset_timeout-- < 0)
243 					break;
244 				udelay(100);
245 			} while (status & DWMCI_CMD_START);
246 
247 			if (!host->fifo_mode) {
248 				ctrl = dwmci_readl(host, DWMCI_BMOD);
249 				ctrl |= DWMCI_BMOD_IDMAC_RESET;
250 				dwmci_writel(host, DWMCI_BMOD, ctrl);
251 			}
252 
253 			ret = -EINVAL;
254 			break;
255 		}
256 
257 		if (host->fifo_mode && size) {
258 			len = 0;
259 			if (data->flags == MMC_DATA_READ &&
260 			    (mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO))) {
261 				while (size) {
262 					len = dwmci_readl(host, DWMCI_STATUS);
263 					len = (len >> DWMCI_FIFO_SHIFT) &
264 						    DWMCI_FIFO_MASK;
265 					len = min(size, len);
266 					if (!stride) {
267 						/* Legacy pio mode */
268 						for (i = 0; i < len; i++)
269 							*buf++ = dwmci_readl(host, DWMCI_DATA);
270 						goto read_again;
271 					}
272 
273 					/* dwmci_memcpy_fromio bursts 256 bytes per call */
274 					if (len < MAX_STRIDE)
275 						continue;
276 
277 					for (i = 0; i < len / MAX_STRIDE; i++) {
278 						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
279 						buf += MAX_STRIDE;
280 					}
281 
282 					len = i * MAX_STRIDE;
283 read_again:
284 					size = size > len ? (size - len) : 0;
285 				}
286 
287 				dwmci_writel(host, DWMCI_RINTSTS,
288 					     mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO));
289 			} else if (data->flags == MMC_DATA_WRITE &&
290 				   (mask & DWMCI_INTMSK_TXDR)) {
291 				while (size) {
292 					len = dwmci_readl(host, DWMCI_STATUS);
293 					len = fifo_depth - ((len >>
294 						   DWMCI_FIFO_SHIFT) &
295 						   DWMCI_FIFO_MASK);
296 					len = min(size, len);
297 					if (!stride) {
298 						for (i = 0; i < len; i++)
299 							dwmci_writel(host, DWMCI_DATA,
300 								     *buf++);
301 						goto write_again;
302 					}
303 					/* dwmci_memcpy_toio bursts 256 bytes per call */
304 					if (len < MAX_STRIDE)
305 						continue;
306 
307 					for (i = 0; i < len / MAX_STRIDE; i++) {
308 						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
309 						buf += MAX_STRIDE;
310 					}
311 
312 					len = i * MAX_STRIDE;
313 write_again:
314 					size = size > len ? (size - len) : 0;
315 				}
316 				dwmci_writel(host, DWMCI_RINTSTS,
317 					     DWMCI_INTMSK_TXDR);
318 			}
319 		}
320 
321 		/* Data arrived correctly. */
322 		if (mask & DWMCI_INTMSK_DTO) {
323 			ret = 0;
324 			break;
325 		}
326 
327 		/* Check for timeout. */
328 		if (get_timer(start) > timeout) {
329 			debug("%s: Timeout waiting for data!\n",
330 			      __func__);
331 			ret = -ETIMEDOUT;
332 			break;
333 		}
334 	}
335 
336 	dwmci_writel(host, DWMCI_RINTSTS, mask);
337 
338 	return ret;
339 }
340 
341 static int dwmci_set_transfer_mode(struct dwmci_host *host,
342 		struct mmc_data *data)
343 {
344 	unsigned long mode;
345 
346 	mode = DWMCI_CMD_DATA_EXP;
347 	if (data->flags & MMC_DATA_WRITE)
348 		mode |= DWMCI_CMD_RW;
349 
350 	return mode;
351 }
352 
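/*
 * Issue a command, optionally with data. In DMA mode the data is staged
 * through a bounce buffer and an IDMAC descriptor chain; in FIFO mode the
 * block registers are programmed and the FIFO is serviced by PIO. The
 * response is read back once CDONE is seen within the command timeout.
 */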
353 #ifdef CONFIG_DM_MMC
354 static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
355 		   struct mmc_data *data)
356 {
357 	struct mmc *mmc = mmc_get_mmc_dev(dev);
358 #else
359 static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
360 		struct mmc_data *data)
361 {
362 #endif
363 	struct dwmci_host *host = mmc->priv;
364 	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
365 				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
366 	int ret = 0, flags = 0;
367 	unsigned int timeout = 500;
368 	u32 mask, ctrl;
369 	ulong start = get_timer(0);
370 	struct bounce_buffer bbstate;
371 
372 	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
373 		if (get_timer(start) > timeout) {
374 			debug("%s: Timeout on data busy\n", __func__);
375 			return -ETIMEDOUT;
376 		}
377 	}
378 
379 	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
380 
381 	if (data) {
382 		if (host->fifo_mode) {
383 			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
384 			dwmci_writel(host, DWMCI_BYTCNT,
385 				     data->blocksize * data->blocks);
386 			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
387 		} else {
388 			if (data->flags == MMC_DATA_READ) {
389 				ret = bounce_buffer_start(&bbstate,
390 						(void*)data->dest,
391 						data->blocksize *
392 						data->blocks, GEN_BB_WRITE);
393 			} else {
394 				ret = bounce_buffer_start(&bbstate,
395 						(void*)data->src,
396 						data->blocksize *
397 						data->blocks, GEN_BB_READ);
398 			}
399 
400 			if (ret)
401 				return ret;
402 
403 			dwmci_prepare_data(host, data, cur_idmac,
404 					   bbstate.bounce_buffer);
405 		}
406 	}
407 
408 	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
409 
410 	if (data)
411 		flags = dwmci_set_transfer_mode(host, data);
412 
413 	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
414 		return -1;
415 
416 	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
417 		flags |= DWMCI_CMD_ABORT_STOP;
418 	else if (cmd->cmdidx == MMC_CMD_GO_IDLE_STATE)
419 		flags |= SDMMC_CMD_INIT | DWMCI_CMD_ABORT_STOP;
420 	else
421 		flags |= DWMCI_CMD_PRV_DAT_WAIT;
422 
423 	if (cmd->resp_type & MMC_RSP_PRESENT) {
424 		flags |= DWMCI_CMD_RESP_EXP;
425 		if (cmd->resp_type & MMC_RSP_136)
426 			flags |= DWMCI_CMD_RESP_LENGTH;
427 	}
428 
429 	if (cmd->resp_type & MMC_RSP_CRC)
430 		flags |= DWMCI_CMD_CHECK_CRC;
431 
432 	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
433 
434 	debug("Sending CMD%d\n", cmd->cmdidx);
435 
436 	dwmci_writel(host, DWMCI_CMD, flags);
437 
438 	timeout = dwmci_get_cto(host);
439 	start = get_timer(0);
440 	do {
441 		mask = dwmci_readl(host, DWMCI_RINTSTS);
442 		if (mask & DWMCI_INTMSK_CDONE) {
443 			if (!data)
444 				dwmci_writel(host, DWMCI_RINTSTS, mask);
445 			break;
446 		}
447 	} while (!(get_timer(start) > timeout));
448 
449 	if (get_timer(start) > timeout) {
450 		debug("%s: Timeout.\n", __func__);
451 		return -ETIMEDOUT;
452 	}
453 
454 	if (mask & DWMCI_INTMSK_RTO) {
455 		/*
456 		 * Timeout here is not necessarily fatal. (e)MMC cards
457 		 * will splat here when they receive CMD55 as they do
458 		 * not support this command and that is exactly the way
459 		 * to tell them apart from SD cards. Thus, this output
460 		 * below shall be debug(). eMMC cards also do not favor
461 		 * CMD8, please keep that in mind.
462 		 */
463 		debug("%s: Response Timeout.\n", __func__);
464 		return -ETIMEDOUT;
465 	} else if (mask & DWMCI_INTMSK_RE) {
466 		debug("%s: Response Error.\n", __func__);
467 		return -EIO;
468 	}
469 
470 
471 	if (cmd->resp_type & MMC_RSP_PRESENT) {
472 		if (cmd->resp_type & MMC_RSP_136) {
473 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
474 			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
475 			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
476 			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
477 		} else {
478 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
479 		}
480 	}
481 
482 	if (data) {
483 		ret = dwmci_data_transfer(host, data);
484 
485 		/* Only DMA mode needs this */
486 		if (!host->fifo_mode) {
487 			ctrl = dwmci_readl(host, DWMCI_CTRL);
488 			ctrl &= ~(DWMCI_DMA_EN);
489 			dwmci_writel(host, DWMCI_CTRL, ctrl);
490 			bounce_buffer_stop(&bbstate);
491 		}
492 	}
493 
494 	return ret;
495 }
496 
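/*
 * SPL "prepare" variant of dwmci_send_cmd: it sets up the DMA descriptors
 * (at a fixed address if the device tree provides an "mmc-idmac" property)
 * and issues the command, but returns after the command response without
 * waiting for the data transfer to finish.
 */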
497 #ifdef CONFIG_SPL_BLK_READ_PREPARE
498 #ifdef CONFIG_DM_MMC
499 static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
500 				  struct mmc_data *data)
501 {
502 	struct mmc *mmc = mmc_get_mmc_dev(dev);
503 #else
504 static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
505 				  struct mmc_data *data)
506 {
507 #endif
508 	struct dwmci_host *host = mmc->priv;
509 	struct dwmci_idmac *cur_idmac;
510 	int ret = 0, flags = 0;
511 	unsigned int timeout = 500;
512 	u32 mask;
513 	ulong start = get_timer(0);
514 	ulong mmc_idmac;
515 	struct bounce_buffer bbstate;
516 
517 	mmc_idmac = dev_read_u32_default(mmc->dev, "mmc-idmac", 0);
518 	if (mmc_idmac) {
519 		cur_idmac = (struct dwmci_idmac *)mmc_idmac;
520 	} else {
521 		cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
522 			sizeof(struct dwmci_idmac),
523 			ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
524 		if (!cur_idmac)
525 			return -ENODATA;
526 	}
527 
528 	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
529 		if (get_timer(start) > timeout) {
530 			debug("%s: Timeout on data busy\n", __func__);
531 			return -ETIMEDOUT;
532 		}
533 	}
534 
535 	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
536 
537 	if (data) {
538 		if (host->fifo_mode) {
539 			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
540 			dwmci_writel(host, DWMCI_BYTCNT,
541 				     data->blocksize * data->blocks);
542 			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
543 		} else {
544 			if (data->flags == MMC_DATA_READ) {
545 				bounce_buffer_start(&bbstate, (void *)data->dest,
546 						    data->blocksize *
547 						    data->blocks, GEN_BB_WRITE);
548 			} else {
549 				bounce_buffer_start(&bbstate, (void *)data->src,
550 						    data->blocksize *
551 						    data->blocks, GEN_BB_READ);
552 			}
553 			dwmci_prepare_data(host, data, cur_idmac,
554 					   bbstate.bounce_buffer);
555 		}
556 	}
557 
558 	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
559 
560 	if (data)
561 		flags = dwmci_set_transfer_mode(host, data);
562 
563 	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
564 		return -1;
565 
566 	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
567 		flags |= DWMCI_CMD_ABORT_STOP;
568 	else
569 		flags |= DWMCI_CMD_PRV_DAT_WAIT;
570 
571 	if (cmd->resp_type & MMC_RSP_PRESENT) {
572 		flags |= DWMCI_CMD_RESP_EXP;
573 		if (cmd->resp_type & MMC_RSP_136)
574 			flags |= DWMCI_CMD_RESP_LENGTH;
575 	}
576 
577 	if (cmd->resp_type & MMC_RSP_CRC)
578 		flags |= DWMCI_CMD_CHECK_CRC;
579 
580 	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
581 
582 	debug("Sending CMD%d\n", cmd->cmdidx);
583 
584 	dwmci_writel(host, DWMCI_CMD, flags);
585 
586 	timeout = dwmci_get_cto(host);
587 	start = get_timer(0);
588 	do {
589 		mask = dwmci_readl(host, DWMCI_RINTSTS);
590 		if (mask & DWMCI_INTMSK_CDONE) {
591 			if (!data)
592 				dwmci_writel(host, DWMCI_RINTSTS, mask);
593 			break;
594 		}
595 	} while (!(get_timer(start) > timeout));
596 
597 	if (get_timer(start) > timeout) {
598 		debug("%s: Timeout.\n", __func__);
599 		return -ETIMEDOUT;
600 	}
601 
602 	if (mask & DWMCI_INTMSK_RTO) {
603 		/*
604 		 * Timeout here is not necessarily fatal. (e)MMC cards
605 		 * will splat here when they receive CMD55 as they do
606 		 * not support this command and that is exactly the way
607 		 * to tell them apart from SD cards. Thus, this output
608 		 * below shall be debug(). eMMC cards also do not favor
609 		 * CMD8, please keep that in mind.
610 		 */
611 		debug("%s: Response Timeout.\n", __func__);
612 		return -ETIMEDOUT;
613 	} else if (mask & DWMCI_INTMSK_RE) {
614 		debug("%s: Response Error.\n", __func__);
615 		return -EIO;
616 	}
617 
618 	if (cmd->resp_type & MMC_RSP_PRESENT) {
619 		if (cmd->resp_type & MMC_RSP_136) {
620 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
621 			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
622 			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
623 			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
624 		} else {
625 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
626 		}
627 	}
628 
629 	return ret;
630 }
631 #endif
632 
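/*
 * Program the card clock: the divider is DIV_ROUND_UP(sclk, 2 * freq),
 * with 0 selecting bypass when the source clock already matches. The
 * clock is gated, updated and re-enabled through UPD_CLK commands, with
 * low-power clock gating left enabled.
 */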
633 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
634 {
635 	u32 div, status;
636 	int timeout = 10000;
637 	unsigned long sclk;
638 
639 	if (freq == 0)
640 		return 0;
641 	/*
642 	 * If host->get_mmc_clk isn't defined,
643 	 * then assume that host->bus_hz is source clock value.
644 	 * host->bus_hz should be set by user.
645 	 */
646 	if (host->get_mmc_clk)
647 		sclk = host->get_mmc_clk(host, freq);
648 	else if (host->bus_hz)
649 		sclk = host->bus_hz;
650 	else {
651 		debug("%s: Didn't get source clock value.\n", __func__);
652 		return -EINVAL;
653 	}
654 
655 	if (sclk == 0)
656 		return -EINVAL;
657 
658 	if (sclk == freq)
659 		div = 0;	/* bypass mode */
660 	else
661 		div = DIV_ROUND_UP(sclk, 2 * freq);
662 
663 	dwmci_writel(host, DWMCI_CLKENA, 0);
664 	dwmci_writel(host, DWMCI_CLKSRC, 0);
665 
666 	dwmci_writel(host, DWMCI_CLKDIV, div);
667 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
668 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
669 
670 	do {
671 		status = dwmci_readl(host, DWMCI_CMD);
672 		if (timeout-- < 0) {
673 			debug("%s: Timeout!\n", __func__);
674 			return -ETIMEDOUT;
675 		}
676 	} while (status & DWMCI_CMD_START);
677 
678 	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
679 			DWMCI_CLKEN_LOW_PWR);
680 
681 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
682 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
683 
684 	timeout = 10000;
685 	do {
686 		status = dwmci_readl(host, DWMCI_CMD);
687 		if (timeout-- < 0) {
688 			debug("%s: Timeout!\n", __func__);
689 			return -ETIMEDOUT;
690 		}
691 	} while (status & DWMCI_CMD_START);
692 
693 	host->clock = freq;
694 
695 	return 0;
696 }
697 
698 #ifdef CONFIG_DM_MMC
699 static bool dwmci_card_busy(struct udevice *dev)
700 {
701 	struct mmc *mmc = mmc_get_mmc_dev(dev);
702 #else
703 static bool dwmci_card_busy(struct mmc *mmc)
704 {
705 #endif
706 	u32 status;
707 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
708 
709 	/*
710 	 * The DWMCI_BUSY (data_busy) status bit is set while the card
711 	 * signals busy by holding DAT[0] low.
712 	 */
713 	status = dwmci_readl(host, DWMCI_STATUS);
714 
715 	return !!(status & DWMCI_BUSY);
716 }
717 
718 #ifdef CONFIG_DM_MMC
719 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
720 {
721 	struct mmc *mmc = mmc_get_mmc_dev(dev);
722 #else
723 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
724 {
725 #endif
726 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
727 
728 	if (!host->execute_tuning)
729 		return -EIO;
730 
731 	return host->execute_tuning(host, opcode);
732 }
733 
734 #ifdef CONFIG_DM_MMC
735 static int dwmci_set_ios(struct udevice *dev)
736 {
737 	struct mmc *mmc = mmc_get_mmc_dev(dev);
738 #else
739 static int dwmci_set_ios(struct mmc *mmc)
740 {
741 #endif
742 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
743 	u32 ctype, regs;
744 
745 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
746 
747 	dwmci_setup_bus(host, mmc->clock);
748 	switch (mmc->bus_width) {
749 	case 8:
750 		ctype = DWMCI_CTYPE_8BIT;
751 		break;
752 	case 4:
753 		ctype = DWMCI_CTYPE_4BIT;
754 		break;
755 	default:
756 		ctype = DWMCI_CTYPE_1BIT;
757 		break;
758 	}
759 
760 	dwmci_writel(host, DWMCI_CTYPE, ctype);
761 
762 	regs = dwmci_readl(host, DWMCI_UHS_REG);
763 	if (mmc_card_ddr(mmc))
764 		regs |= DWMCI_DDR_MODE;
765 	else
766 		regs &= ~DWMCI_DDR_MODE;
767 
768 	dwmci_writel(host, DWMCI_UHS_REG, regs);
769 
770 	if (host->clksel)
771 		host->clksel(host);
772 
773 	return 0;
774 }
775 
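/*
 * One-time controller bring-up: optionally power-cycle the card via the
 * "pwr-en-gpios" line, reset the controller, pick DMA or FIFO mode from
 * the HCON transfer-mode field, and program timeouts, interrupt masks and
 * FIFO watermarks.
 */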
776 static int dwmci_init(struct mmc *mmc)
777 {
778 	struct dwmci_host *host = mmc->priv;
779 	uint32_t use_dma;
780 	uint32_t verid;
781 
782 #if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
783 	struct gpio_desc pwr_en_gpio;
784 	u32 delay_ms;
785 
786 	if (mmc_getcd(mmc) == 1 &&
787 	    !gpio_request_by_name(mmc->dev, "pwr-en-gpios", 0, &pwr_en_gpio, GPIOD_IS_OUT)) {
788 		dm_gpio_set_value(&pwr_en_gpio, 0);
789 		pinctrl_select_state(mmc->dev, "idle");
790 		delay_ms = dev_read_u32_default(mmc->dev, "power-off-delay-ms", 200);
791 		mdelay(delay_ms);
792 		dm_gpio_set_value(&pwr_en_gpio, 1);
793 		pinctrl_select_state(mmc->dev, "default");
794 		dm_gpio_free(mmc->dev, &pwr_en_gpio);
795 	}
796 #endif
797 
798 	if (host->board_init)
799 		host->board_init(host);
800 #ifdef CONFIG_ARCH_ROCKCHIP
801 	if (host->dev_index == 0)
802 		dwmci_writel(host, DWMCI_PWREN, 1);
803 	else if (host->dev_index == 1)
804 		dwmci_writel(host, DWMCI_PWREN, CONFIG_MMC_DW_PWREN_VALUE);
805 	else
806 		dwmci_writel(host, DWMCI_PWREN, 1);
807 #else
808 	dwmci_writel(host, DWMCI_PWREN, 1);
809 #endif
810 
811 	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
812 	if (verid >= DW_MMC_240A)
813 		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);
814 
815 	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
816 		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
817 		return -EIO;
818 	}
819 
820 	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
821 	if (use_dma == DMA_INTERFACE_IDMA) {
822 		host->fifo_mode = 0;
823 	} else {
824 		host->fifo_mode = 1;
825 	}
826 
827 	/* Enumerate at the minimum configured clock (typically 400 kHz) */
828 	dwmci_setup_bus(host, mmc->cfg->f_min);
829 
830 	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
831 	dwmci_writel(host, DWMCI_INTMASK, 0);
832 
833 	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
834 
835 	dwmci_writel(host, DWMCI_IDINTEN, 0);
836 	dwmci_writel(host, DWMCI_BMOD, 1);
837 
838 	if (!host->fifoth_val) {
839 		uint32_t fifo_size;
840 
841 		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
842 		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
843 		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
844 				RX_WMARK(fifo_size / 2 - 1) |
845 				TX_WMARK(fifo_size / 2);
846 	}
847 	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
848 
849 	dwmci_writel(host, DWMCI_CLKENA, 0);
850 	dwmci_writel(host, DWMCI_CLKSRC, 0);
851 
852 	return 0;
853 }
854 
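/*
 * Card detect: use an optional "cd-gpios" line if present (the inverted
 * GPIO value is reported), otherwise fall back to bit 0 of CDETECT, which
 * is low when a card is inserted.
 */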
855 static int dwmci_get_cd(struct udevice *dev)
856 {
857 	int ret = -1;
858 	struct mmc *mmc = mmc_get_mmc_dev(dev);
859 	struct dwmci_host *host = mmc->priv;
860 
861 #if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
862 	struct gpio_desc detect;
863 
864 	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
865 	if (ret) {
866 		goto dw_mmc_cdetect;
867 	}
868 
869 	ret = !dm_gpio_get_value(&detect);
870 	dm_gpio_free(dev, &detect);
871 	return ret;
872 dw_mmc_cdetect:
873 #endif
874 	ret = (dwmci_readl(host, DWMCI_CDETECT) & (1 << 0)) == 0 ? 1 : 0;
875 
876 	return ret;
877 }
878 
879 #ifdef CONFIG_DM_MMC
880 int dwmci_probe(struct udevice *dev)
881 {
882 	struct mmc *mmc = mmc_get_mmc_dev(dev);
883 
884 	return dwmci_init(mmc);
885 }
886 
887 const struct dm_mmc_ops dm_dwmci_ops = {
888 	.card_busy	= dwmci_card_busy,
889 	.send_cmd	= dwmci_send_cmd,
890 #ifdef CONFIG_SPL_BLK_READ_PREPARE
891 	.send_cmd_prepare = dwmci_send_cmd_prepare,
892 #endif
893 	.set_ios	= dwmci_set_ios,
894 	.get_cd         = dwmci_get_cd,
895 	.execute_tuning	= dwmci_execute_tuning,
896 };
897 
898 #else
899 static const struct mmc_ops dwmci_ops = {
900 	.card_busy	= dwmci_card_busy,
901 	.send_cmd	= dwmci_send_cmd,
902 	.set_ios	= dwmci_set_ios,
903 	.get_cd         = dwmci_get_cd,
904 	.init		= dwmci_init,
905 	.execute_tuning	= dwmci_execute_tuning,
906 };
907 #endif
908 
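/*
 * Fill in the mmc_config from the host: clock range, supported voltages
 * and bus-width capabilities derived from host->buswidth.
 */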
909 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
910 		u32 max_clk, u32 min_clk)
911 {
912 	cfg->name = host->name;
913 #ifndef CONFIG_DM_MMC
914 	cfg->ops = &dwmci_ops;
915 #endif
916 	cfg->f_min = min_clk;
917 	cfg->f_max = max_clk;
918 
919 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
920 
921 	cfg->host_caps = host->caps;
922 
923 	switch (host->buswidth) {
924 	case 8:
925 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
926 		break;
927 	case 4:
928 		cfg->host_caps |= MMC_MODE_4BIT;
929 		cfg->host_caps &= ~MMC_MODE_8BIT;
930 		break;
931 	case 1:
932 		cfg->host_caps &= ~MMC_MODE_4BIT;
933 		cfg->host_caps &= ~MMC_MODE_8BIT;
934 		break;
935 	default:
936 		printf("Unsupported bus width: %d\n", host->buswidth);
937 		break;
938 	}
939 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
940 
941 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
942 }
943 
944 #ifdef CONFIG_BLK
945 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
946 {
947 	return mmc_bind(dev, mmc, cfg);
948 }
949 #else
950 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
951 {
952 	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
953 
954 	host->mmc = mmc_create(&host->cfg, host);
955 	if (host->mmc == NULL)
956 		return -1;
957 
958 	return 0;
959 }
960 #endif
961