/* xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision 10427e2df5a90fdf95a3ef373e36c5dd49ba07ad) */
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#ifdef CONFIG_DM_GPIO
#include <asm/gpio.h>
#include <asm-generic/gpio.h>
#endif

#define PAGE_SIZE 4096

/*
 * The stride PIO path reads/writes up to 8 * 8 * 4 bytes (256 bytes) per
 * burst. MAX_STRIDE is counted in 32-bit words. If you change MAX_STRIDE,
 * also update dwmci_memcpy_fromio()/dwmci_memcpy_toio() so the number of
 * {ldm, stm} groups matches.
 */
#define MAX_STRIDE 64
#if defined(CONFIG_ARM) && defined(CONFIG_CPU_V7)
/*
 * Copy MAX_STRIDE words (256 bytes) from the FIFO to @buffer using eight
 * ldm/stm bursts.  The assembly relies on the AAPCS argument registers
 * (buffer in r0, fifo_addr in r1), so the function must not be inlined.
 */
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}

/*
 * Copy MAX_STRIDE words (256 bytes) from @buffer to the FIFO; the mirror
 * image of dwmci_memcpy_fromio() above.
 */
void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}
#else
/*
 * Empty stubs for non-ARMv7 builds; stride PIO (host->stride_pio) is only
 * expected to be enabled on platforms that provide the assembly above.
 */
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
#endif
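
/*
 * For reference, the stride copy above is equivalent to the portable loop
 * sketched below (illustrative only, not used by this driver): it reads the
 * same FIFO data register MAX_STRIDE times into an incrementing buffer
 * pointer.  The hand-written ldm/stm version simply does this eight words at
 * a time to cut load/store overhead.
 *
 *	static void dwmci_memcpy_fromio_generic(u32 *buffer, void *fifo_addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_STRIDE; i++)
 *			buffer[i] = readl(fifo_addr);
 *	}
 */
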
/*
 * Write @value to CTRL and poll until the DWMCI_RESET_ALL bits have cleared.
 * Returns 1 on success, 0 if the controller never came out of reset.
 */
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

/*
 * Build the IDMAC descriptor chain for @data in @cur_idmac, pointing each
 * descriptor at a PAGE_SIZE slice of @bounce_buffer (up to 8 blocks per
 * descriptor), then enable the internal DMA controller.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
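
/*
 * Worked example (assuming 512-byte blocks): a 64-block transfer uses
 * 64 / 8 = 8 descriptors.  Descriptor 0 carries DWMCI_IDMAC_FS and points at
 * bounce_buffer + 0, descriptor 1 at bounce_buffer + 4096, and so on, each
 * with cnt = 8 * 512 = 4096 bytes; the last descriptor carries
 * DWMCI_IDMAC_LD and closes the chain.
 */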

static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 10000) ? 10000 : timeout;

	return timeout;
}
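
/*
 * Example of the arithmetic above (values are illustrative): a 512 KiB
 * transfer at 50 MHz on a 4-bit bus gives 524288 * 8 * 10 = 41943040, which
 * integer-divides by 50000000 to 0, so the 10000 ms floor applies.  Only very
 * large transfers or very low clock rates push the result past the floor.
 */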

/*
 * Wait for the data phase to finish.  In FIFO (PIO) mode the data is also
 * moved here; fifo_depth is recovered from the programmed RX watermark,
 * i.e. depth = (RX_WMARK + 1) * 2 words.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout, status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks;
	/* Still use legacy PIO mode if size < 512 (128 * 4) bytes */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(host->mmc, size);
	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy PIO mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio() bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						/* Legacy PIO mode */
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio() bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
		struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	return ret;
}
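
/*
 * Example of the CMD flag composition above (illustrative): a single-block
 * read (CMD17, R1 response) with data ends up writing
 *
 *	DWMCI_CMD_DATA_EXP | DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_RESP_EXP |
 *	DWMCI_CMD_CHECK_CRC | 17 | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG
 *
 * to the CMD register; a write would additionally set DWMCI_CMD_RW via
 * dwmci_set_transfer_mode().
 */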

#ifdef CONFIG_SPL_BLK_READ_PREPARE
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	struct dwmci_idmac *cur_idmac;
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/*
	 * The IDMAC descriptors are heap-allocated and not freed in this
	 * function; this variant only starts the transfer, so the descriptors
	 * have to outlive the call.
	 */
	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
			   sizeof(struct dwmci_idmac),
			   ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
	if (!cur_idmac)
		return -ENOMEM;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	return ret;
}
#endif

static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if (freq == 0)
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz holds
	 * the source clock rate; in that case host->bus_hz must have been set
	 * by the platform code.
	 */
	if (host->get_mmc_clk) {
		sclk = host->get_mmc_clk(host, freq);
	} else if (host->bus_hz) {
		sclk = host->bus_hz;
	} else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == 0)
		return -EINVAL;

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
			DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}
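
/*
 * Divider example (illustrative values): with a 100 MHz source clock and a
 * requested card clock of 400 kHz, div = DIV_ROUND_UP(100000000, 2 * 400000)
 * = 125, and the controller outputs sclk / (2 * div) = 400 kHz.  When the
 * source clock already equals the requested frequency, div = 0 selects
 * bypass mode.
 */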

#ifdef CONFIG_DM_MMC
static bool dwmci_card_busy(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static bool dwmci_card_busy(struct mmc *mmc)
{
#endif
	u32 status;
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	/*
	 * Check the data-busy bit in the STATUS register; it is set while
	 * the card signals busy by holding the data line low.
	 */
	status = dwmci_readl(host, DWMCI_STATUS);

	return !!(status & DWMCI_BUSY);
}

#ifdef CONFIG_DM_MMC
static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	if (!host->execute_tuning)
		return -EIO;

	return host->execute_tuning(host, opcode);
}

#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc_card_ddr(mmc))
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;
	uint32_t use_dma;
	uint32_t verid;

	if (host->board_init)
		host->board_init(host);
#ifdef CONFIG_ARCH_ROCKCHIP
	if (host->dev_index == 0)
		dwmci_writel(host, DWMCI_PWREN, 1);
	else if (host->dev_index == 1)
		dwmci_writel(host, DWMCI_PWREN, 0);
	else
		dwmci_writel(host, DWMCI_PWREN, 1);
#else
	dwmci_writel(host, DWMCI_PWREN, 1);
#endif

	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
	if (verid >= DW_MMC_240A)
		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
	if (use_dma == DMA_INTERFACE_IDMA)
		host->fifo_mode = 0;
	else
		host->fifo_mode = 1;

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
				RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
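
/*
 * FIFOTH example (illustrative): if the reset value of DWMCI_FIFOTH reports a
 * FIFO depth of 256 words (RX_WMARK field = 255), dwmci_init() programs
 * MSIZE(DWMCI_MSIZE) | RX_WMARK(127) | TX_WMARK(128), i.e. the RX watermark
 * at half the FIFO minus one and the TX watermark at half the FIFO.
 */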

static int dwmci_get_cd(struct udevice *dev)
{
	int ret = -1;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc detect;

	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
	if (ret)
		return ret;

	ret = !dm_gpio_get_value(&detect);
#endif
	return ret;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	.send_cmd_prepare = dwmci_send_cmd_prepare,
#endif
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif

void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	switch (host->buswidth) {
	case 8:
		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
		break;
	case 4:
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	case 1:
		cfg->host_caps &= ~MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	default:
		printf("Unsupported bus width: %d\n", host->buswidth);
		break;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
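
/*
 * Typical (non-DM) board usage sketch; the register base, clock rates and
 * bus width below are hypothetical placeholders:
 *
 *	static struct dwmci_host board_dwmci_host;
 *
 *	int board_mmc_init(bd_t *bis)
 *	{
 *		struct dwmci_host *host = &board_dwmci_host;
 *
 *		host->name = "DWMMC";
 *		host->ioaddr = (void *)0xfe320000;
 *		host->buswidth = 4;
 *		host->dev_index = 0;
 *		host->bus_hz = 50000000;
 *
 *		return add_dwmci(host, 50000000, 400000);
 *	}
 */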
#endif
893