xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision 514e00a960f8a815e0c86931b498063c6fc4ef76)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <common.h>
10 #include <bouncebuf.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <mmc.h>
15 #include <dwmmc.h>
16 #ifdef CONFIG_DM_GPIO
17 #include <asm/gpio.h>
18 #include <asm-generic/gpio.h>
19 #endif
20 
21 #define PAGE_SIZE 4096
22 
23 /*
24  * Each stride bursts up to 8 * 8 * 4 = 256 bytes (eight {ldm, stm}
25  * pairs moving eight 32-bit registers each). If you change
26  * MAX_STRIDE, also update dwmci_memcpy_fromio() and
27  * dwmci_memcpy_toio() so the number of {ldm, stm} groups matches.
28  */
29 #define MAX_STRIDE 64
30 #if (CONFIG_ARM && CONFIG_CPU_V7 && !defined(CONFIG_MMC_SIMPLE))
31 void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
32 {
33 	__asm__ __volatile__ (
34 		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
35 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
36 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
37 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
38 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
39 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
40 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
41 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
42 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
43 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
44 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
45 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
46 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
47 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
48 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
49 		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
50 		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
51 		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
52 		:::"memory"
53 	);
54 }
55 
56 void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
57 {
58 	__asm__ __volatile__ (
59 		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
60 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
61 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
62 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
63 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
64 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
65 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
66 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
67 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
68 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
69 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
70 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
71 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
72 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
73 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
74 		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
75 		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
76 		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
77 		:::"memory"
78 	);
79 }
80 #else
81 void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
82 void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
83 #endif
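/*
 * For reference, a portable C sketch of one 256-byte burst (illustrative
 * only, not used by this driver; it assumes readl()/writel() from
 * asm/io.h and MAX_STRIDE counted in 32-bit words):
 *
 *	static void dwmci_memcpy_fromio_c(u32 *buffer, void *fifo_addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_STRIDE; i++)
 *			buffer[i] = readl(fifo_addr);
 *	}
 *
 *	static void dwmci_memcpy_toio_c(const u32 *buffer, void *fifo_addr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_STRIDE; i++)
 *			writel(buffer[i], fifo_addr);
 *	}
 *
 * The fallback definitions above are empty, so stride PIO
 * (host->stride_pio) must only be enabled on configurations that provide
 * the assembly versions.
 */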
84 
85 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
86 {
87 	unsigned long timeout = 1000;
88 	u32 ctrl;
89 
90 	dwmci_writel(host, DWMCI_CTRL, value);
91 
92 	while (timeout--) {
93 		ctrl = dwmci_readl(host, DWMCI_CTRL);
94 		if (!(ctrl & DWMCI_RESET_ALL))
95 			return 1;
96 	}
97 	return 0;
98 }
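/*
 * Note that the reset wait above is bounded by iteration count rather
 * than by time.  An equivalent time-bounded sketch (illustrative only,
 * not what this driver does; get_timer() counts milliseconds) would be:
 *
 *	ulong start = get_timer(0);
 *
 *	dwmci_writel(host, DWMCI_CTRL, value);
 *	while (dwmci_readl(host, DWMCI_CTRL) & DWMCI_RESET_ALL) {
 *		if (get_timer(start) > 100)
 *			return 0;
 *	}
 *	return 1;
 */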
99 
100 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
101 		u32 desc0, u32 desc1, u32 desc2)
102 {
103 	struct dwmci_idmac *desc = idmac;
104 
105 	desc->flags = desc0;
106 	desc->cnt = desc1;
107 	desc->addr = desc2;
108 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
109 }
110 
111 static void dwmci_prepare_data(struct dwmci_host *host,
112 			       struct mmc_data *data,
113 			       struct dwmci_idmac *cur_idmac,
114 			       void *bounce_buffer)
115 {
116 	unsigned long ctrl;
117 	unsigned int i = 0, flags, cnt, blk_cnt;
118 	ulong data_start, data_end;
119 
120 
121 	blk_cnt = data->blocks;
122 
123 	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
124 
125 	data_start = (ulong)cur_idmac;
126 	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
127 
128 	do {
129 		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
130 		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
131 		if (blk_cnt <= 8) {
132 			flags |= DWMCI_IDMAC_LD;
133 			cnt = data->blocksize * blk_cnt;
134 		} else
135 			cnt = data->blocksize * 8;
136 
137 		dwmci_set_idma_desc(cur_idmac, flags, cnt,
138 				    (ulong)bounce_buffer + (i * PAGE_SIZE));
139 
140 		if (blk_cnt <= 8)
141 			break;
142 		blk_cnt -= 8;
143 		cur_idmac++;
144 		i++;
145 	} while (1);
146 
147 	data_end = (ulong)cur_idmac;
148 	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);
149 
150 	ctrl = dwmci_readl(host, DWMCI_CTRL);
151 	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
152 	dwmci_writel(host, DWMCI_CTRL, ctrl);
153 
154 	ctrl = dwmci_readl(host, DWMCI_BMOD);
155 	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
156 	dwmci_writel(host, DWMCI_BMOD, ctrl);
157 
158 	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
159 	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
160 }
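/*
 * Worked example of the descriptor chain built above (assuming 512-byte
 * blocks): a 20-block read needs DIV_ROUND_UP(20, 8) = 3 descriptors.
 * Descriptor 0 carries DWMCI_IDMAC_FS and 8 * 512 = 4096 bytes,
 * descriptor 1 another 4096 bytes, and descriptor 2 carries
 * DWMCI_IDMAC_LD plus the remaining 4 * 512 = 2048 bytes.  Each
 * descriptor points into the bounce buffer at offset i * PAGE_SIZE,
 * which is why no descriptor ever covers more than 8 blocks
 * (8 * 512 = PAGE_SIZE).
 */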
161 
162 static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
163 {
164 	unsigned int timeout;
165 
166 	timeout = size * 8;	/* counting in bits */
167 	timeout *= 10;		/* wait 10 times as long */
168 	timeout /= mmc->clock;
169 	timeout /= mmc->bus_width;
170 	timeout *= 1000;	/* counting in msec */
171 	timeout = (timeout < 10000) ? 10000 : timeout;
172 
173 	return timeout;
174 }
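/*
 * Worked example of the formula above: a single 512-byte block at
 * 400 kHz on a 1-bit bus gives 512 * 8 * 10 / 400000 = 0 after the
 * integer division by the clock, so the millisecond conversion also
 * yields 0 and the 10000 ms floor is what actually applies.  Only very
 * large transfers at low clock rates produce a computed timeout above
 * that floor.
 */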
175 
176 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
177 {
178 	int ret = 0;
179 	int reset_timeout = 100;
180 	u32 timeout, status, ctrl, mask, size, i, len = 0;
181 	u32 *buf = NULL;
182 	ulong start = get_timer(0);
183 	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
184 			    RX_WMARK_SHIFT) + 1) * 2;
185 	bool stride;
186 
187 	size = data->blocksize * data->blocks;
188 	/* Still use legacy PIO mode if size < 512 (128 * 4) bytes */
189 	stride = host->stride_pio && size >= 512;
190 	if (data->flags == MMC_DATA_READ)
191 		buf = (unsigned int *)data->dest;
192 	else
193 		buf = (unsigned int *)data->src;
194 
195 	timeout = dwmci_get_timeout(host->mmc, size);
196 	size /= 4;
197 
198 	for (;;) {
199 		mask = dwmci_readl(host, DWMCI_RINTSTS);
200 		/* Error during data transfer. */
201 		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
202 			debug("%s: DATA ERROR!\n", __func__);
203 			dwmci_wait_reset(host, DWMCI_RESET_ALL);
204 			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
205 				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
206 
207 			do {
208 				status = dwmci_readl(host, DWMCI_CMD);
209 				if (reset_timeout-- < 0)
210 					break;
211 				udelay(100);
212 			} while (status & DWMCI_CMD_START);
213 
214 			if (!host->fifo_mode) {
215 				ctrl = dwmci_readl(host, DWMCI_BMOD);
216 				ctrl |= DWMCI_BMOD_IDMAC_RESET;
217 				dwmci_writel(host, DWMCI_BMOD, ctrl);
218 			}
219 
220 			ret = -EINVAL;
221 			break;
222 		}
223 
224 		if (host->fifo_mode && size) {
225 			len = 0;
226 			if (data->flags == MMC_DATA_READ &&
227 			    (mask & DWMCI_INTMSK_RXDR)) {
228 				while (size) {
229 					len = dwmci_readl(host, DWMCI_STATUS);
230 					len = (len >> DWMCI_FIFO_SHIFT) &
231 						    DWMCI_FIFO_MASK;
232 					len = min(size, len);
233 					if (!stride) {
234 						/* Legacy pio mode */
235 						for (i = 0; i < len; i++)
236 							*buf++ = dwmci_readl(host, DWMCI_DATA);
237 						goto read_again;
238 					}
239 
240 					/* dwmci_memcpy_fromio() bursts 256 bytes (MAX_STRIDE words) at a time */
241 					if (len < MAX_STRIDE)
242 						continue;
243 
244 					for (i = 0; i < len / MAX_STRIDE; i++) {
245 						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
246 						buf += MAX_STRIDE;
247 					}
248 
249 					len = i * MAX_STRIDE;
250 read_again:
251 					size = size > len ? (size - len) : 0;
252 				}
253 				dwmci_writel(host, DWMCI_RINTSTS,
254 					     DWMCI_INTMSK_RXDR);
255 			} else if (data->flags == MMC_DATA_WRITE &&
256 				   (mask & DWMCI_INTMSK_TXDR)) {
257 				while (size) {
258 					len = dwmci_readl(host, DWMCI_STATUS);
259 					len = fifo_depth - ((len >>
260 						   DWMCI_FIFO_SHIFT) &
261 						   DWMCI_FIFO_MASK);
262 					len = min(size, len);
263 					if (!stride) {
264 						for (i = 0; i < len; i++)
265 							dwmci_writel(host, DWMCI_DATA,
266 								     *buf++);
267 						goto write_again;
268 					}
269 					/* dwmci_memcpy_toio() bursts 256 bytes (MAX_STRIDE words) at a time */
270 					if (len < MAX_STRIDE)
271 						continue;
272 
273 					for (i = 0; i < len / MAX_STRIDE; i++) {
274 						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
275 						buf += MAX_STRIDE;
276 					}
277 
278 					len = i * MAX_STRIDE;
279 write_again:
280 					size = size > len ? (size - len) : 0;
281 				}
282 				dwmci_writel(host, DWMCI_RINTSTS,
283 					     DWMCI_INTMSK_TXDR);
284 			}
285 		}
286 
287 		/* Data transfer completed (DTO). */
288 		if (mask & DWMCI_INTMSK_DTO) {
289 			ret = 0;
290 			break;
291 		}
292 
293 		/* Check for timeout. */
294 		if (get_timer(start) > timeout) {
295 			debug("%s: Timeout waiting for data!\n",
296 			      __func__);
297 			ret = -ETIMEDOUT;
298 			break;
299 		}
300 	}
301 
302 	dwmci_writel(host, DWMCI_RINTSTS, mask);
303 
304 	return ret;
305 }
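/*
 * Example of the PIO bookkeeping above, assuming the default FIFOTH
 * value programmed by dwmci_init() on a 256-word FIFO: RX_WMARK is then
 * 256 / 2 - 1 = 127, so fifo_depth = (127 + 1) * 2 = 256 words.  On a
 * read, len is the number of words currently in the FIFO; with stride
 * PIO it is drained in MAX_STRIDE chunks (64 words, 256 bytes), and if
 * fewer than 64 words are available the loop polls DWMCI_STATUS again
 * until a full chunk can be burst.
 */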
306 
307 static int dwmci_set_transfer_mode(struct dwmci_host *host,
308 		struct mmc_data *data)
309 {
310 	unsigned long mode;
311 
312 	mode = DWMCI_CMD_DATA_EXP;
313 	if (data->flags & MMC_DATA_WRITE)
314 		mode |= DWMCI_CMD_RW;
315 
316 	return mode;
317 }
318 
319 #ifdef CONFIG_DM_MMC
320 static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
321 		   struct mmc_data *data)
322 {
323 	struct mmc *mmc = mmc_get_mmc_dev(dev);
324 #else
325 static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
326 		struct mmc_data *data)
327 {
328 #endif
329 	struct dwmci_host *host = mmc->priv;
330 	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
331 				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
332 	int ret = 0, flags = 0, i;
333 	unsigned int timeout = 500;
334 	u32 retry = 100000;
335 	u32 mask, ctrl;
336 	ulong start = get_timer(0);
337 	struct bounce_buffer bbstate;
338 
339 	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
340 		if (get_timer(start) > timeout) {
341 			debug("%s: Timeout on data busy\n", __func__);
342 			return -ETIMEDOUT;
343 		}
344 	}
345 
346 	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
347 
348 	if (data) {
349 		if (host->fifo_mode) {
350 			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
351 			dwmci_writel(host, DWMCI_BYTCNT,
352 				     data->blocksize * data->blocks);
353 			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
354 		} else {
355 			if (data->flags == MMC_DATA_READ) {
356 				ret = bounce_buffer_start(&bbstate,
357 						(void*)data->dest,
358 						data->blocksize *
359 						data->blocks, GEN_BB_WRITE);
360 			} else {
361 				ret = bounce_buffer_start(&bbstate,
362 						(void*)data->src,
363 						data->blocksize *
364 						data->blocks, GEN_BB_READ);
365 			}
366 
367 			if (ret)
368 				return ret;
369 
370 			dwmci_prepare_data(host, data, cur_idmac,
371 					   bbstate.bounce_buffer);
372 		}
373 	}
374 
375 	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
376 
377 	if (data)
378 		flags = dwmci_set_transfer_mode(host, data);
379 
380 	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
381 		return -1;
382 
383 	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
384 		flags |= DWMCI_CMD_ABORT_STOP;
385 	else
386 		flags |= DWMCI_CMD_PRV_DAT_WAIT;
387 
388 	if (cmd->resp_type & MMC_RSP_PRESENT) {
389 		flags |= DWMCI_CMD_RESP_EXP;
390 		if (cmd->resp_type & MMC_RSP_136)
391 			flags |= DWMCI_CMD_RESP_LENGTH;
392 	}
393 
394 	if (cmd->resp_type & MMC_RSP_CRC)
395 		flags |= DWMCI_CMD_CHECK_CRC;
396 
397 	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
398 
399 	debug("Sending CMD%d\n", cmd->cmdidx);
400 
401 	dwmci_writel(host, DWMCI_CMD, flags);
402 
403 	for (i = 0; i < retry; i++) {
404 		mask = dwmci_readl(host, DWMCI_RINTSTS);
405 		if (mask & DWMCI_INTMSK_CDONE) {
406 			if (!data)
407 				dwmci_writel(host, DWMCI_RINTSTS, mask);
408 			break;
409 		}
410 	}
411 
412 	if (i == retry) {
413 		debug("%s: Timeout.\n", __func__);
414 		return -ETIMEDOUT;
415 	}
416 
417 	if (mask & DWMCI_INTMSK_RTO) {
418 		/*
419 		 * Timeout here is not necessarily fatal. (e)MMC cards
420 		 * will splat here when they receive CMD55 as they do
421 		 * not support this command and that is exactly the way
422 		 * to tell them apart from SD cards. Thus, this output
423 		 * below shall be debug(). eMMC cards also do not favor
424 		 * CMD8, please keep that in mind.
425 		 */
426 		debug("%s: Response Timeout.\n", __func__);
427 		return -ETIMEDOUT;
428 	} else if (mask & DWMCI_INTMSK_RE) {
429 		debug("%s: Response Error.\n", __func__);
430 		return -EIO;
431 	}
432 
433 
434 	if (cmd->resp_type & MMC_RSP_PRESENT) {
435 		if (cmd->resp_type & MMC_RSP_136) {
436 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
437 			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
438 			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
439 			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
440 		} else {
441 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
442 		}
443 	}
444 
445 	if (data) {
446 		ret = dwmci_data_transfer(host, data);
447 
448 		/* Only DMA mode needs this. */
449 		if (!host->fifo_mode) {
450 			ctrl = dwmci_readl(host, DWMCI_CTRL);
451 			ctrl &= ~(DWMCI_DMA_EN);
452 			dwmci_writel(host, DWMCI_CTRL, ctrl);
453 			bounce_buffer_stop(&bbstate);
454 		}
455 	}
456 
457 	return ret;
458 }
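/*
 * Example of how the command flags above compose: a CMD17 single-block
 * read with an R1 response gets DWMCI_CMD_DATA_EXP from
 * dwmci_set_transfer_mode(), DWMCI_CMD_PRV_DAT_WAIT (it is not a stop
 * command), DWMCI_CMD_RESP_EXP | DWMCI_CMD_CHECK_CRC for the short
 * CRC-protected response, and is finally OR-ed with the command index,
 * DWMCI_CMD_START and DWMCI_CMD_USE_HOLD_REG before being written to
 * DWMCI_CMD.
 */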
459 
460 #ifdef CONFIG_SPL_BLK_READ_PREPARE
461 #ifdef CONFIG_DM_MMC
462 static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
463 				  struct mmc_data *data)
464 {
465 	struct mmc *mmc = mmc_get_mmc_dev(dev);
466 #else
467 static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
468 				  struct mmc_data *data)
469 {
470 #endif
471 	struct dwmci_host *host = mmc->priv;
472 	struct dwmci_idmac *cur_idmac;
473 	int ret = 0, flags = 0, i;
474 	unsigned int timeout = 500;
475 	u32 retry = 100000;
476 	u32 mask;
477 	ulong start = get_timer(0);
478 	struct bounce_buffer bbstate;
479 
480 	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
481 			   sizeof(struct dwmci_idmac),
482 			   ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
483 	if (!cur_idmac)
484 		return -ENOMEM;
485 
486 	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
487 		if (get_timer(start) > timeout) {
488 			debug("%s: Timeout on data busy\n", __func__);
489 			return -ETIMEDOUT;
490 		}
491 	}
492 
493 	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
494 
495 	if (data) {
496 		if (host->fifo_mode) {
497 			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
498 			dwmci_writel(host, DWMCI_BYTCNT,
499 				     data->blocksize * data->blocks);
500 			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
501 		} else {
502 			if (data->flags == MMC_DATA_READ) {
503 				bounce_buffer_start(&bbstate, (void *)data->dest,
504 						    data->blocksize *
505 						    data->blocks, GEN_BB_WRITE);
506 			} else {
507 				bounce_buffer_start(&bbstate, (void *)data->src,
508 						    data->blocksize *
509 						    data->blocks, GEN_BB_READ);
510 			}
511 			dwmci_prepare_data(host, data, cur_idmac,
512 					   bbstate.bounce_buffer);
513 		}
514 	}
515 
516 	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
517 
518 	if (data)
519 		flags = dwmci_set_transfer_mode(host, data);
520 
521 	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
522 		return -1;
523 
524 	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
525 		flags |= DWMCI_CMD_ABORT_STOP;
526 	else
527 		flags |= DWMCI_CMD_PRV_DAT_WAIT;
528 
529 	if (cmd->resp_type & MMC_RSP_PRESENT) {
530 		flags |= DWMCI_CMD_RESP_EXP;
531 		if (cmd->resp_type & MMC_RSP_136)
532 			flags |= DWMCI_CMD_RESP_LENGTH;
533 	}
534 
535 	if (cmd->resp_type & MMC_RSP_CRC)
536 		flags |= DWMCI_CMD_CHECK_CRC;
537 
538 	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
539 
540 	debug("Sending CMD%d\n", cmd->cmdidx);
541 
542 	dwmci_writel(host, DWMCI_CMD, flags);
543 
544 	for (i = 0; i < retry; i++) {
545 		mask = dwmci_readl(host, DWMCI_RINTSTS);
546 		if (mask & DWMCI_INTMSK_CDONE) {
547 			if (!data)
548 				dwmci_writel(host, DWMCI_RINTSTS, mask);
549 			break;
550 		}
551 	}
552 
553 	if (i == retry) {
554 		debug("%s: Timeout.\n", __func__);
555 		return -ETIMEDOUT;
556 	}
557 
558 	if (mask & DWMCI_INTMSK_RTO) {
559 		/*
560 		 * Timeout here is not necessarily fatal. (e)MMC cards
561 		 * will splat here when they receive CMD55 as they do
562 		 * not support this command and that is exactly the way
563 		 * to tell them apart from SD cards. Thus, this output
564 		 * below shall be debug(). eMMC cards also do not favor
565 		 * CMD8, please keep that in mind.
566 		 */
567 		debug("%s: Response Timeout.\n", __func__);
568 		return -ETIMEDOUT;
569 	} else if (mask & DWMCI_INTMSK_RE) {
570 		debug("%s: Response Error.\n", __func__);
571 		return -EIO;
572 	}
573 
574 	if (cmd->resp_type & MMC_RSP_PRESENT) {
575 		if (cmd->resp_type & MMC_RSP_136) {
576 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
577 			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
578 			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
579 			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
580 		} else {
581 			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
582 		}
583 	}
584 
585 	return ret;
586 }
587 #endif
588 
589 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
590 {
591 	u32 div, status;
592 	int timeout = 10000;
593 	unsigned long sclk;
594 
595 	if (freq == 0)
596 		return 0;
597 	/*
598 	 * If host->get_mmc_clk isn't defined,
599 	 * then assume that host->bus_hz is source clock value.
600 	 * host->bus_hz should be set by user.
601 	 */
602 	if (host->get_mmc_clk)
603 		sclk = host->get_mmc_clk(host, freq);
604 	else if (host->bus_hz)
605 		sclk = host->bus_hz;
606 	else {
607 		debug("%s: Didn't get source clock value.\n", __func__);
608 		return -EINVAL;
609 	}
610 
611 	if (sclk == 0)
612 		return -EINVAL;
613 
614 	if (sclk == freq)
615 		div = 0;	/* bypass mode */
616 	else
617 		div = DIV_ROUND_UP(sclk, 2 * freq);
618 
619 	dwmci_writel(host, DWMCI_CLKENA, 0);
620 	dwmci_writel(host, DWMCI_CLKSRC, 0);
621 
622 	dwmci_writel(host, DWMCI_CLKDIV, div);
623 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
624 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
625 
626 	do {
627 		status = dwmci_readl(host, DWMCI_CMD);
628 		if (timeout-- < 0) {
629 			debug("%s: Timeout!\n", __func__);
630 			return -ETIMEDOUT;
631 		}
632 	} while (status & DWMCI_CMD_START);
633 
634 	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
635 			DWMCI_CLKEN_LOW_PWR);
636 
637 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
638 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
639 
640 	timeout = 10000;
641 	do {
642 		status = dwmci_readl(host, DWMCI_CMD);
643 		if (timeout-- < 0) {
644 			debug("%s: Timeout!\n", __func__);
645 			return -ETIMEDOUT;
646 		}
647 	} while (status & DWMCI_CMD_START);
648 
649 	host->clock = freq;
650 
651 	return 0;
652 }
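/*
 * Divider example: with sclk = 50 MHz and a requested freq of 400 kHz,
 * div = DIV_ROUND_UP(50000000, 2 * 400000) = 63 and the card clock
 * becomes sclk / (2 * div) = ~397 kHz, i.e. the divider always rounds
 * the card clock down so it never exceeds the requested frequency.
 * div = 0 selects bypass mode when sclk already equals freq.
 */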
653 
654 #ifdef CONFIG_DM_MMC
655 static bool dwmci_card_busy(struct udevice *dev)
656 {
657 	struct mmc *mmc = mmc_get_mmc_dev(dev);
658 #else
659 static bool dwmci_card_busy(struct mmc *mmc)
660 {
661 #endif
662 	u32 status;
663 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
664 
665 	/*
666 	 * The data_busy bit in STATUS is set while the card drives
667 	 * DAT[0] low, i.e. while it still signals busy.
668 	 */
669 	status = dwmci_readl(host, DWMCI_STATUS);
670 
671 	return !!(status & DWMCI_BUSY);
672 }
673 
674 #ifdef CONFIG_DM_MMC
675 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
676 {
677 	struct mmc *mmc = mmc_get_mmc_dev(dev);
678 #else
679 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
680 {
681 #endif
682 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
683 
684 	if (!host->execute_tuning)
685 		return -EIO;
686 
687 	return host->execute_tuning(host, opcode);
688 }
689 
690 #ifdef CONFIG_DM_MMC
691 static int dwmci_set_ios(struct udevice *dev)
692 {
693 	struct mmc *mmc = mmc_get_mmc_dev(dev);
694 #else
695 static int dwmci_set_ios(struct mmc *mmc)
696 {
697 #endif
698 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
699 	u32 ctype, regs;
700 
701 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
702 
703 	dwmci_setup_bus(host, mmc->clock);
704 	switch (mmc->bus_width) {
705 	case 8:
706 		ctype = DWMCI_CTYPE_8BIT;
707 		break;
708 	case 4:
709 		ctype = DWMCI_CTYPE_4BIT;
710 		break;
711 	default:
712 		ctype = DWMCI_CTYPE_1BIT;
713 		break;
714 	}
715 
716 	dwmci_writel(host, DWMCI_CTYPE, ctype);
717 
718 	regs = dwmci_readl(host, DWMCI_UHS_REG);
719 	if (mmc_card_ddr(mmc))
720 		regs |= DWMCI_DDR_MODE;
721 	else
722 		regs &= ~DWMCI_DDR_MODE;
723 
724 	dwmci_writel(host, DWMCI_UHS_REG, regs);
725 
726 	if (host->clksel)
727 		host->clksel(host);
728 
729 	return 0;
730 }
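/*
 * Example: for an eMMC running DDR at 8-bit width, the sequence above
 * reprograms the clock via dwmci_setup_bus(), writes DWMCI_CTYPE_8BIT to
 * DWMCI_CTYPE, sets DWMCI_DDR_MODE in DWMCI_UHS_REG, and finally lets
 * the platform adjust its clock phase through host->clksel() if that
 * callback is provided.
 */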
731 
732 static int dwmci_init(struct mmc *mmc)
733 {
734 	struct dwmci_host *host = mmc->priv;
735 	uint32_t use_dma;
736 	uint32_t verid;
737 
738 	if (host->board_init)
739 		host->board_init(host);
740 #ifdef CONFIG_ARCH_ROCKCHIP
741 	if (host->dev_index == 0)
742 		dwmci_writel(host, DWMCI_PWREN, 1);
743 	else if (host->dev_index == 1)
744 		dwmci_writel(host, DWMCI_PWREN, 0);
745 	else
746 		dwmci_writel(host, DWMCI_PWREN, 1);
747 #else
748 	dwmci_writel(host, DWMCI_PWREN, 1);
749 #endif
750 
751 	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
752 	if (verid >= DW_MMC_240A)
753 		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);
754 
755 	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
756 		debug("%s[%d]: reset failed\n", __func__, __LINE__);
757 		return -EIO;
758 	}
759 
760 	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
761 	if (use_dma == DMA_INTERFACE_IDMA) {
762 		host->fifo_mode = 0;
763 	} else {
764 		host->fifo_mode = 1;
765 	}
766 
767 	/* Enumerate at 400 kHz */
768 	dwmci_setup_bus(host, mmc->cfg->f_min);
769 
770 	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
771 	dwmci_writel(host, DWMCI_INTMASK, 0);
772 
773 	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);
774 
775 	dwmci_writel(host, DWMCI_IDINTEN, 0);
776 	dwmci_writel(host, DWMCI_BMOD, 1);
777 
778 	if (!host->fifoth_val) {
779 		uint32_t fifo_size;
780 
781 		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
782 		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
783 		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
784 				RX_WMARK(fifo_size / 2 - 1) |
785 				TX_WMARK(fifo_size / 2);
786 	}
787 	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
788 
789 	dwmci_writel(host, DWMCI_CLKENA, 0);
790 	dwmci_writel(host, DWMCI_CLKSRC, 0);
791 
792 	return 0;
793 }
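/*
 * FIFOTH example: the code above derives the FIFO depth from the reset
 * value of the RX_WMARK field, which on DesignWare MSHC typically resets
 * to depth - 1.  If that field reads back 31, fifo_size becomes 32 and
 * the driver programs MSIZE(DWMCI_MSIZE) | RX_WMARK(15) | TX_WMARK(16),
 * i.e. requests service at a half-full receive FIFO and a half-empty
 * transmit FIFO.  A platform can override this by setting
 * host->fifoth_val before dwmci_init() runs.
 */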
794 
795 static int dwmci_get_cd(struct udevice *dev)
796 {
797 	int ret = -1;
798 
799 #if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
800 	struct gpio_desc detect;
801 
802 	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
803 	if (ret) {
804 		return ret;
805 	}
806 
807 	ret = !dm_gpio_get_value(&detect);
808 	dm_gpio_free(dev, &detect);
809 #endif
810 	return ret;
811 }
812 
813 #ifdef CONFIG_DM_MMC
814 int dwmci_probe(struct udevice *dev)
815 {
816 	struct mmc *mmc = mmc_get_mmc_dev(dev);
817 
818 	return dwmci_init(mmc);
819 }
820 
821 const struct dm_mmc_ops dm_dwmci_ops = {
822 	.card_busy	= dwmci_card_busy,
823 	.send_cmd	= dwmci_send_cmd,
824 #ifdef CONFIG_SPL_BLK_READ_PREPARE
825 	.send_cmd_prepare = dwmci_send_cmd_prepare,
826 #endif
827 	.set_ios	= dwmci_set_ios,
828 	.get_cd         = dwmci_get_cd,
829 	.execute_tuning	= dwmci_execute_tuning,
830 };
831 
832 #else
833 static const struct mmc_ops dwmci_ops = {
834 	.card_busy	= dwmci_card_busy,
835 	.send_cmd	= dwmci_send_cmd,
836 	.set_ios	= dwmci_set_ios,
837 	.get_cd         = dwmci_get_cd,
838 	.init		= dwmci_init,
839 	.execute_tuning	= dwmci_execute_tuning,
840 };
841 #endif
842 
843 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
844 		u32 max_clk, u32 min_clk)
845 {
846 	cfg->name = host->name;
847 #ifndef CONFIG_DM_MMC
848 	cfg->ops = &dwmci_ops;
849 #endif
850 	cfg->f_min = min_clk;
851 	cfg->f_max = max_clk;
852 
853 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
854 
855 	cfg->host_caps = host->caps;
856 
857 	switch (host->buswidth) {
858 	case 8:
859 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
860 		break;
861 	case 4:
862 		cfg->host_caps |= MMC_MODE_4BIT;
863 		cfg->host_caps &= ~MMC_MODE_8BIT;
864 		break;
865 	case 1:
866 		cfg->host_caps &= ~MMC_MODE_4BIT;
867 		cfg->host_caps &= ~MMC_MODE_8BIT;
868 		break;
869 	default:
870 		printf("Unsupported bus width: %d\n", host->buswidth);
871 		break;
872 	}
873 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
874 
875 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
876 }
877 
878 #ifdef CONFIG_BLK
879 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
880 {
881 	return mmc_bind(dev, mmc, cfg);
882 }
883 #else
884 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
885 {
886 	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
887 
888 	host->mmc = mmc_create(&host->cfg, host);
889 	if (host->mmc == NULL)
890 		return -1;
891 
892 	return 0;
893 }
894 #endif
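/*
 * Typical non-DM board glue (hypothetical sketch; the register base and
 * board function are made up, the struct fields come from dwmmc.h):
 *
 *	static struct dwmci_host board_dwmci = {
 *		.name		= "board_dwmmc",
 *		.ioaddr		= (void *)0xff0f0000,
 *		.buswidth	= 4,
 *		.bus_hz		= 50000000,
 *		.dev_index	= 0,
 *	};
 *
 *	int board_mmc_init(bd_t *bis)
 *	{
 *		return add_dwmci(&board_dwmci, 52000000, 400000);
 *	}
 *
 * add_dwmci() fills host->cfg via dwmci_setup_cfg() and registers the
 * controller with mmc_create(); with CONFIG_BLK/CONFIG_DM_MMC the DM
 * driver calls dwmci_bind() and dwmci_probe() instead.
 */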
895