xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision cd1c982e9a20e1f221cc1158f81fc40d9d0af0c2)
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#ifdef CONFIG_DM_GPIO
#include <asm/gpio.h>
#include <asm-generic/gpio.h>
#endif

#define PAGE_SIZE 4096

/*
 * Reads and writes are currently done in bursts of up to
 * 8 * 8 * 4 = 256 bytes per stride. Note that if you change
 * MAX_STRIDE, you must also update dwmci_memcpy_fromio/_toio
 * to match the number of {ldm, stm} groups.
 */
#define MAX_STRIDE 64
#if defined(CONFIG_ARM) && defined(CONFIG_CPU_V7)
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}

void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}
#else
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) { }
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) { }
#endif

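/*
 * Write @value to the CTRL register and poll until the reset bits have
 * cleared. Returns 1 on success, 0 if the controller did not come out
 * of reset in time.
 */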
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

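/* Fill one internal DMA controller (IDMAC) descriptor and link it to the next one. */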
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

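/*
 * Build the IDMAC descriptor chain for @data (one descriptor per up to
 * eight blocks), flush it to memory and program the DMA and block
 * registers of the controller.
 */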
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

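/*
 * Wait for the data phase to finish. In FIFO (PIO) mode the FIFO is
 * drained or filled here, either word by word or, when stride PIO is
 * enabled and the transfer is large enough, in MAX_STRIDE-word bursts
 * via dwmci_memcpy_fromio/_toio.
 */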
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout = 240000;
	u32 status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks / 4;
	/* Use legacy PIO mode for transfers of 512 (128 * 4) bytes or less */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);

			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy PIO mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio bursts 256 bytes at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

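/* Build the data-transfer bits of the CMD register for @data. */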
static int dwmci_set_transfer_mode(struct dwmci_host *host,
		struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

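/*
 * Send a command and, if @data is given, run the data phase either in
 * FIFO (PIO) mode or through the IDMAC with a bounce buffer.
 */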
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

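/*
 * Variant of dwmci_send_cmd() used by the SPL "read prepare" path: the
 * IDMAC descriptors are allocated from the heap rather than the stack,
 * and the routine returns after the command phase without waiting for
 * the data transfer to complete.
 */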
#ifdef CONFIG_SPL_BLK_READ_PREPARE
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	struct dwmci_idmac *cur_idmac;
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;
	u32 retry = 100000;
	u32 mask;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
			   sizeof(struct dwmci_idmac),
			   ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
	if (!cur_idmac)
		return -ENOMEM;
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	return ret;
}
#endif

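/*
 * Program the card clock divider for @freq and issue the clock-update
 * commands, first with the clock disabled and then with it re-enabled.
 */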
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if (freq == 0)
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz
	 * holds the source clock rate; in that case it must have been
	 * set by the platform code.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
			DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

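/* Report whether the card is still signalling busy on the data line. */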
#ifdef CONFIG_DM_MMC
static bool dwmci_card_busy(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static bool dwmci_card_busy(struct mmc *mmc)
{
#endif
	u32 status;
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	/*
	 * The data_busy bit in STATUS is set while the card holds
	 * DAT[0] low, i.e. while it is still busy.
	 */
	status = dwmci_readl(host, DWMCI_STATUS);

	return !!(status & DWMCI_BUSY);
}

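/* Delegate tuning to the platform-specific hook, if one is provided. */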
#ifdef CONFIG_DM_MMC
static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	if (!host->execute_tuning)
		return -EIO;

	return host->execute_tuning(host, opcode);
}

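/* Apply the requested clock, bus width and DDR mode to the controller. */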
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc_card_ddr(mmc))
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

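/*
 * One-time controller init: power the card, reset the controller,
 * detect DMA support from HCON, set the FIFO thresholds and mask all
 * interrupts (the driver operates by polling RINTSTS).
 */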
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;
	uint32_t use_dma;
	uint32_t verid;

	if (host->board_init)
		host->board_init(host);
#ifdef CONFIG_ARCH_ROCKCHIP
	if (host->dev_index == 0)
		dwmci_writel(host, DWMCI_PWREN, 1);
	else if (host->dev_index == 1)
		dwmci_writel(host, DWMCI_PWREN, 0);
	else
		dwmci_writel(host, DWMCI_PWREN, 1);
#else
	dwmci_writel(host, DWMCI_PWREN, 1);
#endif

	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
	if (verid >= DW_MMC_240A)
		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Failed to reset!\n", __func__, __LINE__);
		return -EIO;
	}

	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
	if (use_dma == DMA_INTERFACE_IDMA)
		host->fifo_mode = 0;
	else
		host->fifo_mode = 1;

	/* Enumerate at 400 kHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
				RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

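/*
 * Read card-detect state from the optional "cd-gpios" GPIO; without
 * GPIO support (or in SPL) this returns a negative value.
 */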
static int dwmci_get_cd(struct udevice *dev)
{
	int ret = -1;
#ifndef CONFIG_SPL_BUILD
#ifdef CONFIG_DM_GPIO
	struct gpio_desc detect;

	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
	if (ret)
		return ret;

	ret = !dm_gpio_get_value(&detect);
#endif
#endif
	return ret;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	.send_cmd_prepare = dwmci_send_cmd_prepare,
#endif
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif

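/* Fill in the mmc_config from the host capabilities and clock limits. */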
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif