/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <bouncebuf.h>
#include <div64.h>
#include <errno.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#ifdef CONFIG_DM_GPIO
#include <asm/gpio.h>
#include <asm-generic/gpio.h>
#endif

#define PAGE_SIZE 4096
#define MSEC_PER_SEC	1000ULL

/*
 * Bursts of up to 8*8*4 bytes (MAX_STRIDE words) per stride are
 * currently supported for reads and writes. Please note that if you
 * change MAX_STRIDE, you should also update dwmci_memcpy_fromio and
 * dwmci_memcpy_toio to adjust the number of {ldm, stm} groups.
 */
#define MAX_STRIDE 64
#if (CONFIG_ARM && CONFIG_CPU_V7 && !defined(CONFIG_MMC_SIMPLE))
void noinline dwmci_memcpy_fromio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}

void noinline dwmci_memcpy_toio(void *buffer, void *fifo_addr)
{
	__asm__ __volatile__ (
		"push {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"ldm r0!, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"stm r1, {r2,r3,r4,r5,r6,r7,r8,r9}\n"
		"pop {r2, r3, r4, r5, r6, r7, r8, r9}\n"
		:::"memory"
	);
}
#else
void dwmci_memcpy_fromio(void *buffer, void *fifo_addr) {}
void dwmci_memcpy_toio(void *buffer, void *fifo_addr) {}
#endif

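/*
 * Write the requested reset bits to CTRL and poll until the controller
 * clears all of DWMCI_RESET_ALL. Returns 1 on success, 0 if the reset
 * did not complete within the poll loop.
 */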
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

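/*
 * Fill one IDMAC descriptor (flags, byte count, buffer address) and
 * chain it to the descriptor that immediately follows it in memory.
 */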
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
		u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

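/*
 * Build the IDMAC descriptor chain for a transfer. Each descriptor covers
 * up to 8 blocks (one PAGE_SIZE chunk of the bounce buffer for 512-byte
 * blocks); the first descriptor gets the FS flag and the last one the LD
 * flag. The chain is flushed to memory before internal DMA is enabled and
 * BLKSIZ/BYTCNT are programmed.
 */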
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		cur_idmac++;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

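/*
 * Data read timeout (DRTO) in milliseconds. In SPL builds it is derived
 * from the TMOUT register and the programmed clock divider; otherwise it
 * is estimated from the transfer size, bus width and clock, with a
 * 10-second floor.
 */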
#ifdef CONFIG_SPL_BUILD
static unsigned int dwmci_get_drto(struct dwmci_host *host,
				   const unsigned int size)
{
	unsigned int drto_clks;
	unsigned int drto_div;
	unsigned int drto_ms;

	drto_clks = dwmci_readl(host, DWMCI_TMOUT) >> 8;
	drto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
	if (drto_div == 0)
		drto_div = 1;

	drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
				   host->mmc->clock);

	/* add a bit of spare time */
	drto_ms += 10;

	return drto_ms;
}
#else
static unsigned int dwmci_get_drto(struct dwmci_host *host,
				   const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= host->mmc->clock;
	timeout /= host->mmc->bus_width;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 10000) ? 10000 : timeout;

	return timeout;
}
#endif

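/*
 * Command response timeout in milliseconds, derived from the low byte of
 * TMOUT and the clock divider, plus 10 ms of slack.
 */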
static unsigned int dwmci_get_cto(struct dwmci_host *host)
{
	unsigned int cto_clks;
	unsigned int cto_div;
	unsigned int cto_ms;

	cto_clks = dwmci_readl(host, DWMCI_TMOUT) & 0xff;
	cto_div = (dwmci_readl(host, DWMCI_CLKDIV) & 0xff) * 2;
	if (cto_div == 0)
		cto_div = 1;

	cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
				  host->mmc->clock);

	/* add a bit of spare time */
	cto_ms += 10;

	return cto_ms;
}

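/*
 * Wait for a data transfer to complete. In FIFO (PIO) mode the FIFO is
 * drained/filled here, either word by word or, when stride PIO is
 * enabled, in MAX_STRIDE-word bursts via dwmci_memcpy_fromio/toio. On a
 * data error the controller is reset. Returns 0 on data-transfer-over,
 * -EINVAL on a data error and -ETIMEDOUT on timeout.
 */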
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	int ret = 0;
	int reset_timeout = 100;
	u32 timeout, status, ctrl, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);
	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
			    RX_WMARK_SHIFT) + 1) * 2;
	bool stride;

	size = data->blocksize * data->blocks;
	/* Still use legacy PIO mode for small transfers (<= 128 bytes) */
	stride = host->stride_pio && size > 128;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_drto(host, size);
	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			dwmci_wait_reset(host, DWMCI_RESET_ALL);
			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

			do {
				status = dwmci_readl(host, DWMCI_CMD);
				if (reset_timeout-- < 0)
					break;
				udelay(100);
			} while (status & DWMCI_CMD_START);

			if (!host->fifo_mode) {
				ctrl = dwmci_readl(host, DWMCI_BMOD);
				ctrl |= DWMCI_BMOD_IDMAC_RESET;
				dwmci_writel(host, DWMCI_BMOD, ctrl);
			}

			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & DWMCI_INTMSK_RXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					if (!stride) {
						/* Legacy PIO mode */
						for (i = 0; i < len; i++)
							*buf++ = dwmci_readl(host, DWMCI_DATA);
						goto read_again;
					}

					/* dwmci_memcpy_fromio bursts 256 bytes (MAX_STRIDE words) at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_fromio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
read_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_RXDR);
				start = get_timer(0);
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					len = dwmci_readl(host, DWMCI_STATUS);
					len = fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					if (!stride) {
						for (i = 0; i < len; i++)
							dwmci_writel(host, DWMCI_DATA,
								     *buf++);
						goto write_again;
					}
					/* dwmci_memcpy_toio bursts 256 bytes (MAX_STRIDE words) at a time */
					if (len < MAX_STRIDE)
						continue;

					for (i = 0; i < len / MAX_STRIDE; i++) {
						dwmci_memcpy_toio(buf, host->ioaddr + DWMCI_DATA);
						buf += MAX_STRIDE;
					}

					len = i * MAX_STRIDE;
write_again:
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
				start = get_timer(0);
			}
		}

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
		struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

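/*
 * Send a command and, if requested, run the data phase. The controller is
 * first waited for to become idle, the transfer is set up either in FIFO
 * mode or through the IDMAC descriptor chain and a bounce buffer, the
 * command is issued and its response read, and finally the data phase is
 * completed in dwmci_data_transfer().
 */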
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only DMA mode needs this */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	return ret;
}

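/*
 * Variant of dwmci_send_cmd() used by the SPL block-read-prepare path: it
 * sets up the transfer and issues the command, reads the response, but
 * does not wait for the data phase to complete.
 */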
#ifdef CONFIG_SPL_BLK_READ_PREPARE
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd_prepare(struct udevice *dev, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd_prepare(struct mmc *mmc, struct mmc_cmd *cmd,
				  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	struct dwmci_idmac *cur_idmac;
	int ret = 0, flags = 0;
	unsigned int timeout = 500;
	u32 mask;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	cur_idmac = malloc(ROUND(DIV_ROUND_UP(data->blocks, 8) *
			   sizeof(struct dwmci_idmac),
			   ARCH_DMA_MINALIGN) + ARCH_DMA_MINALIGN - 1);
	if (!cur_idmac)
		return -ENODATA;

	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void *)data->dest,
						    data->blocksize *
						    data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void *)data->src,
						    data->blocksize *
						    data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	timeout = dwmci_get_cto(host);
	start = get_timer(0);
	do {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	} while (!(get_timer(start) > timeout));

	if (get_timer(start) > timeout) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	return ret;
}
#endif

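/*
 * Program the card clock: derive a divider from the source clock (bypass
 * when source and target frequency match), latch it with a CMD_UPD_CLK
 * command, and re-enable the clock with low-power gating.
 */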
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div, status;
	int timeout = 10000;
	unsigned long sclk;

	if (freq == 0)
		return 0;
	/*
	 * If host->get_mmc_clk isn't defined,
	 * then assume that host->bus_hz is the source clock value;
	 * host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk)
		sclk = host->get_mmc_clk(host, freq);
	else if (host->bus_hz)
		sclk = host->bus_hz;
	else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == 0)
		return -EINVAL;

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
			DWMCI_CLKEN_LOW_PWR);

	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);

	timeout = 10000;
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	host->clock = freq;

	return 0;
}

#ifdef CONFIG_DM_MMC
static bool dwmci_card_busy(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static bool dwmci_card_busy(struct mmc *mmc)
{
#endif
	u32 status;
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	/*
	 * Check the busy bit which is low when DAT[3:0]
	 * (the data lines) are 0000
	 */
	status = dwmci_readl(host, DWMCI_STATUS);

	return !!(status & DWMCI_BUSY);
}

#ifdef CONFIG_DM_MMC
static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;

	if (!host->execute_tuning)
		return -EIO;

	return host->execute_tuning(host, opcode);
}

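/*
 * Apply bus settings: reprogram the card clock, select the card type
 * register value for the current bus width, toggle DDR mode in UHS_REG
 * and call the optional board-specific clksel hook.
 */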
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc_card_ddr(mmc))
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel)
		host->clksel(host);

	return 0;
}

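/*
 * One-time controller initialisation: power the card, set the card read
 * threshold on IP versions >= 240A, reset the controller, pick DMA or
 * FIFO mode from HCON, set the enumeration clock, mask interrupts and
 * program TMOUT and the FIFO thresholds.
 */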
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;
	uint32_t use_dma;
	uint32_t verid;

	if (host->board_init)
		host->board_init(host);
#ifdef CONFIG_ARCH_ROCKCHIP
	if (host->dev_index == 0)
		dwmci_writel(host, DWMCI_PWREN, 1);
	else if (host->dev_index == 1)
		dwmci_writel(host, DWMCI_PWREN, 0);
	else
		dwmci_writel(host, DWMCI_PWREN, 1);
#else
	dwmci_writel(host, DWMCI_PWREN, 1);
#endif

	verid = dwmci_readl(host, DWMCI_VERID) & 0x0000ffff;
	if (verid >= DW_MMC_240A)
		dwmci_writel(host, DWMCI_CARDTHRCTL, DWMCI_CDTHRCTRL_CONFIG);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	use_dma = SDMMC_GET_TRANS_MODE(dwmci_readl(host, DWMCI_HCON));
	if (use_dma == DMA_INTERFACE_IDMA)
		host->fifo_mode = 0;
	else
		host->fifo_mode = 1;

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(DWMCI_MSIZE) |
				RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

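/*
 * Card-detect: when DM_GPIO is available, sample the "cd-gpios" line and
 * return its inverted value (1 = card present for an active-low detect
 * line); without GPIO support -1 is returned.
 */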
static int dwmci_get_cd(struct udevice *dev)
{
	int ret = -1;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc detect;

	ret = gpio_request_by_name(dev, "cd-gpios", 0, &detect, GPIOD_IS_IN);
	if (ret)
		return ret;

	ret = !dm_gpio_get_value(&detect);
	dm_gpio_free(dev, &detect);
#endif
	return ret;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
#ifdef CONFIG_SPL_BLK_READ_PREPARE
	.send_cmd_prepare = dwmci_send_cmd_prepare,
#endif
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.get_cd		= dwmci_get_cd,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif

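/*
 * Fill the generic mmc_config from the dwmci_host: clock limits,
 * supported voltages and host caps derived from the configured bus width,
 * plus the high-speed modes.
 */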
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	switch (host->buswidth) {
	case 8:
		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
		break;
	case 4:
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	case 1:
		cfg->host_caps &= ~MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
		break;
	default:
		printf("Unsupported bus width: %d\n", host->buswidth);
		break;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
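/*
 * Usage sketch for the non-DM path (hypothetical board code; the base
 * address and clock values below are examples only):
 *
 *	static struct dwmci_host host;
 *
 *	host.name = "DWMMC";
 *	host.ioaddr = (void *)0xff0c0000;
 *	host.buswidth = 4;
 *	host.bus_hz = 50000000;
 *	host.dev_index = 0;
 *	add_dwmci(&host, 52000000, 400000);	(max_clk, min_clk)
 */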
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif