xref: /rk3399_rockchip-uboot/drivers/mmc/dw_mmc.c (revision f9d5dc255d0cd7307a62fd3ec2ccbe621ea531da)
1 /*
2  * (C) Copyright 2012 SAMSUNG Electronics
3  * Jaehoon Chung <jh80.chung@samsung.com>
4  * Rajeshawari Shinde <rajeshwari.s@samsung.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <bouncebuf.h>
10 #include <common.h>
11 #include <errno.h>
12 #include <malloc.h>
13 #include <memalign.h>
14 #include <mmc.h>
15 #include <dwmmc.h>
16 
17 #define PAGE_SIZE 4096
18 
19 static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
20 {
21 	unsigned long timeout = 1000;
22 	u32 ctrl;
23 
24 	dwmci_writel(host, DWMCI_CTRL, value);
25 
26 	while (timeout--) {
27 		ctrl = dwmci_readl(host, DWMCI_CTRL);
28 		if (!(ctrl & DWMCI_RESET_ALL))
29 			return 1;
30 	}
31 	return 0;
32 }
33 
34 static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
35 		u32 desc0, u32 desc1, u32 desc2)
36 {
37 	struct dwmci_idmac *desc = idmac;
38 
39 	desc->flags = desc0;
40 	desc->cnt = desc1;
41 	desc->addr = desc2;
42 	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
43 }
44 
45 static void dwmci_prepare_data(struct dwmci_host *host,
46 			       struct mmc_data *data,
47 			       struct dwmci_idmac *cur_idmac,
48 			       void *bounce_buffer)
49 {
50 	unsigned long ctrl;
51 	unsigned int i = 0, flags, cnt, blk_cnt;
52 	ulong data_start, data_end;
53 
54 
55 	blk_cnt = data->blocks;
56 
57 	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
58 
59 	data_start = (ulong)cur_idmac;
60 	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);
61 
62 	do {
63 		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH ;
64 		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
65 		if (blk_cnt <= 8) {
66 			flags |= DWMCI_IDMAC_LD;
67 			cnt = data->blocksize * blk_cnt;
68 		} else
69 			cnt = data->blocksize * 8;
70 
71 		dwmci_set_idma_desc(cur_idmac, flags, cnt,
72 				    (ulong)bounce_buffer + (i * PAGE_SIZE));
73 
74 		if (blk_cnt <= 8)
75 			break;
76 		blk_cnt -= 8;
77 		cur_idmac++;
78 		i++;
79 	} while(1);
80 
81 	data_end = (ulong)cur_idmac;
82 	flush_dcache_range(data_start, data_end + ARCH_DMA_MINALIGN);
83 
84 	ctrl = dwmci_readl(host, DWMCI_CTRL);
85 	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
86 	dwmci_writel(host, DWMCI_CTRL, ctrl);
87 
88 	ctrl = dwmci_readl(host, DWMCI_BMOD);
89 	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
90 	dwmci_writel(host, DWMCI_BMOD, ctrl);
91 
92 	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
93 	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
94 }
95 
96 static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
97 {
98 	int ret = 0;
99 	u32 timeout = 240000;
100 	u32 status, ctrl, mask, size, i, len = 0;
101 	u32 *buf = NULL;
102 	ulong start = get_timer(0);
103 	u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
104 			    RX_WMARK_SHIFT) + 1) * 2;
105 
106 	size = data->blocksize * data->blocks / 4;
107 	if (data->flags == MMC_DATA_READ)
108 		buf = (unsigned int *)data->dest;
109 	else
110 		buf = (unsigned int *)data->src;
111 
112 	for (;;) {
113 		mask = dwmci_readl(host, DWMCI_RINTSTS);
114 		/* Error during data transfer. */
115 		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
116 			debug("%s: DATA ERROR!\n", __func__);
117 
118 			dwmci_wait_reset(host, DWMCI_RESET_ALL);
119 			dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
120 				     DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
121 
122 			do {
123 				status = dwmci_readl(host, DWMCI_CMD);
124 				if (timeout-- < 0)
125 					ret = -ETIMEDOUT;
126 			} while (status & DWMCI_CMD_START);
127 
128 			if (!host->fifo_mode) {
129 				ctrl = dwmci_readl(host, DWMCI_BMOD);
130 				ctrl |= DWMCI_BMOD_IDMAC_RESET;
131 				dwmci_writel(host, DWMCI_BMOD, ctrl);
132 			}
133 
134 			ret = -EINVAL;
135 			break;
136 		}
137 
138 		if (host->fifo_mode && size) {
139 			len = 0;
140 			if (data->flags == MMC_DATA_READ &&
141 			    (mask & DWMCI_INTMSK_RXDR)) {
142 				while (size) {
143 					len = dwmci_readl(host, DWMCI_STATUS);
144 					len = (len >> DWMCI_FIFO_SHIFT) &
145 						    DWMCI_FIFO_MASK;
146 					len = min(size, len);
147 					for (i = 0; i < len; i++)
148 						*buf++ =
149 						dwmci_readl(host, DWMCI_DATA);
150 					size = size > len ? (size - len) : 0;
151 				}
152 				dwmci_writel(host, DWMCI_RINTSTS,
153 					     DWMCI_INTMSK_RXDR);
154 			} else if (data->flags == MMC_DATA_WRITE &&
155 				   (mask & DWMCI_INTMSK_TXDR)) {
156 				while (size) {
157 					len = dwmci_readl(host, DWMCI_STATUS);
158 					len = fifo_depth - ((len >>
159 						   DWMCI_FIFO_SHIFT) &
160 						   DWMCI_FIFO_MASK);
161 					len = min(size, len);
162 					for (i = 0; i < len; i++)
163 						dwmci_writel(host, DWMCI_DATA,
164 							     *buf++);
165 					size = size > len ? (size - len) : 0;
166 				}
167 				dwmci_writel(host, DWMCI_RINTSTS,
168 					     DWMCI_INTMSK_TXDR);
169 			}
170 		}
171 
172 		/* Data arrived correctly. */
173 		if (mask & DWMCI_INTMSK_DTO) {
174 			ret = 0;
175 			break;
176 		}
177 
178 		/* Check for timeout. */
179 		if (get_timer(start) > timeout) {
180 			debug("%s: Timeout waiting for data!\n",
181 			      __func__);
182 			ret = -ETIMEDOUT;
183 			break;
184 		}
185 	}
186 
187 	dwmci_writel(host, DWMCI_RINTSTS, mask);
188 
189 	return ret;
190 }
191 
192 static int dwmci_set_transfer_mode(struct dwmci_host *host,
193 		struct mmc_data *data)
194 {
195 	unsigned long mode;
196 
197 	mode = DWMCI_CMD_DATA_EXP;
198 	if (data->flags & MMC_DATA_WRITE)
199 		mode |= DWMCI_CMD_RW;
200 
201 	return mode;
202 }
203 
/*
 * Send one MMC command, optionally with a data phase.
 *
 * Waits for the card to go non-busy, programs the transfer (FIFO or
 * DMA via a bounce buffer + IDMAC chain), issues the command, collects
 * the response, and runs the data phase through dwmci_data_transfer().
 *
 * Returns 0 on success, -ETIMEDOUT / -EIO / -1 on failure (a data
 * phase's own return code is passed through).
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		   struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
		struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One IDMAC descriptor per 8 blocks (DMA mode only). */
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	unsigned int timeout = 500;	/* ms to wait for card-busy */
	u32 retry = 100000;		/* polls for command-done */
	u32 mask, ctrl;
	ulong start = get_timer(0);
	struct bounce_buffer bbstate;

	/* Wait for any previous data activity to finish. */
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy\n", __func__);
			return -ETIMEDOUT;
		}
	}

	/* Clear all stale interrupt status before issuing the command. */
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			/* PIO: just program sizes and reset the FIFO. */
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			/* DMA: stage the user buffer in a DMA-safe bounce
			 * buffer and build the descriptor chain. */
			if (data->flags == MMC_DATA_READ) {
				bounce_buffer_start(&bbstate, (void*)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				bounce_buffer_start(&bbstate, (void*)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}
			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	/* A long (136-bit) response combined with busy is not supported.
	 * NOTE(review): bailing out here leaves a started bounce buffer
	 * unstopped if a data phase was prepared above — confirm callers
	 * never request this combination with data. */
	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -1;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	/* Translate the response type into controller flags. */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n",cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	/* Poll for command-done; with a data phase the status is left
	 * set so the data loop can inspect it. */
	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	}


	/* Copy the response; RESP0 holds the least-significant word,
	 * so a 136-bit response is read back in reverse order. */
	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* only dma mode need it */
		if (!host->fifo_mode) {
			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}
340 
341 static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
342 {
343 	u32 div, status;
344 	int timeout = 10000;
345 	unsigned long sclk;
346 
347 	if (freq == 0)
348 		return 0;
349 	/*
350 	 * If host->get_mmc_clk isn't defined,
351 	 * then assume that host->bus_hz is source clock value.
352 	 * host->bus_hz should be set by user.
353 	 */
354 	if (host->get_mmc_clk)
355 		sclk = host->get_mmc_clk(host, freq);
356 	else if (host->bus_hz)
357 		sclk = host->bus_hz;
358 	else {
359 		debug("%s: Didn't get source clock value.\n", __func__);
360 		return -EINVAL;
361 	}
362 
363 	if (sclk == freq)
364 		div = 0;	/* bypass mode */
365 	else
366 		div = DIV_ROUND_UP(sclk, 2 * freq);
367 
368 	dwmci_writel(host, DWMCI_CLKENA, 0);
369 	dwmci_writel(host, DWMCI_CLKSRC, 0);
370 
371 	dwmci_writel(host, DWMCI_CLKDIV, div);
372 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
373 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
374 
375 	do {
376 		status = dwmci_readl(host, DWMCI_CMD);
377 		if (timeout-- < 0) {
378 			debug("%s: Timeout!\n", __func__);
379 			return -ETIMEDOUT;
380 		}
381 	} while (status & DWMCI_CMD_START);
382 
383 	dwmci_writel(host, DWMCI_CLKENA, DWMCI_CLKEN_ENABLE |
384 			DWMCI_CLKEN_LOW_PWR);
385 
386 	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_PRV_DAT_WAIT |
387 			DWMCI_CMD_UPD_CLK | DWMCI_CMD_START);
388 
389 	timeout = 10000;
390 	do {
391 		status = dwmci_readl(host, DWMCI_CMD);
392 		if (timeout-- < 0) {
393 			debug("%s: Timeout!\n", __func__);
394 			return -ETIMEDOUT;
395 		}
396 	} while (status & DWMCI_CMD_START);
397 
398 	host->clock = freq;
399 
400 	return 0;
401 }
402 
403 #ifdef CONFIG_DM_MMC
404 static bool dwmci_card_busy(struct udevice *dev)
405 {
406 	struct mmc *mmc = mmc_get_mmc_dev(dev);
407 #else
408 static bool dwmci_card_busy(struct mmc *mmc)
409 {
410 #endif
411 	u32 status;
412 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
413 
414 	/*
415 	 * Check the busy bit which is low when DAT[3:0]
416 	 * (the data lines) are 0000
417 	 */
418 	status = dwmci_readl(host, DWMCI_STATUS);
419 
420 	return !!(status & DWMCI_BUSY);
421 }
422 
423 #ifdef CONFIG_DM_MMC
424 static int dwmci_execute_tuning(struct udevice *dev, u32 opcode)
425 {
426 	struct mmc *mmc = mmc_get_mmc_dev(dev);
427 #else
428 static int dwmci_execute_tuning(struct mmc *mmc, u32 opcode)
429 {
430 #endif
431 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
432 
433 	if (!host->execute_tuning)
434 		return -EIO;
435 
436 	return host->execute_tuning(host, opcode);
437 }
438 
439 #ifdef CONFIG_DM_MMC
440 static int dwmci_set_ios(struct udevice *dev)
441 {
442 	struct mmc *mmc = mmc_get_mmc_dev(dev);
443 #else
444 static int dwmci_set_ios(struct mmc *mmc)
445 {
446 #endif
447 	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
448 	u32 ctype, regs;
449 
450 	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);
451 
452 	dwmci_setup_bus(host, mmc->clock);
453 	switch (mmc->bus_width) {
454 	case 8:
455 		ctype = DWMCI_CTYPE_8BIT;
456 		break;
457 	case 4:
458 		ctype = DWMCI_CTYPE_4BIT;
459 		break;
460 	default:
461 		ctype = DWMCI_CTYPE_1BIT;
462 		break;
463 	}
464 
465 	dwmci_writel(host, DWMCI_CTYPE, ctype);
466 
467 	regs = dwmci_readl(host, DWMCI_UHS_REG);
468 	if (mmc_card_ddr(mmc))
469 		regs |= DWMCI_DDR_MODE;
470 	else
471 		regs &= ~DWMCI_DDR_MODE;
472 
473 	dwmci_writel(host, DWMCI_UHS_REG, regs);
474 
475 	if (host->clksel)
476 		host->clksel(host);
477 
478 	return 0;
479 }
480 
/*
 * One-time controller initialization: power, full reset, enumeration
 * clock, interrupt masking, timeouts, and FIFO thresholds.
 *
 * Returns 0 on success, -EIO if the controller fails to reset.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	/* Board-specific setup hook runs first, if provided. */
	if (host->board_init)
		host->board_init(host);

	/* Power on the card. */
	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear all stale status, then mask every interrupt (driver polls). */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum response/data timeout values. */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	/* IDMAC interrupts off; software-reset the DMA engine. */
	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);

	/*
	 * Derive FIFO thresholds if the platform didn't set them.
	 * NOTE(review): this assumes the power-on RX_WMARK field encodes
	 * FIFO depth - 1 — confirm against the controller datasheet.
	 */
	if (!host->fifoth_val) {
		uint32_t fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				TX_WMARK(fifo_size / 2);
	}
	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);

	/* Start with the card clock gated off. */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
521 
#ifdef CONFIG_DM_MMC
/* Driver-model probe: initialize the controller behind this udevice. */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model MMC operations (init runs via dwmci_probe instead). */
const struct dm_mmc_ops dm_dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.execute_tuning	= dwmci_execute_tuning,
};

#else
/* Legacy (non-driver-model) MMC operations. */
static const struct mmc_ops dwmci_ops = {
	.card_busy	= dwmci_card_busy,
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
	.execute_tuning	= dwmci_execute_tuning,
};
#endif
546 
547 void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
548 		u32 max_clk, u32 min_clk)
549 {
550 	cfg->name = host->name;
551 #ifndef CONFIG_DM_MMC
552 	cfg->ops = &dwmci_ops;
553 #endif
554 	cfg->f_min = min_clk;
555 	cfg->f_max = max_clk;
556 
557 	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
558 
559 	cfg->host_caps = host->caps;
560 
561 	if (host->buswidth == 8) {
562 		cfg->host_caps |= MMC_MODE_8BIT | MMC_MODE_4BIT;
563 	} else {
564 		cfg->host_caps |= MMC_MODE_4BIT;
565 		cfg->host_caps &= ~MMC_MODE_8BIT;
566 	}
567 	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
568 
569 	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
570 }
571 
572 #ifdef CONFIG_BLK
573 int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
574 {
575 	return mmc_bind(dev, mmc, cfg);
576 }
577 #else
578 int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
579 {
580 	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);
581 
582 	host->mmc = mmc_create(&host->cfg, host);
583 	if (host->mmc == NULL)
584 		return -1;
585 
586 	return 0;
587 }
588 #endif
589