/*
 * Copyright (c) 2013 Google, Inc
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <clk.h>
#include <dm.h>
#include <dt-structs.h>
#include <dwmmc.h>
#include <errno.h>
#include <mapmem.h>
#include <pwrseq.h>
#include <syscon.h>
#include <asm/gpio.h>
#include <asm/arch/clock.h>
#include <asm/arch/periph.h>
#include <linux/err.h>

DECLARE_GLOBAL_DATA_PTR;

#define USRID_INTER_PHASE		0x20230001
#define SDMMC_TIMING_CON0		0x130
#define SDMMC_TIMING_CON1		0x134
#define ROCKCHIP_MMC_DELAY_SEL		BIT(10)
#define ROCKCHIP_MMC_DEGREE_MASK	0x3
#define ROCKCHIP_MMC_DELAYNUM_OFFSET	2
#define ROCKCHIP_MMC_DELAYNUM_MASK	(0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
#define PSECS_PER_SEC			1000000000000LL
#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC	60
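/*
 * Rockchip-style "hiword mask" register write: the value goes in the low
 * halfword and the same bits set in the high halfword act as a write
 * enable, so untouched fields keep their current value.
 */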
#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

struct rockchip_mmc_plat {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3288_dw_mshc dtplat;
#endif
	struct mmc_config cfg;
	struct mmc mmc;
};

struct rockchip_dwmmc_priv {
	struct clk clk;
	struct clk sample_clk;
	struct dwmci_host host;
	int fifo_depth;
	bool fifo_mode;
	int usrid;
	u32 minmax[2];
};

#ifdef CONFIG_USING_KERNEL_DTB
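/*
 * Called when DM devices are re-initialised after the switch to the kernel's
 * device tree (CONFIG_USING_KERNEL_DTB): re-acquire the ciu clock handle for
 * the "dwmmc" nodes, presumably so later clk_set_rate() calls keep working.
 */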
int board_mmc_dm_reinit(struct udevice *dev)
{
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);

	if (!priv)
		return 0;

	if (!memcmp(dev->name, "dwmmc", strlen("dwmmc")))
		return clk_get_by_index(dev, 0, &priv->clk);
	else
		return 0;
}
#endif

#ifdef CONFIG_SPL_BUILD
__weak void mmc_gpio_init_direct(void) {}
#endif

static uint rockchip_dwmmc_get_mmc_clk(struct dwmci_host *host, uint freq)
{
	struct udevice *dev = host->priv;
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	int ret;

	/*
	 * In DDR52 8-bit mode (only eMMC uses an 8-bit bus) the controller
	 * divider must be set to 1, so request twice the target frequency
	 * from the clock framework.
	 */
	if (mmc_card_ddr52(host->mmc) && host->mmc->bus_width == 8)
		freq *= 2;

	ret = clk_set_rate(&priv->clk, freq);
	if (ret < 0) {
		debug("%s: err=%d\n", __func__, ret);
		return 0;
	}

	return freq;
}

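/*
 * Illustrative device-tree fragment (hypothetical node name and values)
 * covering the properties parsed below and in probe():
 *
 *	sdmmc: dwmmc@ff0c0000 {
 *		compatible = "rockchip,rk3288-dw-mshc";
 *		reg = <0xff0c0000 0x4000>;
 *		bus-width = <4>;
 *		fifo-depth = <0x100>;
 *		max-frequency = <150000000>;
 *		default-sample-phase = <90>;
 *		mmc-hs200-1_8v;
 *	};
 *
 * eMMC nodes additionally carry "non-removable" (and optionally
 * "mmc-pwrseq"); clock index 0 is used for the card clock rate and a
 * clock named "ciu-sample", if present, for HS200 sample-phase tuning.
 */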
static int rockchip_dwmmc_ofdata_to_platdata(struct udevice *dev)
{
#if !CONFIG_IS_ENABLED(OF_PLATDATA)
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	struct dwmci_host *host = &priv->host;

	host->name = dev->name;
	host->ioaddr = dev_read_addr_ptr(dev);
	host->buswidth = dev_read_u32_default(dev, "bus-width", 4);
	host->get_mmc_clk = rockchip_dwmmc_get_mmc_clk;
	host->priv = dev;

	/* Use 'non-removable' to tell eMMC (index 0) apart from SD card (index 1) */
	if (dev_read_bool(dev, "non-removable"))
		host->dev_index = 0;
	else
		host->dev_index = 1;

	priv->fifo_depth = dev_read_u32_default(dev, "fifo-depth", 0);

	if (priv->fifo_depth < 0)
		return -EINVAL;
	priv->fifo_mode = dev_read_bool(dev, "fifo-mode");

	/*
	 * 'clock-freq-min-max' is deprecated
	 * (see https://github.com/torvalds/linux/commit/b023030f10573de738bbe8df63d43acab64c9f7b)
	 */
	if (dev_read_u32_array(dev, "clock-freq-min-max", priv->minmax, 2)) {
		int val = dev_read_u32_default(dev, "max-frequency", -EINVAL);

		if (val < 0)
			return val;

		priv->minmax[0] = 400000;  /* 400 kHz */
		priv->minmax[1] = val;
	} else {
		debug("%s: 'clock-freq-min-max' property is deprecated.\n",
		      __func__);
	}
#endif
	return 0;
}

#ifndef CONFIG_MMC_SIMPLE
#define NUM_PHASES	32
#define TUNING_ITERATION_TO_PHASE(i, num_phases) \
	(DIV_ROUND_UP((i) * 360, num_phases))
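/* e.g. with 32 phases, iteration 8 maps to 90 degrees and 24 to 270 degrees */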

/*
 * Each fine delay is between 44ps-77ps. Assume each fine delay is 60ps to
 * simplify calculations. So 45degs could be anywhere between 33deg and 57.8deg.
 */
static int rockchip_mmc_get_phase(struct dwmci_host *host, bool sample)
{
	struct udevice *dev = host->priv;
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	unsigned long rate = clk_get_rate(&priv->clk) / 2;
	u32 raw_value;
	u16 degrees;
	u32 delay_num = 0;

	/* Constant signal, no measurable phase shift */
	if (!rate)
		return 0;

	if (sample)
		raw_value = dwmci_readl(host, SDMMC_TIMING_CON1) >> 1;
	else
		raw_value = dwmci_readl(host, SDMMC_TIMING_CON0) >> 1;

	degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
	if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
		/* degrees/delaynum * 1000000 */
		unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
				       36 * (rate / 10000);
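		/*
		 * Worked example: with rate = 50 MHz (period 20000 ps), one
		 * 60 ps element is 60 / 20000 * 360 ~= 1.08 degrees, and
		 * factor = 6 * 36 * 5000 = 1080000 here.
		 */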
		delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
		delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
		degrees += DIV_ROUND_CLOSEST(delay_num * factor, 1000000);
	}

	return degrees % 360;
}

static int rockchip_mmc_set_phase(struct dwmci_host *host, bool sample,
				  int degrees)
{
	struct udevice *dev = host->priv;
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	unsigned long rate = clk_get_rate(&priv->clk) / 2;
	u8 nineties, remainder;
	u8 delay_num;
	u32 raw_value;
	u32 delay;

	/*
	 * The calculation below is based on the output clock from the MMC
	 * host to the card and assumes that the phase clock inherits its
	 * rate from its parent, namely the host's output clock provider.
	 * Things go wrong if
	 * (1) the clock is an orphan, or
	 * (2) it is assigned to the wrong parent.
	 *
	 * This check helps debug case (1), which seems to be the most
	 * common problem and is what makes unstable mmc tuning results so
	 * hard to debug.
	 */
	if (!rate) {
		printf("%s: invalid clk rate\n", __func__);
		return -EINVAL;
	}

	nineties = degrees / 90;
	remainder = (degrees % 90);

	/*
	 * Due to the inexact nature of the "fine" delay, we might
	 * actually go non-monotonic. We don't go _too_ non-monotonic
	 * though, so we should be OK. Here are options of how we may
	 * work:
	 *
	 * Ideally we end up with:
	 * 1.0, 2.0, ..., 69.0, 70.0, ..., 89.0, 90.0
	 *
	 * On one extreme (if delay is actually 44ps):
	 * .73, 1.5, ..., 50.6, 51.3, ..., 65.3, 90.0
	 * The other (if delay is actually 77ps):
	 * 1.3, 2.6, ..., 88.6, 89.8, ..., 114.0, 90
	 *
	 * It's possible we might make a delay that is up to 25
	 * degrees off from what we think we're making. That's OK
	 * though because we should be REALLY far from any bad range.
	 */

	/*
	 * Convert to delay; do a little extra work to make sure we
	 * don't overflow 32-bit / 64-bit numbers.
	 */
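	/*
	 * Worked example (arithmetic only): with rate = 50 MHz and a
	 * remainder of 45 degrees, delay = 450000000 / (50000 * 36 * 6)
	 * ~= 42 elements; 42 * 60 ps = 2520 ps, roughly 45 degrees of a
	 * 20 ns period.
	 */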
	delay = 10000000; /* PSECS_PER_SEC / 10000 / 10 */
	delay *= remainder;
	delay = DIV_ROUND_CLOSEST(delay,
				  (rate / 1000) * 36 *
				  (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10));

	delay_num = (u8)min_t(u32, delay, 255);

	raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
	raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
	raw_value |= nineties;

	if (sample)
		dwmci_writel(host, SDMMC_TIMING_CON1,
			     HIWORD_UPDATE(raw_value, 0x07ff, 1));
	else
		dwmci_writel(host, SDMMC_TIMING_CON0,
			     HIWORD_UPDATE(raw_value, 0x07ff, 1));

	debug("set %s_phase(%d) delay_nums=%u actual_degrees=%d\n",
	      sample ? "sample" : "drv", degrees, delay_num,
	      rockchip_mmc_get_phase(host, sample));

	return 0;
}

static int rockchip_dwmmc_execute_tuning(struct dwmci_host *host, u32 opcode)
{
	struct mmc *mmc = host->mmc;
	struct udevice *dev = host->priv;
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	int ret = 0;
	int i, num_phases = NUM_PHASES;
	bool v, prev_v = false, first_v;
	struct range_t {
		short start;
		short end; /* inclusive */
	};
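	/*
	 * With alternating good/bad phases there can be at most
	 * NUM_PHASES / 2 disjoint good ranges; the extra entry is spare.
	 */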
	struct range_t ranges[NUM_PHASES / 2 + 1];
	unsigned int range_count = 0;
	int longest_range_len = -1;
	int longest_range = -1;
	int middle_phase, real_middle_phase;
	ulong ts;

	if (!priv->sample_clk.dev && priv->usrid != USRID_INTER_PHASE)
		return -EIO;

	ts = get_timer(0);

	/* Try each phase and extract good ranges */
	for (i = 0; i < num_phases; ) {
		/* Cannot guarantee any phases larger than 270 would work well */
		if (TUNING_ITERATION_TO_PHASE(i, num_phases) > 270)
			break;

		if (priv->usrid == USRID_INTER_PHASE)
			rockchip_mmc_set_phase(host, true,
					       TUNING_ITERATION_TO_PHASE(i, num_phases));
		else
			clk_set_phase(&priv->sample_clk,
				      TUNING_ITERATION_TO_PHASE(i, num_phases));

		v = !mmc_send_tuning(mmc, opcode);
		debug("Tuning phase %d, v = %x\n",
		      TUNING_ITERATION_TO_PHASE(i, num_phases), v);
		if (i == 0)
			first_v = v;

		if ((!prev_v) && v) {
			range_count++;
			ranges[range_count - 1].start = i;
		}

		if (v)
			ranges[range_count - 1].end = i;
		i++;
		prev_v = v;
	}

	if (range_count == 0) {
		dev_warn(host->dev, "All phases bad!");
		return -EIO;
	}

	/* wrap around case, merge the end points */
	if ((range_count > 1) && first_v && v) {
		ranges[0].start = ranges[range_count - 1].start;
		range_count--;
	}

	/* Find the longest range */
	for (i = 0; i < range_count; i++) {
		int len = (ranges[i].end - ranges[i].start + 1);

		if (len < 0)
			len += num_phases;

		if (longest_range_len < len) {
			longest_range_len = len;
			longest_range = i;
		}

		debug("Good phase range %d-%d (%d len)\n",
		      TUNING_ITERATION_TO_PHASE(ranges[i].start, num_phases),
		      TUNING_ITERATION_TO_PHASE(ranges[i].end, num_phases),
		      len);
	}

	printf("Best phase range %d-%d (%d len)\n",
	       TUNING_ITERATION_TO_PHASE(ranges[longest_range].start, num_phases),
	       TUNING_ITERATION_TO_PHASE(ranges[longest_range].end, num_phases),
	       longest_range_len);

	middle_phase = ranges[longest_range].start + longest_range_len / 2;
	middle_phase %= num_phases;
	real_middle_phase = TUNING_ITERATION_TO_PHASE(middle_phase, num_phases);

	/*
	 * Although phases above 270 are cut out above, the wrap-around
	 * handling can still merge ranges that straddle 270 in some corner
	 * cases, so the middle phase must be adjusted to avoid landing
	 * between 270 and 360. By calculation this only happens when the
	 * bad phases lie between 90 and 180, so anything outside that band
	 * is fine to choose: pick 270 in those cases. If the bad phases
	 * extend past 180, the middle of the wrapped range comes out above
	 * 315, so pick 0 (i.e. 360) instead.
	 */
	if (real_middle_phase > 270) {
		if (real_middle_phase < 315)
			real_middle_phase = 270;
		else
			real_middle_phase = 0;
	}

	printf("Successfully tuned phase to %d, used %ldms\n",
	       real_middle_phase, get_timer(0) - ts);

	if (priv->usrid == USRID_INTER_PHASE)
		rockchip_mmc_set_phase(host, true, real_middle_phase);
	else
		clk_set_phase(&priv->sample_clk, real_middle_phase);

	return ret;
}
#else
static int rockchip_dwmmc_execute_tuning(struct dwmci_host *host, u32 opcode)
{
	return 0;
}

static int rockchip_mmc_set_phase(struct dwmci_host *host, bool sample,
				  int degrees)
{
	return 0;
}
#endif

static int rockchip_dwmmc_probe(struct udevice *dev)
{
	struct rockchip_mmc_plat *plat = dev_get_platdata(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	struct rockchip_dwmmc_priv *priv = dev_get_priv(dev);
	struct dwmci_host *host = &priv->host;
	struct udevice *pwr_dev __maybe_unused;
	int ret;

#ifdef CONFIG_SPL_BUILD
	mmc_gpio_init_direct();
#endif
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_rockchip_rk3288_dw_mshc *dtplat = &plat->dtplat;

	host->name = dev->name;
	host->ioaddr = map_sysmem(dtplat->reg[0], dtplat->reg[1]);
	host->buswidth = dtplat->bus_width;
	host->get_mmc_clk = rockchip_dwmmc_get_mmc_clk;
	host->execute_tuning = rockchip_dwmmc_execute_tuning;
	host->priv = dev;
	host->dev_index = 0;
	priv->fifo_depth = dtplat->fifo_depth;
	priv->fifo_mode = 0;
	priv->minmax[0] = 400000;  /* 400 kHz */
	priv->minmax[1] = dtplat->max_frequency;

	ret = clk_get_by_index_platdata(dev, 0, dtplat->clocks, &priv->clk);
	if (ret < 0)
		return ret;
#else
	ret = clk_get_by_index(dev, 0, &priv->clk);
	if (ret < 0)
		return ret;

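	/*
	 * Controllers whose user ID reads back as USRID_INTER_PHASE tune
	 * via the internal SDMMC_TIMING_CON registers; everything else
	 * needs the "ciu-sample" clock to adjust the sample phase.
	 */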
	priv->usrid = dwmci_readl(host, DWMCI_USRID);
	if (priv->usrid == USRID_INTER_PHASE)
		goto internal_phase;

	ret = clk_get_by_name(dev, "ciu-sample", &priv->sample_clk);
	if (ret < 0)
		debug("MMC: sample clock not found, HS200 not supported!\n");

internal_phase:
	host->execute_tuning = rockchip_dwmmc_execute_tuning;
#endif
	host->fifoth_val = MSIZE(DWMCI_MSIZE) |
			   RX_WMARK(priv->fifo_depth / 2 - 1) |
			   TX_WMARK(priv->fifo_depth / 2);

	host->fifo_mode = priv->fifo_mode;

#ifdef CONFIG_ROCKCHIP_RK3128
	host->stride_pio = true;
#else
	host->stride_pio = false;
#endif

#ifdef CONFIG_PWRSEQ
	/* Enable power if needed */
	ret = uclass_get_device_by_phandle(UCLASS_PWRSEQ, dev, "mmc-pwrseq",
					   &pwr_dev);
	if (!ret) {
		ret = pwrseq_set_power(pwr_dev, true);
		if (ret)
			return ret;
	}
#endif

	dwmci_setup_cfg(&plat->cfg, host, priv->minmax[1], priv->minmax[0]);
	if (dev_read_bool(dev, "mmc-hs200-1_8v"))
		plat->cfg.host_caps |= MMC_MODE_HS200;
	plat->mmc.default_phase =
		dev_read_u32_default(dev, "default-sample-phase", 0);

	/* Set the default sample phase for initialization */
	if (ret >= 0) {
		if (priv->usrid == USRID_INTER_PHASE)
			ret = rockchip_mmc_set_phase(host, true,
						     plat->mmc.default_phase);
		else if (priv->sample_clk.dev)
			ret = clk_set_phase(&priv->sample_clk,
					    plat->mmc.default_phase);
		if (ret < 0)
			debug("MMC: cannot set default phase!\n");
	}

	plat->mmc.init_retry = 0;
	host->mmc = &plat->mmc;
	host->mmc->priv = &priv->host;
	host->mmc->dev = dev;
	upriv->mmc = host->mmc;

	return dwmci_probe(dev);
}

static int rockchip_dwmmc_bind(struct udevice *dev)
{
	struct rockchip_mmc_plat *plat = dev_get_platdata(dev);

	return dwmci_bind(dev, &plat->mmc, &plat->cfg);
}

static const struct udevice_id rockchip_dwmmc_ids[] = {
	{ .compatible = "rockchip,rk3288-dw-mshc" },
	{ .compatible = "rockchip,rk2928-dw-mshc" },
	{ }
};

U_BOOT_DRIVER(rockchip_dwmmc_drv) = {
	.name		= "rockchip_rk3288_dw_mshc",
	.id		= UCLASS_MMC,
	.of_match	= rockchip_dwmmc_ids,
	.ofdata_to_platdata = rockchip_dwmmc_ofdata_to_platdata,
	.ops		= &dm_dwmci_ops,
	.bind		= rockchip_dwmmc_bind,
	.probe		= rockchip_dwmmc_probe,
	.priv_auto_alloc_size = sizeof(struct rockchip_dwmmc_priv),
	.platdata_auto_alloc_size = sizeof(struct rockchip_mmc_plat),
};

#ifdef CONFIG_PWRSEQ
static int rockchip_dwmmc_pwrseq_set_power(struct udevice *dev, bool enable)
{
	struct gpio_desc reset;
	int ret;

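	/*
	 * Pulse the eMMC reset line: assert it briefly, release it, then
	 * give the card 200 us before it is used again.
	 */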
	ret = gpio_request_by_name(dev, "reset-gpios", 0, &reset,
				   GPIOD_IS_OUT);
	if (ret)
		return ret;

	dm_gpio_set_value(&reset, 1);
	udelay(1);
	dm_gpio_set_value(&reset, 0);
	udelay(200);

	return 0;
}

static const struct pwrseq_ops rockchip_dwmmc_pwrseq_ops = {
	.set_power	= rockchip_dwmmc_pwrseq_set_power,
};

static const struct udevice_id rockchip_dwmmc_pwrseq_ids[] = {
	{ .compatible = "mmc-pwrseq-emmc" },
	{ }
};

U_BOOT_DRIVER(rockchip_dwmmc_pwrseq_drv) = {
	.name		= "mmc_pwrseq_emmc",
	.id		= UCLASS_PWRSEQ,
	.of_match	= rockchip_dwmmc_pwrseq_ids,
	.ops		= &rockchip_dwmmc_pwrseq_ops,
};
#endif