// SPDX-License-Identifier: GPL-2.0-only
/*
 * Rockchip Serial Flash Controller Driver
 *
 * Copyright (c) 2017-2021, Rockchip Inc.
 * Author: Shawn Lin <shawn.lin@rock-chips.com>
 *	   Chris Morgan <macromorgan@hotmail.com>
 *	   Jon Lin <Jon.lin@rock-chips.com>
 */

#include <asm/io.h>
#include <bouncebuf.h>
#include <clk.h>
#include <dm.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
#include <spi.h>
#include <spi-mem.h>
#include <asm/gpio.h>

/* System control */
#define SFC_CTRL			0x0
#define SFC_CTRL_PHASE_SEL_NEGETIVE	BIT(1)
#define SFC_CTRL_CMD_BITS_SHIFT		8
#define SFC_CTRL_ADDR_BITS_SHIFT	10
#define SFC_CTRL_DATA_BITS_SHIFT	12

/* Interrupt mask */
#define SFC_IMR				0x4
#define SFC_IMR_RX_FULL			BIT(0)
#define SFC_IMR_RX_UFLOW		BIT(1)
#define SFC_IMR_TX_OFLOW		BIT(2)
#define SFC_IMR_TX_EMPTY		BIT(3)
#define SFC_IMR_TRAN_FINISH		BIT(4)
#define SFC_IMR_BUS_ERR			BIT(5)
#define SFC_IMR_NSPI_ERR		BIT(6)
#define SFC_IMR_DMA			BIT(7)

/* Interrupt clear */
#define SFC_ICLR			0x8
#define SFC_ICLR_RX_FULL		BIT(0)
#define SFC_ICLR_RX_UFLOW		BIT(1)
#define SFC_ICLR_TX_OFLOW		BIT(2)
#define SFC_ICLR_TX_EMPTY		BIT(3)
#define SFC_ICLR_TRAN_FINISH		BIT(4)
#define SFC_ICLR_BUS_ERR		BIT(5)
#define SFC_ICLR_NSPI_ERR		BIT(6)
#define SFC_ICLR_DMA			BIT(7)

/* FIFO threshold level */
#define SFC_FTLR			0xc
#define SFC_FTLR_TX_SHIFT		0
#define SFC_FTLR_TX_MASK		0x1f
#define SFC_FTLR_RX_SHIFT		8
#define SFC_FTLR_RX_MASK		0x1f

/* Reset FSM and FIFO */
#define SFC_RCVR			0x10
#define SFC_RCVR_RESET			BIT(0)

/* Enhanced mode */
#define SFC_AX				0x14

/* Address Bit number */
#define SFC_ABIT			0x18

/* Interrupt status */
#define SFC_ISR				0x1c
#define SFC_ISR_RX_FULL_SHIFT		BIT(0)
#define SFC_ISR_RX_UFLOW_SHIFT		BIT(1)
#define SFC_ISR_TX_OFLOW_SHIFT		BIT(2)
#define SFC_ISR_TX_EMPTY_SHIFT		BIT(3)
#define SFC_ISR_TX_FINISH_SHIFT		BIT(4)
#define SFC_ISR_BUS_ERR_SHIFT		BIT(5)
#define SFC_ISR_NSPI_ERR_SHIFT		BIT(6)
#define SFC_ISR_DMA_SHIFT		BIT(7)

/* FIFO status */
#define SFC_FSR				0x20
#define SFC_FSR_TX_IS_FULL		BIT(0)
#define SFC_FSR_TX_IS_EMPTY		BIT(1)
#define SFC_FSR_RX_IS_EMPTY		BIT(2)
#define SFC_FSR_RX_IS_FULL		BIT(3)
#define SFC_FSR_TXLV_MASK		GENMASK(13, 8)
#define SFC_FSR_TXLV_SHIFT		8
#define SFC_FSR_RXLV_MASK		GENMASK(20, 16)
#define SFC_FSR_RXLV_SHIFT		16

/* FSM status */
#define SFC_SR				0x24
#define SFC_SR_IS_IDLE			0x0
#define SFC_SR_IS_BUSY			0x1

/* Raw interrupt status */
#define SFC_RISR			0x28
#define SFC_RISR_RX_FULL		BIT(0)
#define SFC_RISR_RX_UNDERFLOW		BIT(1)
#define SFC_RISR_TX_OVERFLOW		BIT(2)
#define SFC_RISR_TX_EMPTY		BIT(3)
#define SFC_RISR_TRAN_FINISH		BIT(4)
#define SFC_RISR_BUS_ERR		BIT(5)
#define SFC_RISR_NSPI_ERR		BIT(6)
#define SFC_RISR_DMA			BIT(7)

/* Version */
#define SFC_VER				0x2C
#define SFC_VER_3			0x3
#define SFC_VER_4			0x4
#define SFC_VER_5			0x5
#define SFC_VER_6			0x6
#define SFC_VER_8			0x8
#define SFC_VER_9			0x9

/* Ext ctrl */
#define SFC_EXT_CTRL			0x34
#define SFC_SCLK_X2_BYPASS		BIT(24)

/* Delay line controller register */
#define SFC_DLL_CTRL0			0x3C
#define SFC_DLL_CTRL0_SCLK_SMP_DLL	BIT(15)
#define SFC_DLL_CTRL0_DLL_MAX_VER4	0xFFU
#define SFC_DLL_CTRL0_DLL_MAX_VER5	0x1FFU

/* Master trigger */
#define SFC_DMA_TRIGGER			0x80
#define SFC_DMA_TRIGGER_START		1

/* Src or Dst addr for master */
#define SFC_DMA_ADDR			0x84

/* Length control register extension 32GB */
#define SFC_LEN_CTRL			0x88
#define SFC_LEN_CTRL_TRB_SEL		1
#define SFC_LEN_EXT			0x8C

/* Command */
#define SFC_CMD				0x100
#define SFC_CMD_IDX_SHIFT		0
#define SFC_CMD_DUMMY_SHIFT		8
#define SFC_CMD_DIR_SHIFT		12
#define SFC_CMD_DIR_RD			0
#define SFC_CMD_DIR_WR			1
#define SFC_CMD_ADDR_SHIFT		14
#define SFC_CMD_ADDR_0BITS		0
#define SFC_CMD_ADDR_24BITS		1
#define SFC_CMD_ADDR_32BITS		2
#define SFC_CMD_ADDR_XBITS		3
#define SFC_CMD_TRAN_BYTES_SHIFT	16
#define SFC_CMD_CS_SHIFT		30

/* Address */
#define SFC_ADDR			0x104

/* Data */
#define SFC_DATA			0x108

#define SFC_CS1_REG_OFFSET		0x200

#define SFC_MAX_CHIPSELECT_NUM		2

/*
 * The SFC can transfer a maximum of 16KB - 1 bytes at one time;
 * we set it to 15.5KB (512 * 31 = 15872 bytes) here for alignment.
 */
#define SFC_MAX_IOSIZE_VER3		(512 * 31)

#define SFC_MAX_IOSIZE_VER4		(0xFFFFFFFFU)

/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD		(0x40)

/*
 * Maximum clock values from the datasheet suggest keeping the clock
 * under 150MHz. No minimum or average value is suggested.
 */
#define SFC_MAX_SPEED			(150 * 1000 * 1000)
#define SFC_DLL_THRESHOLD_RATE		(50 * 1000 * 1000)

#define SFC_DLL_TRANING_STEP		10	/* Training step */
#define SFC_DLL_TRANING_VALID_WINDOW	80	/* Training valid DLL window */

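/*
 * Driver private data. A brief overview of the less obvious fields
 * (descriptions inferred from how they are used below):
 *
 * hclk/clk:		AHB bus clock and SFC interface clock
 * cur_speed:		last rate requested through ->set_speed()
 * cur_real_speed:	rate actually reported by the clock framework
 * speed[]:		last rate applied per chip select, used to detect
 *			when delay-line retuning is needed
 * last_async_size:	length of an in-flight async DMA transfer, 0 if none
 * dll_cells[]:		trained delay-line cell count per chip select
 * max_dll_cells:	optional "rockchip,max-dll" override from the DT
 */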
struct rockchip_sfc {
	struct udevice *dev;
	void __iomem *regbase;
	struct clk hclk;
	struct clk clk;
	u32 max_freq;
	u32 cur_speed;
	u32 cur_real_speed;
	u32 speed[SFC_MAX_CHIPSELECT_NUM];
	bool use_dma;
	bool sclk_x2_bypass;
	u32 max_iosize;
	u16 version;

	u32 last_async_size;
	u32 async;
	u32 dll_cells[SFC_MAX_CHIPSELECT_NUM];
	u32 max_dll_cells;

#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct gpio_desc cs_gpios[SFC_MAX_CHIPSELECT_NUM];
#endif
};

static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
{
	int err;
	u32 status;

	writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);

	err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
				 !(status & SFC_RCVR_RESET),
				 1000000);
	if (err)
		dev_err(sfc->dev, "SFC reset never finished\n");

	/* Still need to clear the masked interrupt from RISR */
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);

	dev_dbg(sfc->dev, "reset\n");

	return err;
}

static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
{
	return (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
}

static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
{
	if (sfc->version >= SFC_VER_4)
		return SFC_MAX_IOSIZE_VER4;

	return SFC_MAX_IOSIZE_VER3;
}

static u32 rockchip_sfc_get_max_dll_cells(struct rockchip_sfc *sfc)
{
	if (sfc->max_dll_cells)
		return sfc->max_dll_cells;

	if (sfc->version > SFC_VER_4)
		return SFC_DLL_CTRL0_DLL_MAX_VER5;
	else if (sfc->version == SFC_VER_4)
		return SFC_DLL_CTRL0_DLL_MAX_VER4;
	else
		return 0;
}

static __maybe_unused void rockchip_sfc_set_delay_lines(struct rockchip_sfc *sfc, u16 cells, u8 cs)
{
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u32 val = 0;

	if (cells > cell_max)
		cells = cell_max;

	if (cells)
		val = SFC_DLL_CTRL0_SCLK_SMP_DLL | cells;

	writel(val, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_DLL_CTRL0);
}

#if CONFIG_IS_ENABLED(CLK)
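/*
 * On SFC version 8 and newer the SCLK output is divided by two internally
 * unless the x2 bypass is enabled, so the clock framework is asked for twice
 * the requested rate (and the reported rate halved) to reflect the real bus
 * frequency. This behaviour is inferred from the rate handling below.
 */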
static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long speed)
{
	if (sfc->version < SFC_VER_8 || sfc->sclk_x2_bypass)
		return clk_set_rate(&sfc->clk, speed);
	else
		return clk_set_rate(&sfc->clk, speed * 2);
}

static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
{
	if (sfc->version < SFC_VER_8 || sfc->sclk_x2_bypass)
		return clk_get_rate(&sfc->clk);
	else
		return clk_get_rate(&sfc->clk) / 2;
}
#endif

static int rockchip_sfc_init(struct rockchip_sfc *sfc)
{
	u32 reg;

#if defined(CONFIG_SPL_BUILD)
	printf("sfc cmd=%02xH(6BH-x4)\n", readl(sfc->regbase + SFC_CMD) & 0xFF);
#endif
	writel(0, sfc->regbase + SFC_CTRL);
	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
		writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
	if (rockchip_sfc_get_version(sfc) > SFC_VER_8 && sfc->sclk_x2_bypass) {
		reg = readl(sfc->regbase + SFC_EXT_CTRL);
		reg |= SFC_SCLK_X2_BYPASS;
		writel(reg, sfc->regbase + SFC_EXT_CTRL);
	}

	return 0;
}

static int rockchip_cs_setup(struct udevice *bus)
{
#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct rockchip_sfc *sfc = dev_get_platdata(bus);
	int ret;
	int i;

	ret = gpio_request_list_by_name(bus, "sfc-cs-gpios", sfc->cs_gpios,
					ARRAY_SIZE(sfc->cs_gpios), 0);
	if (ret < 0) {
		pr_err("Can't get %s gpios! Error: %d\n", bus->name, ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(sfc->cs_gpios); i++) {
		if (!dm_gpio_is_valid(&sfc->cs_gpios[i]))
			continue;

		ret = dm_gpio_set_dir_flags(&sfc->cs_gpios[i],
					    GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
		if (ret) {
			dev_err(bus, "Setting cs %d error, ret=%d\n", i, ret);
			return ret;
		}
	}
#endif
	return 0;
}

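/*
 * Optional device tree properties parsed below (an illustrative summary;
 * only the property names and clock order come from this driver):
 *
 *	clocks:			index 0 is the SFC interface clock,
 *				index 1 is the AHB bus clock
 *	sfc-no-dma:		force CPU polling instead of DMA
 *	rockchip,sclk-x2-bypass: bypass the internal SCLK x2 divider
 *	rockchip,max-dll:	cap the number of delay-line cells
 *	sfc-cs-gpios:		optional GPIO chip selects (see rockchip_cs_setup())
 */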
static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	sfc->regbase = dev_read_addr_ptr(bus);
	if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
		sfc->use_dma = false;
	else
		sfc->use_dma = true;
	sfc->sclk_x2_bypass = ofnode_read_bool(dev_ofnode(bus), "rockchip,sclk-x2-bypass");
	sfc->max_dll_cells = dev_read_u32_default(bus, "rockchip,max-dll", 0);
	if (sfc->max_dll_cells > SFC_DLL_CTRL0_DLL_MAX_VER5)
		sfc->max_dll_cells = SFC_DLL_CTRL0_DLL_MAX_VER5;
#if CONFIG_IS_ENABLED(CLK)
	int ret;

	ret = clk_get_by_index(bus, 0, &sfc->clk);
	if (ret < 0) {
		printf("Could not get clock for %s: %d\n", bus->name, ret);
		return ret;
	}

	ret = clk_get_by_index(bus, 1, &sfc->hclk);
	if (ret < 0) {
		printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
		return ret;
	}
#endif

	rockchip_cs_setup(bus);

	return 0;
}

static int rockchip_sfc_probe(struct udevice *bus)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);
	int ret;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&sfc->hclk);
	if (ret)
		dev_dbg(sfc->dev, "sfc Enable ahb clock fail %s: %d\n", bus->name, ret);

	ret = clk_enable(&sfc->clk);
	if (ret)
		dev_dbg(sfc->dev, "sfc Enable clock fail for %s: %d\n", bus->name, ret);
#endif
	/* Read out the controller version first */
	sfc->version = rockchip_sfc_get_version(sfc);
	if (sfc->version == SFC_VER_9)
		sfc->version = SFC_VER_6;

	ret = rockchip_sfc_init(sfc);
	if (ret)
		goto err_init;

	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
	sfc->max_freq = SFC_MAX_SPEED;
	sfc->dev = bus;

	return 0;

err_init:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&sfc->clk);
	clk_disable(&sfc->hclk);
#endif

	return ret;
}

static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
{
	int ret = 0;
	u32 status;

	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
				 status & SFC_FSR_TXLV_MASK,
				 timeout_us);
	if (ret) {
		dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");

		return -ETIMEDOUT;
	}

	return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
}

static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
{
	int ret = 0;
	u32 status;

	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
				 status & SFC_FSR_RXLV_MASK,
				 timeout_us);
	if (ret) {
		dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");

		return -ETIMEDOUT;
	}

	return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
}

static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
{
	if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
		/*
		 * The SFC cannot output DUMMY cycles right after the CMD
		 * cycles, so treat them as ADDR cycles.
		 */
		op->addr.nbytes = op->dummy.nbytes;
		op->addr.buswidth = op->dummy.buswidth;
		op->addr.val = 0xFFFFFFFFF;

		op->dummy.nbytes = 0;
	}
}

static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
{
	unsigned long tbase;

	/* Wait for the DMA interrupt status */
	tbase = get_timer(0);
	while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
		if (get_timer(tbase) > timeout) {
			printf("dma timeout\n");
			rockchip_sfc_reset(sfc);

			return -ETIMEDOUT;
		}

		udelay(1);
	}

	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);

	return 0;
}

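/*
 * Program one spi-mem operation into the controller: SFC_CTRL takes the bus
 * widths and sampling phase, SFC_CMD takes the opcode, dummy cycles, transfer
 * direction, address format, length and chip select, and SFC_ABIT carries the
 * address bit count for non-standard address widths. Each chip select has its
 * own CTRL/ABIT/DLL register bank at an SFC_CS1_REG_OFFSET stride.
 */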
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
				   struct spi_slave *mem,
				   const struct spi_mem_op *op,
				   u32 len)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 ctrl = 0, cmd = 0;

	/* set CMD */
	cmd = op->cmd.opcode;
	ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);

	/* set ADDR */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4) {
			cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
		} else if (op->addr.nbytes == 3) {
			cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
		} else {
			cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
			writel(op->addr.nbytes * 8 - 1, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
		}

		ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
	}

	/* set DUMMY */
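	/*
	 * The DUMMY field is programmed in SCLK cycles: one dummy byte takes
	 * 8 / buswidth cycles on the bus (x4 -> 2, x2 -> 4, x1 -> 8).
	 */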
	if (op->dummy.nbytes) {
		if (op->dummy.buswidth == 4)
			cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
		else if (op->dummy.buswidth == 2)
			cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
		else
			cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
	}

	/* set DATA */
	if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
		writel(len, sfc->regbase + SFC_LEN_EXT);
	else
		cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
	if (len) {
		if (op->data.dir == SPI_MEM_DATA_OUT)
			cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

		ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
	}
	if (!len && op->addr.nbytes)
		cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

	/* set the Controller */
	ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
	cmd |= plat->cs << SFC_CMD_CS_SHIFT;

	dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
		op->addr.nbytes, op->addr.buswidth,
		op->dummy.nbytes, op->dummy.buswidth);
	dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x cs=%x\n",
		ctrl, cmd, op->addr.val, len, plat->cs);

	writel(ctrl, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
	writel(cmd, sfc->regbase + SFC_CMD);
	if (op->addr.nbytes)
		writel(op->addr.val, sfc->regbase + SFC_ADDR);

	return 0;
}

static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
{
	u8 bytes = len & 0x3;
	u32 dwords;
	int tx_level;
	u32 write_words;
	u32 tmp = 0;

	dwords = len >> 2;
	while (dwords) {
		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
		if (tx_level < 0)
			return tx_level;
		write_words = min_t(u32, tx_level, dwords);
		writesl(sfc->regbase + SFC_DATA, buf, write_words);
		buf += write_words << 2;
		dwords -= write_words;
	}

	/* write the rest non word aligned bytes */
	if (bytes) {
		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
		if (tx_level < 0)
			return tx_level;
		memcpy(&tmp, buf, bytes);
		writel(tmp, sfc->regbase + SFC_DATA);
	}

	return len;
}

static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
{
	u8 bytes = len & 0x3;
	u32 dwords;
	u8 read_words;
	int rx_level;
	int tmp;

	/* word aligned access only */
	dwords = len >> 2;
	while (dwords) {
		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
		if (rx_level < 0)
			return rx_level;
		read_words = min_t(u32, rx_level, dwords);
		readsl(sfc->regbase + SFC_DATA, buf, read_words);
		buf += read_words << 2;
		dwords -= read_words;
	}

	/* read the rest non word aligned bytes */
	if (bytes) {
		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
		if (rx_level < 0)
			return rx_level;
		tmp = readl(sfc->regbase + SFC_DATA);
		memcpy(buf, &tmp, bytes);
	}

	return len;
}

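/*
 * Kick off a transfer through the controller's internal DMA master. The
 * transfer itself was already described by rockchip_sfc_xfer_setup();
 * completion is signalled through SFC_RISR_DMA and is waited for in
 * rockchip_sfc_wait_for_dma_finished().
 */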
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}

static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
				       const struct spi_mem_op *op, u32 len)
{
	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);

	if (op->data.dir == SPI_MEM_DATA_OUT)
		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
	else
		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
}

static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
				      const struct spi_mem_op *op, u32 len)
{
	struct bounce_buffer bb;
	unsigned int bb_flags;
	void *dma_buf;
	int ret;

	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		bb_flags = GEN_BB_READ;
	} else {
		dma_buf = (void *)op->data.buf.in;
		bb_flags = GEN_BB_WRITE;
	}

	ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
	if (ret)
		return ret;

	ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
	rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
	bounce_buffer_stop(&bb);

	return ret;
}

static int rockchip_sfc_xfer_data_dma_async(struct rockchip_sfc *sfc,
					    const struct spi_mem_op *op, u32 len)
{
	void *dma_buf;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		flush_dcache_range((unsigned long)dma_buf,
				   (unsigned long)dma_buf + len);
	} else {
		dma_buf = (void *)op->data.buf.in;
	}

	dev_dbg(sfc->dev, "xfer_dma_async len=%x %p\n", len, dma_buf);

	rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)dma_buf, len);
	sfc->last_async_size = len;

	return 0;
}

static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
{
	int ret = 0;
	u32 status;

	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
				 !(status & SFC_SR_IS_BUSY),
				 timeout_us);
	if (ret) {
		dev_err(sfc->dev, "wait sfc idle timeout\n");
		rockchip_sfc_reset(sfc);

		ret = -EIO;
	}

	return ret;
}

static int rockchip_spi_set_cs(struct rockchip_sfc *sfc, struct spi_slave *mem, bool enable)
{
#if defined(CONFIG_DM_GPIO) && (defined(CONFIG_SPL_GPIO_SUPPORT) || !defined(CONFIG_SPL_BUILD))
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 cs = plat->cs;

	if (!dm_gpio_is_valid(&sfc->cs_gpios[cs]))
		return 0;

	debug("%s %d %x\n", __func__, cs, enable);
	dm_gpio_set_value(&sfc->cs_gpios[cs], enable);
#endif
	return 0;
}

#if CONFIG_IS_ENABLED(CLK)
static int rockchip_sfc_exec_op_bypass(struct rockchip_sfc *sfc,
				       struct spi_slave *mem,
				       const struct spi_mem_op *op)
{
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	u32 ret;

	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_spi_set_cs(sfc, mem, true);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
	if (ret != len) {
		dev_err(sfc->dev, "xfer data failed ret %d\n", ret);

		return -EIO;
	}

	ret = rockchip_sfc_xfer_done(sfc, 100000);
	rockchip_spi_set_cs(sfc, mem, false);

	return ret;
}

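/*
 * Delay-line (sample point) tuning. The JEDEC ID is first read at the safe
 * SFC_DLL_THRESHOLD_RATE as a reference, then the target rate is applied and
 * the delay cells are swept in SFC_DLL_TRANING_STEP increments. The window of
 * cell values for which the ID still reads back correctly must span at least
 * SFC_DLL_TRANING_VALID_WINDOW cells; a value inside that window is then
 * programmed. If no valid window is found, the delay lines are disabled and
 * the bus is dropped back to SFC_DLL_THRESHOLD_RATE.
 */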
static void rockchip_sfc_delay_lines_tuning(struct rockchip_sfc *sfc, struct spi_slave *mem)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 id[3], id_temp[3];
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u16 right, left = 0;
	u16 step = SFC_DLL_TRANING_STEP;
	bool dll_valid = false;
	u8 cs = plat->cs;

	rockchip_sfc_clk_set_rate(sfc, SFC_DLL_THRESHOLD_RATE);
	op.data.buf.in = &id;
	rockchip_sfc_exec_op_bypass(sfc, mem, &op);
	if ((0xFF == id[0] && 0xFF == id[1]) ||
	    (0x00 == id[0] && 0x00 == id[1])) {
		dev_dbg(sfc->dev, "no dev, dll bypass\n");
		rockchip_sfc_clk_set_rate(sfc, sfc->speed[cs]);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;

		return;
	}

	rockchip_sfc_clk_set_rate(sfc, sfc->speed[cs]);
	op.data.buf.in = &id_temp;
	for (right = 0; right <= cell_max; right += step) {
		int ret;

		rockchip_sfc_set_delay_lines(sfc, right, cs);
		rockchip_sfc_exec_op_bypass(sfc, mem, &op);
		dev_dbg(sfc->dev, "dll read flash id:%x %x %x\n",
			id_temp[0], id_temp[1], id_temp[2]);

		ret = memcmp(&id, &id_temp, 3);
		if (dll_valid && ret) {
			right -= step;

			break;
		}
		if (!dll_valid && !ret)
			left = right;

		if (!ret)
			dll_valid = true;

		/* Make sure the loop also covers cell_max */
		if (right == cell_max)
			break;
		if (right + step > cell_max)
			right = cell_max - step;
	}

	if (dll_valid && (right - left) >= SFC_DLL_TRANING_VALID_WINDOW) {
		if (left == 0 && right < cell_max)
			sfc->dll_cells[cs] = left + (right - left) * 2 / 5;
		else
			sfc->dll_cells[cs] = left + (right - left) / 2;
	} else {
		sfc->dll_cells[cs] = 0;
	}

	if (sfc->dll_cells[cs]) {
		dev_dbg(sfc->dev, "%d %d %d dll training success in %dMHz max_cells=%u sfc_ver=%d\n",
			left, right, sfc->dll_cells[cs], sfc->speed[cs],
			rockchip_sfc_get_max_dll_cells(sfc), rockchip_sfc_get_version(sfc));
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[cs], cs);
#if defined(CONFIG_SPI_FLASH_AUTO_MERGE)
		sfc->speed[1] = sfc->cur_speed;
		sfc->dll_cells[1] = sfc->dll_cells[0];
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[1], 1);
#endif
	} else {
		dev_err(sfc->dev, "%d %d dll training failed in %dMHz, reduce the speed\n",
			left, right, sfc->speed[cs]);
		rockchip_sfc_set_delay_lines(sfc, 0, cs);
		rockchip_sfc_clk_set_rate(sfc, SFC_DLL_THRESHOLD_RATE);
		sfc->cur_speed = SFC_DLL_THRESHOLD_RATE;
		sfc->cur_real_speed = rockchip_sfc_clk_get_rate(sfc);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;
	}
}

#endif

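/*
 * spi-mem exec_op hook. Any previously started async DMA transfer is waited
 * for first, the delay lines are (re)tuned when the requested rate changed
 * for this chip select, and the data phase is then carried out either by CPU
 * polling of the FIFOs or through the internal DMA master, depending on the
 * transfer length and on the SPI_DMA_PREPARE mode flag.
 */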
static int rockchip_sfc_exec_op(struct spi_slave *mem,
				const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	int ret;

#if defined(CONFIG_SPI_FLASH_AUTO_MERGE)
	plat->cs = mem->auto_merge_cs_cur;
#endif

	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4 &&
	    sfc->cur_speed != sfc->speed[plat->cs]) {
		sfc->speed[plat->cs] = sfc->cur_speed;
#if CONFIG_IS_ENABLED(CLK)
		if (sfc->cur_real_speed > SFC_DLL_THRESHOLD_RATE)
			rockchip_sfc_delay_lines_tuning(sfc, mem);
		else
#endif
			rockchip_sfc_set_delay_lines(sfc, 0, plat->cs);
	}

	/* Wait for the last async transfer to finish */
	if (sfc->last_async_size) {
		rockchip_sfc_wait_for_dma_finished(sfc, sfc->last_async_size);
		sfc->last_async_size = 0;
	}
	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_spi_set_cs(sfc, mem, true);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			if (mem->mode & SPI_DMA_PREPARE)
				return rockchip_sfc_xfer_data_dma_async(sfc, op, len);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			return -EIO;
		}
	}

	ret = rockchip_sfc_xfer_done(sfc, 100000);
	rockchip_spi_set_cs(sfc, mem, false);

	return ret;
}

static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);

	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);

	return 0;
}

static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	if (speed > sfc->max_freq)
		speed = sfc->max_freq;

	if (speed == sfc->cur_speed)
		return 0;

#if CONFIG_IS_ENABLED(CLK)
	int ret = rockchip_sfc_clk_set_rate(sfc, speed);

	if (ret < 0) {
		dev_err(sfc->dev, "set_freq=%dHz fail, check whether the CRU supports this rate\n",
			speed);
		return ret;
	}
	sfc->cur_speed = speed;
	sfc->cur_real_speed = rockchip_sfc_clk_get_rate(sfc);

	dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%dHz\n",
		sfc->cur_speed, sfc->cur_real_speed);
#else
	dev_dbg(sfc->dev, "sfc set_speed skipped, CLK support not enabled\n");
#endif
	return 0;
}

static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}

static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.adjust_op_size = rockchip_sfc_adjust_op_size,
	.exec_op = rockchip_sfc_exec_op,
};

static const struct dm_spi_ops rockchip_sfc_ops = {
	.mem_ops = &rockchip_sfc_mem_ops,
	.set_speed = rockchip_sfc_set_speed,
	.set_mode = rockchip_sfc_set_mode,
};

static const struct udevice_id rockchip_sfc_ids[] = {
	{ .compatible = "rockchip,fspi" },
	{ .compatible = "rockchip,sfc" },
	{},
};

U_BOOT_DRIVER(rockchip_sfc_driver) = {
	.name = "rockchip_sfc",
	.id = UCLASS_SPI,
	.of_match = rockchip_sfc_ids,
	.ops = &rockchip_sfc_ops,
	.ofdata_to_platdata = rockchip_sfc_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct rockchip_sfc),
	.probe = rockchip_sfc_probe,
};