xref: /rk3399_rockchip-uboot/drivers/spi/rockchip_sfc.c (revision e63a27f7a96beae2cdcc4ca813c8b95c07c7d2e6)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Rockchip Serial Flash Controller Driver
4  *
5  * Copyright (c) 2017-2021, Rockchip Inc.
6  * Author: Shawn Lin <shawn.lin@rock-chips.com>
7  *	   Chris Morgan <macromorgan@hotmail.com>
8  *	   Jon Lin <Jon.lin@rock-chips.com>
9  */
10 
11 #include <asm/io.h>
12 #include <bouncebuf.h>
13 #include <clk.h>
14 #include <dm.h>
15 #include <linux/bitops.h>
16 #include <linux/delay.h>
17 #include <linux/iopoll.h>
18 #include <spi.h>
19 #include <spi-mem.h>
20 #include <asm/gpio.h>
21 
22 /* System control */
23 #define SFC_CTRL			0x0
24 #define  SFC_CTRL_PHASE_SEL_NEGETIVE	BIT(1)
25 #define  SFC_CTRL_CMD_BITS_SHIFT	8
26 #define  SFC_CTRL_ADDR_BITS_SHIFT	10
27 #define  SFC_CTRL_DATA_BITS_SHIFT	12
28 
29 /* Interrupt mask */
30 #define SFC_IMR				0x4
31 #define  SFC_IMR_RX_FULL		BIT(0)
32 #define  SFC_IMR_RX_UFLOW		BIT(1)
33 #define  SFC_IMR_TX_OFLOW		BIT(2)
34 #define  SFC_IMR_TX_EMPTY		BIT(3)
35 #define  SFC_IMR_TRAN_FINISH		BIT(4)
36 #define  SFC_IMR_BUS_ERR		BIT(5)
37 #define  SFC_IMR_NSPI_ERR		BIT(6)
38 #define  SFC_IMR_DMA			BIT(7)
39 
40 /* Interrupt clear */
41 #define SFC_ICLR			0x8
42 #define  SFC_ICLR_RX_FULL		BIT(0)
43 #define  SFC_ICLR_RX_UFLOW		BIT(1)
44 #define  SFC_ICLR_TX_OFLOW		BIT(2)
45 #define  SFC_ICLR_TX_EMPTY		BIT(3)
46 #define  SFC_ICLR_TRAN_FINISH		BIT(4)
47 #define  SFC_ICLR_BUS_ERR		BIT(5)
48 #define  SFC_ICLR_NSPI_ERR		BIT(6)
49 #define  SFC_ICLR_DMA			BIT(7)
50 
51 /* FIFO threshold level */
52 #define SFC_FTLR			0xc
53 #define  SFC_FTLR_TX_SHIFT		0
54 #define  SFC_FTLR_TX_MASK		0x1f
55 #define  SFC_FTLR_RX_SHIFT		8
56 #define  SFC_FTLR_RX_MASK		0x1f
57 
58 /* Reset FSM and FIFO */
59 #define SFC_RCVR			0x10
60 #define  SFC_RCVR_RESET			BIT(0)
61 
62 /* Enhanced mode */
63 #define SFC_AX				0x14
64 
65 /* Address Bit number */
66 #define SFC_ABIT			0x18
67 
68 /* Interrupt status */
69 #define SFC_ISR				0x1c
70 #define  SFC_ISR_RX_FULL_SHIFT		BIT(0)
71 #define  SFC_ISR_RX_UFLOW_SHIFT		BIT(1)
72 #define  SFC_ISR_TX_OFLOW_SHIFT		BIT(2)
73 #define  SFC_ISR_TX_EMPTY_SHIFT		BIT(3)
74 #define  SFC_ISR_TX_FINISH_SHIFT	BIT(4)
75 #define  SFC_ISR_BUS_ERR_SHIFT		BIT(5)
76 #define  SFC_ISR_NSPI_ERR_SHIFT		BIT(6)
77 #define  SFC_ISR_DMA_SHIFT		BIT(7)
78 
79 /* FIFO status */
80 #define SFC_FSR				0x20
81 #define  SFC_FSR_TX_IS_FULL		BIT(0)
82 #define  SFC_FSR_TX_IS_EMPTY		BIT(1)
83 #define  SFC_FSR_RX_IS_EMPTY		BIT(2)
84 #define  SFC_FSR_RX_IS_FULL		BIT(3)
85 #define  SFC_FSR_TXLV_MASK		GENMASK(13, 8)
86 #define  SFC_FSR_TXLV_SHIFT		8
87 #define  SFC_FSR_RXLV_MASK		GENMASK(20, 16)
88 #define  SFC_FSR_RXLV_SHIFT		16
89 
90 /* FSM status */
91 #define SFC_SR				0x24
92 #define  SFC_SR_IS_IDLE			0x0
93 #define  SFC_SR_IS_BUSY			0x1
94 
95 /* Raw interrupt status */
96 #define SFC_RISR			0x28
97 #define  SFC_RISR_RX_FULL		BIT(0)
98 #define  SFC_RISR_RX_UNDERFLOW		BIT(1)
99 #define  SFC_RISR_TX_OVERFLOW		BIT(2)
100 #define  SFC_RISR_TX_EMPTY		BIT(3)
101 #define  SFC_RISR_TRAN_FINISH		BIT(4)
102 #define  SFC_RISR_BUS_ERR		BIT(5)
103 #define  SFC_RISR_NSPI_ERR		BIT(6)
104 #define  SFC_RISR_DMA			BIT(7)
105 
106 /* Version */
107 #define SFC_VER				0x2C
108 #define  SFC_VER_3			0x3
109 #define  SFC_VER_4			0x4
110 #define  SFC_VER_5			0x5
111 #define  SFC_VER_6			0x6
112 #define  SFC_VER_8			0x8
113 
114 /* Delay line controller register */
115 #define SFC_DLL_CTRL0			0x3C
116 #define SFC_DLL_CTRL0_SCLK_SMP_DLL	BIT(15)
117 #define SFC_DLL_CTRL0_DLL_MAX_VER4	0xFFU
118 #define SFC_DLL_CTRL0_DLL_MAX_VER5	0x1FFU
119 
120 /* Master trigger */
121 #define SFC_DMA_TRIGGER			0x80
122 #define SFC_DMA_TRIGGER_START		1
123 
124 /* Src or Dst addr for master */
125 #define SFC_DMA_ADDR			0x84
126 
127 /* Length control register extension 32GB */
128 #define SFC_LEN_CTRL			0x88
129 #define SFC_LEN_CTRL_TRB_SEL		1
130 #define SFC_LEN_EXT			0x8C
131 
132 /* Command */
133 #define SFC_CMD				0x100
134 #define  SFC_CMD_IDX_SHIFT		0
135 #define  SFC_CMD_DUMMY_SHIFT		8
136 #define  SFC_CMD_DIR_SHIFT		12
137 #define  SFC_CMD_DIR_RD			0
138 #define  SFC_CMD_DIR_WR			1
139 #define  SFC_CMD_ADDR_SHIFT		14
140 #define  SFC_CMD_ADDR_0BITS		0
141 #define  SFC_CMD_ADDR_24BITS		1
142 #define  SFC_CMD_ADDR_32BITS		2
143 #define  SFC_CMD_ADDR_XBITS		3
144 #define  SFC_CMD_TRAN_BYTES_SHIFT	16
145 #define  SFC_CMD_CS_SHIFT		30
146 
147 /* Address */
148 #define SFC_ADDR			0x104
149 
150 /* Data */
151 #define SFC_DATA			0x108
152 
153 #define SFC_CS1_REG_OFFSET		0x200
154 
155 #define SFC_MAX_CHIPSELECT_NUM		2
156 
157 /* The SFC can transfer max 16KB - 1 at one time
158  * we set it to 15.5KB here for alignment.
159  */
160 #define SFC_MAX_IOSIZE_VER3		(512 * 31)
161 
162 #define SFC_MAX_IOSIZE_VER4		(0xFFFFFFFFU)
163 
164 /* DMA is only enabled for large data transmission */
165 #define SFC_DMA_TRANS_THRETHOLD		(0x40)
166 
167 /* Maximum clock values from datasheet suggest keeping clock value under
168  * 150MHz. No minimum or average value is suggested.
169  */
170 #define SFC_MAX_SPEED		(150 * 1000 * 1000)
171 #define SFC_DLL_THRESHOLD_RATE	(50 * 1000 * 1000)
172 
173 #define SFC_DLL_TRANING_STEP		10		/* Training step */
174 #define SFC_DLL_TRANING_VALID_WINDOW	80		/* Training valid DLL window */
175 
/* Per-controller state, stored as the bus udevice's platdata */
struct rockchip_sfc {
	struct udevice *dev;		/* controller udevice, used for logging */
	void __iomem *regbase;		/* mapped register base */
	struct clk hclk;		/* AHB (bus interface) clock */
	struct clk clk;			/* SFC interface clock */
	u32 max_freq;			/* upper clamp applied in set_speed() */
	u32 cur_speed;			/* last requested SCLK rate (Hz) */
	u32 cur_real_speed;		/* rate actually achieved by the clock tree */
	u32 speed[SFC_MAX_CHIPSELECT_NUM];	/* per-CS rate used for DLL tuning (Hz) */
	bool use_dma;			/* false when DT sets "sfc-no-dma" */
	u32 max_iosize;			/* max bytes per transfer (version dependent) */
	u16 version;			/* hardware version read from SFC_VER */

	u32 last_async_size;		/* bytes of in-flight async DMA, 0 if none */
	u32 async;			/* NOTE(review): never written in this file -- confirm use */
	u32 dll_cells[SFC_MAX_CHIPSELECT_NUM];	/* trained delay-line cells per CS */
	u32 max_dll_cells;		/* NOTE(review): never written in this file -- confirm use */

#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct gpio_desc cs_gpios[SFC_MAX_CHIPSELECT_NUM];	/* optional GPIO chip-selects */
#endif
};
198 
199 static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
200 {
201 	int err;
202 	u32 status;
203 
204 	writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
205 
206 	err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
207 				 !(status & SFC_RCVR_RESET),
208 				 1000000);
209 	if (err)
210 		dev_err(sfc->dev, "SFC reset never finished\n");
211 
212 	/* Still need to clear the masked interrupt from RISR */
213 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
214 
215 	dev_dbg(sfc->dev, "reset\n");
216 
217 	return err;
218 }
219 
220 static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
221 {
222 	return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
223 }
224 
225 static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
226 {
227 	if (sfc->version >= SFC_VER_4)
228 		return SFC_MAX_IOSIZE_VER4;
229 
230 	return SFC_MAX_IOSIZE_VER3;
231 }
232 
233 static u32 rockchip_sfc_get_max_dll_cells(struct rockchip_sfc *sfc)
234 {
235 	if (sfc->version > SFC_VER_4)
236 		return SFC_DLL_CTRL0_DLL_MAX_VER5;
237 	else if (sfc->version == SFC_VER_4)
238 		return SFC_DLL_CTRL0_DLL_MAX_VER4;
239 	else
240 		return 0;
241 }
242 
243 static __maybe_unused void rockchip_sfc_set_delay_lines(struct rockchip_sfc *sfc, u16 cells, u8 cs)
244 {
245 	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
246 	u32 val = 0;
247 
248 	if (cells > cell_max)
249 		cells = cell_max;
250 
251 	if (cells)
252 		val = SFC_DLL_CTRL0_SCLK_SMP_DLL | cells;
253 
254 	writel(val, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_DLL_CTRL0);
255 }
256 
257 #if CONFIG_IS_ENABLED(CLK)
258 static int rockchip_sfc_clk_set_rate(struct rockchip_sfc *sfc, unsigned long  speed)
259 {
260 	if (sfc->version >= SFC_VER_8)
261 		return clk_set_rate(&sfc->clk, speed * 2);
262 	else
263 		return clk_set_rate(&sfc->clk, speed);
264 }
265 
266 static unsigned long rockchip_sfc_clk_get_rate(struct rockchip_sfc *sfc)
267 {
268 	if (sfc->version >= SFC_VER_8)
269 		return clk_get_rate(&sfc->clk) / 2;
270 	else
271 		return clk_get_rate(&sfc->clk);
272 }
273 #endif
274 
275 static int rockchip_sfc_init(struct rockchip_sfc *sfc)
276 {
277 	writel(0, sfc->regbase + SFC_CTRL);
278 	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
279 		writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
280 
281 	return 0;
282 }
283 
/* Claim the optional "sfc-cs-gpios" and park them deasserted. */
static int rockchip_cs_setup(struct udevice *bus)
{
#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct rockchip_sfc *sfc = dev_get_platdata(bus);
	int i, ret;

	ret = gpio_request_list_by_name(bus, "sfc-cs-gpios", sfc->cs_gpios,
					ARRAY_SIZE(sfc->cs_gpios), 0);
	if (ret < 0) {
		pr_err("Can't get %s gpios! Error: %d\n", bus->name, ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(sfc->cs_gpios); i++) {
		struct gpio_desc *gpio = &sfc->cs_gpios[i];

		/* Slots without a GPIO use the controller's native CS */
		if (!dm_gpio_is_valid(gpio))
			continue;

		/* Configure as a deasserted, active-low output */
		ret = dm_gpio_set_dir_flags(gpio,
					    GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
		if (ret) {
			dev_err(bus, "Setting cs %d error, ret=%d\n", i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
312 
313 static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
314 {
315 	struct rockchip_sfc *sfc = dev_get_platdata(bus);
316 
317 	sfc->regbase = dev_read_addr_ptr(bus);
318 	if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
319 		sfc->use_dma = false;
320 	else
321 		sfc->use_dma = true;
322 #if CONFIG_IS_ENABLED(CLK)
323 	int ret;
324 
325 	ret = clk_get_by_index(bus, 0, &sfc->clk);
326 	if (ret < 0) {
327 		printf("Could not get clock for %s: %d\n", bus->name, ret);
328 		return ret;
329 	}
330 
331 	ret = clk_get_by_index(bus, 1, &sfc->hclk);
332 	if (ret < 0) {
333 		printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
334 		return ret;
335 	}
336 #endif
337 
338 	rockchip_cs_setup(bus);
339 
340 	return 0;
341 }
342 
343 static int rockchip_sfc_probe(struct udevice *bus)
344 {
345 	struct rockchip_sfc *sfc = dev_get_platdata(bus);
346 	int ret;
347 
348 #if CONFIG_IS_ENABLED(CLK)
349 	ret = clk_enable(&sfc->hclk);
350 	if (ret)
351 		dev_dbg(sfc->dev, "sfc Enable ahb clock fail %s: %d\n", bus->name, ret);
352 
353 	ret = clk_enable(&sfc->clk);
354 	if (ret)
355 		dev_dbg(sfc->dev, "sfc Enable clock fail for %s: %d\n", bus->name, ret);
356 #endif
357 	/* Initial the version at the first */
358 	sfc->version = rockchip_sfc_get_version(sfc);
359 
360 	ret = rockchip_sfc_init(sfc);
361 	if (ret)
362 		goto err_init;
363 
364 	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
365 	sfc->max_freq = SFC_MAX_SPEED;
366 	sfc->dev = bus;
367 
368 	return 0;
369 
370 err_init:
371 #if CONFIG_IS_ENABLED(CLK)
372 	clk_disable(&sfc->clk);
373 	clk_disable(&sfc->hclk);
374 #endif
375 
376 	return ret;
377 }
378 
379 static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
380 {
381 	int ret = 0;
382 	u32 status;
383 
384 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
385 				 status & SFC_FSR_TXLV_MASK,
386 				 timeout_us);
387 	if (ret) {
388 		dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
389 
390 		return -ETIMEDOUT;
391 	}
392 
393 	return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
394 }
395 
396 static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
397 {
398 	int ret = 0;
399 	u32 status;
400 
401 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
402 				 status & SFC_FSR_RXLV_MASK,
403 				 timeout_us);
404 	if (ret) {
405 		dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
406 
407 		return -ETIMEDOUT;
408 	}
409 
410 	return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
411 }
412 
413 static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
414 {
415 	if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
416 		/*
417 		 * SFC not support output DUMMY cycles right after CMD cycles, so
418 		 * treat it as ADDR cycles.
419 		 */
420 		op->addr.nbytes = op->dummy.nbytes;
421 		op->addr.buswidth = op->dummy.buswidth;
422 		op->addr.val = 0xFFFFFFFFF;
423 
424 		op->dummy.nbytes = 0;
425 	}
426 }
427 
428 static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
429 {
430 	unsigned long tbase;
431 
432 	/* Wait for the DMA interrupt status */
433 	tbase = get_timer(0);
434 	while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
435 		if (get_timer(tbase) > timeout) {
436 			printf("dma timeout\n");
437 			rockchip_sfc_reset(sfc);
438 
439 			return -ETIMEDOUT;
440 		}
441 
442 		udelay(1);
443 	}
444 
445 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
446 
447 	return 0;
448 }
449 
/*
 * Translate one spi-mem op into SFC register programming for @mem's
 * chip-select: per-CS CTRL/ABIT, then the shared CMD/ADDR registers.
 *
 * @len is the number of data-phase bytes (may be 0 for no-data ops).
 * Always returns 0.
 */
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
				   struct spi_slave *mem,
				   const struct spi_mem_op *op,
				   u32 len)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 ctrl = 0, cmd = 0;

	/* set CMD */
	cmd = op->cmd.opcode;
	/* Buswidths 1/2/4 encode as 0/1/2 in the two-bit CTRL fields */
	ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);

	/* set ADDR */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4) {
			cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
		} else if (op->addr.nbytes == 3) {
			cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
		} else {
			/* Other widths: program (bit count - 1) into SFC_ABIT */
			cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
			writel(op->addr.nbytes * 8 - 1, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
		}

		ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
	}

	/* set DUMMY: the field counts SCLK cycles, so scale bytes by width */
	if (op->dummy.nbytes) {
		if (op->dummy.buswidth == 4)
			cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
		else if (op->dummy.buswidth == 2)
			cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
		else
			cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
	}

	/* set DATA: VER4+ uses SFC_LEN_EXT, older parts a field in CMD */
	if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
		writel(len, sfc->regbase + SFC_LEN_EXT);
	else
		cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
	if (len) {
		if (op->data.dir == SPI_MEM_DATA_OUT)
			cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

		ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
	}
	/*
	 * NOTE(review): no-data ops that carry an address are issued in the
	 * write direction -- presumably required for the controller to drive
	 * the address cycles out; confirm against the TRM.
	 */
	if (!len && op->addr.nbytes)
		cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

	/* set the Controller */
	ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
	cmd |= plat->cs << SFC_CMD_CS_SHIFT;

	dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
		op->addr.nbytes, op->addr.buswidth,
		op->dummy.nbytes, op->dummy.buswidth);
	dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x cs=%x\n",
		ctrl, cmd, op->addr.val, len, plat->cs);

	/* CTRL goes to the per-CS bank; CMD/ADDR are written last */
	writel(ctrl, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
	writel(cmd, sfc->regbase + SFC_CMD);
	if (op->addr.nbytes)
		writel(op->addr.val, sfc->regbase + SFC_ADDR);

	return 0;
}
517 
518 static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
519 {
520 	u8 bytes = len & 0x3;
521 	u32 dwords;
522 	int tx_level;
523 	u32 write_words;
524 	u32 tmp = 0;
525 
526 	dwords = len >> 2;
527 	while (dwords) {
528 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
529 		if (tx_level < 0)
530 			return tx_level;
531 		write_words = min_t(u32, tx_level, dwords);
532 		writesl(sfc->regbase + SFC_DATA, buf, write_words);
533 		buf += write_words << 2;
534 		dwords -= write_words;
535 	}
536 
537 	/* write the rest non word aligned bytes */
538 	if (bytes) {
539 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
540 		if (tx_level < 0)
541 			return tx_level;
542 		memcpy(&tmp, buf, bytes);
543 		writel(tmp, sfc->regbase + SFC_DATA);
544 	}
545 
546 	return len;
547 }
548 
549 static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
550 {
551 	u8 bytes = len & 0x3;
552 	u32 dwords;
553 	u8 read_words;
554 	int rx_level;
555 	int tmp;
556 
557 	/* word aligned access only */
558 	dwords = len >> 2;
559 	while (dwords) {
560 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
561 		if (rx_level < 0)
562 			return rx_level;
563 		read_words = min_t(u32, rx_level, dwords);
564 		readsl(sfc->regbase + SFC_DATA, buf, read_words);
565 		buf += read_words << 2;
566 		dwords -= read_words;
567 	}
568 
569 	/* read the rest non word aligned bytes */
570 	if (bytes) {
571 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
572 		if (rx_level < 0)
573 			return rx_level;
574 		tmp = readl(sfc->regbase + SFC_DATA);
575 		memcpy(buf, &tmp, bytes);
576 	}
577 
578 	return len;
579 }
580 
/*
 * Point the SFC's internal DMA master at @dma_buf and start it.
 * Returns @len immediately; completion is observed separately via
 * SFC_RISR (rockchip_sfc_wait_for_dma_finished()).
 */
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	/* Ack stale interrupts so the DMA-done bit starts clear */
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}
589 
590 static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
591 				       const struct spi_mem_op *op, u32 len)
592 {
593 	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
594 
595 	if (op->data.dir == SPI_MEM_DATA_OUT)
596 		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
597 	else
598 		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
599 }
600 
601 static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
602 				      const struct spi_mem_op *op, u32 len)
603 {
604 	struct bounce_buffer bb;
605 	unsigned int bb_flags;
606 	void *dma_buf;
607 	int ret;
608 
609 	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
610 
611 	if (op->data.dir == SPI_MEM_DATA_OUT) {
612 		dma_buf = (void *)op->data.buf.out;
613 		bb_flags = GEN_BB_READ;
614 	} else {
615 		dma_buf = (void *)op->data.buf.in;
616 		bb_flags = GEN_BB_WRITE;
617 	}
618 
619 	ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
620 	if (ret)
621 		return ret;
622 
623 	ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
624 	rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
625 	bounce_buffer_stop(&bb);
626 
627 	return ret;
628 }
629 
/*
 * Start a DMA data phase without waiting for completion.  The transfer
 * size is recorded in sfc->last_async_size; exec_op waits it out via
 * rockchip_sfc_wait_for_dma_finished() before the next operation.
 * Always returns 0.
 */
static int rockchip_sfc_xfer_data_dma_async(struct rockchip_sfc *sfc,
					    const struct spi_mem_op *op, u32 len)
{
	void *dma_buf;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		/* Make the CPU-written source data visible to the DMA master */
		flush_dcache_range((unsigned long)dma_buf,
				   (unsigned long)dma_buf + len);
	} else {
		/*
		 * NOTE(review): no cache invalidate is performed here for the
		 * read direction -- presumably the consumer of the async data
		 * handles it; confirm against the callers.
		 */
		dma_buf = (void *)op->data.buf.in;
	}

	dev_dbg(sfc->dev, "xfer_dma_async len=%x %p\n", len, dma_buf);

	/* DMA runs in place on the caller's buffer -- no bounce buffer */
	rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)dma_buf, len);
	sfc->last_async_size = len;

	return 0;
}
650 
651 static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
652 {
653 	int ret = 0;
654 	u32 status;
655 
656 	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
657 				 !(status & SFC_SR_IS_BUSY),
658 				 timeout_us);
659 	if (ret) {
660 		dev_err(sfc->dev, "wait sfc idle timeout\n");
661 		rockchip_sfc_reset(sfc);
662 
663 		ret = -EIO;
664 	}
665 
666 	return ret;
667 }
668 
669 static int rockchip_spi_set_cs(struct rockchip_sfc *sfc, struct spi_slave *mem, bool enable)
670 {
671 #if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
672 	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
673 	u32 cs = plat->cs;
674 
675 	if (!dm_gpio_is_valid(&sfc->cs_gpios[cs]))
676 		return 0;
677 
678 	debug("%s %d %x\n", __func__, cs, enable);
679 	dm_gpio_set_value(&sfc->cs_gpios[cs], enable);
680 #endif
681 	return 0;
682 }
683 
684 #if CONFIG_IS_ENABLED(CLK)
685 static int rockchip_sfc_exec_op_bypass(struct rockchip_sfc *sfc,
686 				       struct spi_slave *mem,
687 				       const struct spi_mem_op *op)
688 {
689 	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
690 	u32 ret;
691 
692 	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
693 	rockchip_spi_set_cs(sfc, mem, true);
694 	rockchip_sfc_xfer_setup(sfc, mem, op, len);
695 	ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
696 	if (ret != len) {
697 		dev_err(sfc->dev, "xfer data failed ret %d\n", ret);
698 
699 		return -EIO;
700 	}
701 
702 	ret = rockchip_sfc_xfer_done(sfc, 100000);
703 	rockchip_spi_set_cs(sfc, mem, false);
704 
705 	return ret;
706 }
707 
/*
 * Train the RX sample delay line for @mem's chip-select.
 *
 * Reads the JEDEC ID (0x9F) once at the safe SFC_DLL_THRESHOLD_RATE as
 * the reference, then sweeps the delay cells at the target rate and
 * brackets the [left, right] window in which the ID still reads back
 * correctly.  A sufficiently wide window gets its midpoint programmed;
 * otherwise the delay line is disabled and the clock is dropped back to
 * the threshold rate.
 */
static void rockchip_sfc_delay_lines_tuning(struct rockchip_sfc *sfc, struct spi_slave *mem)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
						SPI_MEM_OP_NO_ADDR,
						SPI_MEM_OP_NO_DUMMY,
						SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 id[3], id_temp[3];
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u16 right, left = 0;
	u16 step = SFC_DLL_TRANING_STEP;
	bool dll_valid = false;
	u8 cs = plat->cs;

	/* Reference read at a rate that works without any delay tuning */
	rockchip_sfc_clk_set_rate(sfc, SFC_DLL_THRESHOLD_RATE);
	op.data.buf.in = &id;
	rockchip_sfc_exec_op_bypass(sfc, mem, &op);
	/* All-0x00 or all-0xFF means no flash answered: skip tuning */
	if ((0xFF == id[0] && 0xFF == id[1]) ||
	    (0x00 == id[0] && 0x00 == id[1])) {
		dev_dbg(sfc->dev, "no dev, dll by pass\n");
		/*
		 * NOTE(review): the clock is restored to speed[cs] but then
		 * speed[cs] is overwritten with the threshold rate -- confirm
		 * this ordering is intentional.
		 */
		rockchip_sfc_clk_set_rate(sfc, sfc->speed[cs]);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;

		return;
	}

	/* Sweep the cells at the target rate, comparing against the reference */
	rockchip_sfc_clk_set_rate(sfc, sfc->speed[cs]);
	op.data.buf.in = &id_temp;
	for (right = 0; right <= cell_max; right += step) {
		int ret;

		rockchip_sfc_set_delay_lines(sfc, right, cs);
		rockchip_sfc_exec_op_bypass(sfc, mem, &op);
		dev_dbg(sfc->dev, "dll read flash id:%x %x %x\n",
			id_temp[0], id_temp[1], id_temp[2]);

		ret = memcmp(&id, &id_temp, 3);
		if (dll_valid && ret) {
			/* First mismatch after a valid run: right edge found */
			right -= step;

			break;
		}
		/* First match marks the left edge of the window */
		if (!dll_valid && !ret)
			left = right;

		if (!ret)
			dll_valid = true;

		/* Add cell_max to loop */
		if (right == cell_max)
			break;
		if (right + step > cell_max)
			right = cell_max - step;
	}

	/* Only trust windows wide enough to tolerate drift */
	if (dll_valid && (right - left) >= SFC_DLL_TRANING_VALID_WINDOW) {
		if (left == 0 && right < cell_max)
			sfc->dll_cells[cs] = left + (right - left) * 2 / 5;
		else
			sfc->dll_cells[cs] = left + (right - left) / 2;
	} else {
		sfc->dll_cells[cs] = 0;
	}

	if (sfc->dll_cells[cs]) {
		dev_dbg(sfc->dev, "%d %d %d dll training success in %dMHz max_cells=%u sfc_ver=%d\n",
			left, right, sfc->dll_cells[cs], sfc->speed[cs],
			rockchip_sfc_get_max_dll_cells(sfc), rockchip_sfc_get_version(sfc));
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[cs], cs);
	} else {
		dev_err(sfc->dev, "%d %d dll training failed in %dMHz, reduce the speed\n",
			left, right, sfc->speed[cs]);
		/* Fall back: no delay line and a conservative clock rate */
		rockchip_sfc_set_delay_lines(sfc, 0, cs);
		rockchip_sfc_clk_set_rate(sfc, SFC_DLL_THRESHOLD_RATE);
		sfc->cur_speed = SFC_DLL_THRESHOLD_RATE;
		sfc->cur_real_speed = rockchip_sfc_clk_get_rate(sfc);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;
	}
}
787 
788 #endif
789 
/*
 * spi-mem exec_op callback: retune the delay lines if the bus speed
 * changed, reap any pending async DMA, then run the op's phases via
 * DMA (sync or async) or PIO.
 */
static int rockchip_sfc_exec_op(struct spi_slave *mem,
				const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	int ret;

	/* Re-tune (or bypass) the delay line when this CS's speed changed */
	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4 &&
	    sfc->cur_speed != sfc->speed[plat->cs]) {
		sfc->speed[plat->cs] = sfc->cur_speed;
#if CONFIG_IS_ENABLED(CLK)
		if (sfc->cur_real_speed > SFC_DLL_THRESHOLD_RATE)
			rockchip_sfc_delay_lines_tuning(sfc, mem);
		else
#endif
			rockchip_sfc_set_delay_lines(sfc, 0, plat->cs);
	}

	/* Wait for last async transfer finished */
	if (sfc->last_async_size) {
		rockchip_sfc_wait_for_dma_finished(sfc, sfc->last_async_size);
		sfc->last_async_size = 0;
	}
	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_spi_set_cs(sfc, mem, true);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			/* Async mode returns early; reaped above on the next op */
			if (mem->mode & SPI_DMA_PREPARE)
				return rockchip_sfc_xfer_data_dma_async(sfc, op, len);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			/* NOTE(review): CS stays asserted on this error path -- confirm intended */
			return -EIO;
		}
	}

	ret = rockchip_sfc_xfer_done(sfc, 100000);
	rockchip_spi_set_cs(sfc, mem, false);

	return ret;
}
838 
839 static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
840 {
841 	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
842 
843 	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
844 
845 	return 0;
846 }
847 
/*
 * dm_spi_ops set_speed callback: clamp to the controller maximum and
 * program the interface clock.  Without CLK support this is a no-op
 * (and cur_speed is never updated).
 */
static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	if (speed > sfc->max_freq)
		speed = sfc->max_freq;

	/* Nothing to do if this rate is already programmed */
	if (speed == sfc->cur_speed)
		return 0;

#if CONFIG_IS_ENABLED(CLK)
	int ret = rockchip_sfc_clk_set_rate(sfc, speed);

	if (ret < 0) {
		dev_err(sfc->dev, "set_freq=%dHz fail, check if it's the cru support level\n",
			speed);
		return ret;
	}
	sfc->cur_speed = speed;
	/* The clock tree may round; remember what we actually got */
	sfc->cur_real_speed = rockchip_sfc_clk_get_rate(sfc);

	dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%dHz\n",
		sfc->cur_speed, sfc->cur_real_speed);
#else
	dev_dbg(sfc->dev, "sfc failed, CLK not support\n");
#endif
	return 0;
}
876 
/* dm_spi_ops set_mode callback: the SFC has no runtime mode knobs here */
static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}
881 
/* spi-mem operations: the SFC is driven exclusively through exec_op */
static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.adjust_op_size	= rockchip_sfc_adjust_op_size,
	.exec_op	= rockchip_sfc_exec_op,
};

/* No generic xfer callback: only spi-mem ops plus speed/mode hooks */
static const struct dm_spi_ops rockchip_sfc_ops = {
	.mem_ops	= &rockchip_sfc_mem_ops,
	.set_speed	= rockchip_sfc_set_speed,
	.set_mode	= rockchip_sfc_set_mode,
};

static const struct udevice_id rockchip_sfc_ids[] = {
	{ .compatible = "rockchip,sfc"},
	{},
};

U_BOOT_DRIVER(rockchip_sfc_driver) = {
	.name   = "rockchip_sfc",
	.id     = UCLASS_SPI,
	.of_match = rockchip_sfc_ids,
	.ops    = &rockchip_sfc_ops,
	.ofdata_to_platdata = rockchip_sfc_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct rockchip_sfc),
	.probe  = rockchip_sfc_probe,
};
907