xref: /rk3399_rockchip-uboot/drivers/spi/rockchip_sfc.c (revision 1d38ed875d76ce3a81d4d204e3d7fc6bcb3af7bf)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Rockchip Serial Flash Controller Driver
4  *
5  * Copyright (c) 2017-2021, Rockchip Inc.
6  * Author: Shawn Lin <shawn.lin@rock-chips.com>
7  *	   Chris Morgan <macromorgan@hotmail.com>
8  *	   Jon Lin <Jon.lin@rock-chips.com>
9  */
10 
11 #include <asm/io.h>
12 #include <bouncebuf.h>
13 #include <clk.h>
14 #include <dm.h>
15 #include <linux/bitops.h>
16 #include <linux/delay.h>
17 #include <linux/iopoll.h>
18 #include <spi.h>
19 #include <spi-mem.h>
20 #include <asm/gpio.h>
21 
22 /* System control */
23 #define SFC_CTRL			0x0
24 #define  SFC_CTRL_PHASE_SEL_NEGETIVE	BIT(1)
25 #define  SFC_CTRL_CMD_BITS_SHIFT	8
26 #define  SFC_CTRL_ADDR_BITS_SHIFT	10
27 #define  SFC_CTRL_DATA_BITS_SHIFT	12
28 
29 /* Interrupt mask */
30 #define SFC_IMR				0x4
31 #define  SFC_IMR_RX_FULL		BIT(0)
32 #define  SFC_IMR_RX_UFLOW		BIT(1)
33 #define  SFC_IMR_TX_OFLOW		BIT(2)
34 #define  SFC_IMR_TX_EMPTY		BIT(3)
35 #define  SFC_IMR_TRAN_FINISH		BIT(4)
36 #define  SFC_IMR_BUS_ERR		BIT(5)
37 #define  SFC_IMR_NSPI_ERR		BIT(6)
38 #define  SFC_IMR_DMA			BIT(7)
39 
40 /* Interrupt clear */
41 #define SFC_ICLR			0x8
42 #define  SFC_ICLR_RX_FULL		BIT(0)
43 #define  SFC_ICLR_RX_UFLOW		BIT(1)
44 #define  SFC_ICLR_TX_OFLOW		BIT(2)
45 #define  SFC_ICLR_TX_EMPTY		BIT(3)
46 #define  SFC_ICLR_TRAN_FINISH		BIT(4)
47 #define  SFC_ICLR_BUS_ERR		BIT(5)
48 #define  SFC_ICLR_NSPI_ERR		BIT(6)
49 #define  SFC_ICLR_DMA			BIT(7)
50 
51 /* FIFO threshold level */
52 #define SFC_FTLR			0xc
53 #define  SFC_FTLR_TX_SHIFT		0
54 #define  SFC_FTLR_TX_MASK		0x1f
55 #define  SFC_FTLR_RX_SHIFT		8
56 #define  SFC_FTLR_RX_MASK		0x1f
57 
58 /* Reset FSM and FIFO */
59 #define SFC_RCVR			0x10
60 #define  SFC_RCVR_RESET			BIT(0)
61 
62 /* Enhanced mode */
63 #define SFC_AX				0x14
64 
65 /* Address Bit number */
66 #define SFC_ABIT			0x18
67 
68 /* Interrupt status */
69 #define SFC_ISR				0x1c
70 #define  SFC_ISR_RX_FULL_SHIFT		BIT(0)
71 #define  SFC_ISR_RX_UFLOW_SHIFT		BIT(1)
72 #define  SFC_ISR_TX_OFLOW_SHIFT		BIT(2)
73 #define  SFC_ISR_TX_EMPTY_SHIFT		BIT(3)
74 #define  SFC_ISR_TX_FINISH_SHIFT	BIT(4)
75 #define  SFC_ISR_BUS_ERR_SHIFT		BIT(5)
76 #define  SFC_ISR_NSPI_ERR_SHIFT		BIT(6)
77 #define  SFC_ISR_DMA_SHIFT		BIT(7)
78 
79 /* FIFO status */
80 #define SFC_FSR				0x20
81 #define  SFC_FSR_TX_IS_FULL		BIT(0)
82 #define  SFC_FSR_TX_IS_EMPTY		BIT(1)
83 #define  SFC_FSR_RX_IS_EMPTY		BIT(2)
84 #define  SFC_FSR_RX_IS_FULL		BIT(3)
85 #define  SFC_FSR_TXLV_MASK		GENMASK(13, 8)
86 #define  SFC_FSR_TXLV_SHIFT		8
87 #define  SFC_FSR_RXLV_MASK		GENMASK(20, 16)
88 #define  SFC_FSR_RXLV_SHIFT		16
89 
90 /* FSM status */
91 #define SFC_SR				0x24
92 #define  SFC_SR_IS_IDLE			0x0
93 #define  SFC_SR_IS_BUSY			0x1
94 
95 /* Raw interrupt status */
96 #define SFC_RISR			0x28
97 #define  SFC_RISR_RX_FULL		BIT(0)
98 #define  SFC_RISR_RX_UNDERFLOW		BIT(1)
99 #define  SFC_RISR_TX_OVERFLOW		BIT(2)
100 #define  SFC_RISR_TX_EMPTY		BIT(3)
101 #define  SFC_RISR_TRAN_FINISH		BIT(4)
102 #define  SFC_RISR_BUS_ERR		BIT(5)
103 #define  SFC_RISR_NSPI_ERR		BIT(6)
104 #define  SFC_RISR_DMA			BIT(7)
105 
106 /* Version */
107 #define SFC_VER				0x2C
108 #define  SFC_VER_3			0x3
109 #define  SFC_VER_4			0x4
110 #define  SFC_VER_5			0x5
111 #define  SFC_VER_6			0x6
112 #define  SFC_VER_8			0x8
113 
/* Delay line controller register */
115 #define SFC_DLL_CTRL0			0x3C
116 #define SFC_DLL_CTRL0_SCLK_SMP_DLL	BIT(15)
117 #define SFC_DLL_CTRL0_DLL_MAX_VER4	0xFFU
118 #define SFC_DLL_CTRL0_DLL_MAX_VER5	0x1FFU
119 
120 /* Master trigger */
121 #define SFC_DMA_TRIGGER			0x80
122 #define SFC_DMA_TRIGGER_START		1
123 
124 /* Src or Dst addr for master */
125 #define SFC_DMA_ADDR			0x84
126 
127 /* Length control register extension 32GB */
128 #define SFC_LEN_CTRL			0x88
129 #define SFC_LEN_CTRL_TRB_SEL		1
130 #define SFC_LEN_EXT			0x8C
131 
132 /* Command */
133 #define SFC_CMD				0x100
134 #define  SFC_CMD_IDX_SHIFT		0
135 #define  SFC_CMD_DUMMY_SHIFT		8
136 #define  SFC_CMD_DIR_SHIFT		12
137 #define  SFC_CMD_DIR_RD			0
138 #define  SFC_CMD_DIR_WR			1
139 #define  SFC_CMD_ADDR_SHIFT		14
140 #define  SFC_CMD_ADDR_0BITS		0
141 #define  SFC_CMD_ADDR_24BITS		1
142 #define  SFC_CMD_ADDR_32BITS		2
143 #define  SFC_CMD_ADDR_XBITS		3
144 #define  SFC_CMD_TRAN_BYTES_SHIFT	16
145 #define  SFC_CMD_CS_SHIFT		30
146 
147 /* Address */
148 #define SFC_ADDR			0x104
149 
150 /* Data */
151 #define SFC_DATA			0x108
152 
153 #define SFC_CS1_REG_OFFSET		0x200
154 
155 #define SFC_MAX_CHIPSELECT_NUM		2
156 
157 /* The SFC can transfer max 16KB - 1 at one time
158  * we set it to 15.5KB here for alignment.
159  */
160 #define SFC_MAX_IOSIZE_VER3		(512 * 31)
161 
162 #define SFC_MAX_IOSIZE_VER4		(0xFFFFFFFFU)
163 
164 /* DMA is only enabled for large data transmission */
165 #define SFC_DMA_TRANS_THRETHOLD		(0x40)
166 
167 /* Maximum clock values from datasheet suggest keeping clock value under
168  * 150MHz. No minimum or average value is suggested.
169  */
170 #define SFC_MAX_SPEED		(150 * 1000 * 1000)
171 #define SFC_DLL_THRESHOLD_RATE	(50 * 1000 * 1000)
172 
173 #define SFC_DLL_TRANING_STEP		10		/* Training step */
#define SFC_DLL_TRANING_VALID_WINDOW	80		/* Training valid DLL window */
175 
struct rockchip_sfc {
	struct udevice *dev;		/* SFC bus device (assigned in probe) */
	void __iomem *regbase;		/* mapped controller register base */
	struct clk hclk;		/* AHB (bus interface) clock */
	struct clk clk;			/* SPI interface clock */
	u32 max_freq;			/* upper limit for the interface clock, Hz */
	u32 cur_speed;			/* last rate requested via set_speed, Hz */
	u32 cur_real_speed;		/* actual rate reported by the clock framework, Hz */
	u32 speed[SFC_MAX_CHIPSELECT_NUM]; /* per-CS rate used for DLL tuning */
	bool use_dma;			/* false when DT sets "sfc-no-dma" */
	u32 max_iosize;			/* max bytes per transfer (IP-version dependent) */
	u16 version;			/* controller IP version read from SFC_VER */

	u32 last_async_size;		/* bytes of an in-flight async DMA transfer, 0 if none */
	u32 async;			/* NOTE(review): not referenced in the visible code */
	u32 dll_cells[SFC_MAX_CHIPSELECT_NUM]; /* tuned delay-line cells per CS */
	u32 max_dll_cells;		/* NOTE(review): not referenced in the visible code */

#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct gpio_desc cs_gpios[SFC_MAX_CHIPSELECT_NUM]; /* optional GPIO chip selects */
#endif
};
198 
199 static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
200 {
201 	int err;
202 	u32 status;
203 
204 	writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
205 
206 	err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
207 				 !(status & SFC_RCVR_RESET),
208 				 1000000);
209 	if (err)
210 		dev_err(sfc->dev, "SFC reset never finished\n");
211 
212 	/* Still need to clear the masked interrupt from RISR */
213 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
214 
215 	dev_dbg(sfc->dev, "reset\n");
216 
217 	return err;
218 }
219 
220 static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
221 {
222 	return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
223 }
224 
225 static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
226 {
227 	if (sfc->version >= SFC_VER_4)
228 		return SFC_MAX_IOSIZE_VER4;
229 
230 	return SFC_MAX_IOSIZE_VER3;
231 }
232 
233 static u32 rockchip_sfc_get_max_dll_cells(struct rockchip_sfc *sfc)
234 {
235 	if (sfc->version > SFC_VER_4)
236 		return SFC_DLL_CTRL0_DLL_MAX_VER5;
237 	else if (sfc->version == SFC_VER_4)
238 		return SFC_DLL_CTRL0_DLL_MAX_VER4;
239 	else
240 		return 0;
241 }
242 
243 static __maybe_unused void rockchip_sfc_set_delay_lines(struct rockchip_sfc *sfc, u16 cells, u8 cs)
244 {
245 	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
246 	u32 val = 0;
247 
248 	if (cells > cell_max)
249 		cells = cell_max;
250 
251 	if (cells)
252 		val = SFC_DLL_CTRL0_SCLK_SMP_DLL | cells;
253 
254 	writel(val, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_DLL_CTRL0);
255 }
256 
257 static int rockchip_sfc_init(struct rockchip_sfc *sfc)
258 {
259 	writel(0, sfc->regbase + SFC_CTRL);
260 	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
261 		writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
262 
263 	return 0;
264 }
265 
/*
 * Claim the optional "sfc-cs-gpios" from the device tree and configure
 * them as active-low outputs.  Compiled out in SPL or without DM_GPIO.
 */
static int rockchip_cs_setup(struct udevice *bus)
{
#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct rockchip_sfc *sfc = dev_get_platdata(bus);
	int ret;
	int i;

	/* ret is the number of GPIOs found, or a negative error */
	ret = gpio_request_list_by_name(bus, "sfc-cs-gpios", sfc->cs_gpios,
					ARRAY_SIZE(sfc->cs_gpios), 0);
	if (ret < 0) {
		pr_err("Can't get %s gpios! Error: %d\n", bus->name, ret);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(sfc->cs_gpios); i++) {
		/* Slots beyond what the DT listed stay invalid; skip them */
		if (!dm_gpio_is_valid(&sfc->cs_gpios[i]))
			continue;

		ret = dm_gpio_set_dir_flags(&sfc->cs_gpios[i],
					    GPIOD_IS_OUT | GPIOD_ACTIVE_LOW);
		if (ret) {
			dev_err(bus, "Setting cs %d error, ret=%d\n", i, ret);
			return ret;
		}
	}
#endif
	return 0;
}
294 
/*
 * Parse platform data from the device tree: register base, the optional
 * "sfc-no-dma" property, and (when the CLK framework is enabled) the
 * interface clock (index 0) and AHB clock (index 1).
 */
static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	sfc->regbase = dev_read_addr_ptr(bus);
	/* DMA is the default; "sfc-no-dma" forces PIO transfers */
	if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
		sfc->use_dma = false;
	else
		sfc->use_dma = true;
#if CONFIG_IS_ENABLED(CLK)
	int ret;

	ret = clk_get_by_index(bus, 0, &sfc->clk);
	if (ret < 0) {
		printf("Could not get clock for %s: %d\n", bus->name, ret);
		return ret;
	}

	ret = clk_get_by_index(bus, 1, &sfc->hclk);
	if (ret < 0) {
		printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
		return ret;
	}
#endif

	/* NOTE(review): return value ignored - CS GPIO setup failure is non-fatal */
	rockchip_cs_setup(bus);

	return 0;
}
324 
/*
 * Probe the controller: enable clocks (best effort), read the IP
 * version and apply the version-dependent defaults.
 */
static int rockchip_sfc_probe(struct udevice *bus)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);
	int ret;

#if CONFIG_IS_ENABLED(CLK)
	/*
	 * Clock enable failures are only logged, not fatal.
	 * NOTE(review): sfc->dev is not assigned until the end of probe, so
	 * these dev_dbg calls see a NULL device - confirm this is benign.
	 */
	ret = clk_enable(&sfc->hclk);
	if (ret)
		dev_dbg(sfc->dev, "sfc Enable ahb clock fail %s: %d\n", bus->name, ret);

	ret = clk_enable(&sfc->clk);
	if (ret)
		dev_dbg(sfc->dev, "sfc Enable clock fail for %s: %d\n", bus->name, ret);
#endif
	/* Initial the version at the first */
	sfc->version = rockchip_sfc_get_version(sfc);

	ret = rockchip_sfc_init(sfc);
	if (ret)
		goto err_init;

	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
	sfc->max_freq = SFC_MAX_SPEED;
	sfc->dev = bus;

	return 0;

err_init:
	/* Undo the clock enables on failure */
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&sfc->clk);
	clk_disable(&sfc->hclk);
#endif

	return ret;
}
360 
361 static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
362 {
363 	int ret = 0;
364 	u32 status;
365 
366 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
367 				 status & SFC_FSR_TXLV_MASK,
368 				 timeout_us);
369 	if (ret) {
370 		dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
371 
372 		return -ETIMEDOUT;
373 	}
374 
375 	return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
376 }
377 
378 static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
379 {
380 	int ret = 0;
381 	u32 status;
382 
383 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
384 				 status & SFC_FSR_RXLV_MASK,
385 				 timeout_us);
386 	if (ret) {
387 		dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
388 
389 		return -ETIMEDOUT;
390 	}
391 
392 	return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
393 }
394 
395 static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
396 {
397 	if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
398 		/*
399 		 * SFC not support output DUMMY cycles right after CMD cycles, so
400 		 * treat it as ADDR cycles.
401 		 */
402 		op->addr.nbytes = op->dummy.nbytes;
403 		op->addr.buswidth = op->dummy.buswidth;
404 		op->addr.val = 0xFFFFFFFFF;
405 
406 		op->dummy.nbytes = 0;
407 	}
408 }
409 
410 static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
411 {
412 	unsigned long tbase;
413 
414 	/* Wait for the DMA interrupt status */
415 	tbase = get_timer(0);
416 	while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
417 		if (get_timer(tbase) > timeout) {
418 			printf("dma timeout\n");
419 			rockchip_sfc_reset(sfc);
420 
421 			return -ETIMEDOUT;
422 		}
423 
424 		udelay(1);
425 	}
426 
427 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
428 
429 	return 0;
430 }
431 
/*
 * Program SFC_CTRL/SFC_CMD/SFC_ADDR for one spi-mem operation carrying
 * @len data bytes.  Bus widths are encoded as width >> 1 (1->0, 2->1,
 * 4->2) in the CTRL bit fields.
 */
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
				   struct spi_slave *mem,
				   const struct spi_mem_op *op,
				   u32 len)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 ctrl = 0, cmd = 0;

	/* set CMD */
	cmd = op->cmd.opcode;
	ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);

	/* set ADDR */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4) {
			cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
		} else if (op->addr.nbytes == 3) {
			cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
		} else {
			/* Arbitrary width: bit count minus one goes to SFC_ABIT */
			cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
			writel(op->addr.nbytes * 8 - 1, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
		}

		ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
	}

	/* set DUMMY (the field counts dummy clocks: bytes * 8 / buswidth) */
	if (op->dummy.nbytes) {
		if (op->dummy.buswidth == 4)
			cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
		else if (op->dummy.buswidth == 2)
			cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
		else
			cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
	}

	/* set DATA */
	if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
		writel(len, sfc->regbase + SFC_LEN_EXT);
	else
		cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
	if (len) {
		if (op->data.dir == SPI_MEM_DATA_OUT)
			cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

		ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
	}
	/* Dataless ops with an address (e.g. erase) are marked as writes */
	if (!len && op->addr.nbytes)
		cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

	/* set the Controller */
	ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
	cmd |= plat->cs << SFC_CMD_CS_SHIFT;

	dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
		op->addr.nbytes, op->addr.buswidth,
		op->dummy.nbytes, op->dummy.buswidth);
	dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x cs=%x\n",
		ctrl, cmd, op->addr.val, len, plat->cs);

	/* CTRL lives in the per-CS register bank; keep the write order as-is */
	writel(ctrl, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
	writel(cmd, sfc->regbase + SFC_CMD);
	if (op->addr.nbytes)
		writel(op->addr.val, sfc->regbase + SFC_ADDR);

	return 0;
}
499 
500 static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
501 {
502 	u8 bytes = len & 0x3;
503 	u32 dwords;
504 	int tx_level;
505 	u32 write_words;
506 	u32 tmp = 0;
507 
508 	dwords = len >> 2;
509 	while (dwords) {
510 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
511 		if (tx_level < 0)
512 			return tx_level;
513 		write_words = min_t(u32, tx_level, dwords);
514 		writesl(sfc->regbase + SFC_DATA, buf, write_words);
515 		buf += write_words << 2;
516 		dwords -= write_words;
517 	}
518 
519 	/* write the rest non word aligned bytes */
520 	if (bytes) {
521 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
522 		if (tx_level < 0)
523 			return tx_level;
524 		memcpy(&tmp, buf, bytes);
525 		writel(tmp, sfc->regbase + SFC_DATA);
526 	}
527 
528 	return len;
529 }
530 
531 static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
532 {
533 	u8 bytes = len & 0x3;
534 	u32 dwords;
535 	u8 read_words;
536 	int rx_level;
537 	int tmp;
538 
539 	/* word aligned access only */
540 	dwords = len >> 2;
541 	while (dwords) {
542 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
543 		if (rx_level < 0)
544 			return rx_level;
545 		read_words = min_t(u32, rx_level, dwords);
546 		readsl(sfc->regbase + SFC_DATA, buf, read_words);
547 		buf += read_words << 2;
548 		dwords -= read_words;
549 	}
550 
551 	/* read the rest non word aligned bytes */
552 	if (bytes) {
553 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
554 		if (rx_level < 0)
555 			return rx_level;
556 		tmp = readl(sfc->regbase + SFC_DATA);
557 		memcpy(buf, &tmp, bytes);
558 	}
559 
560 	return len;
561 }
562 
/*
 * Arm the internal DMA master for a transfer to/from @dma_buf and kick
 * it off.  Completion must be awaited separately (RISR DMA flag).
 * Returns @len for the caller's bookkeeping.
 */
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	/* Clear stale interrupt flags before starting a new transfer */
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}
571 
572 static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
573 				       const struct spi_mem_op *op, u32 len)
574 {
575 	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
576 
577 	if (op->data.dir == SPI_MEM_DATA_OUT)
578 		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
579 	else
580 		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
581 }
582 
583 static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
584 				      const struct spi_mem_op *op, u32 len)
585 {
586 	struct bounce_buffer bb;
587 	unsigned int bb_flags;
588 	void *dma_buf;
589 	int ret;
590 
591 	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
592 
593 	if (op->data.dir == SPI_MEM_DATA_OUT) {
594 		dma_buf = (void *)op->data.buf.out;
595 		bb_flags = GEN_BB_READ;
596 	} else {
597 		dma_buf = (void *)op->data.buf.in;
598 		bb_flags = GEN_BB_WRITE;
599 	}
600 
601 	ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
602 	if (ret)
603 		return ret;
604 
605 	ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
606 	rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
607 	bounce_buffer_stop(&bb);
608 
609 	return ret;
610 }
611 
/*
 * Start a DMA transfer without waiting for completion; the next
 * exec_op call waits for it via sfc->last_async_size.  The caller's
 * buffer must remain valid until then.
 */
static int rockchip_sfc_xfer_data_dma_async(struct rockchip_sfc *sfc,
					    const struct spi_mem_op *op, u32 len)
{
	void *dma_buf;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		/* Make the source data visible to the DMA engine */
		flush_dcache_range((unsigned long)dma_buf,
				   (unsigned long)dma_buf + len);
	} else {
		/*
		 * NOTE(review): no dcache invalidate is performed for the
		 * read direction, here or at completion - presumably the
		 * caller handles coherency; verify against the async users.
		 */
		dma_buf = (void *)op->data.buf.in;
	}

	dev_dbg(sfc->dev, "xfer_dma_async len=%x %p\n", len, dma_buf);

	rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)dma_buf, len);
	sfc->last_async_size = len;

	return 0;
}
632 
633 static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
634 {
635 	int ret = 0;
636 	u32 status;
637 
638 	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
639 				 !(status & SFC_SR_IS_BUSY),
640 				 timeout_us);
641 	if (ret) {
642 		dev_err(sfc->dev, "wait sfc idle timeout\n");
643 		rockchip_sfc_reset(sfc);
644 
645 		ret = -EIO;
646 	}
647 
648 	return ret;
649 }
650 
/*
 * Drive the optional GPIO chip select for @mem.  A no-op when no GPIO
 * is attached to this CS or GPIO support is compiled out.
 */
static int rockchip_spi_set_cs(struct rockchip_sfc *sfc, struct spi_slave *mem, bool enable)
{
#if defined(CONFIG_DM_GPIO) && !defined(CONFIG_SPL_BUILD)
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 cs = plat->cs;

	if (!dm_gpio_is_valid(&sfc->cs_gpios[cs]))
		return 0;

	debug("%s %d %x\n", __func__, cs, enable);
	/* GPIO was requested ACTIVE_LOW, so "enable" asserts the line */
	dm_gpio_set_value(&sfc->cs_gpios[cs], enable);
#endif
	return 0;
}
665 
666 #if CONFIG_IS_ENABLED(CLK)
667 static int rockchip_sfc_exec_op_bypass(struct rockchip_sfc *sfc,
668 				       struct spi_slave *mem,
669 				       const struct spi_mem_op *op)
670 {
671 	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
672 	u32 ret;
673 
674 	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
675 	rockchip_spi_set_cs(sfc, mem, true);
676 	rockchip_sfc_xfer_setup(sfc, mem, op, len);
677 	ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
678 	if (ret != len) {
679 		dev_err(sfc->dev, "xfer data failed ret %d\n", ret);
680 
681 		return -EIO;
682 	}
683 
684 	ret = rockchip_sfc_xfer_done(sfc, 100000);
685 	rockchip_spi_set_cs(sfc, mem, false);
686 
687 	return ret;
688 }
689 
/*
 * Train the read sample delay line for @mem's chip select.
 *
 * Reads the 3-byte JEDEC ID (0x9F) once at the safe low rate as a
 * reference, then scans the delay-cell range at the target rate in
 * steps of SFC_DLL_TRANING_STEP, looking for the window in which the
 * ID reads back correctly.  A wide-enough window gets its midpoint
 * programmed; otherwise delay lines are disabled and the clock drops
 * to SFC_DLL_THRESHOLD_RATE.
 */
static void rockchip_sfc_delay_lines_tuning(struct rockchip_sfc *sfc, struct spi_slave *mem)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
						SPI_MEM_OP_NO_ADDR,
						SPI_MEM_OP_NO_DUMMY,
						SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 id[3], id_temp[3];
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u16 right, left = 0;
	u16 step = SFC_DLL_TRANING_STEP;
	bool dll_valid = false;
	u8 cs = plat->cs;

	/* Reference read at a rate that needs no delay-line tuning */
	clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
	op.data.buf.in = &id;
	rockchip_sfc_exec_op_bypass(sfc, mem, &op);
	/* All-0x00/0xFF ID means no flash answered - skip tuning */
	if ((0xFF == id[0] && 0xFF == id[1]) ||
	    (0x00 == id[0] && 0x00 == id[1])) {
		dev_dbg(sfc->dev, "no dev, dll by pass\n");
		clk_set_rate(&sfc->clk, sfc->speed[cs]);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;

		return;
	}

	/* Scan the cell range at the real target rate */
	clk_set_rate(&sfc->clk, sfc->speed[cs]);
	op.data.buf.in = &id_temp;
	for (right = 0; right <= cell_max; right += step) {
		int ret;

		rockchip_sfc_set_delay_lines(sfc, right, cs);
		rockchip_sfc_exec_op_bypass(sfc, mem, &op);
		dev_dbg(sfc->dev, "dll read flash id:%x %x %x\n",
			id_temp[0], id_temp[1], id_temp[2]);

		/* Window closes at the first mismatch after a matching run */
		ret = memcmp(&id, &id_temp, 3);
		if (dll_valid && ret) {
			right -= step;

			break;
		}
		if (!dll_valid && !ret)
			left = right;

		if (!ret)
			dll_valid = true;

		/* Add cell_max to loop */
		if (right == cell_max)
			break;
		if (right + step > cell_max)
			right = cell_max - step;
	}

	/* Accept only windows wide enough to tolerate drift */
	if (dll_valid && (right - left) >= SFC_DLL_TRANING_VALID_WINDOW) {
		if (left == 0 && right < cell_max)
			sfc->dll_cells[cs] = left + (right - left) * 2 / 5;
		else
			sfc->dll_cells[cs] = left + (right - left) / 2;
	} else {
		sfc->dll_cells[cs] = 0;
	}

	if (sfc->dll_cells[cs]) {
		dev_dbg(sfc->dev, "%d %d %d dll training success in %dMHz max_cells=%u sfc_ver=%d\n",
			left, right, sfc->dll_cells[cs], sfc->speed[cs],
			rockchip_sfc_get_max_dll_cells(sfc), rockchip_sfc_get_version(sfc));
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[cs], cs);
	} else {
		/* Training failed: run without delay lines at the safe rate */
		dev_err(sfc->dev, "%d %d dll training failed in %dMHz, reduce the speed\n",
			left, right, sfc->speed[cs]);
		rockchip_sfc_set_delay_lines(sfc, 0, cs);
		clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
		sfc->cur_speed = SFC_DLL_THRESHOLD_RATE;
		sfc->cur_real_speed = clk_get_rate(&sfc->clk);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;
	}
}
769 
770 #endif
771 
/*
 * spi-mem exec_op: the main transfer entry point.  Handles per-CS DLL
 * retuning on speed changes, completion of a previous async DMA
 * transfer, and chooses DMA vs PIO based on length and configuration.
 */
static int rockchip_sfc_exec_op(struct spi_slave *mem,
				const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	int ret;

	/* Retune delay lines when this CS's speed changed (VER4+ only) */
	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4 &&
	    sfc->cur_speed != sfc->speed[plat->cs]) {
		sfc->speed[plat->cs] = sfc->cur_speed;
#if CONFIG_IS_ENABLED(CLK)
		if (sfc->cur_real_speed > SFC_DLL_THRESHOLD_RATE)
			rockchip_sfc_delay_lines_tuning(sfc, mem);
		else
#endif
			rockchip_sfc_set_delay_lines(sfc, 0, plat->cs);
	}

	/* Wait for last async transfer finished */
	if (sfc->last_async_size) {
		rockchip_sfc_wait_for_dma_finished(sfc, sfc->last_async_size);
		sfc->last_async_size = 0;
	}
	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_spi_set_cs(sfc, mem, true);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		/* DMA only pays off above the threshold; short ops use PIO */
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			/* Async mode returns early; next op waits for completion */
			if (mem->mode & SPI_DMA_PREPARE)
				return rockchip_sfc_xfer_data_dma_async(sfc, op, len);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			return -EIO;
		}
	}

	ret = rockchip_sfc_xfer_done(sfc, 100000);
	rockchip_spi_set_cs(sfc, mem, false);

	return ret;
}
820 
821 static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
822 {
823 	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
824 
825 	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
826 
827 	return 0;
828 }
829 
/*
 * dm_spi_ops.set_speed: clamp to the controller maximum and program
 * the interface clock.  Without CLK support the rate cannot be
 * changed, so the request is only logged.
 */
static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	if (speed > sfc->max_freq)
		speed = sfc->max_freq;

	/* Already running at the requested rate */
	if (speed == sfc->cur_speed)
		return 0;

#if CONFIG_IS_ENABLED(CLK)
	int ret = clk_set_rate(&sfc->clk, speed);

	if (ret < 0) {
		dev_err(sfc->dev, "set_freq=%dHz fail, check if it's the cru support level\n",
			speed);
		return ret;
	}
	sfc->cur_speed = speed;
	/* The CRU may round the rate; remember what we actually got */
	sfc->cur_real_speed = clk_get_rate(&sfc->clk);

	dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%dHz\n",
		sfc->cur_speed, sfc->cur_real_speed);
#else
	dev_dbg(sfc->dev, "sfc failed, CLK not support\n");
#endif
	return 0;
}
858 
/* dm_spi_ops.set_mode: no per-mode setup required for this controller. */
static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}
863 
/* spi-mem operations: all transfers go through the exec_op path */
static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.adjust_op_size	= rockchip_sfc_adjust_op_size,
	.exec_op	= rockchip_sfc_exec_op,
};

/* Plain xfer is not implemented; only the spi-mem interface is wired up */
static const struct dm_spi_ops rockchip_sfc_ops = {
	.mem_ops	= &rockchip_sfc_mem_ops,
	.set_speed	= rockchip_sfc_set_speed,
	.set_mode	= rockchip_sfc_set_mode,
};

/* Generic compatible shared by all SoCs integrating this controller */
static const struct udevice_id rockchip_sfc_ids[] = {
	{ .compatible = "rockchip,sfc"},
	{},
};

U_BOOT_DRIVER(rockchip_sfc_driver) = {
	.name   = "rockchip_sfc",
	.id     = UCLASS_SPI,
	.of_match = rockchip_sfc_ids,
	.ops    = &rockchip_sfc_ops,
	.ofdata_to_platdata = rockchip_sfc_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct rockchip_sfc),
	.probe  = rockchip_sfc_probe,
};
889