xref: /rk3399_rockchip-uboot/drivers/spi/rockchip_sfc.c (revision 257c8a70660eec65519a481f1dd33e4e060766c8)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Rockchip Serial Flash Controller Driver
4  *
5  * Copyright (c) 2017-2021, Rockchip Inc.
6  * Author: Shawn Lin <shawn.lin@rock-chips.com>
7  *	   Chris Morgan <macromorgan@hotmail.com>
8  *	   Jon Lin <Jon.lin@rock-chips.com>
9  */
10 
11 #include <asm/io.h>
12 #include <bouncebuf.h>
13 #include <clk.h>
14 #include <dm.h>
15 #include <linux/bitops.h>
16 #include <linux/delay.h>
17 #include <linux/iopoll.h>
18 #include <spi.h>
19 #include <spi-mem.h>
20 
/* System control */
#define SFC_CTRL			0x0
#define  SFC_CTRL_PHASE_SEL_NEGETIVE	BIT(1)
#define  SFC_CTRL_CMD_BITS_SHIFT	8
#define  SFC_CTRL_ADDR_BITS_SHIFT	10
#define  SFC_CTRL_DATA_BITS_SHIFT	12

/* Interrupt mask */
#define SFC_IMR				0x4
#define  SFC_IMR_RX_FULL		BIT(0)
#define  SFC_IMR_RX_UFLOW		BIT(1)
#define  SFC_IMR_TX_OFLOW		BIT(2)
#define  SFC_IMR_TX_EMPTY		BIT(3)
#define  SFC_IMR_TRAN_FINISH		BIT(4)
#define  SFC_IMR_BUS_ERR		BIT(5)
#define  SFC_IMR_NSPI_ERR		BIT(6)
#define  SFC_IMR_DMA			BIT(7)

/* Interrupt clear */
#define SFC_ICLR			0x8
#define  SFC_ICLR_RX_FULL		BIT(0)
#define  SFC_ICLR_RX_UFLOW		BIT(1)
#define  SFC_ICLR_TX_OFLOW		BIT(2)
#define  SFC_ICLR_TX_EMPTY		BIT(3)
#define  SFC_ICLR_TRAN_FINISH		BIT(4)
#define  SFC_ICLR_BUS_ERR		BIT(5)
#define  SFC_ICLR_NSPI_ERR		BIT(6)
#define  SFC_ICLR_DMA			BIT(7)

/* FIFO threshold level */
#define SFC_FTLR			0xc
#define  SFC_FTLR_TX_SHIFT		0
#define  SFC_FTLR_TX_MASK		0x1f
#define  SFC_FTLR_RX_SHIFT		8
#define  SFC_FTLR_RX_MASK		0x1f

/* Reset FSM and FIFO */
#define SFC_RCVR			0x10
#define  SFC_RCVR_RESET			BIT(0)

/* Enhanced mode */
#define SFC_AX				0x14

/* Address Bit number */
#define SFC_ABIT			0x18

/* Interrupt status */
#define SFC_ISR				0x1c
#define  SFC_ISR_RX_FULL_SHIFT		BIT(0)
#define  SFC_ISR_RX_UFLOW_SHIFT		BIT(1)
#define  SFC_ISR_TX_OFLOW_SHIFT		BIT(2)
#define  SFC_ISR_TX_EMPTY_SHIFT		BIT(3)
#define  SFC_ISR_TX_FINISH_SHIFT	BIT(4)
#define  SFC_ISR_BUS_ERR_SHIFT		BIT(5)
#define  SFC_ISR_NSPI_ERR_SHIFT		BIT(6)
#define  SFC_ISR_DMA_SHIFT		BIT(7)

/* FIFO status */
#define SFC_FSR				0x20
#define  SFC_FSR_TX_IS_FULL		BIT(0)
#define  SFC_FSR_TX_IS_EMPTY		BIT(1)
#define  SFC_FSR_RX_IS_EMPTY		BIT(2)
#define  SFC_FSR_RX_IS_FULL		BIT(3)
#define  SFC_FSR_TXLV_MASK		GENMASK(13, 8)
#define  SFC_FSR_TXLV_SHIFT		8
#define  SFC_FSR_RXLV_MASK		GENMASK(20, 16)
#define  SFC_FSR_RXLV_SHIFT		16

/* FSM status */
#define SFC_SR				0x24
#define  SFC_SR_IS_IDLE			0x0
#define  SFC_SR_IS_BUSY			0x1

/* Raw interrupt status */
#define SFC_RISR			0x28
#define  SFC_RISR_RX_FULL		BIT(0)
#define  SFC_RISR_RX_UNDERFLOW		BIT(1)
#define  SFC_RISR_TX_OVERFLOW		BIT(2)
#define  SFC_RISR_TX_EMPTY		BIT(3)
#define  SFC_RISR_TRAN_FINISH		BIT(4)
#define  SFC_RISR_BUS_ERR		BIT(5)
#define  SFC_RISR_NSPI_ERR		BIT(6)
#define  SFC_RISR_DMA			BIT(7)

/* Version */
#define SFC_VER				0x2C
#define  SFC_VER_3			0x3
#define  SFC_VER_4			0x4
#define  SFC_VER_5			0x5
#define  SFC_VER_6			0x6
#define  SFC_VER_8			0x8

/* Delay line controller register */
#define SFC_DLL_CTRL0			0x3C
#define SFC_DLL_CTRL0_SCLK_SMP_DLL	BIT(15)
#define SFC_DLL_CTRL0_DLL_MAX_VER4	0xFFU
#define SFC_DLL_CTRL0_DLL_MAX_VER5	0x1FFU

/* Master trigger */
#define SFC_DMA_TRIGGER			0x80
#define SFC_DMA_TRIGGER_START		1

/* Src or Dst addr for master */
#define SFC_DMA_ADDR			0x84

/* Length control register extension 32GB */
#define SFC_LEN_CTRL			0x88
#define SFC_LEN_CTRL_TRB_SEL		1
#define SFC_LEN_EXT			0x8C

/* Command */
#define SFC_CMD				0x100
#define  SFC_CMD_IDX_SHIFT		0
#define  SFC_CMD_DUMMY_SHIFT		8
#define  SFC_CMD_DIR_SHIFT		12
#define  SFC_CMD_DIR_RD			0
#define  SFC_CMD_DIR_WR			1
#define  SFC_CMD_ADDR_SHIFT		14
#define  SFC_CMD_ADDR_0BITS		0
#define  SFC_CMD_ADDR_24BITS		1
#define  SFC_CMD_ADDR_32BITS		2
#define  SFC_CMD_ADDR_XBITS		3
#define  SFC_CMD_TRAN_BYTES_SHIFT	16
#define  SFC_CMD_CS_SHIFT		30

/* Address */
#define SFC_ADDR			0x104

/* Data */
#define SFC_DATA			0x108

/* Chip-select 1 has its own copy of the per-CS registers at this offset */
#define SFC_CS1_REG_OFFSET		0x200

#define SFC_MAX_CHIPSELECT_NUM		2

/* The SFC can transfer max 16KB - 1 at one time
 * we set it to 15.5KB here for alignment.
 */
#define SFC_MAX_IOSIZE_VER3		(512 * 31)

#define SFC_MAX_IOSIZE_VER4		(0xFFFFFFFFU)

/* DMA is only enabled for large data transmission */
#define SFC_DMA_TRANS_THRETHOLD		(0x40)

/* Maximum clock values from datasheet suggest keeping clock value under
 * 150MHz. No minimum or average value is suggested.
 */
#define SFC_MAX_SPEED		(150 * 1000 * 1000)
#define SFC_DLL_THRESHOLD_RATE	(50 * 1000 * 1000)

#define SFC_DLL_TRANING_STEP		10		/* Training step */
#define SFC_DLL_TRANING_VALID_WINDOW	80		/* Training valid DLL window */
174 
/* Per-controller driver state; allocated as the bus platdata. */
struct rockchip_sfc {
	struct udevice *dev;		/* the SFC bus device */
	void __iomem *regbase;		/* mapped controller register base */
	struct clk hclk;		/* AHB bus clock */
	struct clk clk;			/* SFC interface clock */
	u32 max_freq;			/* upper clamp for requested speed (Hz) */
	u32 cur_speed;			/* last speed requested via set_speed (Hz) */
	u32 cur_real_speed;		/* actual rate reported by the clock driver (Hz) */
	u32 speed[SFC_MAX_CHIPSELECT_NUM];	/* per-CS speed, used to detect changes for DLL tuning */
	bool use_dma;			/* false when DT has "sfc-no-dma" */
	u32 max_iosize;			/* max bytes per transfer for this IP version */
	u16 version;			/* controller IP version read from SFC_VER */

	u32 last_async_size;		/* bytes of an async DMA transfer still in flight */
	u32 async;			/* NOTE(review): never written in this file — confirm external users */
	u32 dll_cells[SFC_MAX_CHIPSELECT_NUM];	/* trained delay-line cell count per CS */
	u32 max_dll_cells;		/* NOTE(review): not referenced in this file — confirm external users */
};
193 
/*
 * Reset the SFC state machine and FIFOs, then clear latched interrupt
 * status. Returns 0 on success or the readl_poll_timeout() error if the
 * controller never de-asserts the reset bit.
 */
static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
{
	int err;
	u32 status;

	writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);

	/* Hardware clears SFC_RCVR_RESET when the reset has completed */
	err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
				 !(status & SFC_RCVR_RESET),
				 1000000);
	if (err)
		dev_err(sfc->dev, "SFC reset never finished\n");

	/* Still need to clear the masked interrupt from RISR */
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);

	dev_dbg(sfc->dev, "reset\n");

	return err;
}
214 
215 static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
216 {
217 	return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
218 }
219 
220 static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
221 {
222 	if (sfc->version >= SFC_VER_4)
223 		return SFC_MAX_IOSIZE_VER4;
224 
225 	return SFC_MAX_IOSIZE_VER3;
226 }
227 
228 static u32 rockchip_sfc_get_max_dll_cells(struct rockchip_sfc *sfc)
229 {
230 	if (sfc->version > SFC_VER_4)
231 		return SFC_DLL_CTRL0_DLL_MAX_VER5;
232 	else if (sfc->version == SFC_VER_4)
233 		return SFC_DLL_CTRL0_DLL_MAX_VER4;
234 	else
235 		return 0;
236 }
237 
238 static __maybe_unused void rockchip_sfc_set_delay_lines(struct rockchip_sfc *sfc, u16 cells, u8 cs)
239 {
240 	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
241 	u32 val = 0;
242 
243 	if (cells > cell_max)
244 		cells = cell_max;
245 
246 	if (cells)
247 		val = SFC_DLL_CTRL0_SCLK_SMP_DLL | cells;
248 
249 	writel(val, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_DLL_CTRL0);
250 }
251 
/*
 * Put the controller into a known default state: clear SFC_CTRL and,
 * on version 4+ IP, select SFC_LEN_EXT as the transfer-length source.
 */
static int rockchip_sfc_init(struct rockchip_sfc *sfc)
{
	writel(0, sfc->regbase + SFC_CTRL);
	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
		writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);

	return 0;
}
260 
261 static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
262 {
263 	struct rockchip_sfc *sfc = dev_get_platdata(bus);
264 
265 	sfc->regbase = dev_read_addr_ptr(bus);
266 	if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
267 		sfc->use_dma = false;
268 	else
269 		sfc->use_dma = true;
270 #if CONFIG_IS_ENABLED(CLK)
271 	int ret;
272 
273 	ret = clk_get_by_index(bus, 0, &sfc->clk);
274 	if (ret < 0) {
275 		printf("Could not get clock for %s: %d\n", bus->name, ret);
276 		return ret;
277 	}
278 
279 	ret = clk_get_by_index(bus, 1, &sfc->hclk);
280 	if (ret < 0) {
281 		printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
282 		return ret;
283 	}
284 #endif
285 
286 	return 0;
287 }
288 
289 static int rockchip_sfc_probe(struct udevice *bus)
290 {
291 	struct rockchip_sfc *sfc = dev_get_platdata(bus);
292 	int ret;
293 
294 #if CONFIG_IS_ENABLED(CLK)
295 	ret = clk_enable(&sfc->hclk);
296 	if (ret)
297 		dev_dbg(sfc->dev, "sfc Enable ahb clock fail %s: %d\n", bus->name, ret);
298 
299 	ret = clk_enable(&sfc->clk);
300 	if (ret)
301 		dev_dbg(sfc->dev, "sfc Enable clock fail for %s: %d\n", bus->name, ret);
302 #endif
303 	/* Initial the version at the first */
304 	sfc->version = rockchip_sfc_get_version(sfc);
305 
306 	ret = rockchip_sfc_init(sfc);
307 	if (ret)
308 		goto err_init;
309 
310 	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
311 	sfc->max_freq = SFC_MAX_SPEED;
312 	sfc->dev = bus;
313 
314 	return 0;
315 
316 err_init:
317 #if CONFIG_IS_ENABLED(CLK)
318 	clk_disable(&sfc->clk);
319 	clk_disable(&sfc->hclk);
320 #endif
321 
322 	return ret;
323 }
324 
325 static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
326 {
327 	int ret = 0;
328 	u32 status;
329 
330 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
331 				 status & SFC_FSR_TXLV_MASK,
332 				 timeout_us);
333 	if (ret) {
334 		dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
335 
336 		return -ETIMEDOUT;
337 	}
338 
339 	return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
340 }
341 
342 static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
343 {
344 	int ret = 0;
345 	u32 status;
346 
347 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
348 				 status & SFC_FSR_RXLV_MASK,
349 				 timeout_us);
350 	if (ret) {
351 		dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
352 
353 		return -ETIMEDOUT;
354 	}
355 
356 	return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
357 }
358 
/*
 * Rework a cmd+dummy op (no address phase) into a cmd+addr op, since
 * the controller cannot emit DUMMY cycles directly after CMD cycles.
 * The all-ones address stands in for the dummy clocks on the bus.
 */
static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
{
	if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
		/*
		 * SFC not support output DUMMY cycles right after CMD cycles, so
		 * treat it as ADDR cycles.
		 */
		op->addr.nbytes = op->dummy.nbytes;
		op->addr.buswidth = op->dummy.buswidth;
		/*
		 * NOTE(review): this constant has nine Fs (36 bits). Only the
		 * low addr.nbytes bytes are ever shifted out on the bus, so the
		 * extra nibble looks harmless — confirm 0xFFFFFFFF was intended.
		 */
		op->addr.val = 0xFFFFFFFFF;

		op->dummy.nbytes = 0;
	}
}
373 
374 static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
375 {
376 	unsigned long tbase;
377 
378 	/* Wait for the DMA interrupt status */
379 	tbase = get_timer(0);
380 	while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
381 		if (get_timer(tbase) > timeout) {
382 			printf("dma timeout\n");
383 			rockchip_sfc_reset(sfc);
384 
385 			return -ETIMEDOUT;
386 		}
387 
388 		udelay(1);
389 	}
390 
391 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
392 
393 	return 0;
394 }
395 
/*
 * Program the controller for one spi-mem operation: command opcode,
 * address width/phase, dummy cycles, data length and direction, and
 * the bus widths for each phase. The register write order at the end
 * matters: CTRL first, then CMD (which latches the transfer), then
 * the address value.
 */
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
				   struct spi_slave *mem,
				   const struct spi_mem_op *op,
				   u32 len)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 ctrl = 0, cmd = 0;

	/* set CMD */
	cmd = op->cmd.opcode;
	/* buswidth 1/2/4 maps to register field value 0/1/2 */
	ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);

	/* set ADDR */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4) {
			cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
		} else if (op->addr.nbytes == 3) {
			cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
		} else {
			/* Arbitrary address widths use the per-CS ABIT register */
			cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
			writel(op->addr.nbytes * 8 - 1, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
		}

		ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
	}

	/* set DUMMY: the field counts clock cycles, so scale by buswidth */
	if (op->dummy.nbytes) {
		if (op->dummy.buswidth == 4)
			cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
		else if (op->dummy.buswidth == 2)
			cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
		else
			cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
	}

	/* set DATA */
	if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
		writel(len, sfc->regbase + SFC_LEN_EXT);
	else
		cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
	if (len) {
		if (op->data.dir == SPI_MEM_DATA_OUT)
			cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

		ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
	}
	/* Address-only ops (e.g. erase) are issued as writes */
	if (!len && op->addr.nbytes)
		cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

	/* set the Controller */
	ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
	cmd |= plat->cs << SFC_CMD_CS_SHIFT;

	dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
		op->addr.nbytes, op->addr.buswidth,
		op->dummy.nbytes, op->dummy.buswidth);
	dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
		ctrl, cmd, op->addr.val, len);

	writel(ctrl, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
	writel(cmd, sfc->regbase + SFC_CMD);
	if (op->addr.nbytes)
		writel(op->addr.val, sfc->regbase + SFC_ADDR);

	return 0;
}
463 
464 static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
465 {
466 	u8 bytes = len & 0x3;
467 	u32 dwords;
468 	int tx_level;
469 	u32 write_words;
470 	u32 tmp = 0;
471 
472 	dwords = len >> 2;
473 	while (dwords) {
474 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
475 		if (tx_level < 0)
476 			return tx_level;
477 		write_words = min_t(u32, tx_level, dwords);
478 		writesl(sfc->regbase + SFC_DATA, buf, write_words);
479 		buf += write_words << 2;
480 		dwords -= write_words;
481 	}
482 
483 	/* write the rest non word aligned bytes */
484 	if (bytes) {
485 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
486 		if (tx_level < 0)
487 			return tx_level;
488 		memcpy(&tmp, buf, bytes);
489 		writel(tmp, sfc->regbase + SFC_DATA);
490 	}
491 
492 	return len;
493 }
494 
495 static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
496 {
497 	u8 bytes = len & 0x3;
498 	u32 dwords;
499 	u8 read_words;
500 	int rx_level;
501 	int tmp;
502 
503 	/* word aligned access only */
504 	dwords = len >> 2;
505 	while (dwords) {
506 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
507 		if (rx_level < 0)
508 			return rx_level;
509 		read_words = min_t(u32, rx_level, dwords);
510 		readsl(sfc->regbase + SFC_DATA, buf, read_words);
511 		buf += read_words << 2;
512 		dwords -= read_words;
513 	}
514 
515 	/* read the rest non word aligned bytes */
516 	if (bytes) {
517 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
518 		if (rx_level < 0)
519 			return rx_level;
520 		tmp = readl(sfc->regbase + SFC_DATA);
521 		memcpy(buf, &tmp, bytes);
522 	}
523 
524 	return len;
525 }
526 
/*
 * Kick off a controller-mastered DMA transfer to/from @dma_buf.
 * Write order matters: acknowledge stale interrupts, program the bus
 * address, then trigger. Completion is signalled via SFC_RISR_DMA and
 * must be waited for by the caller. Returns @len.
 */
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}
535 
536 static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
537 				       const struct spi_mem_op *op, u32 len)
538 {
539 	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
540 
541 	if (op->data.dir == SPI_MEM_DATA_OUT)
542 		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
543 	else
544 		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
545 }
546 
547 static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
548 				      const struct spi_mem_op *op, u32 len)
549 {
550 	struct bounce_buffer bb;
551 	unsigned int bb_flags;
552 	void *dma_buf;
553 	int ret;
554 
555 	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
556 
557 	if (op->data.dir == SPI_MEM_DATA_OUT) {
558 		dma_buf = (void *)op->data.buf.out;
559 		bb_flags = GEN_BB_READ;
560 	} else {
561 		dma_buf = (void *)op->data.buf.in;
562 		bb_flags = GEN_BB_WRITE;
563 	}
564 
565 	ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
566 	if (ret)
567 		return ret;
568 
569 	ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
570 	rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
571 	bounce_buffer_stop(&bb);
572 
573 	return ret;
574 }
575 
/*
 * Fire-and-forget DMA data phase: start the transfer and record its
 * size in last_async_size; rockchip_sfc_exec_op() waits for it before
 * issuing the next operation.
 * NOTE(review): no dcache invalidate is performed here for the
 * SPI_MEM_DATA_IN direction — presumably the SPI_DMA_PREPARE consumer
 * handles cache maintenance; confirm against the callers.
 */
static int rockchip_sfc_xfer_data_dma_async(struct rockchip_sfc *sfc,
					    const struct spi_mem_op *op, u32 len)
{
	void *dma_buf;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		/* Make the CPU-written source data visible to the DMA master */
		flush_dcache_range((unsigned long)dma_buf,
				   (unsigned long)dma_buf + len);
	} else {
		dma_buf = (void *)op->data.buf.in;
	}

	dev_dbg(sfc->dev, "xfer_dma_async len=%x %p\n", len, dma_buf);

	rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)dma_buf, len);
	sfc->last_async_size = len;

	return 0;
}
596 
597 static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
598 {
599 	int ret = 0;
600 	u32 status;
601 
602 	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
603 				 !(status & SFC_SR_IS_BUSY),
604 				 timeout_us);
605 	if (ret) {
606 		dev_err(sfc->dev, "wait sfc idle timeout\n");
607 		rockchip_sfc_reset(sfc);
608 
609 		ret = -EIO;
610 	}
611 
612 	return ret;
613 }
614 
615 #if CONFIG_IS_ENABLED(CLK)
616 static int rockchip_sfc_exec_op_bypass(struct rockchip_sfc *sfc,
617 				       struct spi_slave *mem,
618 				       const struct spi_mem_op *op)
619 {
620 	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
621 	u32 ret;
622 
623 	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
624 	rockchip_sfc_xfer_setup(sfc, mem, op, len);
625 	ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
626 	if (ret != len) {
627 		dev_err(sfc->dev, "xfer data failed ret %d\n", ret);
628 
629 		return -EIO;
630 	}
631 
632 	return rockchip_sfc_xfer_done(sfc, 100000);
633 }
634 
/*
 * Train the sample delay line for the slave on @mem's chip-select.
 *
 * The reference JEDEC ID (0x9F) is first read at the safe threshold
 * rate; then, at the target rate, the delay-cell count is swept in
 * SFC_DLL_TRANING_STEP increments to find the window [left, right]
 * where the ID reads back correctly. A sufficiently wide window picks
 * a cell value inside it; otherwise tuning fails and the clock is
 * dropped back to the threshold rate.
 */
static void rockchip_sfc_delay_lines_tuning(struct rockchip_sfc *sfc, struct spi_slave *mem)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
						SPI_MEM_OP_NO_ADDR,
						SPI_MEM_OP_NO_DUMMY,
						SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 id[3], id_temp[3];
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u16 right, left = 0;
	u16 step = SFC_DLL_TRANING_STEP;
	bool dll_valid = false;
	u8 cs = plat->cs;

	/* Read the reference ID at a rate known not to need the DLL */
	clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
	op.data.buf.in = &id;
	rockchip_sfc_exec_op_bypass(sfc, mem, &op);
	/* All-ones or all-zeros means no device answered: skip tuning */
	if ((0xFF == id[0] && 0xFF == id[1]) ||
	    (0x00 == id[0] && 0x00 == id[1])) {
		dev_dbg(sfc->dev, "no dev, dll by pass\n");
		clk_set_rate(&sfc->clk, sfc->speed[cs]);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;

		return;
	}

	/* Sweep the delay cells at the target rate */
	clk_set_rate(&sfc->clk, sfc->speed[cs]);
	op.data.buf.in = &id_temp;
	for (right = 0; right <= cell_max; right += step) {
		int ret;

		rockchip_sfc_set_delay_lines(sfc, right, cs);
		rockchip_sfc_exec_op_bypass(sfc, mem, &op);
		dev_dbg(sfc->dev, "dll read flash id:%x %x %x\n",
			id_temp[0], id_temp[1], id_temp[2]);

		ret = memcmp(&id, &id_temp, 3);
		/* First mismatch after a valid read marks the right edge */
		if (dll_valid && ret) {
			right -= step;

			break;
		}
		/* First match marks the left edge of the valid window */
		if (!dll_valid && !ret)
			left = right;

		if (!ret)
			dll_valid = true;

		/* Add cell_max to loop */
		if (right == cell_max)
			break;
		if (right + step > cell_max)
			right = cell_max - step;
	}

	if (dll_valid && (right - left) >= SFC_DLL_TRANING_VALID_WINDOW) {
		/* Bias toward the left edge when the window starts at 0 */
		if (left == 0 && right < cell_max)
			sfc->dll_cells[cs] = left + (right - left) * 2 / 5;
		else
			sfc->dll_cells[cs] = left + (right - left) / 2;
	} else {
		sfc->dll_cells[cs] = 0;
	}

	if (sfc->dll_cells[cs]) {
		dev_dbg(sfc->dev, "%d %d %d dll training success in %dMHz max_cells=%u sfc_ver=%d\n",
			left, right, sfc->dll_cells[cs], sfc->speed[cs],
			rockchip_sfc_get_max_dll_cells(sfc), rockchip_sfc_get_version(sfc));
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[cs], cs);
	} else {
		/* Tuning failed: disable the DLL and fall back to a safe rate */
		dev_err(sfc->dev, "%d %d dll training failed in %dMHz, reduce the speed\n",
			left, right, sfc->speed[cs]);
		rockchip_sfc_set_delay_lines(sfc, 0, cs);
		clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
		sfc->cur_speed = SFC_DLL_THRESHOLD_RATE;
		sfc->cur_real_speed = clk_get_rate(&sfc->clk);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;
	}
}
714 
715 #endif
716 
/*
 * spi-mem exec_op handler: run one command/address/dummy/data sequence.
 * On version 4+ IP a per-CS speed change triggers delay-line retuning.
 * Large transfers use DMA (asynchronously when the slave requested
 * SPI_DMA_PREPARE); small ones are polled through the FIFOs.
 * Returns 0 on success or a negative error code.
 */
static int rockchip_sfc_exec_op(struct spi_slave *mem,
				const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	int ret;

	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4 &&
	    sfc->cur_speed != sfc->speed[plat->cs]) {
		sfc->speed[plat->cs] = sfc->cur_speed;
#if CONFIG_IS_ENABLED(CLK)
		/* Only tune when the actual rate is above the DLL threshold */
		if (sfc->cur_real_speed > SFC_DLL_THRESHOLD_RATE)
			rockchip_sfc_delay_lines_tuning(sfc, mem);
		else
#endif
			rockchip_sfc_set_delay_lines(sfc, 0, plat->cs);
	}

	/* Wait for last async transfer finished */
	if (sfc->last_async_size) {
		rockchip_sfc_wait_for_dma_finished(sfc, sfc->last_async_size);
		sfc->last_async_size = 0;
	}
	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			/* Async path returns immediately; completion is deferred */
			if (mem->mode & SPI_DMA_PREPARE)
				return rockchip_sfc_xfer_data_dma_async(sfc, op, len);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			return -EIO;
		}
	}

	return rockchip_sfc_xfer_done(sfc, 100000);
}
761 
762 static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
763 {
764 	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
765 
766 	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
767 
768 	return 0;
769 }
770 
/*
 * dm_spi_ops set_speed handler: clamp the requested rate to the
 * controller maximum and program the SFC interface clock.
 * cur_real_speed records the rate actually achieved, which exec_op
 * uses to decide whether delay-line tuning is required.
 */
static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	if (speed > sfc->max_freq)
		speed = sfc->max_freq;

	if (speed == sfc->cur_speed)
		return 0;

#if CONFIG_IS_ENABLED(CLK)
	int ret = clk_set_rate(&sfc->clk, speed);

	if (ret < 0) {
		dev_err(sfc->dev, "set_freq=%dHz fail, check if it's the cru support level\n",
			speed);
		return ret;
	}
	sfc->cur_speed = speed;
	sfc->cur_real_speed = clk_get_rate(&sfc->clk);

	dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%dHz\n",
		sfc->cur_speed, sfc->cur_real_speed);
#else
	dev_dbg(sfc->dev, "sfc failed, CLK not support\n");
#endif
	return 0;
}
799 
/*
 * dm_spi_ops set_mode handler: the controller configures the bus per
 * operation in rockchip_sfc_xfer_setup(), so nothing to do here.
 */
static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}
804 
/* spi-mem operations exposed to the SPI flash layers */
static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.adjust_op_size	= rockchip_sfc_adjust_op_size,
	.exec_op	= rockchip_sfc_exec_op,
};

/* Driver-model SPI bus operations */
static const struct dm_spi_ops rockchip_sfc_ops = {
	.mem_ops	= &rockchip_sfc_mem_ops,
	.set_speed	= rockchip_sfc_set_speed,
	.set_mode	= rockchip_sfc_set_mode,
};

static const struct udevice_id rockchip_sfc_ids[] = {
	{ .compatible = "rockchip,sfc"},
	{},
};

U_BOOT_DRIVER(rockchip_sfc_driver) = {
	.name   = "rockchip_sfc",
	.id     = UCLASS_SPI,
	.of_match = rockchip_sfc_ids,
	.ops    = &rockchip_sfc_ops,
	.ofdata_to_platdata = rockchip_sfc_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct rockchip_sfc),
	.probe  = rockchip_sfc_probe,
};
830