xref: /rk3399_rockchip-uboot/drivers/spi/rockchip_sfc.c (revision 6c9734572f43ee93b1df1ee441df4295ca72a3b4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Rockchip Serial Flash Controller Driver
4  *
5  * Copyright (c) 2017-2021, Rockchip Inc.
6  * Author: Shawn Lin <shawn.lin@rock-chips.com>
7  *	   Chris Morgan <macromorgan@hotmail.com>
8  *	   Jon Lin <Jon.lin@rock-chips.com>
9  */
10 
11 #include <asm/io.h>
12 #include <bouncebuf.h>
13 #include <clk.h>
14 #include <dm.h>
15 #include <linux/bitops.h>
16 #include <linux/delay.h>
17 #include <linux/iopoll.h>
18 #include <spi.h>
19 #include <spi-mem.h>
20 
21 /* System control */
22 #define SFC_CTRL			0x0
23 #define  SFC_CTRL_PHASE_SEL_NEGETIVE	BIT(1)
24 #define  SFC_CTRL_CMD_BITS_SHIFT	8
25 #define  SFC_CTRL_ADDR_BITS_SHIFT	10
26 #define  SFC_CTRL_DATA_BITS_SHIFT	12
27 
28 /* Interrupt mask */
29 #define SFC_IMR				0x4
30 #define  SFC_IMR_RX_FULL		BIT(0)
31 #define  SFC_IMR_RX_UFLOW		BIT(1)
32 #define  SFC_IMR_TX_OFLOW		BIT(2)
33 #define  SFC_IMR_TX_EMPTY		BIT(3)
34 #define  SFC_IMR_TRAN_FINISH		BIT(4)
35 #define  SFC_IMR_BUS_ERR		BIT(5)
36 #define  SFC_IMR_NSPI_ERR		BIT(6)
37 #define  SFC_IMR_DMA			BIT(7)
38 
39 /* Interrupt clear */
40 #define SFC_ICLR			0x8
41 #define  SFC_ICLR_RX_FULL		BIT(0)
42 #define  SFC_ICLR_RX_UFLOW		BIT(1)
43 #define  SFC_ICLR_TX_OFLOW		BIT(2)
44 #define  SFC_ICLR_TX_EMPTY		BIT(3)
45 #define  SFC_ICLR_TRAN_FINISH		BIT(4)
46 #define  SFC_ICLR_BUS_ERR		BIT(5)
47 #define  SFC_ICLR_NSPI_ERR		BIT(6)
48 #define  SFC_ICLR_DMA			BIT(7)
49 
50 /* FIFO threshold level */
51 #define SFC_FTLR			0xc
52 #define  SFC_FTLR_TX_SHIFT		0
53 #define  SFC_FTLR_TX_MASK		0x1f
54 #define  SFC_FTLR_RX_SHIFT		8
55 #define  SFC_FTLR_RX_MASK		0x1f
56 
57 /* Reset FSM and FIFO */
58 #define SFC_RCVR			0x10
59 #define  SFC_RCVR_RESET			BIT(0)
60 
61 /* Enhanced mode */
62 #define SFC_AX				0x14
63 
64 /* Address Bit number */
65 #define SFC_ABIT			0x18
66 
67 /* Interrupt status */
68 #define SFC_ISR				0x1c
69 #define  SFC_ISR_RX_FULL_SHIFT		BIT(0)
70 #define  SFC_ISR_RX_UFLOW_SHIFT		BIT(1)
71 #define  SFC_ISR_TX_OFLOW_SHIFT		BIT(2)
72 #define  SFC_ISR_TX_EMPTY_SHIFT		BIT(3)
73 #define  SFC_ISR_TX_FINISH_SHIFT	BIT(4)
74 #define  SFC_ISR_BUS_ERR_SHIFT		BIT(5)
75 #define  SFC_ISR_NSPI_ERR_SHIFT		BIT(6)
76 #define  SFC_ISR_DMA_SHIFT		BIT(7)
77 
78 /* FIFO status */
79 #define SFC_FSR				0x20
80 #define  SFC_FSR_TX_IS_FULL		BIT(0)
81 #define  SFC_FSR_TX_IS_EMPTY		BIT(1)
82 #define  SFC_FSR_RX_IS_EMPTY		BIT(2)
83 #define  SFC_FSR_RX_IS_FULL		BIT(3)
84 #define  SFC_FSR_TXLV_MASK		GENMASK(13, 8)
85 #define  SFC_FSR_TXLV_SHIFT		8
86 #define  SFC_FSR_RXLV_MASK		GENMASK(20, 16)
87 #define  SFC_FSR_RXLV_SHIFT		16
88 
89 /* FSM status */
90 #define SFC_SR				0x24
91 #define  SFC_SR_IS_IDLE			0x0
92 #define  SFC_SR_IS_BUSY			0x1
93 
94 /* Raw interrupt status */
95 #define SFC_RISR			0x28
96 #define  SFC_RISR_RX_FULL		BIT(0)
97 #define  SFC_RISR_RX_UNDERFLOW		BIT(1)
98 #define  SFC_RISR_TX_OVERFLOW		BIT(2)
99 #define  SFC_RISR_TX_EMPTY		BIT(3)
100 #define  SFC_RISR_TRAN_FINISH		BIT(4)
101 #define  SFC_RISR_BUS_ERR		BIT(5)
102 #define  SFC_RISR_NSPI_ERR		BIT(6)
103 #define  SFC_RISR_DMA			BIT(7)
104 
105 /* Version */
106 #define SFC_VER				0x2C
107 #define  SFC_VER_3			0x3
108 #define  SFC_VER_4			0x4
109 #define  SFC_VER_5			0x5
110 #define  SFC_VER_6			0x6
111 #define  SFC_VER_8			0x8
112 
/* Delay line controller register */
114 #define SFC_DLL_CTRL0			0x3C
115 #define SFC_DLL_CTRL0_SCLK_SMP_DLL	BIT(15)
116 #define SFC_DLL_CTRL0_DLL_MAX_VER4	0xFFU
117 #define SFC_DLL_CTRL0_DLL_MAX_VER5	0x1FFU
118 
119 /* Master trigger */
120 #define SFC_DMA_TRIGGER			0x80
121 #define SFC_DMA_TRIGGER_START		1
122 
123 /* Src or Dst addr for master */
124 #define SFC_DMA_ADDR			0x84
125 
126 /* Length control register extension 32GB */
127 #define SFC_LEN_CTRL			0x88
128 #define SFC_LEN_CTRL_TRB_SEL		1
129 #define SFC_LEN_EXT			0x8C
130 
131 /* Command */
132 #define SFC_CMD				0x100
133 #define  SFC_CMD_IDX_SHIFT		0
134 #define  SFC_CMD_DUMMY_SHIFT		8
135 #define  SFC_CMD_DIR_SHIFT		12
136 #define  SFC_CMD_DIR_RD			0
137 #define  SFC_CMD_DIR_WR			1
138 #define  SFC_CMD_ADDR_SHIFT		14
139 #define  SFC_CMD_ADDR_0BITS		0
140 #define  SFC_CMD_ADDR_24BITS		1
141 #define  SFC_CMD_ADDR_32BITS		2
142 #define  SFC_CMD_ADDR_XBITS		3
143 #define  SFC_CMD_TRAN_BYTES_SHIFT	16
144 #define  SFC_CMD_CS_SHIFT		30
145 
146 /* Address */
147 #define SFC_ADDR			0x104
148 
149 /* Data */
150 #define SFC_DATA			0x108
151 
152 #define SFC_CS1_REG_OFFSET		0x200
153 
154 #define SFC_MAX_CHIPSELECT_NUM		2
155 
156 /* The SFC can transfer max 16KB - 1 at one time
157  * we set it to 15.5KB here for alignment.
158  */
159 #define SFC_MAX_IOSIZE_VER3		(512 * 31)
160 
161 #define SFC_MAX_IOSIZE_VER4		(0xFFFFFFFFU)
162 
163 /* DMA is only enabled for large data transmission */
164 #define SFC_DMA_TRANS_THRETHOLD		(0x40)
165 
166 /* Maximum clock values from datasheet suggest keeping clock value under
167  * 150MHz. No minimum or average value is suggested.
168  */
169 #define SFC_MAX_SPEED		(150 * 1000 * 1000)
170 #define SFC_DLL_THRESHOLD_RATE	(50 * 1000 * 1000)
171 
172 #define SFC_DLL_TRANING_STEP		10		/* Training step */
#define SFC_DLL_TRANING_VALID_WINDOW	80		/* Training valid DLL window */
174 
/* Per-controller driver state, stored as the bus platdata. */
struct rockchip_sfc {
	struct udevice *dev;	/* SFC controller udevice */
	void __iomem *regbase;	/* base of the memory-mapped register block */
	struct clk hclk;	/* AHB bus clock (clock index 1 in DT) */
	struct clk clk;		/* SPI interface clock (clock index 0 in DT) */
	u32 max_freq;		/* upper clamp applied in set_speed (SFC_MAX_SPEED) */
	u32 cur_speed;		/* last rate requested via set_speed, in Hz */
	u32 cur_real_speed;	/* actual rate reported back by clk_get_rate, in Hz */
	u32 speed[SFC_MAX_CHIPSELECT_NUM];	/* per-CS rate used for DLL retune checks */
	bool use_dma;		/* false when DT sets "sfc-no-dma" */
	u32 max_iosize;		/* max data bytes per transfer (IP-version dependent) */
	u16 version;		/* controller IP version read from SFC_VER */

	u32 last_async_size;	/* bytes of an in-flight async DMA op (0 = none) */
	u32 async;		/* NOTE(review): never referenced in this file */
	u32 dll_cells[SFC_MAX_CHIPSELECT_NUM];	/* trained delay-line cells per CS */
	u32 max_dll_cells;	/* NOTE(review): never referenced in this file */
};
193 
194 static int rockchip_sfc_reset(struct rockchip_sfc *sfc)
195 {
196 	int err;
197 	u32 status;
198 
199 	writel(SFC_RCVR_RESET, sfc->regbase + SFC_RCVR);
200 
201 	err = readl_poll_timeout(sfc->regbase + SFC_RCVR, status,
202 				 !(status & SFC_RCVR_RESET),
203 				 1000000);
204 	if (err)
205 		dev_err(sfc->dev, "SFC reset never finished\n");
206 
207 	/* Still need to clear the masked interrupt from RISR */
208 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
209 
210 	dev_dbg(sfc->dev, "reset\n");
211 
212 	return err;
213 }
214 
215 static u16 rockchip_sfc_get_version(struct rockchip_sfc *sfc)
216 {
217 	return  (u16)(readl(sfc->regbase + SFC_VER) & 0xffff);
218 }
219 
220 static u32 rockchip_sfc_get_max_iosize(struct rockchip_sfc *sfc)
221 {
222 	if (sfc->version >= SFC_VER_4)
223 		return SFC_MAX_IOSIZE_VER4;
224 
225 	return SFC_MAX_IOSIZE_VER3;
226 }
227 
228 static u32 rockchip_sfc_get_max_dll_cells(struct rockchip_sfc *sfc)
229 {
230 	if (sfc->version > SFC_VER_4)
231 		return SFC_DLL_CTRL0_DLL_MAX_VER5;
232 	else if (sfc->version == SFC_VER_4)
233 		return SFC_DLL_CTRL0_DLL_MAX_VER4;
234 	else
235 		return 0;
236 }
237 
238 static __maybe_unused void rockchip_sfc_set_delay_lines(struct rockchip_sfc *sfc, u16 cells, u8 cs)
239 {
240 	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
241 	u32 val = 0;
242 
243 	if (cells > cell_max)
244 		cells = cell_max;
245 
246 	if (cells)
247 		val = SFC_DLL_CTRL0_SCLK_SMP_DLL | cells;
248 
249 	writel(val, sfc->regbase + cs * SFC_CS1_REG_OFFSET + SFC_DLL_CTRL0);
250 }
251 
252 static int rockchip_sfc_init(struct rockchip_sfc *sfc)
253 {
254 	writel(0, sfc->regbase + SFC_CTRL);
255 	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4)
256 		writel(SFC_LEN_CTRL_TRB_SEL, sfc->regbase + SFC_LEN_CTRL);
257 
258 	return 0;
259 }
260 
/*
 * Parse DT properties and look up the controller clocks.
 * Returns 0 on success or the negative error from clock lookup.
 */
static int rockchip_sfc_ofdata_to_platdata(struct udevice *bus)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	sfc->regbase = dev_read_addr_ptr(bus);
	/* DMA is the default; "sfc-no-dma" in DT forces PIO transfers */
	if (ofnode_read_bool(dev_ofnode(bus), "sfc-no-dma"))
		sfc->use_dma = false;
	else
		sfc->use_dma = true;
#if CONFIG_IS_ENABLED(CLK)
	int ret;

	/* clock index 0: SPI interface clock */
	ret = clk_get_by_index(bus, 0, &sfc->clk);
	if (ret < 0) {
		printf("Could not get clock for %s: %d\n", bus->name, ret);
		return ret;
	}

	/* clock index 1: AHB bus clock */
	ret = clk_get_by_index(bus, 1, &sfc->hclk);
	if (ret < 0) {
		printf("Could not get ahb clock for %s: %d\n", bus->name, ret);
		return ret;
	}
#endif

	return 0;
}
288 
289 static int rockchip_sfc_probe(struct udevice *bus)
290 {
291 	struct rockchip_sfc *sfc = dev_get_platdata(bus);
292 	int ret;
293 
294 #if CONFIG_IS_ENABLED(CLK)
295 	ret = clk_enable(&sfc->hclk);
296 	if (ret)
297 		dev_dbg(sfc->dev, "sfc Enable ahb clock fail %s: %d\n", bus->name, ret);
298 
299 	ret = clk_enable(&sfc->clk);
300 	if (ret)
301 		dev_dbg(sfc->dev, "sfc Enable clock fail for %s: %d\n", bus->name, ret);
302 #endif
303 
304 	ret = rockchip_sfc_init(sfc);
305 	if (ret)
306 		goto err_init;
307 
308 	sfc->max_iosize = rockchip_sfc_get_max_iosize(sfc);
309 	sfc->version = rockchip_sfc_get_version(sfc);
310 	sfc->max_freq = SFC_MAX_SPEED;
311 	sfc->dev = bus;
312 
313 	return 0;
314 
315 err_init:
316 #if CONFIG_IS_ENABLED(CLK)
317 	clk_disable(&sfc->clk);
318 	clk_disable(&sfc->hclk);
319 #endif
320 
321 	return ret;
322 }
323 
324 static int rockchip_sfc_wait_txfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
325 {
326 	int ret = 0;
327 	u32 status;
328 
329 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
330 				 status & SFC_FSR_TXLV_MASK,
331 				 timeout_us);
332 	if (ret) {
333 		dev_dbg(sfc->dev, "sfc wait tx fifo timeout\n");
334 
335 		return -ETIMEDOUT;
336 	}
337 
338 	return (status & SFC_FSR_TXLV_MASK) >> SFC_FSR_TXLV_SHIFT;
339 }
340 
341 static int rockchip_sfc_wait_rxfifo_ready(struct rockchip_sfc *sfc, u32 timeout_us)
342 {
343 	int ret = 0;
344 	u32 status;
345 
346 	ret = readl_poll_timeout(sfc->regbase + SFC_FSR, status,
347 				 status & SFC_FSR_RXLV_MASK,
348 				 timeout_us);
349 	if (ret) {
350 		dev_dbg(sfc->dev, "sfc wait rx fifo timeout\n");
351 
352 		return -ETIMEDOUT;
353 	}
354 
355 	return (status & SFC_FSR_RXLV_MASK) >> SFC_FSR_RXLV_SHIFT;
356 }
357 
/*
 * Rewrite ops that have DUMMY but no ADDR phase: the SFC cannot emit
 * dummy cycles directly after the command, so the dummy bytes are
 * re-expressed as an all-ones address phase of the same length/width.
 */
static void rockchip_sfc_adjust_op_work(struct spi_mem_op *op)
{
	if (unlikely(op->dummy.nbytes && !op->addr.nbytes)) {
		/*
		 * SFC not support output DUMMY cycles right after CMD cycles, so
		 * treat it as ADDR cycles.
		 */
		op->addr.nbytes = op->dummy.nbytes;
		op->addr.buswidth = op->dummy.buswidth;
		/*
		 * NOTE(review): nine Fs = a 36-bit constant; addr.val is
		 * 64-bit so this is well-defined, but confirm 0xFFFFFFFF
		 * wasn't the intended value.
		 */
		op->addr.val = 0xFFFFFFFFF;

		op->dummy.nbytes = 0;
	}
}
372 
373 static int rockchip_sfc_wait_for_dma_finished(struct rockchip_sfc *sfc, int timeout)
374 {
375 	unsigned long tbase;
376 
377 	/* Wait for the DMA interrupt status */
378 	tbase = get_timer(0);
379 	while (!(readl(sfc->regbase + SFC_RISR) & SFC_RISR_DMA)) {
380 		if (get_timer(tbase) > timeout) {
381 			printf("dma timeout\n");
382 			rockchip_sfc_reset(sfc);
383 
384 			return -ETIMEDOUT;
385 		}
386 
387 		udelay(1);
388 	}
389 
390 	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
391 
392 	return 0;
393 }
394 
/*
 * Program CTRL/CMD/ABIT (and LEN_EXT on VER4+) for one spi-mem op, then
 * write CMD and ADDR to launch it.  @len is the data-phase byte count
 * for this transfer (0 = no data phase).  Always returns 0.
 */
static int rockchip_sfc_xfer_setup(struct rockchip_sfc *sfc,
				   struct spi_slave *mem,
				   const struct spi_mem_op *op,
				   u32 len)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 ctrl = 0, cmd = 0;

	/* set CMD */
	cmd = op->cmd.opcode;
	/* buswidth 1/2/4 maps to register field value 0/1/2 via >> 1 */
	ctrl |= ((op->cmd.buswidth >> 1) << SFC_CTRL_CMD_BITS_SHIFT);

	/* set ADDR */
	if (op->addr.nbytes) {
		if (op->addr.nbytes == 4) {
			cmd |= SFC_CMD_ADDR_32BITS << SFC_CMD_ADDR_SHIFT;
		} else if (op->addr.nbytes == 3) {
			cmd |= SFC_CMD_ADDR_24BITS << SFC_CMD_ADDR_SHIFT;
		} else {
			/* arbitrary width: (bit count - 1) goes into ABIT */
			cmd |= SFC_CMD_ADDR_XBITS << SFC_CMD_ADDR_SHIFT;
			writel(op->addr.nbytes * 8 - 1, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_ABIT);
		}

		ctrl |= ((op->addr.buswidth >> 1) << SFC_CTRL_ADDR_BITS_SHIFT);
	}

	/* set DUMMY */
	if (op->dummy.nbytes) {
		/* the field counts cycles: bytes * 8 / buswidth */
		if (op->dummy.buswidth == 4)
			cmd |= op->dummy.nbytes * 2 << SFC_CMD_DUMMY_SHIFT;
		else if (op->dummy.buswidth == 2)
			cmd |= op->dummy.nbytes * 4 << SFC_CMD_DUMMY_SHIFT;
		else
			cmd |= op->dummy.nbytes * 8 << SFC_CMD_DUMMY_SHIFT;
	}

	/* set DATA */
	if (sfc->version >= SFC_VER_4) /* Clear it if no data to transfer */
		writel(len, sfc->regbase + SFC_LEN_EXT);
	else
		cmd |= len << SFC_CMD_TRAN_BYTES_SHIFT;
	if (len) {
		if (op->data.dir == SPI_MEM_DATA_OUT)
			cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

		ctrl |= ((op->data.buswidth >> 1) << SFC_CTRL_DATA_BITS_SHIFT);
	}
	/* ops with an address but no data (e.g. erase) are issued as writes */
	if (!len && op->addr.nbytes)
		cmd |= SFC_CMD_DIR_WR << SFC_CMD_DIR_SHIFT;

	/* set the Controller */
	ctrl |= SFC_CTRL_PHASE_SEL_NEGETIVE;
	cmd |= plat->cs << SFC_CMD_CS_SHIFT;

	dev_dbg(sfc->dev, "sfc addr.nbytes=%x(x%d) dummy.nbytes=%x(x%d)\n",
		op->addr.nbytes, op->addr.buswidth,
		op->dummy.nbytes, op->dummy.buswidth);
	dev_dbg(sfc->dev, "sfc ctrl=%x cmd=%x addr=%llx len=%x\n",
		ctrl, cmd, op->addr.val, len);

	/* CTRL first, then CMD, then ADDR — keep this register order */
	writel(ctrl, sfc->regbase + plat->cs * SFC_CS1_REG_OFFSET + SFC_CTRL);
	writel(cmd, sfc->regbase + SFC_CMD);
	if (op->addr.nbytes)
		writel(op->addr.val, sfc->regbase + SFC_ADDR);

	return 0;
}
462 
463 static int rockchip_sfc_write_fifo(struct rockchip_sfc *sfc, const u8 *buf, int len)
464 {
465 	u8 bytes = len & 0x3;
466 	u32 dwords;
467 	int tx_level;
468 	u32 write_words;
469 	u32 tmp = 0;
470 
471 	dwords = len >> 2;
472 	while (dwords) {
473 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
474 		if (tx_level < 0)
475 			return tx_level;
476 		write_words = min_t(u32, tx_level, dwords);
477 		writesl(sfc->regbase + SFC_DATA, buf, write_words);
478 		buf += write_words << 2;
479 		dwords -= write_words;
480 	}
481 
482 	/* write the rest non word aligned bytes */
483 	if (bytes) {
484 		tx_level = rockchip_sfc_wait_txfifo_ready(sfc, 1000);
485 		if (tx_level < 0)
486 			return tx_level;
487 		memcpy(&tmp, buf, bytes);
488 		writel(tmp, sfc->regbase + SFC_DATA);
489 	}
490 
491 	return len;
492 }
493 
494 static int rockchip_sfc_read_fifo(struct rockchip_sfc *sfc, u8 *buf, int len)
495 {
496 	u8 bytes = len & 0x3;
497 	u32 dwords;
498 	u8 read_words;
499 	int rx_level;
500 	int tmp;
501 
502 	/* word aligned access only */
503 	dwords = len >> 2;
504 	while (dwords) {
505 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
506 		if (rx_level < 0)
507 			return rx_level;
508 		read_words = min_t(u32, rx_level, dwords);
509 		readsl(sfc->regbase + SFC_DATA, buf, read_words);
510 		buf += read_words << 2;
511 		dwords -= read_words;
512 	}
513 
514 	/* read the rest non word aligned bytes */
515 	if (bytes) {
516 		rx_level = rockchip_sfc_wait_rxfifo_ready(sfc, 1000);
517 		if (rx_level < 0)
518 			return rx_level;
519 		tmp = readl(sfc->regbase + SFC_DATA);
520 		memcpy(buf, &tmp, bytes);
521 	}
522 
523 	return len;
524 }
525 
/*
 * Arm the SFC's internal DMA master for one transfer: clear stale
 * interrupt state, program the bus address, then trigger.  Completion
 * is detected separately via rockchip_sfc_wait_for_dma_finished().
 * Returns @len.
 */
static int rockchip_sfc_fifo_transfer_dma(struct rockchip_sfc *sfc, dma_addr_t dma_buf, size_t len)
{
	writel(0xFFFFFFFF, sfc->regbase + SFC_ICLR);
	writel((u32)dma_buf, sfc->regbase + SFC_DMA_ADDR);
	writel(SFC_DMA_TRIGGER_START, sfc->regbase + SFC_DMA_TRIGGER);

	return len;
}
534 
535 static int rockchip_sfc_xfer_data_poll(struct rockchip_sfc *sfc,
536 				       const struct spi_mem_op *op, u32 len)
537 {
538 	dev_dbg(sfc->dev, "sfc xfer_poll len=%x\n", len);
539 
540 	if (op->data.dir == SPI_MEM_DATA_OUT)
541 		return rockchip_sfc_write_fifo(sfc, op->data.buf.out, len);
542 	else
543 		return rockchip_sfc_read_fifo(sfc, op->data.buf.in, len);
544 }
545 
546 static int rockchip_sfc_xfer_data_dma(struct rockchip_sfc *sfc,
547 				      const struct spi_mem_op *op, u32 len)
548 {
549 	struct bounce_buffer bb;
550 	unsigned int bb_flags;
551 	void *dma_buf;
552 	int ret;
553 
554 	dev_dbg(sfc->dev, "sfc xfer_dma len=%x\n", len);
555 
556 	if (op->data.dir == SPI_MEM_DATA_OUT) {
557 		dma_buf = (void *)op->data.buf.out;
558 		bb_flags = GEN_BB_READ;
559 	} else {
560 		dma_buf = (void *)op->data.buf.in;
561 		bb_flags = GEN_BB_WRITE;
562 	}
563 
564 	ret = bounce_buffer_start(&bb, dma_buf, len, bb_flags);
565 	if (ret)
566 		return ret;
567 
568 	ret = rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)bb.bounce_buffer, len);
569 	rockchip_sfc_wait_for_dma_finished(sfc, len * 10);
570 	bounce_buffer_stop(&bb);
571 
572 	return ret;
573 }
574 
/*
 * Fire-and-forget DMA data phase: the transfer is started but not
 * awaited here; exec_op waits on sfc->last_async_size before starting
 * the next operation.  Always returns 0.
 */
static int rockchip_sfc_xfer_data_dma_async(struct rockchip_sfc *sfc,
					    const struct spi_mem_op *op, u32 len)
{
	void *dma_buf;

	if (op->data.dir == SPI_MEM_DATA_OUT) {
		dma_buf = (void *)op->data.buf.out;
		/* make the outgoing bytes visible to the DMA engine */
		flush_dcache_range((unsigned long)dma_buf,
				   (unsigned long)dma_buf + len);
	} else {
		/*
		 * NOTE(review): no cache invalidate on the read path, here
		 * or after completion — confirm callers guarantee cache
		 * coherency for async reads.
		 */
		dma_buf = (void *)op->data.buf.in;
	}

	dev_dbg(sfc->dev, "xfer_dma_async len=%x %p\n", len, dma_buf);

	rockchip_sfc_fifo_transfer_dma(sfc, (dma_addr_t)dma_buf, len);
	sfc->last_async_size = len;

	return 0;
}
595 
596 static int rockchip_sfc_xfer_done(struct rockchip_sfc *sfc, u32 timeout_us)
597 {
598 	int ret = 0;
599 	u32 status;
600 
601 	ret = readl_poll_timeout(sfc->regbase + SFC_SR, status,
602 				 !(status & SFC_SR_IS_BUSY),
603 				 timeout_us);
604 	if (ret) {
605 		dev_err(sfc->dev, "wait sfc idle timeout\n");
606 		rockchip_sfc_reset(sfc);
607 
608 		ret = -EIO;
609 	}
610 
611 	return ret;
612 }
613 
614 #if CONFIG_IS_ENABLED(CLK)
615 static int rockchip_sfc_exec_op_bypass(struct rockchip_sfc *sfc,
616 				       struct spi_slave *mem,
617 				       const struct spi_mem_op *op)
618 {
619 	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
620 	u32 ret;
621 
622 	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
623 	rockchip_sfc_xfer_setup(sfc, mem, op, len);
624 	ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
625 	if (ret != len) {
626 		dev_err(sfc->dev, "xfer data failed ret %d\n", ret);
627 
628 		return -EIO;
629 	}
630 
631 	return rockchip_sfc_xfer_done(sfc, 100000);
632 }
633 
/*
 * Train the RX sample delay line for the chip-select behind @mem.
 *
 * Reads the flash JEDEC ID (0x9F) once at a low "safe" rate as a
 * reference, then sweeps the DLL cell count at the target rate and
 * finds the widest window [left, right] where the ID still reads back
 * correctly.  On success a point inside the window is programmed; on
 * failure the DLL is disabled and the clock drops back to
 * SFC_DLL_THRESHOLD_RATE.
 */
static void rockchip_sfc_delay_lines_tuning(struct rockchip_sfc *sfc, struct spi_slave *mem)
{
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9F, 1),
						SPI_MEM_OP_NO_ADDR,
						SPI_MEM_OP_NO_DUMMY,
						SPI_MEM_OP_DATA_IN(3, NULL, 1));
	u8 id[3], id_temp[3];
	u16 cell_max = (u16)rockchip_sfc_get_max_dll_cells(sfc);
	u16 right, left = 0;
	u16 step = SFC_DLL_TRANING_STEP;
	bool dll_valid = false;
	u8 cs = plat->cs;

	/* reference ID read at a rate low enough to not need the DLL */
	clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
	op.data.buf.in = &id;
	rockchip_sfc_exec_op_bypass(sfc, mem, &op);
	/* all-0x00 or all-0xFF means no flash answered; skip tuning */
	if ((0xFF == id[0] && 0xFF == id[1]) ||
	    (0x00 == id[0] && 0x00 == id[1])) {
		dev_dbg(sfc->dev, "no dev, dll by pass\n");
		/*
		 * NOTE(review): this restores the old rate but records the
		 * threshold rate in speed[cs] — confirm the intended rate
		 * bookkeeping for the no-device case.
		 */
		clk_set_rate(&sfc->clk, sfc->speed[cs]);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;

		return;
	}

	/* sweep the delay cells at the real target rate */
	clk_set_rate(&sfc->clk, sfc->speed[cs]);
	op.data.buf.in = &id_temp;
	for (right = 0; right <= cell_max; right += step) {
		int ret;

		rockchip_sfc_set_delay_lines(sfc, right, cs);
		rockchip_sfc_exec_op_bypass(sfc, mem, &op);
		dev_dbg(sfc->dev, "dll read flash id:%x %x %x\n",
			id_temp[0], id_temp[1], id_temp[2]);

		/*
		 * left = first matching delay, right = last matching delay
		 * (the loop backs off one step on the first mismatch after
		 * a match).
		 */
		ret = memcmp(&id, &id_temp, 3);
		if (dll_valid && ret) {
			right -= step;

			break;
		}
		if (!dll_valid && !ret)
			left = right;

		if (!ret)
			dll_valid = true;

		/* Add cell_max to loop */
		if (right == cell_max)
			break;
		if (right + step > cell_max)
			right = cell_max - step;
	}

	if (dll_valid && (right - left) >= SFC_DLL_TRANING_VALID_WINDOW) {
		/* bias toward the left edge when the window starts at 0 */
		if (left == 0 && right < cell_max)
			sfc->dll_cells[cs] = left + (right - left) * 2 / 5;
		else
			sfc->dll_cells[cs] = left + (right - left) / 2;
	} else {
		sfc->dll_cells[cs] = 0;
	}

	if (sfc->dll_cells[cs]) {
		/* NOTE(review): speed[] holds Hz but the format says MHz */
		dev_dbg(sfc->dev, "%d %d %d dll training success in %dMHz max_cells=%u sfc_ver=%d\n",
			left, right, sfc->dll_cells[cs], sfc->speed[cs],
			rockchip_sfc_get_max_dll_cells(sfc), rockchip_sfc_get_version(sfc));
		rockchip_sfc_set_delay_lines(sfc, (u16)sfc->dll_cells[cs], cs);
	} else {
		dev_err(sfc->dev, "%d %d dll training failed in %dMHz, reduce the speed\n",
			left, right, sfc->speed[cs]);
		/* fall back: DLL off, clock capped at the threshold rate */
		rockchip_sfc_set_delay_lines(sfc, 0, cs);
		clk_set_rate(&sfc->clk, SFC_DLL_THRESHOLD_RATE);
		sfc->cur_speed = SFC_DLL_THRESHOLD_RATE;
		sfc->cur_real_speed = clk_get_rate(&sfc->clk);
		sfc->speed[cs] = SFC_DLL_THRESHOLD_RATE;
	}
}
713 
714 #endif
715 
/*
 * spi-mem exec_op hook: runs one flash operation.  Retunes the RX delay
 * line when the per-CS clock rate changed (VER4+), waits out any
 * previous async DMA, then dispatches the data phase to PIO, blocking
 * DMA, or async DMA.  Returns 0 on success or a negative error.
 */
static int rockchip_sfc_exec_op(struct spi_slave *mem,
				const struct spi_mem_op *op)
{
	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
	struct dm_spi_slave_platdata *plat = dev_get_parent_platdata(mem->dev);
	u32 len = min_t(u32, op->data.nbytes, sfc->max_iosize);
	int ret;

	/* retune when the requested rate differs from the last one used
	 * on this chip-select */
	if (rockchip_sfc_get_version(sfc) >= SFC_VER_4 &&
	    sfc->cur_speed != sfc->speed[plat->cs]) {
		sfc->speed[plat->cs] = sfc->cur_speed;
#if CONFIG_IS_ENABLED(CLK)
		if (sfc->cur_real_speed > SFC_DLL_THRESHOLD_RATE)
			rockchip_sfc_delay_lines_tuning(sfc, mem);
		else
#endif
			rockchip_sfc_set_delay_lines(sfc, 0, plat->cs);
	}

	/* Wait for last async transfer finished */
	if (sfc->last_async_size) {
		rockchip_sfc_wait_for_dma_finished(sfc, sfc->last_async_size);
		sfc->last_async_size = 0;
	}
	rockchip_sfc_adjust_op_work((struct spi_mem_op *)op);
	rockchip_sfc_xfer_setup(sfc, mem, op, len);
	if (len) {
		/* small transfers stay on PIO; larger ones use DMA */
		if (likely(sfc->use_dma) && len >= SFC_DMA_TRANS_THRETHOLD) {
			if (mem->mode & SPI_DMA_PREPARE)
				return rockchip_sfc_xfer_data_dma_async(sfc, op, len);
			ret = rockchip_sfc_xfer_data_dma(sfc, op, len);
		} else {
			ret = rockchip_sfc_xfer_data_poll(sfc, op, len);
		}

		if (ret != len) {
			dev_err(sfc->dev, "xfer data failed ret %d dir %d\n", ret, op->data.dir);

			return -EIO;
		}
	}

	return rockchip_sfc_xfer_done(sfc, 100000);
}
760 
761 static int rockchip_sfc_adjust_op_size(struct spi_slave *mem, struct spi_mem_op *op)
762 {
763 	struct rockchip_sfc *sfc = dev_get_platdata(mem->dev->parent);
764 
765 	op->data.nbytes = min(op->data.nbytes, sfc->max_iosize);
766 
767 	return 0;
768 }
769 
/*
 * dm_spi_ops set_speed hook: clamp the request to max_freq and program
 * the interface clock.  cur_real_speed records the rate the CRU
 * actually delivered.  Returns 0, or the clk_set_rate error.
 */
static int rockchip_sfc_set_speed(struct udevice *bus, uint speed)
{
	struct rockchip_sfc *sfc = dev_get_platdata(bus);

	if (speed > sfc->max_freq)
		speed = sfc->max_freq;

	/* nothing to do when the rate is unchanged */
	if (speed == sfc->cur_speed)
		return 0;

#if CONFIG_IS_ENABLED(CLK)
	int ret = clk_set_rate(&sfc->clk, speed);

	if (ret < 0) {
		dev_err(sfc->dev, "set_freq=%dHz fail, check if it's the cru support level\n",
			speed);
		return ret;
	}
	sfc->cur_speed = speed;
	sfc->cur_real_speed = clk_get_rate(&sfc->clk);

	dev_dbg(sfc->dev, "set_freq=%dHz real_freq=%dHz\n",
		sfc->cur_speed, sfc->cur_real_speed);
#else
	dev_dbg(sfc->dev, "sfc failed, CLK not support\n");
#endif
	return 0;
}
798 
/* dm_spi_ops set_mode hook: nothing to program; the op setup path
 * configures bus widths per transfer. */
static int rockchip_sfc_set_mode(struct udevice *bus, uint mode)
{
	return 0;
}
803 
/* spi-mem hooks: all flash I/O goes through exec_op */
static const struct spi_controller_mem_ops rockchip_sfc_mem_ops = {
	.adjust_op_size	= rockchip_sfc_adjust_op_size,
	.exec_op	= rockchip_sfc_exec_op,
};
808 
/* DM SPI ops; plain xfer is unimplemented — transfers use mem_ops */
static const struct dm_spi_ops rockchip_sfc_ops = {
	.mem_ops	= &rockchip_sfc_mem_ops,
	.set_speed	= rockchip_sfc_set_speed,
	.set_mode	= rockchip_sfc_set_mode,
};
814 
/* generic compatible shared by SFC-equipped Rockchip SoCs */
static const struct udevice_id rockchip_sfc_ids[] = {
	{ .compatible = "rockchip,sfc"},
	{},
};
819 
/* Driver-model registration; platdata doubles as driver state. */
U_BOOT_DRIVER(rockchip_sfc_driver) = {
	.name   = "rockchip_sfc",
	.id     = UCLASS_SPI,
	.of_match = rockchip_sfc_ids,
	.ops    = &rockchip_sfc_ops,
	.ofdata_to_platdata = rockchip_sfc_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct rockchip_sfc),
	.probe  = rockchip_sfc_probe,
};
829