// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI QSPI driver
 *
 * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com
 * Author: Sourav Poddar <sourav.poddar@ti.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/sizes.h>

#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

struct ti_qspi_regs {
	u32 clkctrl;
};

struct ti_qspi {
	struct completion	transfer_complete;

	/* list synchronization */
	struct mutex            list_lock;

	struct spi_master	*master;
	void __iomem            *base;
	void __iomem            *mmap_base;
	size_t			mmap_size;
	struct regmap		*ctrl_base;
	unsigned int		ctrl_reg;
	struct clk		*fclk;
	struct device           *dev;

	struct ti_qspi_regs     ctx_reg;

	dma_addr_t		mmap_phys_base;
	dma_addr_t		rx_bb_dma_addr;
	void			*rx_bb_addr;
	struct dma_chan		*rx_chan;

	u32 spi_max_frequency;
	u32 cmd;
	u32 dc;

	bool mmap_enabled;
	int current_cs;
};

#define QSPI_PID			(0x0)
#define QSPI_SYSCONFIG			(0x10)
#define QSPI_SPI_CLOCK_CNTRL_REG	(0x40)
#define QSPI_SPI_DC_REG			(0x44)
#define QSPI_SPI_CMD_REG		(0x48)
#define QSPI_SPI_STATUS_REG		(0x4c)
#define QSPI_SPI_DATA_REG		(0x50)
#define QSPI_SPI_SETUP_REG(n)		((0x54 + 4 * n))
#define QSPI_SPI_SWITCH_REG		(0x64)
#define QSPI_SPI_DATA_REG_1		(0x68)
#define QSPI_SPI_DATA_REG_2		(0x6c)
#define QSPI_SPI_DATA_REG_3		(0x70)

#define QSPI_COMPLETION_TIMEOUT		msecs_to_jiffies(2000)

/* Clock Control */
#define QSPI_CLK_EN			(1 << 31)
#define QSPI_CLK_DIV_MAX		0xffff

/* Command */
#define QSPI_EN_CS(n)			(n << 28)
#define QSPI_WLEN(n)			((n - 1) << 19)
#define QSPI_3_PIN			(1 << 18)
#define QSPI_RD_SNGL			(1 << 16)
#define QSPI_WR_SNGL			(2 << 16)
#define QSPI_RD_DUAL			(3 << 16)
#define QSPI_RD_QUAD			(7 << 16)
#define QSPI_INVAL			(4 << 16)
#define QSPI_FLEN(n)			((n - 1) << 0)
#define QSPI_WLEN_MAX_BITS		128
#define QSPI_WLEN_MAX_BYTES		16
#define QSPI_WLEN_MASK			QSPI_WLEN(QSPI_WLEN_MAX_BITS)

/* STATUS REGISTER */
#define BUSY				0x01
#define WC				0x02

/* Device Control */
#define QSPI_DD(m, n)			(m << (3 + n * 8))
#define QSPI_CKPHA(n)			(1 << (2 + n * 8))
#define QSPI_CSPOL(n)			(1 << (1 + n * 8))
#define QSPI_CKPOL(n)			(1 << (n * 8))

#define	QSPI_FRAME			4096

#define QSPI_AUTOSUSPEND_TIMEOUT         2000

#define MEM_CS_EN(n)			((n + 1) << 8)
#define MEM_CS_MASK			(7 << 8)

#define MM_SWITCH			0x1

#define QSPI_SETUP_RD_NORMAL		(0x0 << 12)
#define QSPI_SETUP_RD_DUAL		(0x1 << 12)
#define QSPI_SETUP_RD_QUAD		(0x3 << 12)
#define QSPI_SETUP_ADDR_SHIFT		8
#define QSPI_SETUP_DUMMY_SHIFT		10

#define QSPI_DMA_BUFFER_SIZE            SZ_64K

static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
		unsigned long reg)
{
	return readl(qspi->base + reg);
}

static inline void ti_qspi_write(struct ti_qspi *qspi,
		unsigned long val, unsigned long reg)
{
	writel(val, qspi->base + reg);
}

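/*
 * Derive the serial clock divider from the functional clock rate and the
 * requested spi-max-frequency, then disable and re-enable SCLK with the
 * new divider while holding a runtime PM reference.
 */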
static int ti_qspi_setup(struct spi_device *spi)
{
	struct ti_qspi	*qspi = spi_master_get_devdata(spi->master);
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
	int clk_div = 0, ret;
	u32 clk_ctrl_reg, clk_rate, clk_mask;

	if (spi->master->busy) {
		dev_dbg(qspi->dev, "master busy doing other transfers\n");
		return -EBUSY;
	}

	if (!qspi->spi_max_frequency) {
		dev_err(qspi->dev, "spi max frequency not defined\n");
		return -EINVAL;
	}

	clk_rate = clk_get_rate(qspi->fclk);

	clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;

	if (clk_div < 0) {
		dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
		return -EINVAL;
	}

	if (clk_div > QSPI_CLK_DIV_MAX) {
		dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
				QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
		return -EINVAL;
	}

	dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
			qspi->spi_max_frequency, clk_div);

	ret = pm_runtime_get_sync(qspi->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(qspi->dev);
		dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
		return ret;
	}

	clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);

	clk_ctrl_reg &= ~QSPI_CLK_EN;

	/* disable SCLK */
	ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);

	/* enable SCLK */
	clk_mask = QSPI_CLK_EN | clk_div;
	ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
	ctx_reg->clkctrl = clk_mask;

	pm_runtime_mark_last_busy(qspi->dev);
	ret = pm_runtime_put_autosuspend(qspi->dev);
	if (ret < 0) {
		dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
		return ret;
	}

	return 0;
}

static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
{
	struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;

	ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
}

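/*
 * Spin until the controller's BUSY flag clears or the 2 s completion
 * timeout expires; returns the still-set BUSY bit (non-zero) on timeout.
 */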
static inline u32 qspi_is_busy(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	while ((stat & BUSY) && time_after(timeout, jiffies)) {
		cpu_relax();
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	}

	WARN(stat & BUSY, "qspi busy\n");
	return stat & BUSY;
}

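/*
 * Wait for the word-complete (WC) status bit after a command has been
 * issued; returns 0 on completion or -ETIMEDOUT.
 */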
static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
{
	u32 stat;
	unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;

	do {
		stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
		if (stat & WC)
			return 0;
		cpu_relax();
	} while (time_after(timeout, jiffies));

	stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
	if (stat & WC)
		return 0;
	return -ETIMEDOUT;
}

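/*
 * PIO transmit path: for 8-bit words, pack up to 16 bytes
 * (QSPI_WLEN_MAX_BYTES) into the four data registers per command;
 * otherwise send one word per command, polling for word completion
 * after each command.
 */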
static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			  int count)
{
	int wlen, xfer_len;
	unsigned int cmd;
	const u8 *txbuf;
	u32 data;

	txbuf = t->tx_buf;
	cmd = qspi->cmd | QSPI_WR_SNGL;
	wlen = t->bits_per_word >> 3;	/* in bytes */
	xfer_len = wlen;

	while (count) {
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
					cmd, qspi->dc, *txbuf);
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *txp = (u32 *)txbuf;

				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_3);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_2);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG_1);
				data = cpu_to_be32(*txp++);
				writel(data, qspi->base +
				       QSPI_SPI_DATA_REG);
				xfer_len = QSPI_WLEN_MAX_BYTES;
				cmd |= QSPI_WLEN(QSPI_WLEN_MAX_BITS);
			} else {
				writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
				cmd = qspi->cmd | QSPI_WR_SNGL;
				xfer_len = wlen;
				cmd |= QSPI_WLEN(wlen);
			}
			break;
		case 2:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
					cmd, qspi->dc, *txbuf);
			writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
					cmd, qspi->dc, *txbuf);
			writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "write timed out\n");
			return -ETIMEDOUT;
		}
		txbuf += xfer_len;
		count -= xfer_len;
	}

	return 0;
}

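/*
 * PIO receive path: select single/dual/quad read from t->rx_nbits, then,
 * for 8-bit words, read back either 16 bytes via the four data registers
 * or up to four bytes from QSPI_SPI_DATA_REG per command; wider words
 * come back one word at a time.
 */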
static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			 int count)
{
	int wlen;
	unsigned int cmd;
	u32 rx;
	u8 rxlen, rx_wlen;
	u8 *rxbuf;

	rxbuf = t->rx_buf;
	cmd = qspi->cmd;
	switch (t->rx_nbits) {
	case SPI_NBITS_DUAL:
		cmd |= QSPI_RD_DUAL;
		break;
	case SPI_NBITS_QUAD:
		cmd |= QSPI_RD_QUAD;
		break;
	default:
		cmd |= QSPI_RD_SNGL;
		break;
	}
	wlen = t->bits_per_word >> 3;	/* in bytes */
	rx_wlen = wlen;

	while (count) {
		dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
		if (qspi_is_busy(qspi))
			return -EBUSY;

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit word transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES)
				rxlen = QSPI_WLEN_MAX_BYTES;
			else
				rxlen = min(count, 4);
			rx_wlen = rxlen << 3;
			cmd &= ~QSPI_WLEN_MASK;
			cmd |= QSPI_WLEN(rx_wlen);
			break;
		default:
			rxlen = wlen;
			break;
		}

		ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
		if (ti_qspi_poll_wc(qspi)) {
			dev_err(qspi->dev, "read timed out\n");
			return -ETIMEDOUT;
		}

		switch (wlen) {
		case 1:
			/*
			 * Optimize the 8-bit word transfers, as used by
			 * the SPI flash devices.
			 */
			if (count >= QSPI_WLEN_MAX_BYTES) {
				u32 *rxp = (u32 *) rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG_3);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_2);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG_1);
				*rxp++ = be32_to_cpu(rx);
				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				*rxp++ = be32_to_cpu(rx);
			} else {
				u8 *rxp = rxbuf;

				rx = readl(qspi->base + QSPI_SPI_DATA_REG);
				if (rx_wlen >= 8)
					*rxp++ = rx >> (rx_wlen - 8);
				if (rx_wlen >= 16)
					*rxp++ = rx >> (rx_wlen - 16);
				if (rx_wlen >= 24)
					*rxp++ = rx >> (rx_wlen - 24);
				if (rx_wlen >= 32)
					*rxp++ = rx;
			}
			break;
		case 2:
			*((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
			break;
		case 4:
			*((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
			break;
		}
		rxbuf += rxlen;
		count -= rxlen;
	}

	return 0;
}

static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
			     int count)
{
	int ret;

	if (t->tx_buf) {
		ret = qspi_write_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while writing\n");
			return ret;
		}
	}

	if (t->rx_buf) {
		ret = qspi_read_msg(qspi, t, count);
		if (ret) {
			dev_dbg(qspi->dev, "Error while reading\n");
			return ret;
		}
	}

	return 0;
}

static void ti_qspi_dma_callback(void *param)
{
	struct ti_qspi *qspi = param;

	complete(&qspi->transfer_complete);
}

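/*
 * Issue a dmaengine memcpy from the memory-mapped flash window to a
 * DMA-able destination and wait, with a length-scaled timeout, for the
 * completion raised by ti_qspi_dma_callback().
 */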
static int ti_qspi_dma_xfer(struct ti_qspi *qspi, dma_addr_t dma_dst,
			    dma_addr_t dma_src, size_t len)
{
	struct dma_chan *chan = qspi->rx_chan;
	dma_cookie_t cookie;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct dma_async_tx_descriptor *tx;
	int ret;
	unsigned long time_left;

	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
	if (!tx) {
		dev_err(qspi->dev, "device_prep_dma_memcpy error\n");
		return -EIO;
	}

	tx->callback = ti_qspi_dma_callback;
	tx->callback_param = qspi;
	cookie = tx->tx_submit(tx);
	reinit_completion(&qspi->transfer_complete);

	ret = dma_submit_error(cookie);
	if (ret) {
		dev_err(qspi->dev, "dma_submit_error %d\n", cookie);
		return -EIO;
	}

	dma_async_issue_pending(chan);
	time_left = wait_for_completion_timeout(&qspi->transfer_complete,
					  msecs_to_jiffies(len));
	if (time_left == 0) {
		dmaengine_terminate_sync(chan);
		dev_err(qspi->dev, "DMA wait_for_completion_timeout\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int ti_qspi_dma_bounce_buffer(struct ti_qspi *qspi, loff_t offs,
				     void *to, size_t readsize)
{
	dma_addr_t dma_src = qspi->mmap_phys_base + offs;
	int ret = 0;

	/*
	 * Use a bounce buffer as filesystems like jffs2 and ubifs may pass
	 * buffers that do not belong to the kernel lowmem region.
	 */
	while (readsize != 0) {
		size_t xfer_len = min_t(size_t, QSPI_DMA_BUFFER_SIZE,
					readsize);

		ret = ti_qspi_dma_xfer(qspi, qspi->rx_bb_dma_addr,
				       dma_src, xfer_len);
		if (ret != 0)
			return ret;
		memcpy(to, qspi->rx_bb_addr, xfer_len);
		readsize -= xfer_len;
		dma_src += xfer_len;
		to += xfer_len;
	}

	return ret;
}

static int ti_qspi_dma_xfer_sg(struct ti_qspi *qspi, struct sg_table rx_sg,
			       loff_t from)
{
	struct scatterlist *sg;
	dma_addr_t dma_src = qspi->mmap_phys_base + from;
	dma_addr_t dma_dst;
	int i, len, ret;

	for_each_sg(rx_sg.sgl, sg, rx_sg.nents, i) {
		dma_dst = sg_dma_address(sg);
		len = sg_dma_len(sg);
		ret = ti_qspi_dma_xfer(qspi, dma_dst, dma_src, len);
		if (ret)
			return ret;
		dma_src += len;
	}

	return 0;
}

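/*
 * Memory-mapped read mode: MM_SWITCH flips the controller into its
 * memory-mapped port, and the optional syscon control register selects
 * which chip select the mapped window decodes.
 */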
static void ti_qspi_enable_memory_map(struct spi_device *spi)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base) {
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK,
				   MEM_CS_EN(spi->chip_select));
	}
	qspi->mmap_enabled = true;
	qspi->current_cs = spi->chip_select;
}

static void ti_qspi_disable_memory_map(struct spi_device *spi)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);

	ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
	if (qspi->ctrl_base)
		regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
				   MEM_CS_MASK, 0);
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;
}

static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode,
				    u8 data_nbits, u8 addr_width,
				    u8 dummy_bytes)
{
	struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
	u32 memval = opcode;

	switch (data_nbits) {
	case SPI_NBITS_QUAD:
		memval |= QSPI_SETUP_RD_QUAD;
		break;
	case SPI_NBITS_DUAL:
		memval |= QSPI_SETUP_RD_DUAL;
		break;
	default:
		memval |= QSPI_SETUP_RD_NORMAL;
		break;
	}
	memval |= ((addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
		   dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
	ti_qspi_write(qspi, memval,
		      QSPI_SPI_SETUP_REG(spi->chip_select));
}

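/*
 * spi-mem backend: adjust_op_size() clamps reads to the mmap window (or to
 * the QSPI frame length when falling back to SW-generated transfers), and
 * exec_op() services reads either by DMA from the mapped window or by
 * memcpy_fromio().
 */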
static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
	size_t max_len;

	if (op->data.dir == SPI_MEM_DATA_IN) {
		if (op->addr.val < qspi->mmap_size) {
			/* Limit MMIO to the mmapped region */
			if (op->addr.val + op->data.nbytes > qspi->mmap_size) {
				max_len = qspi->mmap_size - op->addr.val;
				op->data.nbytes = min((size_t) op->data.nbytes,
						      max_len);
			}
		} else {
			/*
			 * Use fallback mode (SW generated transfers) above the
			 * mmapped region.
			 * Adjust size to comply with the QSPI max frame length.
			 */
			max_len = QSPI_FRAME;
			max_len -= 1 + op->addr.nbytes + op->dummy.nbytes;
			op->data.nbytes = min((size_t) op->data.nbytes,
					      max_len);
		}
	}

	return 0;
}

static int ti_qspi_exec_mem_op(struct spi_mem *mem,
			       const struct spi_mem_op *op)
{
	struct ti_qspi *qspi = spi_master_get_devdata(mem->spi->master);
	u32 from = 0;
	int ret = 0;

	/* Only optimize read path. */
	if (!op->data.nbytes || op->data.dir != SPI_MEM_DATA_IN ||
	    !op->addr.nbytes || op->addr.nbytes > 4)
		return -ENOTSUPP;

	/* Address exceeds MMIO window size, fall back to regular mode. */
	from = op->addr.val;
	if (from + op->data.nbytes > qspi->mmap_size)
		return -ENOTSUPP;

	mutex_lock(&qspi->list_lock);

	if (!qspi->mmap_enabled || qspi->current_cs != mem->spi->chip_select)
		ti_qspi_enable_memory_map(mem->spi);
	ti_qspi_setup_mmap_read(mem->spi, op->cmd.opcode, op->data.buswidth,
				op->addr.nbytes, op->dummy.nbytes);

	if (qspi->rx_chan) {
		struct sg_table sgt;

		if (virt_addr_valid(op->data.buf.in) &&
		    !spi_controller_dma_map_mem_op_data(mem->spi->master, op,
							&sgt)) {
			ret = ti_qspi_dma_xfer_sg(qspi, sgt, from);
			spi_controller_dma_unmap_mem_op_data(mem->spi->master,
							     op, &sgt);
		} else {
			ret = ti_qspi_dma_bounce_buffer(qspi, from,
							op->data.buf.in,
							op->data.nbytes);
		}
	} else {
		memcpy_fromio(op->data.buf.in, qspi->mmap_base + from,
			      op->data.nbytes);
	}

	mutex_unlock(&qspi->list_lock);

	return ret;
}

static const struct spi_controller_mem_ops ti_qspi_mem_ops = {
	.exec_op = ti_qspi_exec_mem_op,
	.adjust_op_size = ti_qspi_adjust_op_size,
};

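/*
 * Software-generated transfer path: program the device-control and command
 * registers for this message, drop out of mmap mode if it was enabled, then
 * run each transfer through the PIO read/write helpers.
 */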
static int ti_qspi_start_transfer_one(struct spi_master *master,
		struct spi_message *m)
{
	struct ti_qspi *qspi = spi_master_get_devdata(master);
	struct spi_device *spi = m->spi;
	struct spi_transfer *t;
	int status = 0, ret;
	unsigned int frame_len_words, transfer_len_words;
	int wlen;

	/* setup device control reg */
	qspi->dc = 0;

	if (spi->mode & SPI_CPHA)
		qspi->dc |= QSPI_CKPHA(spi->chip_select);
	if (spi->mode & SPI_CPOL)
		qspi->dc |= QSPI_CKPOL(spi->chip_select);
	if (spi->mode & SPI_CS_HIGH)
		qspi->dc |= QSPI_CSPOL(spi->chip_select);

	frame_len_words = 0;
	list_for_each_entry(t, &m->transfers, transfer_list)
		frame_len_words += t->len / (t->bits_per_word >> 3);
	frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);

	/* setup command reg */
	qspi->cmd = 0;
	qspi->cmd |= QSPI_EN_CS(spi->chip_select);
	qspi->cmd |= QSPI_FLEN(frame_len_words);

	ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);

	mutex_lock(&qspi->list_lock);

	if (qspi->mmap_enabled)
		ti_qspi_disable_memory_map(spi);

	list_for_each_entry(t, &m->transfers, transfer_list) {
		qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
			     QSPI_WLEN(t->bits_per_word));

		wlen = t->bits_per_word >> 3;
		transfer_len_words = min(t->len / wlen, frame_len_words);

		ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
		if (ret) {
			dev_dbg(qspi->dev, "transfer message failed\n");
			mutex_unlock(&qspi->list_lock);
			return -EINVAL;
		}

		m->actual_length += transfer_len_words * wlen;
		frame_len_words -= transfer_len_words;
		if (frame_len_words == 0)
			break;
	}

	mutex_unlock(&qspi->list_lock);

	ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
	m->status = status;
	spi_finalize_current_message(master);

	return status;
}

static int ti_qspi_runtime_resume(struct device *dev)
{
	struct ti_qspi      *qspi;

	qspi = dev_get_drvdata(dev);
	ti_qspi_restore_ctx(qspi);

	return 0;
}

static void ti_qspi_dma_cleanup(struct ti_qspi *qspi)
{
	if (qspi->rx_bb_addr)
		dma_free_coherent(qspi->dev, QSPI_DMA_BUFFER_SIZE,
				  qspi->rx_bb_addr,
				  qspi->rx_bb_dma_addr);

	if (qspi->rx_chan)
		dma_release_channel(qspi->rx_chan);
}

static const struct of_device_id ti_qspi_match[] = {
	{.compatible = "ti,dra7xxx-qspi" },
	{.compatible = "ti,am4372-qspi" },
	{},
};
MODULE_DEVICE_TABLE(of, ti_qspi_match);

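/*
 * Probe: map the "qspi_base" registers, optionally map the "qspi_mmap"
 * window, look up the syscon chip-select control and functional clock,
 * enable runtime PM with autosuspend, and try to grab a MEMCPY-capable
 * DMA channel with a 64 KiB bounce buffer before registering the master.
 */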
static int ti_qspi_probe(struct platform_device *pdev)
{
	struct  ti_qspi *qspi;
	struct spi_master *master;
	struct resource         *r, *res_mmap;
	struct device_node *np = pdev->dev.of_node;
	u32 max_freq;
	int ret = 0, num_cs, irq;
	dma_cap_mask_t mask;

	master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
	if (!master)
		return -ENOMEM;

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;

	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->setup = ti_qspi_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = ti_qspi_start_transfer_one;
	master->dev.of_node = pdev->dev.of_node;
	master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
				     SPI_BPW_MASK(8);
	master->mem_ops = &ti_qspi_mem_ops;

	if (!of_property_read_u32(np, "num-cs", &num_cs))
		master->num_chipselect = num_cs;

	qspi = spi_master_get_devdata(master);
	qspi->master = master;
	qspi->dev = &pdev->dev;
	platform_set_drvdata(pdev, qspi);

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
	if (r == NULL) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (r == NULL) {
			dev_err(&pdev->dev, "missing platform data\n");
			ret = -ENODEV;
			goto free_master;
		}
	}

	res_mmap = platform_get_resource_byname(pdev,
			IORESOURCE_MEM, "qspi_mmap");
	if (res_mmap == NULL) {
		res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res_mmap == NULL) {
			dev_err(&pdev->dev,
				"memory mapped resource not required\n");
		}
	}

	if (res_mmap)
		qspi->mmap_size = resource_size(res_mmap);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto free_master;
	}

	mutex_init(&qspi->list_lock);

	qspi->base = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(qspi->base)) {
		ret = PTR_ERR(qspi->base);
		goto free_master;
	}


	if (of_property_read_bool(np, "syscon-chipselects")) {
		qspi->ctrl_base =
		syscon_regmap_lookup_by_phandle(np,
						"syscon-chipselects");
		if (IS_ERR(qspi->ctrl_base)) {
			ret = PTR_ERR(qspi->ctrl_base);
			goto free_master;
		}
		ret = of_property_read_u32_index(np,
						 "syscon-chipselects",
						 1, &qspi->ctrl_reg);
		if (ret) {
			dev_err(&pdev->dev,
				"couldn't get ctrl_mod reg index\n");
			goto free_master;
		}
	}

	qspi->fclk = devm_clk_get(&pdev->dev, "fck");
	if (IS_ERR(qspi->fclk)) {
		ret = PTR_ERR(qspi->fclk);
		dev_err(&pdev->dev, "could not get clk: %d\n", ret);
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
	pm_runtime_enable(&pdev->dev);

	if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
		qspi->spi_max_frequency = max_freq;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	qspi->rx_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(qspi->rx_chan)) {
		dev_err(qspi->dev,
			"No Rx DMA available, trying mmap mode\n");
		qspi->rx_chan = NULL;
		ret = 0;
		goto no_dma;
	}
	qspi->rx_bb_addr = dma_alloc_coherent(qspi->dev,
					      QSPI_DMA_BUFFER_SIZE,
					      &qspi->rx_bb_dma_addr,
					      GFP_KERNEL | GFP_DMA);
	if (!qspi->rx_bb_addr) {
		dev_err(qspi->dev,
			"dma_alloc_coherent failed, using PIO mode\n");
		dma_release_channel(qspi->rx_chan);
		goto no_dma;
	}
	master->dma_rx = qspi->rx_chan;
	init_completion(&qspi->transfer_complete);
	if (res_mmap)
		qspi->mmap_phys_base = (dma_addr_t)res_mmap->start;

no_dma:
	if (!qspi->rx_chan && res_mmap) {
		qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
		if (IS_ERR(qspi->mmap_base)) {
			dev_info(&pdev->dev,
				 "mmap failed with error %ld using PIO mode\n",
				 PTR_ERR(qspi->mmap_base));
			qspi->mmap_base = NULL;
			master->mem_ops = NULL;
		}
	}
	qspi->mmap_enabled = false;
	qspi->current_cs = -1;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (!ret)
		return 0;

	ti_qspi_dma_cleanup(qspi);

	pm_runtime_disable(&pdev->dev);
free_master:
	spi_master_put(master);
	return ret;
}

static int ti_qspi_remove(struct platform_device *pdev)
{
	struct ti_qspi *qspi = platform_get_drvdata(pdev);
	int rc;

	rc = spi_master_suspend(qspi->master);
	if (rc)
		return rc;

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	ti_qspi_dma_cleanup(qspi);

	return 0;
}

static const struct dev_pm_ops ti_qspi_pm_ops = {
	.runtime_resume = ti_qspi_runtime_resume,
};

static struct platform_driver ti_qspi_driver = {
	.probe	= ti_qspi_probe,
	.remove = ti_qspi_remove,
	.driver = {
		.name	= "ti-qspi",
		.pm =   &ti_qspi_pm_ops,
		.of_match_table = ti_qspi_match,
	}
};

module_platform_driver(ti_qspi_driver);

MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI QSPI controller driver");
MODULE_ALIAS("platform:ti-qspi");