xref: /OK3568_Linux_fs/kernel/drivers/spi/spi-rockchip.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
4*4882a593Smuzhiyun  * Author: Addy Ke <addy.ke@rock-chips.com>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/acpi.h>
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/dmaengine.h>
11*4882a593Smuzhiyun #include <linux/interrupt.h>
12*4882a593Smuzhiyun #include <linux/miscdevice.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/pinctrl/consumer.h>
16*4882a593Smuzhiyun #include <linux/pinctrl/devinfo.h>
17*4882a593Smuzhiyun #include <linux/platform_device.h>
18*4882a593Smuzhiyun #include <linux/spi/spi.h>
19*4882a593Smuzhiyun #include <linux/pm_runtime.h>
20*4882a593Smuzhiyun #include <linux/scatterlist.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #define DRIVER_NAME "rockchip-spi"
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define ROCKCHIP_SPI_CLR_BITS(reg, bits) \
25*4882a593Smuzhiyun 		writel_relaxed(readl_relaxed(reg) & ~(bits), reg)
26*4882a593Smuzhiyun #define ROCKCHIP_SPI_SET_BITS(reg, bits) \
27*4882a593Smuzhiyun 		writel_relaxed(readl_relaxed(reg) | (bits), reg)
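/*
 * Example of the helpers above (illustrative chip-select number):
 * ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(1)) performs a
 * read-modify-write that asserts native chip-select 1, which is how
 * rockchip_spi_set_cs() below drives the SER register.
 */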
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun /* SPI register offsets */
30*4882a593Smuzhiyun #define ROCKCHIP_SPI_CTRLR0			0x0000
31*4882a593Smuzhiyun #define ROCKCHIP_SPI_CTRLR1			0x0004
32*4882a593Smuzhiyun #define ROCKCHIP_SPI_SSIENR			0x0008
33*4882a593Smuzhiyun #define ROCKCHIP_SPI_SER			0x000c
34*4882a593Smuzhiyun #define ROCKCHIP_SPI_BAUDR			0x0010
35*4882a593Smuzhiyun #define ROCKCHIP_SPI_TXFTLR			0x0014
36*4882a593Smuzhiyun #define ROCKCHIP_SPI_RXFTLR			0x0018
37*4882a593Smuzhiyun #define ROCKCHIP_SPI_TXFLR			0x001c
38*4882a593Smuzhiyun #define ROCKCHIP_SPI_RXFLR			0x0020
39*4882a593Smuzhiyun #define ROCKCHIP_SPI_SR				0x0024
40*4882a593Smuzhiyun #define ROCKCHIP_SPI_IPR			0x0028
41*4882a593Smuzhiyun #define ROCKCHIP_SPI_IMR			0x002c
42*4882a593Smuzhiyun #define ROCKCHIP_SPI_ISR			0x0030
43*4882a593Smuzhiyun #define ROCKCHIP_SPI_RISR			0x0034
44*4882a593Smuzhiyun #define ROCKCHIP_SPI_ICR			0x0038
45*4882a593Smuzhiyun #define ROCKCHIP_SPI_DMACR			0x003c
46*4882a593Smuzhiyun #define ROCKCHIP_SPI_DMATDLR			0x0040
47*4882a593Smuzhiyun #define ROCKCHIP_SPI_DMARDLR			0x0044
48*4882a593Smuzhiyun #define ROCKCHIP_SPI_VERSION			0x0048
49*4882a593Smuzhiyun #define ROCKCHIP_SPI_TXDR			0x0400
50*4882a593Smuzhiyun #define ROCKCHIP_SPI_RXDR			0x0800
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun /* Bit fields in CTRLR0 */
53*4882a593Smuzhiyun #define CR0_DFS_OFFSET				0
54*4882a593Smuzhiyun #define CR0_DFS_4BIT				0x0
55*4882a593Smuzhiyun #define CR0_DFS_8BIT				0x1
56*4882a593Smuzhiyun #define CR0_DFS_16BIT				0x2
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun #define CR0_CFS_OFFSET				2
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define CR0_SCPH_OFFSET				6
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun #define CR0_SCPOL_OFFSET			7
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun #define CR0_CSM_OFFSET				8
65*4882a593Smuzhiyun #define CR0_CSM_KEEP				0x0
66*4882a593Smuzhiyun /* ss_n is held high for half an sclk_out cycle */
67*4882a593Smuzhiyun #define CR0_CSM_HALF				0x1
68*4882a593Smuzhiyun /* ss_n is held high for one sclk_out cycle */
69*4882a593Smuzhiyun #define CR0_CSM_ONE					0x2
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun /* ss_n to sclk_out delay */
72*4882a593Smuzhiyun #define CR0_SSD_OFFSET				10
73*4882a593Smuzhiyun /*
74*4882a593Smuzhiyun  * The period between ss_n active and
75*4882a593Smuzhiyun  * sclk_out active is half an sclk_out cycle
76*4882a593Smuzhiyun  */
77*4882a593Smuzhiyun #define CR0_SSD_HALF				0x0
78*4882a593Smuzhiyun /*
79*4882a593Smuzhiyun  * The period between ss_n active and
80*4882a593Smuzhiyun  * sclk_out active is one sclk_out cycle
81*4882a593Smuzhiyun  */
82*4882a593Smuzhiyun #define CR0_SSD_ONE					0x1
83*4882a593Smuzhiyun 
84*4882a593Smuzhiyun #define CR0_EM_OFFSET				11
85*4882a593Smuzhiyun #define CR0_EM_LITTLE				0x0
86*4882a593Smuzhiyun #define CR0_EM_BIG					0x1
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #define CR0_FBM_OFFSET				12
89*4882a593Smuzhiyun #define CR0_FBM_MSB					0x0
90*4882a593Smuzhiyun #define CR0_FBM_LSB					0x1
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun #define CR0_BHT_OFFSET				13
93*4882a593Smuzhiyun #define CR0_BHT_16BIT				0x0
94*4882a593Smuzhiyun #define CR0_BHT_8BIT				0x1
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun #define CR0_RSD_OFFSET				14
97*4882a593Smuzhiyun #define CR0_RSD_MAX				0x3
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun #define CR0_FRF_OFFSET				16
100*4882a593Smuzhiyun #define CR0_FRF_SPI					0x0
101*4882a593Smuzhiyun #define CR0_FRF_SSP					0x1
102*4882a593Smuzhiyun #define CR0_FRF_MICROWIRE			0x2
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun #define CR0_XFM_OFFSET				18
105*4882a593Smuzhiyun #define CR0_XFM_MASK				(0x03 << CR0_XFM_OFFSET)
106*4882a593Smuzhiyun #define CR0_XFM_TR					0x0
107*4882a593Smuzhiyun #define CR0_XFM_TO					0x1
108*4882a593Smuzhiyun #define CR0_XFM_RO					0x2
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun #define CR0_OPM_OFFSET				20
111*4882a593Smuzhiyun #define CR0_OPM_MASTER				0x0
112*4882a593Smuzhiyun #define CR0_OPM_SLAVE				0x1
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun #define CR0_SOI_OFFSET				23
115*4882a593Smuzhiyun 
116*4882a593Smuzhiyun #define CR0_MTM_OFFSET				0x21
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /* Bit fields in SER, 2bit */
119*4882a593Smuzhiyun #define SER_MASK					0x3
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun /* Bit fields in BAUDR */
122*4882a593Smuzhiyun #define BAUDR_SCKDV_MIN				2
123*4882a593Smuzhiyun #define BAUDR_SCKDV_MAX				65534
124*4882a593Smuzhiyun 
125*4882a593Smuzhiyun /* Bit fields in SR, 6bit */
126*4882a593Smuzhiyun #define SR_MASK						0x3f
127*4882a593Smuzhiyun #define SR_BUSY						(1 << 0)
128*4882a593Smuzhiyun #define SR_TF_FULL					(1 << 1)
129*4882a593Smuzhiyun #define SR_TF_EMPTY					(1 << 2)
130*4882a593Smuzhiyun #define SR_RF_EMPTY					(1 << 3)
131*4882a593Smuzhiyun #define SR_RF_FULL					(1 << 4)
132*4882a593Smuzhiyun #define SR_SLAVE_TX_BUSY				(1 << 5)
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /* Bit fields in ISR, IMR, ISR, RISR, 5bit */
135*4882a593Smuzhiyun #define INT_MASK					0x1f
136*4882a593Smuzhiyun #define INT_TF_EMPTY				(1 << 0)
137*4882a593Smuzhiyun #define INT_TF_OVERFLOW				(1 << 1)
138*4882a593Smuzhiyun #define INT_RF_UNDERFLOW			(1 << 2)
139*4882a593Smuzhiyun #define INT_RF_OVERFLOW				(1 << 3)
140*4882a593Smuzhiyun #define INT_RF_FULL				(1 << 4)
141*4882a593Smuzhiyun #define INT_CS_INACTIVE				(1 << 6)
142*4882a593Smuzhiyun 
143*4882a593Smuzhiyun /* Bit fields in ICR, 4bit */
144*4882a593Smuzhiyun #define ICR_MASK					0x0f
145*4882a593Smuzhiyun #define ICR_ALL						(1 << 0)
146*4882a593Smuzhiyun #define ICR_RF_UNDERFLOW			(1 << 1)
147*4882a593Smuzhiyun #define ICR_RF_OVERFLOW				(1 << 2)
148*4882a593Smuzhiyun #define ICR_TF_OVERFLOW				(1 << 3)
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun /* Bit fields in DMACR */
151*4882a593Smuzhiyun #define RF_DMA_EN					(1 << 0)
152*4882a593Smuzhiyun #define TF_DMA_EN					(1 << 1)
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun /* Driver state flags */
155*4882a593Smuzhiyun #define RXDMA					(1 << 0)
156*4882a593Smuzhiyun #define TXDMA					(1 << 1)
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun /* sclk_out: spi master internal logic in rk3x can support 50MHz */
159*4882a593Smuzhiyun #define MAX_SCLK_OUT				50000000U
160*4882a593Smuzhiyun /* max sclk of driver strength 4mA */
161*4882a593Smuzhiyun #define IO_DRIVER_4MA_MAX_SCLK_OUT	24000000U
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun /*
164*4882a593Smuzhiyun  * SPI_CTRLR1 is 16-bits, so we should support lengths of 0xffff + 1. However,
165*4882a593Smuzhiyun  * the controller seems to hang when given 0x10000, so stick with this for now.
166*4882a593Smuzhiyun  */
167*4882a593Smuzhiyun #define ROCKCHIP_SPI_MAX_TRANLEN		0xffff
168*4882a593Smuzhiyun 
169*4882a593Smuzhiyun /* 2 for native cs, 2 for cs-gpio */
170*4882a593Smuzhiyun #define ROCKCHIP_SPI_MAX_CS_NUM			4
171*4882a593Smuzhiyun #define ROCKCHIP_SPI_VER2_TYPE1			0x05EC0002
172*4882a593Smuzhiyun #define ROCKCHIP_SPI_VER2_TYPE2			0x00110002
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun #define ROCKCHIP_SPI_REGISTER_SIZE		0x1000
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun enum rockchip_spi_xfer_mode {
177*4882a593Smuzhiyun 	ROCKCHIP_SPI_DMA,
178*4882a593Smuzhiyun 	ROCKCHIP_SPI_IRQ,
179*4882a593Smuzhiyun 	ROCKCHIP_SPI_POLL,
180*4882a593Smuzhiyun };
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun struct rockchip_spi_quirks {
183*4882a593Smuzhiyun 	u32 max_baud_div_in_cpha;
184*4882a593Smuzhiyun };
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun struct rockchip_spi {
187*4882a593Smuzhiyun 	struct device *dev;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	struct clk *spiclk;
190*4882a593Smuzhiyun 	struct clk *apb_pclk;
191*4882a593Smuzhiyun 	struct clk *sclk_in;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	void __iomem *regs;
194*4882a593Smuzhiyun 	dma_addr_t dma_addr_rx;
195*4882a593Smuzhiyun 	dma_addr_t dma_addr_tx;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	const void *tx;
198*4882a593Smuzhiyun 	void *rx;
199*4882a593Smuzhiyun 	unsigned int tx_left;
200*4882a593Smuzhiyun 	unsigned int rx_left;
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	atomic_t state;
203*4882a593Smuzhiyun 	struct completion xfer_done;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	u32 version;
206*4882a593Smuzhiyun 	/* depth of the FIFO buffer */
207*4882a593Smuzhiyun 	u32 fifo_len;
208*4882a593Smuzhiyun 	/* frequency of spiclk */
209*4882a593Smuzhiyun 	u32 freq;
210*4882a593Smuzhiyun 	/* speed of io rate */
211*4882a593Smuzhiyun 	u32 speed_hz;
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	u8 n_bytes;
214*4882a593Smuzhiyun 	u8 rsd;
215*4882a593Smuzhiyun 	u8 csm;
216*4882a593Smuzhiyun 	bool poll; /* only supports transferring data by cpu polling */
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	struct pinctrl_state *high_speed_state;
221*4882a593Smuzhiyun 	bool slave_aborted;
222*4882a593Smuzhiyun 	bool cs_inactive; /* spi slave transmission stops when cs goes inactive */
223*4882a593Smuzhiyun 	bool cs_high_supported; /* native CS supports active-high polarity */
224*4882a593Smuzhiyun 	struct gpio_desc *ready; /* spi slave transmission ready */
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	struct spi_transfer *xfer; /* Store xfer temporarily */
227*4882a593Smuzhiyun 	phys_addr_t base_addr_phy;
228*4882a593Smuzhiyun 	struct miscdevice miscdev;
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	/* quirks */
231*4882a593Smuzhiyun 	u32 max_baud_div_in_cpha;
232*4882a593Smuzhiyun };
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun static inline void spi_enable_chip(struct rockchip_spi *rs, bool enable)
235*4882a593Smuzhiyun {
236*4882a593Smuzhiyun 	writel_relaxed((enable ? 1U : 0U), rs->regs + ROCKCHIP_SPI_SSIENR);
237*4882a593Smuzhiyun }
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun static inline void wait_for_tx_idle(struct rockchip_spi *rs, bool slave_mode)
240*4882a593Smuzhiyun {
241*4882a593Smuzhiyun 	unsigned long timeout = jiffies + msecs_to_jiffies(5);
242*4882a593Smuzhiyun 	u32 bit_field = SR_BUSY;
243*4882a593Smuzhiyun 	u32 idle_val = 0;
244*4882a593Smuzhiyun 	uint32_t speed, us;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	if (slave_mode && rs->version == ROCKCHIP_SPI_VER2_TYPE2) {
247*4882a593Smuzhiyun 		bit_field = SR_SLAVE_TX_BUSY;
248*4882a593Smuzhiyun 		idle_val = 0;
249*4882a593Smuzhiyun 	} else if (slave_mode) {
250*4882a593Smuzhiyun 		bit_field = SR_TF_EMPTY;
251*4882a593Smuzhiyun 		idle_val = 1;
252*4882a593Smuzhiyun 	}
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun 	do {
255*4882a593Smuzhiyun 		if ((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & bit_field) == idle_val) {
256*4882a593Smuzhiyun 			if (bit_field == SR_TF_EMPTY) {
257*4882a593Smuzhiyun 				speed = rs->speed_hz;
258*4882a593Smuzhiyun 				us = (8 * 1000000 / speed) * 2;
259*4882a593Smuzhiyun 				udelay(us);
260*4882a593Smuzhiyun 			}
261*4882a593Smuzhiyun 			return;
262*4882a593Smuzhiyun 		}
263*4882a593Smuzhiyun 	} while (!time_after(jiffies, timeout));
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	dev_warn(rs->dev, "spi controller is in busy state!\n");
266*4882a593Smuzhiyun }
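/*
 * Worked example for the udelay() above, assuming an 8-bit word and an
 * illustrative rs->speed_hz of 1 MHz: one word takes 8 * 1000000 / 1000000
 * = 8 us on the wire, so the code busy-waits 2 * 8 = 16 us after TF_EMPTY
 * to let the shift register drain before declaring the controller idle.
 */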
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun static u32 get_fifo_len(struct rockchip_spi *rs)
269*4882a593Smuzhiyun {
270*4882a593Smuzhiyun 	switch (rs->version) {
271*4882a593Smuzhiyun 	case ROCKCHIP_SPI_VER2_TYPE1:
272*4882a593Smuzhiyun 	case ROCKCHIP_SPI_VER2_TYPE2:
273*4882a593Smuzhiyun 		return 64;
274*4882a593Smuzhiyun 	default:
275*4882a593Smuzhiyun 		return 32;
276*4882a593Smuzhiyun 	}
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	struct spi_controller *ctlr = spi->controller;
282*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
283*4882a593Smuzhiyun 	bool cs_asserted = spi->mode & SPI_CS_HIGH ? enable : !enable;
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun 	/* Return immediately for no-op */
286*4882a593Smuzhiyun 	if (cs_asserted == rs->cs_asserted[spi->chip_select])
287*4882a593Smuzhiyun 		return;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	if (cs_asserted) {
290*4882a593Smuzhiyun 		/* Keep things powered as long as CS is asserted */
291*4882a593Smuzhiyun 		pm_runtime_get_sync(rs->dev);
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 		if (spi->cs_gpiod)
294*4882a593Smuzhiyun 			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
295*4882a593Smuzhiyun 		else
296*4882a593Smuzhiyun 			ROCKCHIP_SPI_SET_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
297*4882a593Smuzhiyun 	} else {
298*4882a593Smuzhiyun 		if (spi->cs_gpiod)
299*4882a593Smuzhiyun 			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, 1);
300*4882a593Smuzhiyun 		else
301*4882a593Smuzhiyun 			ROCKCHIP_SPI_CLR_BITS(rs->regs + ROCKCHIP_SPI_SER, BIT(spi->chip_select));
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 		/* Drop reference from when we first asserted CS */
304*4882a593Smuzhiyun 		pm_runtime_put(rs->dev);
305*4882a593Smuzhiyun 	}
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun 	rs->cs_asserted[spi->chip_select] = cs_asserted;
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun static void rockchip_spi_handle_err(struct spi_controller *ctlr,
311*4882a593Smuzhiyun 				    struct spi_message *msg)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	dev_err(rs->dev, "state=%x\n", atomic_read(&rs->state));
316*4882a593Smuzhiyun 	dev_err(rs->dev, "tx_left=%x\n", rs->tx_left);
317*4882a593Smuzhiyun 	dev_err(rs->dev, "rx_left=%x\n", rs->rx_left);
318*4882a593Smuzhiyun 	print_hex_dump(KERN_ERR, "regs ", DUMP_PREFIX_OFFSET, 4, 4, rs->regs, 0x4c, 0);
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	/* stop running spi transfer
321*4882a593Smuzhiyun 	 * this also flushes both rx and tx fifos
322*4882a593Smuzhiyun 	 */
323*4882a593Smuzhiyun 	spi_enable_chip(rs, false);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	/* make sure all interrupts are masked and status cleared */
326*4882a593Smuzhiyun 	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
327*4882a593Smuzhiyun 	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	if (atomic_read(&rs->state) & TXDMA)
330*4882a593Smuzhiyun 		dmaengine_terminate_async(ctlr->dma_tx);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	if (atomic_read(&rs->state) & RXDMA)
333*4882a593Smuzhiyun 		dmaengine_terminate_async(ctlr->dma_rx);
334*4882a593Smuzhiyun 	atomic_set(&rs->state, 0);
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun 	u32 tx_free = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
340*4882a593Smuzhiyun 	u32 words = min(rs->tx_left, tx_free);
341*4882a593Smuzhiyun 
342*4882a593Smuzhiyun 	rs->tx_left -= words;
343*4882a593Smuzhiyun 	for (; words; words--) {
344*4882a593Smuzhiyun 		u32 txw;
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 		if (rs->n_bytes == 1)
347*4882a593Smuzhiyun 			txw = *(u8 *)rs->tx;
348*4882a593Smuzhiyun 		else
349*4882a593Smuzhiyun 			txw = *(u16 *)rs->tx;
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 		writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
352*4882a593Smuzhiyun 		rs->tx += rs->n_bytes;
353*4882a593Smuzhiyun 	}
354*4882a593Smuzhiyun }
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun 	u32 words = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
359*4882a593Smuzhiyun 	u32 rx_left = (rs->rx_left > words) ? rs->rx_left - words : 0;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	/* the hardware doesn't allow us to change fifo threshold
362*4882a593Smuzhiyun 	 * level while spi is enabled, so instead make sure to leave
363*4882a593Smuzhiyun 	 * enough words in the rx fifo to get the last interrupt
364*4882a593Smuzhiyun 	 * exactly when all words have been received
365*4882a593Smuzhiyun 	 */
366*4882a593Smuzhiyun 	if (rx_left) {
367*4882a593Smuzhiyun 		u32 ftl = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFTLR) + 1;
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 		if (rx_left < ftl) {
370*4882a593Smuzhiyun 			rx_left = ftl;
371*4882a593Smuzhiyun 			words = rs->rx_left - rx_left;
372*4882a593Smuzhiyun 		}
373*4882a593Smuzhiyun 	}
374*4882a593Smuzhiyun 
375*4882a593Smuzhiyun 	rs->rx_left = rx_left;
376*4882a593Smuzhiyun 	for (; words; words--) {
377*4882a593Smuzhiyun 		u32 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 		if (!rs->rx)
380*4882a593Smuzhiyun 			continue;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 		if (rs->n_bytes == 1)
383*4882a593Smuzhiyun 			*(u8 *)rs->rx = (u8)rxw;
384*4882a593Smuzhiyun 		else
385*4882a593Smuzhiyun 			*(u16 *)rs->rx = (u16)rxw;
386*4882a593Smuzhiyun 		rs->rx += rs->n_bytes;
387*4882a593Smuzhiyun 	}
388*4882a593Smuzhiyun }
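/*
 * Worked example for the threshold handling above (illustrative numbers):
 * with RXFTLR = 31 (so ftl = 32), 40 words still expected and 20 words
 * currently in the rx fifo, draining all 20 would leave only 20 words
 * outstanding - fewer than ftl - so the final RF_FULL interrupt could
 * never fire.  Instead only 40 - 32 = 8 words are read now, leaving
 * exactly 32 words to accumulate and trigger one last interrupt.
 */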
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun static irqreturn_t rockchip_spi_isr(int irq, void *dev_id)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_id;
393*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	/* When the int_cs_inactive interrupt arrives, abort the spi slave transfer */
396*4882a593Smuzhiyun 	if (rs->cs_inactive && readl_relaxed(rs->regs + ROCKCHIP_SPI_ISR) & INT_CS_INACTIVE) {
397*4882a593Smuzhiyun 		ctlr->slave_abort(ctlr);
398*4882a593Smuzhiyun 		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
399*4882a593Smuzhiyun 		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 		return IRQ_HANDLED;
402*4882a593Smuzhiyun 	}
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	if (rs->tx_left)
405*4882a593Smuzhiyun 		rockchip_spi_pio_writer(rs);
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun 	rockchip_spi_pio_reader(rs);
408*4882a593Smuzhiyun 	if (!rs->rx_left) {
409*4882a593Smuzhiyun 		spi_enable_chip(rs, false);
410*4882a593Smuzhiyun 		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
411*4882a593Smuzhiyun 		writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
412*4882a593Smuzhiyun 		complete(&rs->xfer_done);
413*4882a593Smuzhiyun 	}
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun 	return IRQ_HANDLED;
416*4882a593Smuzhiyun }
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun static int rockchip_spi_prepare_irq(struct rockchip_spi *rs,
419*4882a593Smuzhiyun 				    struct spi_controller *ctlr,
420*4882a593Smuzhiyun 				    struct spi_transfer *xfer)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun 	rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
423*4882a593Smuzhiyun 	rs->rx_left = xfer->len / rs->n_bytes;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
426*4882a593Smuzhiyun 
427*4882a593Smuzhiyun 	spi_enable_chip(rs, true);
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun 	if (rs->tx_left)
430*4882a593Smuzhiyun 		rockchip_spi_pio_writer(rs);
431*4882a593Smuzhiyun 
432*4882a593Smuzhiyun 	if (rs->cs_inactive)
433*4882a593Smuzhiyun 		writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
434*4882a593Smuzhiyun 	else
435*4882a593Smuzhiyun 		writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR);
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 	/* 1 means the transfer is in progress */
438*4882a593Smuzhiyun 	return 1;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun static void rockchip_spi_dma_rxcb(void *data)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun 	struct spi_controller *ctlr = data;
444*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
445*4882a593Smuzhiyun 	int state = atomic_fetch_andnot(RXDMA, &rs->state);
446*4882a593Smuzhiyun 
447*4882a593Smuzhiyun 	if (state & TXDMA && !rs->slave_aborted)
448*4882a593Smuzhiyun 		return;
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	if (rs->cs_inactive)
451*4882a593Smuzhiyun 		writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	spi_enable_chip(rs, false);
454*4882a593Smuzhiyun 	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
455*4882a593Smuzhiyun 	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
456*4882a593Smuzhiyun 	complete(&rs->xfer_done);
457*4882a593Smuzhiyun }
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun static void rockchip_spi_dma_txcb(void *data)
460*4882a593Smuzhiyun {
461*4882a593Smuzhiyun 	struct spi_controller *ctlr = data;
462*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
463*4882a593Smuzhiyun 	int state = atomic_fetch_andnot(TXDMA, &rs->state);
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun 	if (state & RXDMA && !rs->slave_aborted)
466*4882a593Smuzhiyun 		return;
467*4882a593Smuzhiyun 
468*4882a593Smuzhiyun 	/* Wait until the FIFO data has been transferred completely. */
469*4882a593Smuzhiyun 	wait_for_tx_idle(rs, ctlr->slave);
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	spi_enable_chip(rs, false);
472*4882a593Smuzhiyun 	writel_relaxed(0, rs->regs + ROCKCHIP_SPI_IMR);
473*4882a593Smuzhiyun 	writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR);
474*4882a593Smuzhiyun 	complete(&rs->xfer_done);
475*4882a593Smuzhiyun }
476*4882a593Smuzhiyun 
477*4882a593Smuzhiyun static u32 rockchip_spi_calc_burst_size(u32 data_len)
478*4882a593Smuzhiyun {
479*4882a593Smuzhiyun 	u32 i;
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	/* burst size: 1, 2, 4, 8 */
482*4882a593Smuzhiyun 	for (i = 1; i < 8; i <<= 1) {
483*4882a593Smuzhiyun 		if (data_len & i)
484*4882a593Smuzhiyun 			break;
485*4882a593Smuzhiyun 	}
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun 	return i;
488*4882a593Smuzhiyun }
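/*
 * rockchip_spi_calc_burst_size() returns the lowest set bit of data_len,
 * clamped to 8, i.e. the largest burst of 1/2/4/8 words that divides the
 * transfer evenly.  For example (illustrative values): data_len = 6
 * (0b110) gives 2, data_len = 5 gives 1, and data_len = 16 falls through
 * the loop and gives 8.
 */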
489*4882a593Smuzhiyun 
490*4882a593Smuzhiyun static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
491*4882a593Smuzhiyun 		struct spi_controller *ctlr, struct spi_transfer *xfer)
492*4882a593Smuzhiyun {
493*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *rxdesc, *txdesc;
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	atomic_set(&rs->state, 0);
496*4882a593Smuzhiyun 
497*4882a593Smuzhiyun 	rxdesc = NULL;
498*4882a593Smuzhiyun 	if (xfer->rx_buf) {
499*4882a593Smuzhiyun 		struct dma_slave_config rxconf = {
500*4882a593Smuzhiyun 			.direction = DMA_DEV_TO_MEM,
501*4882a593Smuzhiyun 			.src_addr = rs->dma_addr_rx,
502*4882a593Smuzhiyun 			.src_addr_width = rs->n_bytes,
503*4882a593Smuzhiyun 			.src_maxburst = rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes),
504*4882a593Smuzhiyun 		};
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun 		dmaengine_slave_config(ctlr->dma_rx, &rxconf);
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun 		rxdesc = dmaengine_prep_slave_sg(
509*4882a593Smuzhiyun 				ctlr->dma_rx,
510*4882a593Smuzhiyun 				xfer->rx_sg.sgl, xfer->rx_sg.nents,
511*4882a593Smuzhiyun 				DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
512*4882a593Smuzhiyun 		if (!rxdesc)
513*4882a593Smuzhiyun 			return -EINVAL;
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 		rxdesc->callback = rockchip_spi_dma_rxcb;
516*4882a593Smuzhiyun 		rxdesc->callback_param = ctlr;
517*4882a593Smuzhiyun 	}
518*4882a593Smuzhiyun 
519*4882a593Smuzhiyun 	txdesc = NULL;
520*4882a593Smuzhiyun 	if (xfer->tx_buf) {
521*4882a593Smuzhiyun 		struct dma_slave_config txconf = {
522*4882a593Smuzhiyun 			.direction = DMA_MEM_TO_DEV,
523*4882a593Smuzhiyun 			.dst_addr = rs->dma_addr_tx,
524*4882a593Smuzhiyun 			.dst_addr_width = rs->n_bytes,
525*4882a593Smuzhiyun 			.dst_maxburst = rs->fifo_len / 4,
526*4882a593Smuzhiyun 		};
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 		dmaengine_slave_config(ctlr->dma_tx, &txconf);
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 		txdesc = dmaengine_prep_slave_sg(
531*4882a593Smuzhiyun 				ctlr->dma_tx,
532*4882a593Smuzhiyun 				xfer->tx_sg.sgl, xfer->tx_sg.nents,
533*4882a593Smuzhiyun 				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
534*4882a593Smuzhiyun 		if (!txdesc) {
535*4882a593Smuzhiyun 			if (rxdesc)
536*4882a593Smuzhiyun 				dmaengine_terminate_sync(ctlr->dma_rx);
537*4882a593Smuzhiyun 			return -EINVAL;
538*4882a593Smuzhiyun 		}
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 		txdesc->callback = rockchip_spi_dma_txcb;
541*4882a593Smuzhiyun 		txdesc->callback_param = ctlr;
542*4882a593Smuzhiyun 	}
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	/* rx must be started before tx, since rx data arrives as soon as tx starts clocking */
545*4882a593Smuzhiyun 	if (rxdesc) {
546*4882a593Smuzhiyun 		atomic_or(RXDMA, &rs->state);
547*4882a593Smuzhiyun 		ctlr->dma_rx->cookie = dmaengine_submit(rxdesc);
548*4882a593Smuzhiyun 		dma_async_issue_pending(ctlr->dma_rx);
549*4882a593Smuzhiyun 	}
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	if (rs->cs_inactive)
552*4882a593Smuzhiyun 		writel_relaxed(INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR);
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	spi_enable_chip(rs, true);
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	if (txdesc) {
557*4882a593Smuzhiyun 		atomic_or(TXDMA, &rs->state);
558*4882a593Smuzhiyun 		dmaengine_submit(txdesc);
559*4882a593Smuzhiyun 		dma_async_issue_pending(ctlr->dma_tx);
560*4882a593Smuzhiyun 	}
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	/* 1 means the transfer is in progress */
563*4882a593Smuzhiyun 	return 1;
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun 
566*4882a593Smuzhiyun static int rockchip_spi_pio_transfer(struct rockchip_spi *rs,
567*4882a593Smuzhiyun 		struct spi_controller *ctlr, struct spi_transfer *xfer)
568*4882a593Smuzhiyun {
569*4882a593Smuzhiyun 	unsigned long time, timeout;
570*4882a593Smuzhiyun 	u32 speed_hz = xfer->speed_hz;
571*4882a593Smuzhiyun 	unsigned long long ms;
572*4882a593Smuzhiyun 	int ret = 0;
573*4882a593Smuzhiyun 
574*4882a593Smuzhiyun 	if (!speed_hz)
575*4882a593Smuzhiyun 		speed_hz = 100000;
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	ms = 8LL * 1000LL * xfer->len;
578*4882a593Smuzhiyun 	do_div(ms, speed_hz);
579*4882a593Smuzhiyun 	ms += ms + 200; /* some tolerance */
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun 	if (ms > UINT_MAX || ctlr->slave)
582*4882a593Smuzhiyun 		ms = UINT_MAX;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	timeout = jiffies + msecs_to_jiffies(ms);
585*4882a593Smuzhiyun 	time = jiffies;
586*4882a593Smuzhiyun 	rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0;
587*4882a593Smuzhiyun 	rs->rx_left = rs->rx ? xfer->len / rs->n_bytes : 0;
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun 	spi_enable_chip(rs, true);
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	while (rs->tx_left || rs->rx_left) {
592*4882a593Smuzhiyun 		if (rs->tx)
593*4882a593Smuzhiyun 			rockchip_spi_pio_writer(rs);
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 		if (rs->rx)
596*4882a593Smuzhiyun 			rockchip_spi_pio_reader(rs);
597*4882a593Smuzhiyun 
598*4882a593Smuzhiyun 		cpu_relax();
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 		if (time_after(time, timeout)) {
601*4882a593Smuzhiyun 			ret = -EIO;
602*4882a593Smuzhiyun 			goto out;
603*4882a593Smuzhiyun 		}
604*4882a593Smuzhiyun 	}
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	/* If tx, wait until the FIFO data has been transferred completely. */
607*4882a593Smuzhiyun 	if (rs->tx)
608*4882a593Smuzhiyun 		wait_for_tx_idle(rs, ctlr->slave);
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun out:
611*4882a593Smuzhiyun 	spi_enable_chip(rs, false);
612*4882a593Smuzhiyun 
613*4882a593Smuzhiyun 	return ret;
614*4882a593Smuzhiyun }
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun static int rockchip_spi_config(struct rockchip_spi *rs,
617*4882a593Smuzhiyun 		struct spi_device *spi, struct spi_transfer *xfer,
618*4882a593Smuzhiyun 		enum rockchip_spi_xfer_mode xfer_mode, bool slave_mode)
619*4882a593Smuzhiyun {
620*4882a593Smuzhiyun 	u32 cr0 = CR0_FRF_SPI  << CR0_FRF_OFFSET
621*4882a593Smuzhiyun 		| CR0_BHT_8BIT << CR0_BHT_OFFSET
622*4882a593Smuzhiyun 		| CR0_SSD_ONE  << CR0_SSD_OFFSET
623*4882a593Smuzhiyun 		| CR0_EM_BIG   << CR0_EM_OFFSET;
624*4882a593Smuzhiyun 	u32 cr1;
625*4882a593Smuzhiyun 	u32 dmacr = 0;
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun 	if (slave_mode)
628*4882a593Smuzhiyun 		cr0 |= CR0_OPM_SLAVE << CR0_OPM_OFFSET;
629*4882a593Smuzhiyun 	rs->slave_aborted = false;
630*4882a593Smuzhiyun 
631*4882a593Smuzhiyun 	cr0 |= rs->rsd << CR0_RSD_OFFSET;
632*4882a593Smuzhiyun 	cr0 |= rs->csm << CR0_CSM_OFFSET;
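	/*
	 * spi->mode bits 0 and 1 are SPI_CPHA and SPI_CPOL; they map directly
	 * onto CR0 bits 6 and 7 (CR0_SCPH_OFFSET/CR0_SCPOL_OFFSET), e.g.
	 * SPI_MODE_3 (CPOL | CPHA = 0x3) sets both bits in the line below.
	 */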
633*4882a593Smuzhiyun 	cr0 |= (spi->mode & 0x3U) << CR0_SCPH_OFFSET;
634*4882a593Smuzhiyun 	if (spi->mode & SPI_LSB_FIRST)
635*4882a593Smuzhiyun 		cr0 |= CR0_FBM_LSB << CR0_FBM_OFFSET;
636*4882a593Smuzhiyun 	if (spi->mode & SPI_CS_HIGH)
637*4882a593Smuzhiyun 		cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	if (xfer->rx_buf && xfer->tx_buf) {
640*4882a593Smuzhiyun 		cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
641*4882a593Smuzhiyun 	} else if (xfer->rx_buf) {
642*4882a593Smuzhiyun 		cr0 |= CR0_XFM_RO << CR0_XFM_OFFSET;
643*4882a593Smuzhiyun 	} else if (xfer->tx_buf) {
644*4882a593Smuzhiyun 		/*
645*4882a593Smuzhiyun 		 * Use the rx fifo water line in full-duplex mode to trigger
646*4882a593Smuzhiyun 		 * the tx completion interrupt for irq-mode transfers.
647*4882a593Smuzhiyun 		 */
648*4882a593Smuzhiyun 		if (xfer_mode == ROCKCHIP_SPI_IRQ)
649*4882a593Smuzhiyun 			cr0 |= CR0_XFM_TR << CR0_XFM_OFFSET;
650*4882a593Smuzhiyun 		else
651*4882a593Smuzhiyun 			cr0 |= CR0_XFM_TO << CR0_XFM_OFFSET;
652*4882a593Smuzhiyun 	} else {
653*4882a593Smuzhiyun 		dev_err(rs->dev, "no transmission buffer\n");
654*4882a593Smuzhiyun 		return -EINVAL;
655*4882a593Smuzhiyun 	}
656*4882a593Smuzhiyun 
657*4882a593Smuzhiyun 	switch (xfer->bits_per_word) {
658*4882a593Smuzhiyun 	case 4:
659*4882a593Smuzhiyun 		cr0 |= CR0_DFS_4BIT << CR0_DFS_OFFSET;
660*4882a593Smuzhiyun 		cr1 = xfer->len - 1;
661*4882a593Smuzhiyun 		break;
662*4882a593Smuzhiyun 	case 8:
663*4882a593Smuzhiyun 		cr0 |= CR0_DFS_8BIT << CR0_DFS_OFFSET;
664*4882a593Smuzhiyun 		cr1 = xfer->len - 1;
665*4882a593Smuzhiyun 		break;
666*4882a593Smuzhiyun 	case 16:
667*4882a593Smuzhiyun 		cr0 |= CR0_DFS_16BIT << CR0_DFS_OFFSET;
668*4882a593Smuzhiyun 		cr1 = xfer->len / 2 - 1;
669*4882a593Smuzhiyun 		break;
670*4882a593Smuzhiyun 	default:
671*4882a593Smuzhiyun 		/* we only whitelist 4, 8 and 16 bit words in
672*4882a593Smuzhiyun 		 * ctlr->bits_per_word_mask, so this shouldn't
673*4882a593Smuzhiyun 		 * happen
674*4882a593Smuzhiyun 		 */
675*4882a593Smuzhiyun 		dev_err(rs->dev, "unknown bits per word: %d\n",
676*4882a593Smuzhiyun 			xfer->bits_per_word);
677*4882a593Smuzhiyun 		return -EINVAL;
678*4882a593Smuzhiyun 	}
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	if (xfer_mode == ROCKCHIP_SPI_DMA) {
681*4882a593Smuzhiyun 		if (xfer->tx_buf)
682*4882a593Smuzhiyun 			dmacr |= TF_DMA_EN;
683*4882a593Smuzhiyun 		if (xfer->rx_buf)
684*4882a593Smuzhiyun 			dmacr |= RF_DMA_EN;
685*4882a593Smuzhiyun 	}
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	/*
688*4882a593Smuzhiyun 	 * If speed is larger than IO_DRIVER_4MA_MAX_SCLK_OUT,
689*4882a593Smuzhiyun 	 * set higher driver strength.
690*4882a593Smuzhiyun 	 */
691*4882a593Smuzhiyun 	if (rs->high_speed_state) {
692*4882a593Smuzhiyun 		if (rs->freq > IO_DRIVER_4MA_MAX_SCLK_OUT)
693*4882a593Smuzhiyun 			pinctrl_select_state(rs->dev->pins->p,
694*4882a593Smuzhiyun 					     rs->high_speed_state);
695*4882a593Smuzhiyun 		else
696*4882a593Smuzhiyun 			pinctrl_select_state(rs->dev->pins->p,
697*4882a593Smuzhiyun 					     rs->dev->pins->default_state);
698*4882a593Smuzhiyun 	}
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
701*4882a593Smuzhiyun 	writel_relaxed(cr1, rs->regs + ROCKCHIP_SPI_CTRLR1);
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	/* unfortunately setting the fifo threshold level to generate an
704*4882a593Smuzhiyun 	 * interrupt exactly when the fifo is full doesn't seem to work,
705*4882a593Smuzhiyun 	 * so we need the strict inequality here
706*4882a593Smuzhiyun 	 */
707*4882a593Smuzhiyun 	if ((xfer->len / rs->n_bytes) < rs->fifo_len)
708*4882a593Smuzhiyun 		writel_relaxed(xfer->len / rs->n_bytes - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
709*4882a593Smuzhiyun 	else
710*4882a593Smuzhiyun 		writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun 	writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
713*4882a593Smuzhiyun 	writel_relaxed(rockchip_spi_calc_burst_size(xfer->len / rs->n_bytes) - 1,
714*4882a593Smuzhiyun 		       rs->regs + ROCKCHIP_SPI_DMARDLR);
715*4882a593Smuzhiyun 	writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	if (rs->max_baud_div_in_cpha && xfer->speed_hz != rs->speed_hz) {
718*4882a593Smuzhiyun 		/* the minimum divisor is 2 */
719*4882a593Smuzhiyun 		if (rs->freq < 2 * xfer->speed_hz) {
720*4882a593Smuzhiyun 			clk_set_rate(rs->spiclk, 2 * xfer->speed_hz);
721*4882a593Smuzhiyun 			rs->freq = clk_get_rate(rs->spiclk);
722*4882a593Smuzhiyun 		}
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 		if ((spi->mode & SPI_CPHA) && (DIV_ROUND_UP(rs->freq, xfer->speed_hz) > rs->max_baud_div_in_cpha)) {
725*4882a593Smuzhiyun 			clk_set_rate(rs->spiclk, rs->max_baud_div_in_cpha * xfer->speed_hz);
726*4882a593Smuzhiyun 			rs->freq = clk_get_rate(rs->spiclk);
727*4882a593Smuzhiyun 		}
728*4882a593Smuzhiyun 	}
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	/* the hardware only supports an even clock divisor, so
731*4882a593Smuzhiyun 	 * round divisor = spiclk / speed up to nearest even number
732*4882a593Smuzhiyun 	 * so that the resulting speed is <= the requested speed
733*4882a593Smuzhiyun 	 */
734*4882a593Smuzhiyun 	writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
735*4882a593Smuzhiyun 			rs->regs + ROCKCHIP_SPI_BAUDR);
736*4882a593Smuzhiyun 	rs->speed_hz = xfer->speed_hz;
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 	return 0;
739*4882a593Smuzhiyun }
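/*
 * Worked example for the BAUDR calculation above, with illustrative
 * numbers only: if rs->freq = 99 MHz and xfer->speed_hz = 10 MHz, then
 * 2 * DIV_ROUND_UP(99000000, 20000000) = 2 * 5 = 10 is written to BAUDR,
 * giving an actual sclk_out of 9.9 MHz, just below the requested rate.
 * Likewise, a 24-word transfer with a 64-word fifo programs RXFTLR = 23,
 * so the rx interrupt fires exactly once when all words have arrived.
 */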
740*4882a593Smuzhiyun 
741*4882a593Smuzhiyun static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
742*4882a593Smuzhiyun {
743*4882a593Smuzhiyun 	return ROCKCHIP_SPI_MAX_TRANLEN;
744*4882a593Smuzhiyun }
745*4882a593Smuzhiyun 
746*4882a593Smuzhiyun static int rockchip_spi_slave_abort(struct spi_controller *ctlr)
747*4882a593Smuzhiyun {
748*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
749*4882a593Smuzhiyun 	u32 rx_fifo_left;
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	/* Flush rx fifo */
752*4882a593Smuzhiyun 	rx_fifo_left = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
753*4882a593Smuzhiyun 	for (; rx_fifo_left; rx_fifo_left--)
754*4882a593Smuzhiyun 		readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
755*4882a593Smuzhiyun 
756*4882a593Smuzhiyun 	rs->slave_aborted = true;
757*4882a593Smuzhiyun 	complete(&rs->xfer_done);
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	return 0;
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun static int rockchip_spi_transfer_wait(struct spi_controller *ctlr,
763*4882a593Smuzhiyun 				      struct spi_transfer *xfer)
764*4882a593Smuzhiyun {
765*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
766*4882a593Smuzhiyun 	u32 speed_hz = xfer->speed_hz;
767*4882a593Smuzhiyun 	unsigned long long ms;
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	if (spi_controller_is_slave(ctlr)) {
770*4882a593Smuzhiyun 		if (wait_for_completion_interruptible(&rs->xfer_done)) {
771*4882a593Smuzhiyun 			dev_dbg(rs->dev, "RK SPI transfer interrupted\n");
772*4882a593Smuzhiyun 			return -EINTR;
773*4882a593Smuzhiyun 		}
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 		if (rs->slave_aborted) {
776*4882a593Smuzhiyun 			dev_err(rs->dev, "RK SPI transfer slave abort\n");
777*4882a593Smuzhiyun 			return -EIO;
778*4882a593Smuzhiyun 		}
779*4882a593Smuzhiyun 	} else {
780*4882a593Smuzhiyun 		if (!speed_hz)
781*4882a593Smuzhiyun 			speed_hz = 100000;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 		ms = 8LL * 1000LL * xfer->len;
784*4882a593Smuzhiyun 		do_div(ms, speed_hz);
785*4882a593Smuzhiyun 		ms += ms + 200; /* some tolerance */
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 		if (ms > UINT_MAX)
788*4882a593Smuzhiyun 			ms = UINT_MAX;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 		ms = wait_for_completion_timeout(&rs->xfer_done,
791*4882a593Smuzhiyun 						 msecs_to_jiffies(ms));
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 		if (ms == 0) {
794*4882a593Smuzhiyun 			dev_err(rs->dev, "RK SPI transfer timed out\n");
795*4882a593Smuzhiyun 			return -ETIMEDOUT;
796*4882a593Smuzhiyun 		}
797*4882a593Smuzhiyun 	}
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun 	return 0;
800*4882a593Smuzhiyun }
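/*
 * Worked example for the master-mode timeout above (hypothetical values):
 * a 4096-byte transfer at 1 MHz gives ms = 8 * 1000 * 4096 / 1000000 = 32,
 * then ms += ms + 200, i.e. 264 ms - roughly twice the nominal transfer
 * time plus a fixed 200 ms of slack.
 */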
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun static int rockchip_spi_transfer_one(
803*4882a593Smuzhiyun 		struct spi_controller *ctlr,
804*4882a593Smuzhiyun 		struct spi_device *spi,
805*4882a593Smuzhiyun 		struct spi_transfer *xfer)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
808*4882a593Smuzhiyun 	int ret;
809*4882a593Smuzhiyun 	bool use_dma;
810*4882a593Smuzhiyun 	enum rockchip_spi_xfer_mode xfer_mode;
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	/* Zero length transfers won't trigger an interrupt on completion */
813*4882a593Smuzhiyun 	if (!xfer->len) {
814*4882a593Smuzhiyun 		complete(&rs->xfer_done);
815*4882a593Smuzhiyun 		return 1;
816*4882a593Smuzhiyun 	}
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun 	WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
819*4882a593Smuzhiyun 		(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun 	if (!xfer->tx_buf && !xfer->rx_buf) {
822*4882a593Smuzhiyun 		dev_err(rs->dev, "No buffer for transfer\n");
823*4882a593Smuzhiyun 		return -EINVAL;
824*4882a593Smuzhiyun 	}
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun 	if (xfer->len > ROCKCHIP_SPI_MAX_TRANLEN) {
827*4882a593Smuzhiyun 		dev_err(rs->dev, "Transfer is too long (%d)\n", xfer->len);
828*4882a593Smuzhiyun 		return -EINVAL;
829*4882a593Smuzhiyun 	}
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	rs->n_bytes = xfer->bits_per_word <= 8 ? 1 : 2;
832*4882a593Smuzhiyun 	rs->xfer = xfer;
833*4882a593Smuzhiyun 	if (rs->poll) {
834*4882a593Smuzhiyun 		xfer_mode = ROCKCHIP_SPI_POLL;
835*4882a593Smuzhiyun 	} else {
836*4882a593Smuzhiyun 		use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
837*4882a593Smuzhiyun 		if (use_dma)
838*4882a593Smuzhiyun 			xfer_mode = ROCKCHIP_SPI_DMA;
839*4882a593Smuzhiyun 		else
840*4882a593Smuzhiyun 			xfer_mode = ROCKCHIP_SPI_IRQ;
841*4882a593Smuzhiyun 	}
842*4882a593Smuzhiyun 
843*4882a593Smuzhiyun 	ret = rockchip_spi_config(rs, spi, xfer, xfer_mode, ctlr->slave);
844*4882a593Smuzhiyun 	if (ret)
845*4882a593Smuzhiyun 		return ret;
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	rs->tx = xfer->tx_buf;
848*4882a593Smuzhiyun 	rs->rx = xfer->rx_buf;
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	reinit_completion(&rs->xfer_done);
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	switch (xfer_mode) {
853*4882a593Smuzhiyun 	case ROCKCHIP_SPI_POLL:
854*4882a593Smuzhiyun 		ret = rockchip_spi_pio_transfer(rs, ctlr, xfer);
855*4882a593Smuzhiyun 		break;
856*4882a593Smuzhiyun 	case ROCKCHIP_SPI_DMA:
857*4882a593Smuzhiyun 		ret = rockchip_spi_prepare_dma(rs, ctlr, xfer);
858*4882a593Smuzhiyun 		break;
859*4882a593Smuzhiyun 	default:
860*4882a593Smuzhiyun 		ret = rockchip_spi_prepare_irq(rs, ctlr, xfer);
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun 	if (rs->ready) {
864*4882a593Smuzhiyun 		gpiod_set_value(rs->ready, 0);
865*4882a593Smuzhiyun 		udelay(1);
866*4882a593Smuzhiyun 		gpiod_set_value(rs->ready, 1);
867*4882a593Smuzhiyun 	}
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	if (ret > 0)
870*4882a593Smuzhiyun 		ret = rockchip_spi_transfer_wait(ctlr, xfer);
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	if (rs->ready)
873*4882a593Smuzhiyun 		gpiod_set_value(rs->ready, 0);
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	return ret;
876*4882a593Smuzhiyun }
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun static bool rockchip_spi_can_dma(struct spi_controller *ctlr,
879*4882a593Smuzhiyun 				 struct spi_device *spi,
880*4882a593Smuzhiyun 				 struct spi_transfer *xfer)
881*4882a593Smuzhiyun {
882*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
883*4882a593Smuzhiyun 	unsigned int bytes_per_word = xfer->bits_per_word <= 8 ? 1 : 2;
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	/* if the number of spi words to transfer is less than the fifo
886*4882a593Smuzhiyun 	 * length we can just fill the fifo and wait for a single irq,
887*4882a593Smuzhiyun 	 * so don't bother setting up dma
888*4882a593Smuzhiyun 	 */
889*4882a593Smuzhiyun 	return xfer->len / bytes_per_word >= rs->fifo_len;
890*4882a593Smuzhiyun }
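/*
 * Example of the DMA threshold above (illustrative sizes): with a 32-word
 * fifo and 8-bit words, a 31-byte transfer fits in the fifo and is done in
 * irq mode, while a 64-byte transfer (64 words >= 32) is handed to DMA.
 * With 16-bit words the same 64 bytes are only 32 words, still >= 32.
 */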
891*4882a593Smuzhiyun 
892*4882a593Smuzhiyun static int rockchip_spi_setup(struct spi_device *spi)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(spi->controller);
895*4882a593Smuzhiyun 	u32 cr0;
896*4882a593Smuzhiyun 
897*4882a593Smuzhiyun 	if (!spi->cs_gpiod && (spi->mode & SPI_CS_HIGH) && !rs->cs_high_supported) {
898*4882a593Smuzhiyun 		dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
899*4882a593Smuzhiyun 		return -EINVAL;
900*4882a593Smuzhiyun 	}
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	pm_runtime_get_sync(rs->dev);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	cr0 = readl_relaxed(rs->regs + ROCKCHIP_SPI_CTRLR0);
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 	cr0 |= ((spi->mode & 0x3) << CR0_SCPH_OFFSET);
907*4882a593Smuzhiyun 	if (spi->mode & SPI_CS_HIGH)
908*4882a593Smuzhiyun 		cr0 |= BIT(spi->chip_select) << CR0_SOI_OFFSET;
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
911*4882a593Smuzhiyun 
912*4882a593Smuzhiyun 	pm_runtime_put(rs->dev);
913*4882a593Smuzhiyun 
914*4882a593Smuzhiyun 	return 0;
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun static int rockchip_spi_misc_open(struct inode *inode, struct file *filp)
918*4882a593Smuzhiyun {
919*4882a593Smuzhiyun 	struct miscdevice *misc = filp->private_data;
920*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
921*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	pm_runtime_get_sync(rs->dev);
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	return 0;
926*4882a593Smuzhiyun }
927*4882a593Smuzhiyun 
928*4882a593Smuzhiyun static int rockchip_spi_misc_release(struct inode *inode, struct file *filp)
929*4882a593Smuzhiyun {
930*4882a593Smuzhiyun 	struct miscdevice *misc = filp->private_data;
931*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
932*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
933*4882a593Smuzhiyun 
934*4882a593Smuzhiyun 	pm_runtime_put(rs->dev);
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	return 0;
937*4882a593Smuzhiyun }
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun static int rockchip_spi_mmap(struct file *filp, struct vm_area_struct *vma)
940*4882a593Smuzhiyun {
941*4882a593Smuzhiyun 	struct miscdevice *misc = filp->private_data;
942*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(misc->parent);
943*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
944*4882a593Smuzhiyun 	size_t size = vma->vm_end - vma->vm_start;
945*4882a593Smuzhiyun 	int err;
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 	if (size > ROCKCHIP_SPI_REGISTER_SIZE) {
948*4882a593Smuzhiyun 		dev_warn(misc->parent, "mmap size is out of range\n");
949*4882a593Smuzhiyun 		return -EINVAL;
950*4882a593Smuzhiyun 	}
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	vma->vm_flags |= VM_IO;
953*4882a593Smuzhiyun 	vma->vm_flags |= (VM_DONTEXPAND | VM_DONTDUMP);
954*4882a593Smuzhiyun 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	err = remap_pfn_range(vma, vma->vm_start,
957*4882a593Smuzhiyun 			      __phys_to_pfn(rs->base_addr_phy),
958*4882a593Smuzhiyun 			      size, vma->vm_page_prot);
959*4882a593Smuzhiyun 	if (err)
960*4882a593Smuzhiyun 		return -EAGAIN;
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	return 0;
963*4882a593Smuzhiyun }
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun static const struct file_operations rockchip_spi_misc_fops = {
966*4882a593Smuzhiyun 	.open		= rockchip_spi_misc_open,
967*4882a593Smuzhiyun 	.release	= rockchip_spi_misc_release,
968*4882a593Smuzhiyun 	.mmap		= rockchip_spi_mmap,
969*4882a593Smuzhiyun };
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun static int rockchip_spi_probe(struct platform_device *pdev)
972*4882a593Smuzhiyun {
973*4882a593Smuzhiyun 	int ret;
974*4882a593Smuzhiyun 	struct rockchip_spi *rs;
975*4882a593Smuzhiyun 	struct spi_controller *ctlr;
976*4882a593Smuzhiyun 	struct resource *mem;
977*4882a593Smuzhiyun 	struct device_node *np = pdev->dev.of_node;
978*4882a593Smuzhiyun 	u32 rsd_nsecs, num_cs, csm;
979*4882a593Smuzhiyun 	bool slave_mode;
980*4882a593Smuzhiyun 	struct pinctrl *pinctrl = NULL;
981*4882a593Smuzhiyun 	const struct rockchip_spi_quirks *quirks_cfg;
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	slave_mode = of_property_read_bool(np, "spi-slave");
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	if (slave_mode)
986*4882a593Smuzhiyun 		ctlr = spi_alloc_slave(&pdev->dev,
987*4882a593Smuzhiyun 				sizeof(struct rockchip_spi));
988*4882a593Smuzhiyun 	else
989*4882a593Smuzhiyun 		ctlr = spi_alloc_master(&pdev->dev,
990*4882a593Smuzhiyun 				sizeof(struct rockchip_spi));
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun 	if (!ctlr)
993*4882a593Smuzhiyun 		return -ENOMEM;
994*4882a593Smuzhiyun 
995*4882a593Smuzhiyun 	platform_set_drvdata(pdev, ctlr);
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun 	rs = spi_controller_get_devdata(ctlr);
998*4882a593Smuzhiyun 	ctlr->slave = slave_mode;
999*4882a593Smuzhiyun 
1000*4882a593Smuzhiyun 	/* Get basic io resource and map it */
1001*4882a593Smuzhiyun 	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1002*4882a593Smuzhiyun 	rs->regs = devm_ioremap_resource(&pdev->dev, mem);
1003*4882a593Smuzhiyun 	if (IS_ERR(rs->regs)) {
1004*4882a593Smuzhiyun 		ret =  PTR_ERR(rs->regs);
1005*4882a593Smuzhiyun 		goto err_put_ctlr;
1006*4882a593Smuzhiyun 	}
1007*4882a593Smuzhiyun 	rs->base_addr_phy = mem->start;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	if (!has_acpi_companion(&pdev->dev))
1010*4882a593Smuzhiyun 		rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
1011*4882a593Smuzhiyun 	if (IS_ERR(rs->apb_pclk)) {
1012*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to get apb_pclk\n");
1013*4882a593Smuzhiyun 		ret = PTR_ERR(rs->apb_pclk);
1014*4882a593Smuzhiyun 		goto err_put_ctlr;
1015*4882a593Smuzhiyun 	}
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun 	if (!has_acpi_companion(&pdev->dev))
1018*4882a593Smuzhiyun 		rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
1019*4882a593Smuzhiyun 	if (IS_ERR(rs->spiclk)) {
1020*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to get spi_pclk\n");
1021*4882a593Smuzhiyun 		ret = PTR_ERR(rs->spiclk);
1022*4882a593Smuzhiyun 		goto err_put_ctlr;
1023*4882a593Smuzhiyun 	}
1024*4882a593Smuzhiyun 
1025*4882a593Smuzhiyun 	rs->sclk_in = devm_clk_get_optional(&pdev->dev, "sclk_in");
1026*4882a593Smuzhiyun 	if (IS_ERR(rs->sclk_in)) {
1027*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to get sclk_in\n");
1028*4882a593Smuzhiyun 		ret = PTR_ERR(rs->sclk_in);
1029*4882a593Smuzhiyun 		goto err_put_ctlr;
1030*4882a593Smuzhiyun 	}
1031*4882a593Smuzhiyun 
1032*4882a593Smuzhiyun 	ret = clk_prepare_enable(rs->apb_pclk);
1033*4882a593Smuzhiyun 	if (ret < 0) {
1034*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
1035*4882a593Smuzhiyun 		goto err_put_ctlr;
1036*4882a593Smuzhiyun 	}
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun 	ret = clk_prepare_enable(rs->spiclk);
1039*4882a593Smuzhiyun 	if (ret < 0) {
1040*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to enable spi_clk\n");
1041*4882a593Smuzhiyun 		goto err_disable_apbclk;
1042*4882a593Smuzhiyun 	}
1043*4882a593Smuzhiyun 
1044*4882a593Smuzhiyun 	ret = clk_prepare_enable(rs->sclk_in);
1045*4882a593Smuzhiyun 	if (ret < 0) {
1046*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to enable sclk_in\n");
1047*4882a593Smuzhiyun 		goto err_disable_spiclk;
1048*4882a593Smuzhiyun 	}
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	spi_enable_chip(rs, false);
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	ret = platform_get_irq(pdev, 0);
1053*4882a593Smuzhiyun 	if (ret < 0)
1054*4882a593Smuzhiyun 		goto err_disable_sclk_in;
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun 	ret = devm_request_threaded_irq(&pdev->dev, ret, rockchip_spi_isr, NULL,
1057*4882a593Smuzhiyun 			IRQF_ONESHOT, dev_name(&pdev->dev), ctlr);
1058*4882a593Smuzhiyun 	if (ret)
1059*4882a593Smuzhiyun 		goto err_disable_sclk_in;
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 	rs->dev = &pdev->dev;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	rs->freq = clk_get_rate(rs->spiclk);
1064*4882a593Smuzhiyun 	if (!rs->freq) {
1065*4882a593Smuzhiyun 		ret = device_property_read_u32(&pdev->dev, "clock-frequency", &rs->freq);
1066*4882a593Smuzhiyun 		if (ret) {
1067*4882a593Smuzhiyun 			dev_warn(rs->dev, "Failed to get clock or clock-frequency property\n");
1068*4882a593Smuzhiyun 			goto err_disable_sclk_in;
1069*4882a593Smuzhiyun 		}
1070*4882a593Smuzhiyun 	}
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun 	if (!device_property_read_u32(&pdev->dev, "rx-sample-delay-ns", &rsd_nsecs)) {
1073*4882a593Smuzhiyun 		/* rx sample delay is expressed in parent clock cycles (max 3) */
1074*4882a593Smuzhiyun 		u32 rsd = DIV_ROUND_CLOSEST(rsd_nsecs * (rs->freq >> 8),
1075*4882a593Smuzhiyun 				1000000000 >> 8);
1076*4882a593Smuzhiyun 		if (!rsd) {
1077*4882a593Smuzhiyun 			dev_warn(rs->dev, "%u Hz are too slow to express %u ns delay\n",
1078*4882a593Smuzhiyun 					rs->freq, rsd_nsecs);
1079*4882a593Smuzhiyun 		} else if (rsd > CR0_RSD_MAX) {
1080*4882a593Smuzhiyun 			rsd = CR0_RSD_MAX;
1081*4882a593Smuzhiyun 			dev_warn(rs->dev, "%u Hz are too fast to express %u ns delay, clamping at %u ns\n",
1082*4882a593Smuzhiyun 					rs->freq, rsd_nsecs,
1083*4882a593Smuzhiyun 					CR0_RSD_MAX * 1000000000U / rs->freq);
1084*4882a593Smuzhiyun 		}
1085*4882a593Smuzhiyun 		rs->rsd = rsd;
1086*4882a593Smuzhiyun 	}
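	/*
	 * Example of the rx-sample-delay-ns conversion above, assuming an
	 * illustrative spiclk of 100 MHz and a 20 ns property value:
	 * DIV_ROUND_CLOSEST(20 * (100000000 >> 8), 1000000000 >> 8)
	 * = DIV_ROUND_CLOSEST(7812500, 3906250) = 2 parent clock cycles.
	 * The >> 8 on both terms only keeps the 32-bit math from overflowing.
	 */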
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	if (!device_property_read_u32(&pdev->dev, "csm", &csm)) {
1089*4882a593Smuzhiyun 		if (csm > CR0_CSM_ONE)	{
1090*4882a593Smuzhiyun 			dev_warn(rs->dev, "The csm value %u exceeds the limit, clamping at %u\n",
1091*4882a593Smuzhiyun 				 csm, CR0_CSM_ONE);
1092*4882a593Smuzhiyun 			csm = CR0_CSM_ONE;
1093*4882a593Smuzhiyun 		}
1094*4882a593Smuzhiyun 		rs->csm = csm;
1095*4882a593Smuzhiyun 	}
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 	rs->version = readl_relaxed(rs->regs + ROCKCHIP_SPI_VERSION);
1098*4882a593Smuzhiyun 	rs->fifo_len = get_fifo_len(rs);
1099*4882a593Smuzhiyun 	if (!rs->fifo_len) {
1100*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to get fifo length\n");
1101*4882a593Smuzhiyun 		ret = -EINVAL;
1102*4882a593Smuzhiyun 		goto err_disable_sclk_in;
1103*4882a593Smuzhiyun 	}
1104*4882a593Smuzhiyun 	quirks_cfg = device_get_match_data(&pdev->dev);
1105*4882a593Smuzhiyun 	if (quirks_cfg)
1106*4882a593Smuzhiyun 		rs->max_baud_div_in_cpha = quirks_cfg->max_baud_div_in_cpha;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	pm_runtime_set_active(&pdev->dev);
1109*4882a593Smuzhiyun 	pm_runtime_enable(&pdev->dev);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	ctlr->auto_runtime_pm = true;
1112*4882a593Smuzhiyun 	ctlr->bus_num = pdev->id;
1113*4882a593Smuzhiyun 	ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_LSB_FIRST;
1114*4882a593Smuzhiyun 	if (slave_mode) {
1115*4882a593Smuzhiyun 		ctlr->mode_bits |= SPI_NO_CS;
1116*4882a593Smuzhiyun 		ctlr->slave_abort = rockchip_spi_slave_abort;
1117*4882a593Smuzhiyun 	} else {
1118*4882a593Smuzhiyun 		ctlr->flags = SPI_MASTER_GPIO_SS;
1119*4882a593Smuzhiyun 		ctlr->max_native_cs = ROCKCHIP_SPI_MAX_CS_NUM;
1120*4882a593Smuzhiyun 		/*
1121*4882a593Smuzhiyun 		 * rk spi0 has two native cs, spi1..5 one cs only
1122*4882a593Smuzhiyun 		 * if num-cs is missing in the dts, default to 1
1123*4882a593Smuzhiyun 		 */
1124*4882a593Smuzhiyun 		if (device_property_read_u32(&pdev->dev, "num-cs", &num_cs))
1125*4882a593Smuzhiyun 			num_cs = 1;
1126*4882a593Smuzhiyun 		ctlr->num_chipselect = num_cs;
1127*4882a593Smuzhiyun 		ctlr->use_gpio_descriptors = true;
1128*4882a593Smuzhiyun 	}
1129*4882a593Smuzhiyun 	ctlr->dev.of_node = pdev->dev.of_node;
1130*4882a593Smuzhiyun 	ctlr->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8) | SPI_BPW_MASK(4);
1131*4882a593Smuzhiyun 	ctlr->min_speed_hz = rs->freq / BAUDR_SCKDV_MAX;
1132*4882a593Smuzhiyun 	ctlr->max_speed_hz = min(rs->freq / BAUDR_SCKDV_MIN, MAX_SCLK_OUT);
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	ctlr->setup = rockchip_spi_setup;
1135*4882a593Smuzhiyun 	ctlr->set_cs = rockchip_spi_set_cs;
1136*4882a593Smuzhiyun 	ctlr->transfer_one = rockchip_spi_transfer_one;
1137*4882a593Smuzhiyun 	ctlr->max_transfer_size = rockchip_spi_max_transfer_size;
1138*4882a593Smuzhiyun 	ctlr->handle_err = rockchip_spi_handle_err;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	ctlr->dma_tx = dma_request_chan(rs->dev, "tx");
1141*4882a593Smuzhiyun 	if (IS_ERR(ctlr->dma_tx)) {
1142*4882a593Smuzhiyun 		/* Check tx to see if we need to defer probing the driver */
1143*4882a593Smuzhiyun 		if (PTR_ERR(ctlr->dma_tx) == -EPROBE_DEFER) {
1144*4882a593Smuzhiyun 			ret = -EPROBE_DEFER;
1145*4882a593Smuzhiyun 			goto err_disable_pm_runtime;
1146*4882a593Smuzhiyun 		}
1147*4882a593Smuzhiyun 		dev_warn(rs->dev, "Failed to request TX DMA channel\n");
1148*4882a593Smuzhiyun 		ctlr->dma_tx = NULL;
1149*4882a593Smuzhiyun 	}
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	ctlr->dma_rx = dma_request_chan(rs->dev, "rx");
1152*4882a593Smuzhiyun 	if (IS_ERR(ctlr->dma_rx)) {
1153*4882a593Smuzhiyun 		if (PTR_ERR(ctlr->dma_rx) == -EPROBE_DEFER) {
1154*4882a593Smuzhiyun 			ret = -EPROBE_DEFER;
1155*4882a593Smuzhiyun 			goto err_free_dma_tx;
1156*4882a593Smuzhiyun 		}
1157*4882a593Smuzhiyun 		dev_warn(rs->dev, "Failed to request RX DMA channel\n");
1158*4882a593Smuzhiyun 		ctlr->dma_rx = NULL;
1159*4882a593Smuzhiyun 	}
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	if (ctlr->dma_tx && ctlr->dma_rx) {
1162*4882a593Smuzhiyun 		rs->dma_addr_tx = mem->start + ROCKCHIP_SPI_TXDR;
1163*4882a593Smuzhiyun 		rs->dma_addr_rx = mem->start + ROCKCHIP_SPI_RXDR;
1164*4882a593Smuzhiyun 		ctlr->can_dma = rockchip_spi_can_dma;
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	rs->poll = device_property_read_bool(&pdev->dev, "rockchip,poll-only");
1168*4882a593Smuzhiyun 	init_completion(&rs->xfer_done);
1169*4882a593Smuzhiyun 	if (rs->poll && slave_mode) {
1170*4882a593Smuzhiyun 		dev_err(rs->dev, "rockchip,poll-only property is only supported in master mode\n");
1171*4882a593Smuzhiyun 		ret = -EINVAL;
1172*4882a593Smuzhiyun 		goto err_free_dma_rx;
1173*4882a593Smuzhiyun 	}
1174*4882a593Smuzhiyun 
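	/* Only ROCKCHIP_SPI_VER2_TYPE2 supports active-high CS and slave-mode cs_inactive */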
1175*4882a593Smuzhiyun 	switch (rs->version) {
1176*4882a593Smuzhiyun 	case ROCKCHIP_SPI_VER2_TYPE2:
1177*4882a593Smuzhiyun 		rs->cs_high_supported = true;
1178*4882a593Smuzhiyun 		ctlr->mode_bits |= SPI_CS_HIGH;
1179*4882a593Smuzhiyun 		if (slave_mode)
1180*4882a593Smuzhiyun 			rs->cs_inactive = true;
1181*4882a593Smuzhiyun 		else
1182*4882a593Smuzhiyun 			rs->cs_inactive = false;
1183*4882a593Smuzhiyun 		break;
1184*4882a593Smuzhiyun 	default:
1185*4882a593Smuzhiyun 		rs->cs_inactive = false;
1186*4882a593Smuzhiyun 		break;
1187*4882a593Smuzhiyun 	}
1188*4882a593Smuzhiyun 	if (device_property_read_bool(&pdev->dev, "rockchip,cs-inactive-disable"))
1189*4882a593Smuzhiyun 		rs->cs_inactive = false;
1190*4882a593Smuzhiyun 
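	/* An optional "high_speed" pinctrl state may be provided; its absence is not fatal */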
1191*4882a593Smuzhiyun 	pinctrl = devm_pinctrl_get(&pdev->dev);
1192*4882a593Smuzhiyun 	if (!IS_ERR(pinctrl)) {
1193*4882a593Smuzhiyun 		rs->high_speed_state = pinctrl_lookup_state(pinctrl, "high_speed");
1194*4882a593Smuzhiyun 		if (IS_ERR_OR_NULL(rs->high_speed_state)) {
1195*4882a593Smuzhiyun 			dev_warn(&pdev->dev, "no high_speed pinctrl state\n");
1196*4882a593Smuzhiyun 			rs->high_speed_state = NULL;
1197*4882a593Smuzhiyun 		}
1198*4882a593Smuzhiyun 	}
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	rs->ready = devm_gpiod_get_optional(&pdev->dev, "ready", GPIOD_OUT_HIGH);
1201*4882a593Smuzhiyun 	if (IS_ERR(rs->ready)) {
1202*4882a593Smuzhiyun 		ret = dev_err_probe(&pdev->dev, PTR_ERR(rs->ready),
1203*4882a593Smuzhiyun 				    "invalid ready-gpios property in node\n");
1204*4882a593Smuzhiyun 		goto err_free_dma_rx;
1205*4882a593Smuzhiyun 	}
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	ret = devm_spi_register_controller(&pdev->dev, ctlr);
1208*4882a593Smuzhiyun 	if (ret < 0) {
1209*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to register controller\n");
1210*4882a593Smuzhiyun 		goto err_free_dma_rx;
1211*4882a593Smuzhiyun 	}
1212*4882a593Smuzhiyun 
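	/* Optionally expose the controller to userspace as a misc device (rkspi-devN) */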
1213*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_MISCDEV)) {
1214*4882a593Smuzhiyun 		char misc_name[20];
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 		snprintf(misc_name, sizeof(misc_name), "rkspi-dev%d", ctlr->bus_num);
1217*4882a593Smuzhiyun 		rs->miscdev.minor = MISC_DYNAMIC_MINOR;
1218*4882a593Smuzhiyun 		rs->miscdev.name = misc_name;
1219*4882a593Smuzhiyun 		rs->miscdev.fops = &rockchip_spi_misc_fops;
1220*4882a593Smuzhiyun 		rs->miscdev.parent = &pdev->dev;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 		ret = misc_register(&rs->miscdev);
1223*4882a593Smuzhiyun 		if (ret)
1224*4882a593Smuzhiyun 			dev_err(&pdev->dev, "failed to register misc device %s\n", misc_name);
1225*4882a593Smuzhiyun 		else
1226*4882a593Smuzhiyun 			dev_info(&pdev->dev, "registered misc device %s\n", misc_name);
1227*4882a593Smuzhiyun 	}
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	dev_info(rs->dev, "probed, poll=%d, rsd=%d, cs-inactive=%d, ready=%d\n",
1230*4882a593Smuzhiyun 		 rs->poll, rs->rsd, rs->cs_inactive, rs->ready ? 1 : 0);
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun 	return 0;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun err_free_dma_rx:
1235*4882a593Smuzhiyun 	if (ctlr->dma_rx)
1236*4882a593Smuzhiyun 		dma_release_channel(ctlr->dma_rx);
1237*4882a593Smuzhiyun err_free_dma_tx:
1238*4882a593Smuzhiyun 	if (ctlr->dma_tx)
1239*4882a593Smuzhiyun 		dma_release_channel(ctlr->dma_tx);
1240*4882a593Smuzhiyun err_disable_pm_runtime:
1241*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1242*4882a593Smuzhiyun err_disable_sclk_in:
1243*4882a593Smuzhiyun 	clk_disable_unprepare(rs->sclk_in);
1244*4882a593Smuzhiyun err_disable_spiclk:
1245*4882a593Smuzhiyun 	clk_disable_unprepare(rs->spiclk);
1246*4882a593Smuzhiyun err_disable_apbclk:
1247*4882a593Smuzhiyun 	clk_disable_unprepare(rs->apb_pclk);
1248*4882a593Smuzhiyun err_put_ctlr:
1249*4882a593Smuzhiyun 	spi_controller_put(ctlr);
1250*4882a593Smuzhiyun 
1251*4882a593Smuzhiyun 	return ret;
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun static int rockchip_spi_remove(struct platform_device *pdev)
1255*4882a593Smuzhiyun {
1256*4882a593Smuzhiyun 	struct spi_controller *ctlr = spi_controller_get(platform_get_drvdata(pdev));
1257*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_SPI_ROCKCHIP_MISCDEV))
1260*4882a593Smuzhiyun 		misc_deregister(&rs->miscdev);
1261*4882a593Smuzhiyun 
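	/* Keep the device powered while the clocks are torn down, then drop runtime PM */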
1262*4882a593Smuzhiyun 	pm_runtime_get_sync(&pdev->dev);
1263*4882a593Smuzhiyun 
1264*4882a593Smuzhiyun 	clk_disable_unprepare(rs->sclk_in);
1265*4882a593Smuzhiyun 	clk_disable_unprepare(rs->spiclk);
1266*4882a593Smuzhiyun 	clk_disable_unprepare(rs->apb_pclk);
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	pm_runtime_put_noidle(&pdev->dev);
1269*4882a593Smuzhiyun 	pm_runtime_disable(&pdev->dev);
1270*4882a593Smuzhiyun 	pm_runtime_set_suspended(&pdev->dev);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	if (ctlr->dma_tx)
1273*4882a593Smuzhiyun 		dma_release_channel(ctlr->dma_tx);
1274*4882a593Smuzhiyun 	if (ctlr->dma_rx)
1275*4882a593Smuzhiyun 		dma_release_channel(ctlr->dma_rx);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	spi_controller_put(ctlr);
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 	return 0;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun #ifdef CONFIG_PM
1283*4882a593Smuzhiyun static int rockchip_spi_runtime_suspend(struct device *dev)
1284*4882a593Smuzhiyun {
1285*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1286*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	clk_disable_unprepare(rs->spiclk);
1289*4882a593Smuzhiyun 	clk_disable_unprepare(rs->apb_pclk);
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	return 0;
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun static int rockchip_spi_runtime_resume(struct device *dev)
1295*4882a593Smuzhiyun {
1296*4882a593Smuzhiyun 	int ret;
1297*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1298*4882a593Smuzhiyun 	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	ret = clk_prepare_enable(rs->apb_pclk);
1301*4882a593Smuzhiyun 	if (ret < 0)
1302*4882a593Smuzhiyun 		return ret;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	ret = clk_prepare_enable(rs->spiclk);
1305*4882a593Smuzhiyun 	if (ret < 0)
1306*4882a593Smuzhiyun 		clk_disable_unprepare(rs->apb_pclk);
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	return ret;
1309*4882a593Smuzhiyun }
1310*4882a593Smuzhiyun #endif /* CONFIG_PM */
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
1313*4882a593Smuzhiyun static int rockchip_spi_suspend(struct device *dev)
1314*4882a593Smuzhiyun {
1315*4882a593Smuzhiyun 	int ret;
1316*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	ret = spi_controller_suspend(ctlr);
1319*4882a593Smuzhiyun 	if (ret < 0)
1320*4882a593Smuzhiyun 		return ret;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	/* Avoid redundant clock disable */
1323*4882a593Smuzhiyun 	if (!pm_runtime_status_suspended(dev))
1324*4882a593Smuzhiyun 		rockchip_spi_runtime_suspend(dev);
1325*4882a593Smuzhiyun 
1326*4882a593Smuzhiyun 	pinctrl_pm_select_sleep_state(dev);
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	return 0;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun static int rockchip_spi_resume(struct device *dev)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun 	int ret;
1334*4882a593Smuzhiyun 	struct spi_controller *ctlr = dev_get_drvdata(dev);
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	pinctrl_pm_select_default_state(dev);
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 	if (!pm_runtime_status_suspended(dev)) {
1339*4882a593Smuzhiyun 		ret = rockchip_spi_runtime_resume(dev);
1340*4882a593Smuzhiyun 		if (ret < 0)
1341*4882a593Smuzhiyun 			return ret;
1342*4882a593Smuzhiyun 	}
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 	ret = spi_controller_resume(ctlr);
1345*4882a593Smuzhiyun 	if (ret < 0)
1346*4882a593Smuzhiyun 		rockchip_spi_runtime_suspend(dev);
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	return 0;
1349*4882a593Smuzhiyun }
1350*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun static const struct dev_pm_ops rockchip_spi_pm = {
1353*4882a593Smuzhiyun 	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
1354*4882a593Smuzhiyun 	SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
1355*4882a593Smuzhiyun 			   rockchip_spi_runtime_resume, NULL)
1356*4882a593Smuzhiyun };
1357*4882a593Smuzhiyun 
1358*4882a593Smuzhiyun static const struct rockchip_spi_quirks rockchip_spi_quirks_cfg = {
1359*4882a593Smuzhiyun 	.max_baud_div_in_cpha	= 4,
1360*4882a593Smuzhiyun };
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun static const struct of_device_id rockchip_spi_dt_match[] = {
1363*4882a593Smuzhiyun 	{
1364*4882a593Smuzhiyun 		.compatible = "rockchip,px30-spi",
1365*4882a593Smuzhiyun 		.data = &rockchip_spi_quirks_cfg,
1366*4882a593Smuzhiyun 	},
1367*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3036-spi", },
1368*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3066-spi", },
1369*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3188-spi", },
1370*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3228-spi", },
1371*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3288-spi", },
1372*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3308-spi", },
1373*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3328-spi", },
1374*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3368-spi", },
1375*4882a593Smuzhiyun 	{ .compatible = "rockchip,rk3399-spi", },
1376*4882a593Smuzhiyun 	{ .compatible = "rockchip,rv1106-spi", },
1377*4882a593Smuzhiyun 	{ .compatible = "rockchip,rv1108-spi", },
1378*4882a593Smuzhiyun 	{ .compatible = "rockchip,rv1126-spi", },
1379*4882a593Smuzhiyun 	{ },
1380*4882a593Smuzhiyun };
1381*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun static struct platform_driver rockchip_spi_driver = {
1384*4882a593Smuzhiyun 	.driver = {
1385*4882a593Smuzhiyun 		.name	= DRIVER_NAME,
1386*4882a593Smuzhiyun 		.pm = &rockchip_spi_pm,
1387*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(rockchip_spi_dt_match),
1388*4882a593Smuzhiyun 	},
1389*4882a593Smuzhiyun 	.probe = rockchip_spi_probe,
1390*4882a593Smuzhiyun 	.remove = rockchip_spi_remove,
1391*4882a593Smuzhiyun };
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun module_platform_driver(rockchip_spi_driver);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
1396*4882a593Smuzhiyun MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
1397*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1398