xref: /OK3568_Linux_fs/kernel/drivers/spi/spi-uniphier.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun // spi-uniphier.c - Socionext UniPhier SPI controller driver
3*4882a593Smuzhiyun // Copyright 2012      Panasonic Corporation
4*4882a593Smuzhiyun // Copyright 2016-2018 Socionext Inc.
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/kernel.h>
7*4882a593Smuzhiyun #include <linux/bitfield.h>
8*4882a593Smuzhiyun #include <linux/bitops.h>
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/dmaengine.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/spi/spi.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <asm/unaligned.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define SSI_TIMEOUT_MS		2000
21*4882a593Smuzhiyun #define SSI_POLL_TIMEOUT_US	200
22*4882a593Smuzhiyun #define SSI_MAX_CLK_DIVIDER	254
23*4882a593Smuzhiyun #define SSI_MIN_CLK_DIVIDER	4
24*4882a593Smuzhiyun 
/*
 * Driver-private state for one UniPhier SPI controller instance,
 * stored as the spi_master's devdata.
 */
struct uniphier_spi_priv {
	void __iomem *base;		/* mapped controller registers */
	dma_addr_t base_dma_addr;	/* bus address of registers, for DMA slave config */
	struct clk *clk;		/* controller clock; source for the baud divider */
	struct spi_master *master;
	struct completion xfer_done;	/* completed by the IRQ handler on end/error */

	int error;			/* error code recorded during the current transfer */
	unsigned int tx_bytes;		/* bytes not yet written to the TX FIFO */
	unsigned int rx_bytes;		/* bytes not yet read from the RX FIFO */
	const u8 *tx_buf;		/* NULL for RX-only transfers */
	u8 *rx_buf;			/* NULL for TX-only transfers */
	atomic_t dma_busy;		/* SSI_DMA_RX_BUSY / SSI_DMA_TX_BUSY flags */

	/* cache of the last-programmed settings; valid once is_save_param is true */
	bool is_save_param;
	u8 bits_per_word;
	u16 mode;
	u32 speed_hz;
};
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #define SSI_CTL			0x00
46*4882a593Smuzhiyun #define   SSI_CTL_EN		BIT(0)
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define SSI_CKS			0x04
49*4882a593Smuzhiyun #define   SSI_CKS_CKRAT_MASK	GENMASK(7, 0)
50*4882a593Smuzhiyun #define   SSI_CKS_CKPHS		BIT(14)
51*4882a593Smuzhiyun #define   SSI_CKS_CKINIT	BIT(13)
52*4882a593Smuzhiyun #define   SSI_CKS_CKDLY		BIT(12)
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #define SSI_TXWDS		0x08
55*4882a593Smuzhiyun #define   SSI_TXWDS_WDLEN_MASK	GENMASK(13, 8)
56*4882a593Smuzhiyun #define   SSI_TXWDS_TDTF_MASK	GENMASK(7, 6)
57*4882a593Smuzhiyun #define   SSI_TXWDS_DTLEN_MASK	GENMASK(5, 0)
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun #define SSI_RXWDS		0x0c
60*4882a593Smuzhiyun #define   SSI_RXWDS_DTLEN_MASK	GENMASK(5, 0)
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun #define SSI_FPS			0x10
63*4882a593Smuzhiyun #define   SSI_FPS_FSPOL		BIT(15)
64*4882a593Smuzhiyun #define   SSI_FPS_FSTRT		BIT(14)
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun #define SSI_SR			0x14
67*4882a593Smuzhiyun #define   SSI_SR_BUSY		BIT(7)
68*4882a593Smuzhiyun #define   SSI_SR_RNE		BIT(0)
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun #define SSI_IE			0x18
71*4882a593Smuzhiyun #define   SSI_IE_TCIE		BIT(4)
72*4882a593Smuzhiyun #define   SSI_IE_RCIE		BIT(3)
73*4882a593Smuzhiyun #define   SSI_IE_TXRE		BIT(2)
74*4882a593Smuzhiyun #define   SSI_IE_RXRE		BIT(1)
75*4882a593Smuzhiyun #define   SSI_IE_RORIE		BIT(0)
76*4882a593Smuzhiyun #define   SSI_IE_ALL_MASK	GENMASK(4, 0)
77*4882a593Smuzhiyun 
78*4882a593Smuzhiyun #define SSI_IS			0x1c
79*4882a593Smuzhiyun #define   SSI_IS_RXRS		BIT(9)
80*4882a593Smuzhiyun #define   SSI_IS_RCID		BIT(3)
81*4882a593Smuzhiyun #define   SSI_IS_RORID		BIT(0)
82*4882a593Smuzhiyun 
83*4882a593Smuzhiyun #define SSI_IC			0x1c
84*4882a593Smuzhiyun #define   SSI_IC_TCIC		BIT(4)
85*4882a593Smuzhiyun #define   SSI_IC_RCIC		BIT(3)
86*4882a593Smuzhiyun #define   SSI_IC_RORIC		BIT(0)
87*4882a593Smuzhiyun 
88*4882a593Smuzhiyun #define SSI_FC			0x20
89*4882a593Smuzhiyun #define   SSI_FC_TXFFL		BIT(12)
90*4882a593Smuzhiyun #define   SSI_FC_TXFTH_MASK	GENMASK(11, 8)
91*4882a593Smuzhiyun #define   SSI_FC_RXFFL		BIT(4)
92*4882a593Smuzhiyun #define   SSI_FC_RXFTH_MASK	GENMASK(3, 0)
93*4882a593Smuzhiyun 
94*4882a593Smuzhiyun #define SSI_TXDR		0x24
95*4882a593Smuzhiyun #define SSI_RXDR		0x24
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun #define SSI_FIFO_DEPTH		8U
98*4882a593Smuzhiyun #define SSI_FIFO_BURST_NUM	1
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun #define SSI_DMA_RX_BUSY		BIT(1)
101*4882a593Smuzhiyun #define SSI_DMA_TX_BUSY		BIT(0)
102*4882a593Smuzhiyun 
/*
 * Number of bytes that carry one word of @bits bits:
 * 1 byte for up to 8 bits, 2 bytes for up to 16 bits, 4 bytes otherwise.
 */
static inline unsigned int bytes_per_word(unsigned int bits)
{
	if (bits <= 8)
		return 1;
	if (bits <= 16)
		return 2;
	return 4;
}
107*4882a593Smuzhiyun 
/* Set @mask bits in the interrupt-enable register (read-modify-write). */
static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
					   u32 mask)
{
	u32 ie = readl(priv->base + SSI_IE);

	writel(ie | mask, priv->base + SSI_IE);
}
117*4882a593Smuzhiyun 
/* Clear @mask bits in the interrupt-enable register (read-modify-write). */
static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
					    u32 mask)
{
	u32 ie = readl(priv->base + SSI_IE);

	writel(ie & ~mask, priv->base + SSI_IE);
}
127*4882a593Smuzhiyun 
uniphier_spi_set_mode(struct spi_device * spi)128*4882a593Smuzhiyun static void uniphier_spi_set_mode(struct spi_device *spi)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
131*4882a593Smuzhiyun 	u32 val1, val2;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	/*
134*4882a593Smuzhiyun 	 * clock setting
135*4882a593Smuzhiyun 	 * CKPHS    capture timing. 0:rising edge, 1:falling edge
136*4882a593Smuzhiyun 	 * CKINIT   clock initial level. 0:low, 1:high
137*4882a593Smuzhiyun 	 * CKDLY    clock delay. 0:no delay, 1:delay depending on FSTRT
138*4882a593Smuzhiyun 	 *          (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
139*4882a593Smuzhiyun 	 *
140*4882a593Smuzhiyun 	 * frame setting
141*4882a593Smuzhiyun 	 * FSPOL    frame signal porarity. 0: low, 1: high
142*4882a593Smuzhiyun 	 * FSTRT    start frame timing
143*4882a593Smuzhiyun 	 *          0: rising edge of clock, 1: falling edge of clock
144*4882a593Smuzhiyun 	 */
145*4882a593Smuzhiyun 	switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
146*4882a593Smuzhiyun 	case SPI_MODE_0:
147*4882a593Smuzhiyun 		/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
148*4882a593Smuzhiyun 		val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
149*4882a593Smuzhiyun 		val2 = 0;
150*4882a593Smuzhiyun 		break;
151*4882a593Smuzhiyun 	case SPI_MODE_1:
152*4882a593Smuzhiyun 		/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
153*4882a593Smuzhiyun 		val1 = 0;
154*4882a593Smuzhiyun 		val2 = SSI_FPS_FSTRT;
155*4882a593Smuzhiyun 		break;
156*4882a593Smuzhiyun 	case SPI_MODE_2:
157*4882a593Smuzhiyun 		/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
158*4882a593Smuzhiyun 		val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
159*4882a593Smuzhiyun 		val2 = SSI_FPS_FSTRT;
160*4882a593Smuzhiyun 		break;
161*4882a593Smuzhiyun 	case SPI_MODE_3:
162*4882a593Smuzhiyun 		/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
163*4882a593Smuzhiyun 		val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
164*4882a593Smuzhiyun 		val2 = 0;
165*4882a593Smuzhiyun 		break;
166*4882a593Smuzhiyun 	}
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	if (!(spi->mode & SPI_CS_HIGH))
169*4882a593Smuzhiyun 		val2 |= SSI_FPS_FSPOL;
170*4882a593Smuzhiyun 
171*4882a593Smuzhiyun 	writel(val1, priv->base + SSI_CKS);
172*4882a593Smuzhiyun 	writel(val2, priv->base + SSI_FPS);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	val1 = 0;
175*4882a593Smuzhiyun 	if (spi->mode & SPI_LSB_FIRST)
176*4882a593Smuzhiyun 		val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
177*4882a593Smuzhiyun 	writel(val1, priv->base + SSI_TXWDS);
178*4882a593Smuzhiyun 	writel(val1, priv->base + SSI_RXWDS);
179*4882a593Smuzhiyun }
180*4882a593Smuzhiyun 
uniphier_spi_set_transfer_size(struct spi_device * spi,int size)181*4882a593Smuzhiyun static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
184*4882a593Smuzhiyun 	u32 val;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	val = readl(priv->base + SSI_TXWDS);
187*4882a593Smuzhiyun 	val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
188*4882a593Smuzhiyun 	val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
189*4882a593Smuzhiyun 	val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
190*4882a593Smuzhiyun 	writel(val, priv->base + SSI_TXWDS);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	val = readl(priv->base + SSI_RXWDS);
193*4882a593Smuzhiyun 	val &= ~SSI_RXWDS_DTLEN_MASK;
194*4882a593Smuzhiyun 	val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
195*4882a593Smuzhiyun 	writel(val, priv->base + SSI_RXWDS);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun 
uniphier_spi_set_baudrate(struct spi_device * spi,unsigned int speed)198*4882a593Smuzhiyun static void uniphier_spi_set_baudrate(struct spi_device *spi,
199*4882a593Smuzhiyun 				      unsigned int speed)
200*4882a593Smuzhiyun {
201*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
202*4882a593Smuzhiyun 	u32 val, ckdiv;
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	/*
205*4882a593Smuzhiyun 	 * the supported rates are even numbers from 4 to 254. (4,6,8...254)
206*4882a593Smuzhiyun 	 * round up as we look for equal or less speed
207*4882a593Smuzhiyun 	 */
208*4882a593Smuzhiyun 	ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
209*4882a593Smuzhiyun 	ckdiv = round_up(ckdiv, 2);
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	val = readl(priv->base + SSI_CKS);
212*4882a593Smuzhiyun 	val &= ~SSI_CKS_CKRAT_MASK;
213*4882a593Smuzhiyun 	val |= ckdiv & SSI_CKS_CKRAT_MASK;
214*4882a593Smuzhiyun 	writel(val, priv->base + SSI_CKS);
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun 
uniphier_spi_setup_transfer(struct spi_device * spi,struct spi_transfer * t)217*4882a593Smuzhiyun static void uniphier_spi_setup_transfer(struct spi_device *spi,
218*4882a593Smuzhiyun 				       struct spi_transfer *t)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
221*4882a593Smuzhiyun 	u32 val;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	priv->error = 0;
224*4882a593Smuzhiyun 	priv->tx_buf = t->tx_buf;
225*4882a593Smuzhiyun 	priv->rx_buf = t->rx_buf;
226*4882a593Smuzhiyun 	priv->tx_bytes = priv->rx_bytes = t->len;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	if (!priv->is_save_param || priv->mode != spi->mode) {
229*4882a593Smuzhiyun 		uniphier_spi_set_mode(spi);
230*4882a593Smuzhiyun 		priv->mode = spi->mode;
231*4882a593Smuzhiyun 		priv->is_save_param = false;
232*4882a593Smuzhiyun 	}
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
235*4882a593Smuzhiyun 		uniphier_spi_set_transfer_size(spi, t->bits_per_word);
236*4882a593Smuzhiyun 		priv->bits_per_word = t->bits_per_word;
237*4882a593Smuzhiyun 	}
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
240*4882a593Smuzhiyun 		uniphier_spi_set_baudrate(spi, t->speed_hz);
241*4882a593Smuzhiyun 		priv->speed_hz = t->speed_hz;
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	priv->is_save_param = true;
245*4882a593Smuzhiyun 
246*4882a593Smuzhiyun 	/* reset FIFOs */
247*4882a593Smuzhiyun 	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
248*4882a593Smuzhiyun 	writel(val, priv->base + SSI_FC);
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun 
uniphier_spi_send(struct uniphier_spi_priv * priv)251*4882a593Smuzhiyun static void uniphier_spi_send(struct uniphier_spi_priv *priv)
252*4882a593Smuzhiyun {
253*4882a593Smuzhiyun 	int wsize;
254*4882a593Smuzhiyun 	u32 val = 0;
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
257*4882a593Smuzhiyun 	priv->tx_bytes -= wsize;
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	if (priv->tx_buf) {
260*4882a593Smuzhiyun 		switch (wsize) {
261*4882a593Smuzhiyun 		case 1:
262*4882a593Smuzhiyun 			val = *priv->tx_buf;
263*4882a593Smuzhiyun 			break;
264*4882a593Smuzhiyun 		case 2:
265*4882a593Smuzhiyun 			val = get_unaligned_le16(priv->tx_buf);
266*4882a593Smuzhiyun 			break;
267*4882a593Smuzhiyun 		case 4:
268*4882a593Smuzhiyun 			val = get_unaligned_le32(priv->tx_buf);
269*4882a593Smuzhiyun 			break;
270*4882a593Smuzhiyun 		}
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 		priv->tx_buf += wsize;
273*4882a593Smuzhiyun 	}
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	writel(val, priv->base + SSI_TXDR);
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun 
uniphier_spi_recv(struct uniphier_spi_priv * priv)278*4882a593Smuzhiyun static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun 	int rsize;
281*4882a593Smuzhiyun 	u32 val;
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
284*4882a593Smuzhiyun 	priv->rx_bytes -= rsize;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	val = readl(priv->base + SSI_RXDR);
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	if (priv->rx_buf) {
289*4882a593Smuzhiyun 		switch (rsize) {
290*4882a593Smuzhiyun 		case 1:
291*4882a593Smuzhiyun 			*priv->rx_buf = val;
292*4882a593Smuzhiyun 			break;
293*4882a593Smuzhiyun 		case 2:
294*4882a593Smuzhiyun 			put_unaligned_le16(val, priv->rx_buf);
295*4882a593Smuzhiyun 			break;
296*4882a593Smuzhiyun 		case 4:
297*4882a593Smuzhiyun 			put_unaligned_le32(val, priv->rx_buf);
298*4882a593Smuzhiyun 			break;
299*4882a593Smuzhiyun 		}
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 		priv->rx_buf += rsize;
302*4882a593Smuzhiyun 	}
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun 
uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv * priv,unsigned int threshold)305*4882a593Smuzhiyun static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
306*4882a593Smuzhiyun 					    unsigned int threshold)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	u32 val;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	val = readl(priv->base + SSI_FC);
311*4882a593Smuzhiyun 	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
312*4882a593Smuzhiyun 	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
313*4882a593Smuzhiyun 	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
314*4882a593Smuzhiyun 	writel(val, priv->base + SSI_FC);
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun 
/*
 * Program FIFO thresholds for the remaining data and top up the TX FIFO.
 *
 * The RX threshold is the number of words still expected, capped at the
 * FIFO depth.  fill_words subtracts the words already in flight (sent
 * but not yet received, i.e. rx_bytes - tx_bytes), so the number of
 * outstanding words never exceeds the programmed threshold.
 */
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
	unsigned int fifo_threshold, fill_words;
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);

	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);

	/* fill the TX FIFO up to the threshold */
	fill_words = fifo_threshold -
		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);

	while (fill_words--)
		uniphier_spi_send(priv);
}
333*4882a593Smuzhiyun 
/* Assert/deassert chip select by toggling the frame-signal polarity bit. */
static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 fps = readl(priv->base + SSI_FPS);

	if (enable)
		fps |= SSI_FPS_FSPOL;
	else
		fps &= ~SSI_FPS_FSPOL;

	writel(fps, priv->base + SSI_FPS);
}
348*4882a593Smuzhiyun 
uniphier_spi_can_dma(struct spi_master * master,struct spi_device * spi,struct spi_transfer * t)349*4882a593Smuzhiyun static bool uniphier_spi_can_dma(struct spi_master *master,
350*4882a593Smuzhiyun 				 struct spi_device *spi,
351*4882a593Smuzhiyun 				 struct spi_transfer *t)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
354*4882a593Smuzhiyun 	unsigned int bpw = bytes_per_word(priv->bits_per_word);
355*4882a593Smuzhiyun 
356*4882a593Smuzhiyun 	if ((!master->dma_tx && !master->dma_rx)
357*4882a593Smuzhiyun 	    || (!master->dma_tx && t->tx_buf)
358*4882a593Smuzhiyun 	    || (!master->dma_rx && t->rx_buf))
359*4882a593Smuzhiyun 		return false;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun 
uniphier_spi_dma_rxcb(void * data)364*4882a593Smuzhiyun static void uniphier_spi_dma_rxcb(void *data)
365*4882a593Smuzhiyun {
366*4882a593Smuzhiyun 	struct spi_master *master = data;
367*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
368*4882a593Smuzhiyun 	int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	uniphier_spi_irq_disable(priv, SSI_IE_RXRE);
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	if (!(state & SSI_DMA_TX_BUSY))
373*4882a593Smuzhiyun 		spi_finalize_current_transfer(master);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
uniphier_spi_dma_txcb(void * data)376*4882a593Smuzhiyun static void uniphier_spi_dma_txcb(void *data)
377*4882a593Smuzhiyun {
378*4882a593Smuzhiyun 	struct spi_master *master = data;
379*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
380*4882a593Smuzhiyun 	int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	uniphier_spi_irq_disable(priv, SSI_IE_TXRE);
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	if (!(state & SSI_DMA_RX_BUSY))
385*4882a593Smuzhiyun 		spi_finalize_current_transfer(master);
386*4882a593Smuzhiyun }
387*4882a593Smuzhiyun 
/*
 * Start a DMA-driven transfer.
 *
 * Returns a positive value when the SPI core must wait for completion
 * (spi_finalize_current_transfer() is then called from the DMA
 * callbacks), 0 when neither direction has a buffer, or -EINVAL if
 * descriptor preparation failed.
 */
static int uniphier_spi_transfer_one_dma(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	int buswidth;

	atomic_set(&priv->dma_busy, 0);

	/* pace the DMA in SSI_FIFO_BURST_NUM-word bursts */
	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

	/* DMA access width follows the configured word size */
	if (priv->bits_per_word <= 8)
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (priv->bits_per_word <= 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	if (priv->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = priv->base_dma_addr + SSI_RXDR,
			.src_addr_width = buswidth,
			.src_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
			master->dma_rx,
			t->rx_sg.sgl, t->rx_sg.nents,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto out_err_prep;

		rxdesc->callback = uniphier_spi_dma_rxcb;
		rxdesc->callback_param = master;

		/* RX-ready request line drives the device-to-memory channel */
		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (priv->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = priv->base_dma_addr + SSI_TXDR,
			.dst_addr_width = buswidth,
			.dst_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
			master->dma_tx,
			t->tx_sg.sgl, t->tx_sg.nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto out_err_prep;

		txdesc->callback = uniphier_spi_dma_txcb;
		txdesc->callback_param = master;

		/* TX-ready request line drives the memory-to-device channel */
		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* signal that we need to wait for completion */
	return (priv->tx_buf || priv->rx_buf);

out_err_prep:
	/*
	 * Only the RX side can have been started when TX prep fails.
	 * NOTE(review): the SSI_IE_RXRE interrupt enabled above is not
	 * disabled on this path — confirm uniphier_spi_handle_err()
	 * masks it when the core aborts the message.
	 */
	if (rxdesc)
		dmaengine_terminate_sync(master->dma_rx);

	return -EINVAL;
}
470*4882a593Smuzhiyun 
/*
 * Run one transfer using the RX-complete interrupt.
 *
 * The TX FIFO is primed here; uniphier_spi_handler() then drains the RX
 * FIFO and refills TX until the transfer ends, finally completing
 * xfer_done.
 *
 * Returns 0 on success, -ETIMEDOUT after SSI_TIMEOUT_MS with no
 * completion, or the error recorded by the IRQ handler (-EIO).
 */
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct device *dev = master->dev.parent;
	unsigned long time_left;

	/* rearm the completion before any interrupt can fire */
	reinit_completion(&priv->xfer_done);

	uniphier_spi_fill_tx_fifo(priv);

	uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	time_left = wait_for_completion_timeout(&priv->xfer_done,
					msecs_to_jiffies(SSI_TIMEOUT_MS));

	uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	if (!time_left) {
		dev_err(dev, "transfer timeout.\n");
		return -ETIMEDOUT;
	}

	return priv->error;
}
497*4882a593Smuzhiyun 
/*
 * Run one transfer by busy-polling the status register.
 *
 * @loop is a budget of roughly SSI_POLL_TIMEOUT_US (100ns per
 * iteration) shared across the whole transfer; if it is exhausted the
 * transfer falls back to the interrupt-driven path.
 */
static int uniphier_spi_transfer_one_poll(struct spi_master *master,
					  struct spi_device *spi,
					  struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int loop = SSI_POLL_TIMEOUT_US * 10;

	while (priv->tx_bytes) {
		uniphier_spi_fill_tx_fifo(priv);

		/* drain every word in flight (sent but not yet received) */
		while ((priv->rx_bytes - priv->tx_bytes) > 0) {
			while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
								&& loop--)
				ndelay(100);

			/* loop is exactly -1 only when the budget ran out */
			if (loop == -1)
				goto irq_transfer;

			uniphier_spi_recv(priv);
		}
	}

	return 0;

irq_transfer:
	return uniphier_spi_transfer_one_irq(master, spi, t);
}
525*4882a593Smuzhiyun 
uniphier_spi_transfer_one(struct spi_master * master,struct spi_device * spi,struct spi_transfer * t)526*4882a593Smuzhiyun static int uniphier_spi_transfer_one(struct spi_master *master,
527*4882a593Smuzhiyun 				     struct spi_device *spi,
528*4882a593Smuzhiyun 				     struct spi_transfer *t)
529*4882a593Smuzhiyun {
530*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
531*4882a593Smuzhiyun 	unsigned long threshold;
532*4882a593Smuzhiyun 	bool use_dma;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	/* Terminate and return success for 0 byte length transfer */
535*4882a593Smuzhiyun 	if (!t->len)
536*4882a593Smuzhiyun 		return 0;
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	uniphier_spi_setup_transfer(spi, t);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
541*4882a593Smuzhiyun 	if (use_dma)
542*4882a593Smuzhiyun 		return uniphier_spi_transfer_one_dma(master, spi, t);
543*4882a593Smuzhiyun 
544*4882a593Smuzhiyun 	/*
545*4882a593Smuzhiyun 	 * If the transfer operation will take longer than
546*4882a593Smuzhiyun 	 * SSI_POLL_TIMEOUT_US, it should use irq.
547*4882a593Smuzhiyun 	 */
548*4882a593Smuzhiyun 	threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
549*4882a593Smuzhiyun 					USEC_PER_SEC * BITS_PER_BYTE);
550*4882a593Smuzhiyun 	if (t->len > threshold)
551*4882a593Smuzhiyun 		return uniphier_spi_transfer_one_irq(master, spi, t);
552*4882a593Smuzhiyun 	else
553*4882a593Smuzhiyun 		return uniphier_spi_transfer_one_poll(master, spi, t);
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun 
uniphier_spi_prepare_transfer_hardware(struct spi_master * master)556*4882a593Smuzhiyun static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
557*4882a593Smuzhiyun {
558*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
559*4882a593Smuzhiyun 
560*4882a593Smuzhiyun 	writel(SSI_CTL_EN, priv->base + SSI_CTL);
561*4882a593Smuzhiyun 
562*4882a593Smuzhiyun 	return 0;
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun 
uniphier_spi_unprepare_transfer_hardware(struct spi_master * master)565*4882a593Smuzhiyun static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
566*4882a593Smuzhiyun {
567*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 	writel(0, priv->base + SSI_CTL);
570*4882a593Smuzhiyun 
571*4882a593Smuzhiyun 	return 0;
572*4882a593Smuzhiyun }
573*4882a593Smuzhiyun 
/*
 * Error/timeout recovery: stop the controller, flush both FIFOs, mask
 * all interrupt sources, and terminate whichever DMA channels are
 * still marked busy, clearing their busy flags.
 */
static void uniphier_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	u32 val;

	/* stop running spi transfer */
	writel(0, priv->base + SSI_CTL);

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);

	uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);

	if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
		dmaengine_terminate_async(master->dma_tx);
		atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
	}

	if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
		dmaengine_terminate_async(master->dma_rx);
		atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
	}
}
599*4882a593Smuzhiyun 
/*
 * Interrupt handler for the interrupt-driven PIO path.
 *
 * SSI_IS and SSI_IC share offset 0x1c: the handler reads the interrupt
 * status and then writes the clear bits back to the same offset.
 */
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
	struct uniphier_spi_priv *priv = dev_id;
	u32 val, stat;

	stat = readl(priv->base + SSI_IS);
	val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
	writel(val, priv->base + SSI_IC);

	/* rx fifo overrun: record the error and finish the transfer */
	if (stat & SSI_IS_RORID) {
		priv->error = -EIO;
		goto done;
	}

	/* rx complete */
	if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
		/* drain every word in flight (sent but not yet received) */
		while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
				(priv->rx_bytes - priv->tx_bytes) > 0)
			uniphier_spi_recv(priv);

		/*
		 * Leftover data in the RX FIFO, or RX/TX counters out of
		 * step, means the transfer got out of sync: report -EIO.
		 */
		if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
				(priv->rx_bytes != priv->tx_bytes)) {
			priv->error = -EIO;
			goto done;
		} else if (priv->rx_bytes == 0)
			/* all bytes received: the transfer is complete */
			goto done;

		/* next tx transfer */
		uniphier_spi_fill_tx_fifo(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;

done:
	complete(&priv->xfer_done);
	return IRQ_HANDLED;
}
640*4882a593Smuzhiyun 
/*
 * uniphier_spi_probe - bind the UniPhier SPI controller to a platform device.
 *
 * Acquires register space, clock, IRQ and (optionally) DMA channels, fills
 * in the spi_master callbacks and registers the controller.  On any failure
 * the resources acquired so far are unwound via the goto labels at the
 * bottom, in strict reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno.
 */
static int uniphier_spi_probe(struct platform_device *pdev)
{
	struct uniphier_spi_priv *priv;
	struct spi_master *master;
	struct resource *res;
	struct dma_slave_caps caps;
	u32 dma_tx_burst = 0, dma_rx_burst = 0;
	unsigned long clk_rate;
	int irq;
	int ret;

	/* Controller and driver-private state are allocated together. */
	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	priv = spi_master_get_devdata(master);
	priv->master = master;
	priv->is_save_param = false;

	priv->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_master_put;
	}
	/*
	 * Physical base of the register window, kept for DMA slave setup
	 * (presumably to address the FIFO registers) — used elsewhere in
	 * this file.
	 */
	priv->base_dma_addr = res->start;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto out_master_put;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto out_master_put;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_disable_clk;
	}

	/* The handler receives priv as dev_id. */
	ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
			       0, "uniphier-spi", priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto out_disable_clk;
	}

	init_completion(&priv->xfer_done);

	clk_rate = clk_get_rate(priv->clk);

	/* Usable SCLK range follows from the hardware divider limits. */
	master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
	master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);

	master->set_cs = uniphier_spi_set_cs;
	master->transfer_one = uniphier_spi_transfer_one;
	master->prepare_transfer_hardware
				= uniphier_spi_prepare_transfer_hardware;
	master->unprepare_transfer_hardware
				= uniphier_spi_unprepare_transfer_hardware;
	master->handle_err = uniphier_spi_handle_err;
	master->can_dma = uniphier_spi_can_dma;

	master->num_chipselect = 1;
	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

	/*
	 * DMA channels are optional: propagate -EPROBE_DEFER so we retry
	 * once the DMA provider is up, but on any other error fall back to
	 * PIO (channel pointer NULL, burst effectively unlimited).
	 */
	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR_OR_NULL(master->dma_tx)) {
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_tx = NULL;
		dma_tx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_tx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
				ret);
			goto out_release_dma;
		}
		dma_tx_burst = caps.max_burst;
	}

	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR_OR_NULL(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_release_dma;
		}
		master->dma_rx = NULL;
		dma_rx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_rx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
				ret);
			goto out_release_dma;
		}
		dma_rx_burst = caps.max_burst;
	}

	/* A DMA transfer is bounded by the weaker of the two channels. */
	master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_release_dma;

	return 0;

	/* Error unwinding — reverse order of acquisition. */
out_release_dma:
	if (!IS_ERR_OR_NULL(master->dma_rx)) {
		dma_release_channel(master->dma_rx);
		master->dma_rx = NULL;
	}
	if (!IS_ERR_OR_NULL(master->dma_tx)) {
		dma_release_channel(master->dma_tx);
		master->dma_tx = NULL;
	}

out_disable_clk:
	clk_disable_unprepare(priv->clk);

out_master_put:
	spi_master_put(master);
	return ret;
}
777*4882a593Smuzhiyun 
uniphier_spi_remove(struct platform_device * pdev)778*4882a593Smuzhiyun static int uniphier_spi_remove(struct platform_device *pdev)
779*4882a593Smuzhiyun {
780*4882a593Smuzhiyun 	struct spi_master *master = platform_get_drvdata(pdev);
781*4882a593Smuzhiyun 	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	if (master->dma_tx)
784*4882a593Smuzhiyun 		dma_release_channel(master->dma_tx);
785*4882a593Smuzhiyun 	if (master->dma_rx)
786*4882a593Smuzhiyun 		dma_release_channel(master->dma_rx);
787*4882a593Smuzhiyun 
788*4882a593Smuzhiyun 	clk_disable_unprepare(priv->clk);
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	return 0;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun static const struct of_device_id uniphier_spi_match[] = {
794*4882a593Smuzhiyun 	{ .compatible = "socionext,uniphier-scssi" },
795*4882a593Smuzhiyun 	{ /* sentinel */ }
796*4882a593Smuzhiyun };
797*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, uniphier_spi_match);
798*4882a593Smuzhiyun 
799*4882a593Smuzhiyun static struct platform_driver uniphier_spi_driver = {
800*4882a593Smuzhiyun 	.probe = uniphier_spi_probe,
801*4882a593Smuzhiyun 	.remove = uniphier_spi_remove,
802*4882a593Smuzhiyun 	.driver = {
803*4882a593Smuzhiyun 		.name = "uniphier-spi",
804*4882a593Smuzhiyun 		.of_match_table = uniphier_spi_match,
805*4882a593Smuzhiyun 	},
806*4882a593Smuzhiyun };
807*4882a593Smuzhiyun module_platform_driver(uniphier_spi_driver);
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
810*4882a593Smuzhiyun MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
811*4882a593Smuzhiyun MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
812*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
813