xref: /OK3568_Linux_fs/kernel/drivers/spi/spi-pic32-sqi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * PIC32 Quad SPI controller driver.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Purna Chandra Mandal <purna.mandal@microchip.com>
6*4882a593Smuzhiyun  * Copyright (c) 2016, Microchip Technology Inc.
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/dma-mapping.h>
11*4882a593Smuzhiyun #include <linux/interrupt.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/iopoll.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/of.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <linux/spi/spi.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun /* SQI registers */
21*4882a593Smuzhiyun #define PESQI_XIP_CONF1_REG	0x00
22*4882a593Smuzhiyun #define PESQI_XIP_CONF2_REG	0x04
23*4882a593Smuzhiyun #define PESQI_CONF_REG		0x08
24*4882a593Smuzhiyun #define PESQI_CTRL_REG		0x0C
25*4882a593Smuzhiyun #define PESQI_CLK_CTRL_REG	0x10
26*4882a593Smuzhiyun #define PESQI_CMD_THRES_REG	0x14
27*4882a593Smuzhiyun #define PESQI_INT_THRES_REG	0x18
28*4882a593Smuzhiyun #define PESQI_INT_ENABLE_REG	0x1C
29*4882a593Smuzhiyun #define PESQI_INT_STAT_REG	0x20
30*4882a593Smuzhiyun #define PESQI_TX_DATA_REG	0x24
31*4882a593Smuzhiyun #define PESQI_RX_DATA_REG	0x28
32*4882a593Smuzhiyun #define PESQI_STAT1_REG		0x2C
33*4882a593Smuzhiyun #define PESQI_STAT2_REG		0x30
34*4882a593Smuzhiyun #define PESQI_BD_CTRL_REG	0x34
35*4882a593Smuzhiyun #define PESQI_BD_CUR_ADDR_REG	0x38
36*4882a593Smuzhiyun #define PESQI_BD_BASE_ADDR_REG	0x40
37*4882a593Smuzhiyun #define PESQI_BD_STAT_REG	0x44
38*4882a593Smuzhiyun #define PESQI_BD_POLL_CTRL_REG	0x48
39*4882a593Smuzhiyun #define PESQI_BD_TX_DMA_STAT_REG	0x4C
40*4882a593Smuzhiyun #define PESQI_BD_RX_DMA_STAT_REG	0x50
41*4882a593Smuzhiyun #define PESQI_THRES_REG		0x54
42*4882a593Smuzhiyun #define PESQI_INT_SIGEN_REG	0x58
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /* PESQI_CONF_REG fields */
45*4882a593Smuzhiyun #define PESQI_MODE		0x7
46*4882a593Smuzhiyun #define  PESQI_MODE_BOOT	0
47*4882a593Smuzhiyun #define  PESQI_MODE_PIO		1
48*4882a593Smuzhiyun #define  PESQI_MODE_DMA		2
49*4882a593Smuzhiyun #define  PESQI_MODE_XIP		3
50*4882a593Smuzhiyun #define PESQI_MODE_SHIFT	0
51*4882a593Smuzhiyun #define PESQI_CPHA		BIT(3)
52*4882a593Smuzhiyun #define PESQI_CPOL		BIT(4)
53*4882a593Smuzhiyun #define PESQI_LSBF		BIT(5)
54*4882a593Smuzhiyun #define PESQI_RXLATCH		BIT(7)
55*4882a593Smuzhiyun #define PESQI_SERMODE		BIT(8)
56*4882a593Smuzhiyun #define PESQI_WP_EN		BIT(9)
57*4882a593Smuzhiyun #define PESQI_HOLD_EN		BIT(10)
58*4882a593Smuzhiyun #define PESQI_BURST_EN		BIT(12)
59*4882a593Smuzhiyun #define PESQI_CS_CTRL_HW	BIT(15)
60*4882a593Smuzhiyun #define PESQI_SOFT_RESET	BIT(16)
61*4882a593Smuzhiyun #define PESQI_LANES_SHIFT	20
62*4882a593Smuzhiyun #define  PESQI_SINGLE_LANE	0
63*4882a593Smuzhiyun #define  PESQI_DUAL_LANE	1
64*4882a593Smuzhiyun #define  PESQI_QUAD_LANE	2
65*4882a593Smuzhiyun #define PESQI_CSEN_SHIFT	24
66*4882a593Smuzhiyun #define PESQI_EN		BIT(23)
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /* PESQI_CLK_CTRL_REG fields */
69*4882a593Smuzhiyun #define PESQI_CLK_EN		BIT(0)
70*4882a593Smuzhiyun #define PESQI_CLK_STABLE	BIT(1)
71*4882a593Smuzhiyun #define PESQI_CLKDIV_SHIFT	8
72*4882a593Smuzhiyun #define PESQI_CLKDIV		0xff
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun /* PESQI_INT_THR/CMD_THR_REG */
75*4882a593Smuzhiyun #define PESQI_TXTHR_MASK	0x1f
76*4882a593Smuzhiyun #define PESQI_TXTHR_SHIFT	8
77*4882a593Smuzhiyun #define PESQI_RXTHR_MASK	0x1f
78*4882a593Smuzhiyun #define PESQI_RXTHR_SHIFT	0
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun /* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
81*4882a593Smuzhiyun #define PESQI_TXEMPTY		BIT(0)
82*4882a593Smuzhiyun #define PESQI_TXFULL		BIT(1)
83*4882a593Smuzhiyun #define PESQI_TXTHR		BIT(2)
84*4882a593Smuzhiyun #define PESQI_RXEMPTY		BIT(3)
85*4882a593Smuzhiyun #define PESQI_RXFULL		BIT(4)
86*4882a593Smuzhiyun #define PESQI_RXTHR		BIT(5)
87*4882a593Smuzhiyun #define PESQI_BDDONE		BIT(9)  /* BD processing complete */
88*4882a593Smuzhiyun #define PESQI_PKTCOMP		BIT(10) /* packet processing complete */
89*4882a593Smuzhiyun #define PESQI_DMAERR		BIT(11) /* error */
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun /* PESQI_BD_CTRL_REG */
92*4882a593Smuzhiyun #define PESQI_DMA_EN		BIT(0) /* enable DMA engine */
93*4882a593Smuzhiyun #define PESQI_POLL_EN		BIT(1) /* enable polling */
94*4882a593Smuzhiyun #define PESQI_BDP_START		BIT(2) /* start BD processor */
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun /* PESQI controller buffer descriptor */
97*4882a593Smuzhiyun struct buf_desc {
98*4882a593Smuzhiyun 	u32 bd_ctrl;	/* control */
99*4882a593Smuzhiyun 	u32 bd_status;	/* reserved */
100*4882a593Smuzhiyun 	u32 bd_addr;	/* DMA buffer addr */
101*4882a593Smuzhiyun 	u32 bd_nextp;	/* next item in chain */
102*4882a593Smuzhiyun };
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun /* bd_ctrl */
105*4882a593Smuzhiyun #define BD_BUFLEN		0x1ff
106*4882a593Smuzhiyun #define BD_CBD_INT_EN		BIT(16)	/* Current BD is processed */
107*4882a593Smuzhiyun #define BD_PKT_INT_EN		BIT(17) /* All BDs of PKT processed */
108*4882a593Smuzhiyun #define BD_LIFM			BIT(18) /* last data of pkt */
109*4882a593Smuzhiyun #define BD_LAST			BIT(19) /* end of list */
110*4882a593Smuzhiyun #define BD_DATA_RECV		BIT(20) /* receive data */
111*4882a593Smuzhiyun #define BD_DDR			BIT(21) /* DDR mode */
112*4882a593Smuzhiyun #define BD_DUAL			BIT(22)	/* Dual SPI */
113*4882a593Smuzhiyun #define BD_QUAD			BIT(23) /* Quad SPI */
114*4882a593Smuzhiyun #define BD_LSBF			BIT(25)	/* LSB First */
115*4882a593Smuzhiyun #define BD_STAT_CHECK		BIT(27) /* Status poll */
116*4882a593Smuzhiyun #define BD_DEVSEL_SHIFT		28	/* CS */
117*4882a593Smuzhiyun #define BD_CS_DEASSERT		BIT(30) /* de-assert CS after current BD */
118*4882a593Smuzhiyun #define BD_EN			BIT(31) /* BD owned by H/W */
119*4882a593Smuzhiyun 
120*4882a593Smuzhiyun /**
121*4882a593Smuzhiyun  * struct ring_desc - Representation of SQI ring descriptor
122*4882a593Smuzhiyun  * @list:	list element to add to free or used list.
123*4882a593Smuzhiyun  * @bd:		PESQI controller buffer descriptor
124*4882a593Smuzhiyun  * @bd_dma:	DMA address of PESQI controller buffer descriptor
125*4882a593Smuzhiyun  * @xfer_len:	transfer length
126*4882a593Smuzhiyun  */
127*4882a593Smuzhiyun struct ring_desc {
128*4882a593Smuzhiyun 	struct list_head list;
129*4882a593Smuzhiyun 	struct buf_desc *bd;
130*4882a593Smuzhiyun 	dma_addr_t bd_dma;
131*4882a593Smuzhiyun 	u32 xfer_len;
132*4882a593Smuzhiyun };
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /* Global constants */
135*4882a593Smuzhiyun #define PESQI_BD_BUF_LEN_MAX	256
136*4882a593Smuzhiyun #define PESQI_BD_COUNT		256 /* max 64KB data per spi message */
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun struct pic32_sqi {
139*4882a593Smuzhiyun 	void __iomem		*regs;
140*4882a593Smuzhiyun 	struct clk		*sys_clk;
141*4882a593Smuzhiyun 	struct clk		*base_clk; /* drives spi clock */
142*4882a593Smuzhiyun 	struct spi_master	*master;
143*4882a593Smuzhiyun 	int			irq;
144*4882a593Smuzhiyun 	struct completion	xfer_done;
145*4882a593Smuzhiyun 	struct ring_desc	*ring;
146*4882a593Smuzhiyun 	void			*bd;
147*4882a593Smuzhiyun 	dma_addr_t		bd_dma;
148*4882a593Smuzhiyun 	struct list_head	bd_list_free; /* free */
149*4882a593Smuzhiyun 	struct list_head	bd_list_used; /* allocated */
150*4882a593Smuzhiyun 	struct spi_device	*cur_spi;
151*4882a593Smuzhiyun 	u32			cur_speed;
152*4882a593Smuzhiyun 	u8			cur_mode;
153*4882a593Smuzhiyun };
154*4882a593Smuzhiyun 
/* Read-modify-write: turn on the bits in @set within register @reg. */
static inline void pic32_setbits(void __iomem *reg, u32 set)
{
	u32 v;

	v = readl(reg);
	v |= set;
	writel(v, reg);
}
159*4882a593Smuzhiyun 
/* Read-modify-write: turn off the bits in @clr within register @reg. */
static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
	u32 v;

	v = readl(reg);
	v &= ~clr;
	writel(v, reg);
}
164*4882a593Smuzhiyun 
/*
 * Program the serial clock divider for the requested SCK rate and wait
 * for the clock to report stable.
 *
 * The divider field is only 8 bits wide; the computed value is silently
 * truncated by the PESQI_CLKDIV mask, so very low rates saturate at the
 * largest divider the hardware supports.
 * NOTE(review): @sck == 0 would divide by zero here; callers pass
 * spi->max_speed_hz — confirm the SPI core guarantees it is non-zero.
 *
 * Returns 0 on success or -ETIMEDOUT (from readl_poll_timeout()) if
 * PESQI_CLK_STABLE is not seen within 5 ms.
 */
static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
	u32 val, div;

	/* div = base_clk / (2 * spi_clk) */
	div = clk_get_rate(sqi->base_clk) / (2 * sck);
	div &= PESQI_CLKDIV;

	val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
	/* apply new divider; clearing CLK_STABLE restarts the stability check */
	val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
	val |= div << PESQI_CLKDIV_SHIFT;
	writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

	/* wait for stability */
	return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
				  val & PESQI_CLK_STABLE, 1, 5000);
}
183*4882a593Smuzhiyun 
pic32_sqi_enable_int(struct pic32_sqi * sqi)184*4882a593Smuzhiyun static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun 	u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
189*4882a593Smuzhiyun 	/* INT_SIGEN works as interrupt-gate to INTR line */
190*4882a593Smuzhiyun 	writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun 
pic32_sqi_disable_int(struct pic32_sqi * sqi)193*4882a593Smuzhiyun static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
196*4882a593Smuzhiyun 	writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
/*
 * Interrupt handler. PESQI interrupts are sticky, so the handler does not
 * acknowledge status bits; instead it narrows INT_ENABLE so that handled
 * sources stop asserting the line. The full mask is restored for the next
 * message by pic32_sqi_enable_int().
 */
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		/* DMA error: mask everything. NOTE(review): the completion
		 * is not signalled here, so the waiter in
		 * pic32_sqi_one_message() only notices via its 5s timeout.
		 */
		enable = 0;
		goto irq_done;
	}

	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}
239*4882a593Smuzhiyun 
ring_desc_get(struct pic32_sqi * sqi)240*4882a593Smuzhiyun static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
241*4882a593Smuzhiyun {
242*4882a593Smuzhiyun 	struct ring_desc *rdesc;
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	if (list_empty(&sqi->bd_list_free))
245*4882a593Smuzhiyun 		return NULL;
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun 	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
248*4882a593Smuzhiyun 	list_move_tail(&rdesc->list, &sqi->bd_list_used);
249*4882a593Smuzhiyun 	return rdesc;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun 
ring_desc_put(struct pic32_sqi * sqi,struct ring_desc * rdesc)252*4882a593Smuzhiyun static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
253*4882a593Smuzhiyun {
254*4882a593Smuzhiyun 	list_move(&rdesc->list, &sqi->bd_list_free);
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun 
pic32_sqi_one_transfer(struct pic32_sqi * sqi,struct spi_message * mesg,struct spi_transfer * xfer)257*4882a593Smuzhiyun static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
258*4882a593Smuzhiyun 				  struct spi_message *mesg,
259*4882a593Smuzhiyun 				  struct spi_transfer *xfer)
260*4882a593Smuzhiyun {
261*4882a593Smuzhiyun 	struct spi_device *spi = mesg->spi;
262*4882a593Smuzhiyun 	struct scatterlist *sg, *sgl;
263*4882a593Smuzhiyun 	struct ring_desc *rdesc;
264*4882a593Smuzhiyun 	struct buf_desc *bd;
265*4882a593Smuzhiyun 	int nents, i;
266*4882a593Smuzhiyun 	u32 bd_ctrl;
267*4882a593Smuzhiyun 	u32 nbits;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	/* Device selection */
270*4882a593Smuzhiyun 	bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	/* half-duplex: select transfer buffer, direction and lane */
273*4882a593Smuzhiyun 	if (xfer->rx_buf) {
274*4882a593Smuzhiyun 		bd_ctrl |= BD_DATA_RECV;
275*4882a593Smuzhiyun 		nbits = xfer->rx_nbits;
276*4882a593Smuzhiyun 		sgl = xfer->rx_sg.sgl;
277*4882a593Smuzhiyun 		nents = xfer->rx_sg.nents;
278*4882a593Smuzhiyun 	} else {
279*4882a593Smuzhiyun 		nbits = xfer->tx_nbits;
280*4882a593Smuzhiyun 		sgl = xfer->tx_sg.sgl;
281*4882a593Smuzhiyun 		nents = xfer->tx_sg.nents;
282*4882a593Smuzhiyun 	}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	if (nbits & SPI_NBITS_QUAD)
285*4882a593Smuzhiyun 		bd_ctrl |= BD_QUAD;
286*4882a593Smuzhiyun 	else if (nbits & SPI_NBITS_DUAL)
287*4882a593Smuzhiyun 		bd_ctrl |= BD_DUAL;
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	/* LSB first */
290*4882a593Smuzhiyun 	if (spi->mode & SPI_LSB_FIRST)
291*4882a593Smuzhiyun 		bd_ctrl |= BD_LSBF;
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 	/* ownership to hardware */
294*4882a593Smuzhiyun 	bd_ctrl |= BD_EN;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	for_each_sg(sgl, sg, nents, i) {
297*4882a593Smuzhiyun 		/* get ring descriptor */
298*4882a593Smuzhiyun 		rdesc = ring_desc_get(sqi);
299*4882a593Smuzhiyun 		if (!rdesc)
300*4882a593Smuzhiyun 			break;
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 		bd = rdesc->bd;
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 		/* BD CTRL: length */
305*4882a593Smuzhiyun 		rdesc->xfer_len = sg_dma_len(sg);
306*4882a593Smuzhiyun 		bd->bd_ctrl = bd_ctrl;
307*4882a593Smuzhiyun 		bd->bd_ctrl |= rdesc->xfer_len;
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 		/* BD STAT */
310*4882a593Smuzhiyun 		bd->bd_status = 0;
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 		/* BD BUFFER ADDRESS */
313*4882a593Smuzhiyun 		bd->bd_addr = sg->dma_address;
314*4882a593Smuzhiyun 	}
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	return 0;
317*4882a593Smuzhiyun }
318*4882a593Smuzhiyun 
pic32_sqi_prepare_hardware(struct spi_master * master)319*4882a593Smuzhiyun static int pic32_sqi_prepare_hardware(struct spi_master *master)
320*4882a593Smuzhiyun {
321*4882a593Smuzhiyun 	struct pic32_sqi *sqi = spi_master_get_devdata(master);
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	/* enable spi interface */
324*4882a593Smuzhiyun 	pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
325*4882a593Smuzhiyun 	/* enable spi clk */
326*4882a593Smuzhiyun 	pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	return 0;
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun 
pic32_sqi_can_dma(struct spi_master * master,struct spi_device * spi,struct spi_transfer * x)331*4882a593Smuzhiyun static bool pic32_sqi_can_dma(struct spi_master *master,
332*4882a593Smuzhiyun 			      struct spi_device *spi,
333*4882a593Smuzhiyun 			      struct spi_transfer *x)
334*4882a593Smuzhiyun {
335*4882a593Smuzhiyun 	/* Do DMA irrespective of transfer size */
336*4882a593Smuzhiyun 	return true;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun 
/*
 * spi_master.transfer_one_message: program per-device clock/mode when the
 * addressed device changes, build the BD chain for every transfer in the
 * message, start the DMA engine and wait (up to 5 s) for the PKTCOMP
 * interrupt. The message is always finalized before returning.
 */
static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode change
	 * can be handled at best during spi chip-select switch.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			/* CPHA is forced on regardless of the requested mode.
			 * NOTE(review): presumably a controller requirement —
			 * confirm against the PIC32MZ DA datasheet.
			 */
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* set base address BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion; the ISR signals it on PKTCOMP only, so
	 * a DMA error also surfaces here as a timeout
	 */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	/* walk the used list in reverse so moving entries back preserves
	 * the free list's original ordering
	 */
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* Update total byte transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descr */
		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->master);

	return ret;
}
441*4882a593Smuzhiyun 
pic32_sqi_unprepare_hardware(struct spi_master * master)442*4882a593Smuzhiyun static int pic32_sqi_unprepare_hardware(struct spi_master *master)
443*4882a593Smuzhiyun {
444*4882a593Smuzhiyun 	struct pic32_sqi *sqi = spi_master_get_devdata(master);
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	/* disable clk */
447*4882a593Smuzhiyun 	pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
448*4882a593Smuzhiyun 	/* disable spi */
449*4882a593Smuzhiyun 	pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	return 0;
452*4882a593Smuzhiyun }
453*4882a593Smuzhiyun 
/*
 * Allocate the BD ring: one coherent-DMA array of hardware buffer
 * descriptors (read by the PESQI engine) plus a parallel array of
 * software ring descriptors, all of which start on the free list.
 * The hardware BDs are pre-chained linearly via bd_nextp; the chain is
 * terminated per-message at runtime by setting BD_LAST on the last used
 * BD, so the static chaining never needs to change.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* allocate coherent DMAable memory for hardware buffer descriptors. */
	sqi->bd = dma_alloc_coherent(&sqi->master->dev,
				     sizeof(*bd) * PESQI_BD_COUNT,
				     &sqi->bd_dma, GFP_KERNEL);
	if (!sqi->bd) {
		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* allocate software ring descriptors */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		/* unwind the coherent allocation made above */
		dma_free_coherent(&sqi->master->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* initialize ring-desc: pair each software desc with its hardware BD
	 * and record the BD's bus address for later chaining/programming
	 */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

	/* Prepare BD: chain to next BD(s) */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}
498*4882a593Smuzhiyun 
ring_desc_ring_free(struct pic32_sqi * sqi)499*4882a593Smuzhiyun static void ring_desc_ring_free(struct pic32_sqi *sqi)
500*4882a593Smuzhiyun {
501*4882a593Smuzhiyun 	dma_free_coherent(&sqi->master->dev,
502*4882a593Smuzhiyun 			  sizeof(struct buf_desc) * PESQI_BD_COUNT,
503*4882a593Smuzhiyun 			  sqi->bd, sqi->bd_dma);
504*4882a593Smuzhiyun 	kfree(sqi->ring);
505*4882a593Smuzhiyun }
506*4882a593Smuzhiyun 
/*
 * One-time controller initialization at probe: soft-reset the block,
 * program FIFO thresholds, select DMA mode and enable lanes/chip-selects.
 * The interrupt handler is not installed yet when this runs.
 */
static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/* Soft-reset of PESQI controller triggers interrupt.
	 * We are not yet ready to handle them so disable CPU
	 * interrupt for the time being.
	 */
	local_irq_save(flags);

	/* assert soft-reset */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait until clear; atomic variant because IRQs are off here.
	 * Timeout (5 ms) is deliberately ignored — there is no error
	 * path out of hw_init.
	 */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);

	/* disable all interrupts */
	pic32_sqi_disable_int(sqi);

	/* Now it is safe to enable back CPU interrupt */
	local_irq_restore(flags);

	/* tx and rx fifo interrupt threshold: one word each */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	/* default configuration */
	val = readl(sqi->regs + PESQI_CONF_REG);

	/* set mode: DMA */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* DATAEN - SQIID0-ID3.
	 * NOTE(review): CONF is written twice — mode first, then lanes/
	 * burst/CSEN accumulated into the same val; presumably an ordering
	 * requirement of the controller — confirm against the datasheet.
	 */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* burst/INCR4 enable */
	val |= PESQI_BURST_EN;

	/* CSEN - all CS */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* write poll count */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	/* invalidate cached device state so the first message reprograms
	 * clock and mode
	 */
	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}
568*4882a593Smuzhiyun 
/*
 * Probe: map registers, acquire IRQ number and clocks, reset/configure the
 * controller, allocate the BD ring, install the IRQ handler and register
 * the SPI master. Error labels fall through so each undoes everything
 * acquired after the label above it.
 */
static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_sqi *sqi;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
	if (!master)
		return -ENOMEM;

	sqi = spi_master_get_devdata(master);
	sqi->master = master;

	sqi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_master;
	}

	/* irq */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		ret = sqi->irq;
		goto err_free_master;
	}

	/* clocks */
	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_master;
	}

	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->sys_clk);
	if (ret) {
		dev_err(&pdev->dev, "sys clk enable failed\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->base_clk);
	if (ret) {
		dev_err(&pdev->dev, "base clk enable failed\n");
		/* sys_clk is not covered by err_disable_clk path ordering
		 * here, so undo it explicitly before bailing out
		 */
		clk_disable_unprepare(sqi->sys_clk);
		goto err_free_master;
	}

	init_completion(&sqi->xfer_done);

	/* initialize hardware */
	pic32_sqi_hw_init(sqi);

	/* allocate buffers & descriptors */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_disable_clk;
	}

	/* install irq handlers */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* register master */
	master->num_chipselect	= 2;
	master->max_speed_hz	= clk_get_rate(sqi->base_clk);
	master->dma_alignment	= 32;
	master->max_dma_len	= PESQI_BD_BUF_LEN_MAX;
	master->dev.of_node	= pdev->dev.of_node;
	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
				  SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags		= SPI_MASTER_HALF_DUPLEX;
	master->can_dma		= pic32_sqi_can_dma;
	master->bits_per_word_mask	= SPI_BPW_RANGE_MASK(8, 32);
	master->transfer_one_message	= pic32_sqi_one_message;
	master->prepare_transfer_hardware	= pic32_sqi_prepare_hardware;
	master->unprepare_transfer_hardware	= pic32_sqi_unprepare_hardware;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_disable_clk:
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

err_free_master:
	spi_master_put(master);
	return ret;
}
680*4882a593Smuzhiyun 
/*
 * Remove: tear down in reverse-probe order. The IRQ is released before
 * the BD ring so the handler can no longer run against freed memory;
 * clocks go last. The spi_master itself is devm-managed.
 */
static int pic32_sqi_remove(struct platform_device *pdev)
{
	struct pic32_sqi *sqi = platform_get_drvdata(pdev);

	/* release resources */
	free_irq(sqi->irq, sqi);
	ring_desc_ring_free(sqi);

	/* disable clk */
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

	return 0;
}
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun static const struct of_device_id pic32_sqi_of_ids[] = {
697*4882a593Smuzhiyun 	{.compatible = "microchip,pic32mzda-sqi",},
698*4882a593Smuzhiyun 	{},
699*4882a593Smuzhiyun };
700*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
701*4882a593Smuzhiyun 
702*4882a593Smuzhiyun static struct platform_driver pic32_sqi_driver = {
703*4882a593Smuzhiyun 	.driver = {
704*4882a593Smuzhiyun 		.name = "sqi-pic32",
705*4882a593Smuzhiyun 		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
706*4882a593Smuzhiyun 	},
707*4882a593Smuzhiyun 	.probe = pic32_sqi_probe,
708*4882a593Smuzhiyun 	.remove = pic32_sqi_remove,
709*4882a593Smuzhiyun };
710*4882a593Smuzhiyun 
711*4882a593Smuzhiyun module_platform_driver(pic32_sqi_driver);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
714*4882a593Smuzhiyun MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
715*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
716