xref: /OK3568_Linux_fs/kernel/drivers/spi/spi-pl022.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2008-2012 ST-Ericsson AB
6*4882a593Smuzhiyun  * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Author: Linus Walleij <linus.walleij@stericsson.com>
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Initial version inspired by:
11*4882a593Smuzhiyun  *	linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
12*4882a593Smuzhiyun  * Initial adoption to PL022 by:
13*4882a593Smuzhiyun  *      Sachin Verma <sachin.verma@st.com>
14*4882a593Smuzhiyun  */
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun #include <linux/init.h>
17*4882a593Smuzhiyun #include <linux/module.h>
18*4882a593Smuzhiyun #include <linux/device.h>
19*4882a593Smuzhiyun #include <linux/ioport.h>
20*4882a593Smuzhiyun #include <linux/errno.h>
21*4882a593Smuzhiyun #include <linux/interrupt.h>
22*4882a593Smuzhiyun #include <linux/spi/spi.h>
23*4882a593Smuzhiyun #include <linux/delay.h>
24*4882a593Smuzhiyun #include <linux/clk.h>
25*4882a593Smuzhiyun #include <linux/err.h>
26*4882a593Smuzhiyun #include <linux/amba/bus.h>
27*4882a593Smuzhiyun #include <linux/amba/pl022.h>
28*4882a593Smuzhiyun #include <linux/io.h>
29*4882a593Smuzhiyun #include <linux/slab.h>
30*4882a593Smuzhiyun #include <linux/dmaengine.h>
31*4882a593Smuzhiyun #include <linux/dma-mapping.h>
32*4882a593Smuzhiyun #include <linux/scatterlist.h>
33*4882a593Smuzhiyun #include <linux/pm_runtime.h>
34*4882a593Smuzhiyun #include <linux/gpio.h>
35*4882a593Smuzhiyun #include <linux/of_gpio.h>
36*4882a593Smuzhiyun #include <linux/pinctrl/consumer.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun /*
39*4882a593Smuzhiyun  * This macro is used to define some register default values.
40*4882a593Smuzhiyun  * reg is masked with mask, the OR:ed with an (again masked)
41*4882a593Smuzhiyun  * val shifted sb steps to the left.
42*4882a593Smuzhiyun  */
43*4882a593Smuzhiyun #define SSP_WRITE_BITS(reg, val, mask, sb) \
44*4882a593Smuzhiyun  ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun /*
47*4882a593Smuzhiyun  * This macro is also used to define some default values.
48*4882a593Smuzhiyun  * It will just shift val by sb steps to the left and mask
49*4882a593Smuzhiyun  * the result with mask.
50*4882a593Smuzhiyun  */
51*4882a593Smuzhiyun #define GEN_MASK_BITS(val, mask, sb) \
52*4882a593Smuzhiyun  (((val)<<(sb)) & (mask))
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #define DRIVE_TX		0
55*4882a593Smuzhiyun #define DO_NOT_DRIVE_TX		1
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun #define DO_NOT_QUEUE_DMA	0
58*4882a593Smuzhiyun #define QUEUE_DMA		1
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define RX_TRANSFER		1
61*4882a593Smuzhiyun #define TX_TRANSFER		2
62*4882a593Smuzhiyun 
/*
 * Macros to access SSP Registers with their offsets
 *
 * Each macro takes the remapped base address of the SSP block (the
 * driver passes pl022->virtbase) and yields the address of a single
 * register. The argument is fully parenthesized so that an expression
 * argument (e.g. "base + off") expands with the intended precedence.
 */
#define SSP_CR0(r)	((r) + 0x000)
#define SSP_CR1(r)	((r) + 0x004)
#define SSP_DR(r)	((r) + 0x008)
#define SSP_SR(r)	((r) + 0x00C)
#define SSP_CPSR(r)	((r) + 0x010)
#define SSP_IMSC(r)	((r) + 0x014)
#define SSP_RIS(r)	((r) + 0x018)
#define SSP_MIS(r)	((r) + 0x01C)
#define SSP_ICR(r)	((r) + 0x020)
#define SSP_DMACR(r)	((r) + 0x024)
#define SSP_CSR(r)	((r) + 0x030) /* vendor extension */
#define SSP_ITCR(r)	((r) + 0x080)
#define SSP_ITIP(r)	((r) + 0x084)
#define SSP_ITOP(r)	((r) + 0x088)
#define SSP_TDR(r)	((r) + 0x08C)

/* PrimeCell peripheral/cell identification registers */
#define SSP_PID0(r)	((r) + 0xFE0)
#define SSP_PID1(r)	((r) + 0xFE4)
#define SSP_PID2(r)	((r) + 0xFE8)
#define SSP_PID3(r)	((r) + 0xFEC)

#define SSP_CID0(r)	((r) + 0xFF0)
#define SSP_CID1(r)	((r) + 0xFF4)
#define SSP_CID2(r)	((r) + 0xFF8)
#define SSP_CID3(r)	((r) + 0xFFC)
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /*
93*4882a593Smuzhiyun  * SSP Control Register 0  - SSP_CR0
94*4882a593Smuzhiyun  */
95*4882a593Smuzhiyun #define SSP_CR0_MASK_DSS	(0x0FUL << 0)
96*4882a593Smuzhiyun #define SSP_CR0_MASK_FRF	(0x3UL << 4)
97*4882a593Smuzhiyun #define SSP_CR0_MASK_SPO	(0x1UL << 6)
98*4882a593Smuzhiyun #define SSP_CR0_MASK_SPH	(0x1UL << 7)
99*4882a593Smuzhiyun #define SSP_CR0_MASK_SCR	(0xFFUL << 8)
100*4882a593Smuzhiyun 
/*
 * The ST version of this block moves some bits
 * in SSP_CR0 and extends it to 32 bits
 */
105*4882a593Smuzhiyun #define SSP_CR0_MASK_DSS_ST	(0x1FUL << 0)
106*4882a593Smuzhiyun #define SSP_CR0_MASK_HALFDUP_ST	(0x1UL << 5)
107*4882a593Smuzhiyun #define SSP_CR0_MASK_CSS_ST	(0x1FUL << 16)
108*4882a593Smuzhiyun #define SSP_CR0_MASK_FRF_ST	(0x3UL << 21)
109*4882a593Smuzhiyun 
/*
 * SSP Control Register 1 - SSP_CR1
 */
113*4882a593Smuzhiyun #define SSP_CR1_MASK_LBM	(0x1UL << 0)
114*4882a593Smuzhiyun #define SSP_CR1_MASK_SSE	(0x1UL << 1)
115*4882a593Smuzhiyun #define SSP_CR1_MASK_MS		(0x1UL << 2)
116*4882a593Smuzhiyun #define SSP_CR1_MASK_SOD	(0x1UL << 3)
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /*
119*4882a593Smuzhiyun  * The ST version of this block adds some bits
120*4882a593Smuzhiyun  * in SSP_CR1
121*4882a593Smuzhiyun  */
122*4882a593Smuzhiyun #define SSP_CR1_MASK_RENDN_ST	(0x1UL << 4)
123*4882a593Smuzhiyun #define SSP_CR1_MASK_TENDN_ST	(0x1UL << 5)
124*4882a593Smuzhiyun #define SSP_CR1_MASK_MWAIT_ST	(0x1UL << 6)
125*4882a593Smuzhiyun #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
126*4882a593Smuzhiyun #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
127*4882a593Smuzhiyun /* This one is only in the PL023 variant */
128*4882a593Smuzhiyun #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun /*
131*4882a593Smuzhiyun  * SSP Status Register - SSP_SR
132*4882a593Smuzhiyun  */
133*4882a593Smuzhiyun #define SSP_SR_MASK_TFE		(0x1UL << 0) /* Transmit FIFO empty */
134*4882a593Smuzhiyun #define SSP_SR_MASK_TNF		(0x1UL << 1) /* Transmit FIFO not full */
135*4882a593Smuzhiyun #define SSP_SR_MASK_RNE		(0x1UL << 2) /* Receive FIFO not empty */
136*4882a593Smuzhiyun #define SSP_SR_MASK_RFF		(0x1UL << 3) /* Receive FIFO full */
137*4882a593Smuzhiyun #define SSP_SR_MASK_BSY		(0x1UL << 4) /* Busy Flag */
138*4882a593Smuzhiyun 
139*4882a593Smuzhiyun /*
140*4882a593Smuzhiyun  * SSP Clock Prescale Register  - SSP_CPSR
141*4882a593Smuzhiyun  */
142*4882a593Smuzhiyun #define SSP_CPSR_MASK_CPSDVSR	(0xFFUL << 0)
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun /*
145*4882a593Smuzhiyun  * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
146*4882a593Smuzhiyun  */
147*4882a593Smuzhiyun #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
148*4882a593Smuzhiyun #define SSP_IMSC_MASK_RTIM  (0x1UL << 1) /* Receive timeout Interrupt mask */
149*4882a593Smuzhiyun #define SSP_IMSC_MASK_RXIM  (0x1UL << 2) /* Receive FIFO Interrupt mask */
150*4882a593Smuzhiyun #define SSP_IMSC_MASK_TXIM  (0x1UL << 3) /* Transmit FIFO Interrupt mask */
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun /*
153*4882a593Smuzhiyun  * SSP Raw Interrupt Status Register - SSP_RIS
154*4882a593Smuzhiyun  */
155*4882a593Smuzhiyun /* Receive Overrun Raw Interrupt status */
156*4882a593Smuzhiyun #define SSP_RIS_MASK_RORRIS		(0x1UL << 0)
157*4882a593Smuzhiyun /* Receive Timeout Raw Interrupt status */
158*4882a593Smuzhiyun #define SSP_RIS_MASK_RTRIS		(0x1UL << 1)
159*4882a593Smuzhiyun /* Receive FIFO Raw Interrupt status */
160*4882a593Smuzhiyun #define SSP_RIS_MASK_RXRIS		(0x1UL << 2)
161*4882a593Smuzhiyun /* Transmit FIFO Raw Interrupt status */
162*4882a593Smuzhiyun #define SSP_RIS_MASK_TXRIS		(0x1UL << 3)
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /*
165*4882a593Smuzhiyun  * SSP Masked Interrupt Status Register - SSP_MIS
166*4882a593Smuzhiyun  */
167*4882a593Smuzhiyun /* Receive Overrun Masked Interrupt status */
168*4882a593Smuzhiyun #define SSP_MIS_MASK_RORMIS		(0x1UL << 0)
169*4882a593Smuzhiyun /* Receive Timeout Masked Interrupt status */
170*4882a593Smuzhiyun #define SSP_MIS_MASK_RTMIS		(0x1UL << 1)
171*4882a593Smuzhiyun /* Receive FIFO Masked Interrupt status */
172*4882a593Smuzhiyun #define SSP_MIS_MASK_RXMIS		(0x1UL << 2)
173*4882a593Smuzhiyun /* Transmit FIFO Masked Interrupt status */
174*4882a593Smuzhiyun #define SSP_MIS_MASK_TXMIS		(0x1UL << 3)
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun /*
177*4882a593Smuzhiyun  * SSP Interrupt Clear Register - SSP_ICR
178*4882a593Smuzhiyun  */
179*4882a593Smuzhiyun /* Receive Overrun Raw Clear Interrupt bit */
180*4882a593Smuzhiyun #define SSP_ICR_MASK_RORIC		(0x1UL << 0)
181*4882a593Smuzhiyun /* Receive Timeout Clear Interrupt bit */
182*4882a593Smuzhiyun #define SSP_ICR_MASK_RTIC		(0x1UL << 1)
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun /*
185*4882a593Smuzhiyun  * SSP DMA Control Register - SSP_DMACR
186*4882a593Smuzhiyun  */
187*4882a593Smuzhiyun /* Receive DMA Enable bit */
188*4882a593Smuzhiyun #define SSP_DMACR_MASK_RXDMAE		(0x1UL << 0)
189*4882a593Smuzhiyun /* Transmit DMA Enable bit */
190*4882a593Smuzhiyun #define SSP_DMACR_MASK_TXDMAE		(0x1UL << 1)
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun /*
193*4882a593Smuzhiyun  * SSP Chip Select Control Register - SSP_CSR
194*4882a593Smuzhiyun  * (vendor extension)
195*4882a593Smuzhiyun  */
196*4882a593Smuzhiyun #define SSP_CSR_CSVALUE_MASK		(0x1FUL << 0)
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun  * SSP Integration Test control Register - SSP_ITCR
200*4882a593Smuzhiyun  */
201*4882a593Smuzhiyun #define SSP_ITCR_MASK_ITEN		(0x1UL << 0)
202*4882a593Smuzhiyun #define SSP_ITCR_MASK_TESTFIFO		(0x1UL << 1)
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun /*
205*4882a593Smuzhiyun  * SSP Integration Test Input Register - SSP_ITIP
206*4882a593Smuzhiyun  */
207*4882a593Smuzhiyun #define ITIP_MASK_SSPRXD		 (0x1UL << 0)
208*4882a593Smuzhiyun #define ITIP_MASK_SSPFSSIN		 (0x1UL << 1)
209*4882a593Smuzhiyun #define ITIP_MASK_SSPCLKIN		 (0x1UL << 2)
210*4882a593Smuzhiyun #define ITIP_MASK_RXDMAC		 (0x1UL << 3)
211*4882a593Smuzhiyun #define ITIP_MASK_TXDMAC		 (0x1UL << 4)
212*4882a593Smuzhiyun #define ITIP_MASK_SSPTXDIN		 (0x1UL << 5)
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun /*
215*4882a593Smuzhiyun  * SSP Integration Test output Register - SSP_ITOP
216*4882a593Smuzhiyun  */
217*4882a593Smuzhiyun #define ITOP_MASK_SSPTXD		 (0x1UL << 0)
218*4882a593Smuzhiyun #define ITOP_MASK_SSPFSSOUT		 (0x1UL << 1)
219*4882a593Smuzhiyun #define ITOP_MASK_SSPCLKOUT		 (0x1UL << 2)
220*4882a593Smuzhiyun #define ITOP_MASK_SSPOEn		 (0x1UL << 3)
221*4882a593Smuzhiyun #define ITOP_MASK_SSPCTLOEn		 (0x1UL << 4)
222*4882a593Smuzhiyun #define ITOP_MASK_RORINTR		 (0x1UL << 5)
223*4882a593Smuzhiyun #define ITOP_MASK_RTINTR		 (0x1UL << 6)
224*4882a593Smuzhiyun #define ITOP_MASK_RXINTR		 (0x1UL << 7)
225*4882a593Smuzhiyun #define ITOP_MASK_TXINTR		 (0x1UL << 8)
226*4882a593Smuzhiyun #define ITOP_MASK_INTR			 (0x1UL << 9)
227*4882a593Smuzhiyun #define ITOP_MASK_RXDMABREQ		 (0x1UL << 10)
228*4882a593Smuzhiyun #define ITOP_MASK_RXDMASREQ		 (0x1UL << 11)
229*4882a593Smuzhiyun #define ITOP_MASK_TXDMABREQ		 (0x1UL << 12)
230*4882a593Smuzhiyun #define ITOP_MASK_TXDMASREQ		 (0x1UL << 13)
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun /*
233*4882a593Smuzhiyun  * SSP Test Data Register - SSP_TDR
234*4882a593Smuzhiyun  */
235*4882a593Smuzhiyun #define TDR_MASK_TESTDATA		(0xFFFFFFFF)
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun /*
238*4882a593Smuzhiyun  * Message State
239*4882a593Smuzhiyun  * we use the spi_message.state (void *) pointer to
240*4882a593Smuzhiyun  * hold a single state value, that's why all this
241*4882a593Smuzhiyun  * (void *) casting is done here.
242*4882a593Smuzhiyun  */
243*4882a593Smuzhiyun #define STATE_START			((void *) 0)
244*4882a593Smuzhiyun #define STATE_RUNNING			((void *) 1)
245*4882a593Smuzhiyun #define STATE_DONE			((void *) 2)
246*4882a593Smuzhiyun #define STATE_ERROR			((void *) -1)
247*4882a593Smuzhiyun #define STATE_TIMEOUT			((void *) -2)
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun /*
250*4882a593Smuzhiyun  * SSP State - Whether Enabled or Disabled
251*4882a593Smuzhiyun  */
252*4882a593Smuzhiyun #define SSP_DISABLED			(0)
253*4882a593Smuzhiyun #define SSP_ENABLED			(1)
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun  * SSP DMA State - Whether DMA Enabled or Disabled
257*4882a593Smuzhiyun  */
258*4882a593Smuzhiyun #define SSP_DMA_DISABLED		(0)
259*4882a593Smuzhiyun #define SSP_DMA_ENABLED			(1)
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun /*
262*4882a593Smuzhiyun  * SSP Clock Defaults
263*4882a593Smuzhiyun  */
264*4882a593Smuzhiyun #define SSP_DEFAULT_CLKRATE 0x2
265*4882a593Smuzhiyun #define SSP_DEFAULT_PRESCALE 0x40
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun /*
268*4882a593Smuzhiyun  * SSP Clock Parameter ranges
269*4882a593Smuzhiyun  */
270*4882a593Smuzhiyun #define CPSDVR_MIN 0x02
271*4882a593Smuzhiyun #define CPSDVR_MAX 0xFE
272*4882a593Smuzhiyun #define SCR_MIN 0x00
273*4882a593Smuzhiyun #define SCR_MAX 0xFF
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /*
276*4882a593Smuzhiyun  * SSP Interrupt related Macros
277*4882a593Smuzhiyun  */
278*4882a593Smuzhiyun #define DEFAULT_SSP_REG_IMSC  0x0UL
279*4882a593Smuzhiyun #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
280*4882a593Smuzhiyun #define ENABLE_ALL_INTERRUPTS ( \
281*4882a593Smuzhiyun 	SSP_IMSC_MASK_RORIM | \
282*4882a593Smuzhiyun 	SSP_IMSC_MASK_RTIM | \
283*4882a593Smuzhiyun 	SSP_IMSC_MASK_RXIM | \
284*4882a593Smuzhiyun 	SSP_IMSC_MASK_TXIM \
285*4882a593Smuzhiyun )
286*4882a593Smuzhiyun 
287*4882a593Smuzhiyun #define CLEAR_ALL_INTERRUPTS  0x3
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun #define SPI_POLLING_TIMEOUT 1000
290*4882a593Smuzhiyun 
/*
 * The type of reading going on on this chip: which element width (if
 * any) is currently being used to drain the RX FIFO.
 */
enum ssp_reading {
	READING_NULL,	/* no read in progress */
	READING_U8,	/* 8-bit elements */
	READING_U16,	/* 16-bit elements */
	READING_U32	/* 32-bit elements */
};
300*4882a593Smuzhiyun 
/*
 * The type of writing going on on this chip: which element width (if
 * any) is currently being used to feed the TX FIFO.
 */
enum ssp_writing {
	WRITING_NULL,	/* no write in progress */
	WRITING_U8,	/* 8-bit elements */
	WRITING_U16,	/* 16-bit elements */
	WRITING_U32	/* 32-bit elements */
};
310*4882a593Smuzhiyun 
/**
 * struct vendor_data - vendor-specific config parameters
 * for PL022 derivatives
 * @fifodepth: depth of FIFOs (both TX and RX)
 * @max_bpw: maximum number of bits per word
 * @unidir: supports unidirectional transfers
 * @extended_cr: 32 bit wide control register 0 with extra
 * features and extra features in CR1 as found in the ST variants
 * @pl023: supports a subset of the ST extensions called "PL023"
 * @loopback: supports loopback mode
 * @internal_cs_ctrl: supports chip select control register
 */
struct vendor_data {
	int fifodepth;
	int max_bpw;
	bool unidir;
	bool extended_cr;
	bool pl023;
	bool loopback;
	bool internal_cs_ctrl;
};
332*4882a593Smuzhiyun 
/**
 * struct pl022 - This is the private SSP driver data structure
 * @adev: AMBA device model hookup
 * @vendor: vendor data for the IP block
 * @phybase: the physical memory where the SSP device resides
 * @virtbase: the virtual memory where the SSP is mapped
 * @clk: outgoing clock "SPICLK" for the SPI bus
 * @master: SPI framework hookup
 * @master_info: controller-specific data from machine setup
 * @pump_transfers: Tasklet used in Interrupt Transfer mode
 * @cur_msg: Pointer to current spi_message being processed
 * @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to current clients chip(assigned from controller_state)
 * @next_msg_cs_active: the next message in the queue has been examined
 *  and it was found that it uses the same chip select as the previous
 *  message, so we left it active after the previous transfer, and it's
 *  active already.
 * @tx: current position in TX buffer to be read
 * @tx_end: end position in TX buffer to be read
 * @rx: current position in RX buffer to be written
 * @rx_end: end position in RX buffer to be written
 * @read: the type of read currently going on
 * @write: the type of write currently going on
 * @exp_fifo_level: expected FIFO level
 * @rx_lev_trig: receive FIFO watermark level which triggers IRQ
 * @tx_lev_trig: transmit FIFO watermark level which triggers IRQ
 * @dma_rx_channel: optional channel for RX DMA (CONFIG_DMA_ENGINE only)
 * @dma_tx_channel: optional channel for TX DMA (CONFIG_DMA_ENGINE only)
 * @sgt_rx: scattertable for the RX transfer
 * @sgt_tx: scattertable for the TX transfer
 * @dummypage: a dummy page used for driving data on the bus with DMA
 * @dma_running: indicates whether DMA is in operation
 * @cur_cs: current chip select (gpio)
 * @chipselects: list of chipselects (gpios)
 */
struct pl022 {
	struct amba_device		*adev;
	struct vendor_data		*vendor;
	resource_size_t			phybase;
	void __iomem			*virtbase;
	struct clk			*clk;
	struct spi_master		*master;
	struct pl022_ssp_controller	*master_info;
	/* Message per-transfer pump */
	struct tasklet_struct		pump_transfers;
	struct spi_message		*cur_msg;
	struct spi_transfer		*cur_transfer;
	struct chip_data		*cur_chip;
	bool				next_msg_cs_active;
	void				*tx;
	void				*tx_end;
	void				*rx;
	void				*rx_end;
	enum ssp_reading		read;
	enum ssp_writing		write;
	u32				exp_fifo_level;
	enum ssp_rx_level_trig		rx_lev_trig;
	enum ssp_tx_level_trig		tx_lev_trig;
	/* DMA settings: only compiled in when the DMA engine is available */
	#ifdef CONFIG_DMA_ENGINE
	struct dma_chan			*dma_rx_channel;
	struct dma_chan			*dma_tx_channel;
	struct sg_table			sgt_rx;
	struct sg_table			sgt_tx;
	char				*dummypage;
	bool				dma_running;
#endif
	int cur_cs;
	int *chipselects;
};
403*4882a593Smuzhiyun 
/**
 * struct chip_data - To maintain runtime state of SSP for each client chip
 * @cr0: Value of control register CR0 of SSP - on later ST variants this
 *       register is 32 bits wide rather than just 16
 * @cr1: Value of control register CR1 of SSP
 * @dmacr: Value of DMA control Register of SSP
 * @cpsr: Value of Clock prescale register
 * @n_bytes: how many bytes (a power of 2) are required for the client's
 *           configured data width
 * @enable_dma: Whether to enable DMA or not
 * @read: function ptr to be used to read when doing xfer for this chip
 * @write: function ptr to be used to write when doing xfer for this chip
 * @cs_control: chip select callback provided by chip
 * @xfer_type: polling/interrupt/DMA
 *
 * Runtime state of the SSP controller, maintained per chip,
 * This would be set according to the current message that would be served
 */
struct chip_data {
	u32 cr0;
	u16 cr1;
	u16 dmacr;
	u16 cpsr;
	u8 n_bytes;
	bool enable_dma;
	enum ssp_reading read;
	enum ssp_writing write;
	void (*cs_control) (u32 command);
	int xfer_type;
};
433*4882a593Smuzhiyun 
/**
 * null_cs_control - Dummy chip select function
 * @command: select/deselect the chip
 *
 * If no chip select function is provided by the client this is used as a
 * dummy chip select: it only logs the request and touches no hardware.
 */
static void null_cs_control(u32 command)
{
	pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
}
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun /**
447*4882a593Smuzhiyun  * internal_cs_control - Control chip select signals via SSP_CSR.
448*4882a593Smuzhiyun  * @pl022: SSP driver private data structure
449*4882a593Smuzhiyun  * @command: select/delect the chip
450*4882a593Smuzhiyun  *
451*4882a593Smuzhiyun  * Used on controller with internal chip select control via SSP_CSR register
452*4882a593Smuzhiyun  * (vendor extension). Each of the 5 LSB in the register controls one chip
453*4882a593Smuzhiyun  * select signal.
454*4882a593Smuzhiyun  */
internal_cs_control(struct pl022 * pl022,u32 command)455*4882a593Smuzhiyun static void internal_cs_control(struct pl022 *pl022, u32 command)
456*4882a593Smuzhiyun {
457*4882a593Smuzhiyun 	u32 tmp;
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 	tmp = readw(SSP_CSR(pl022->virtbase));
460*4882a593Smuzhiyun 	if (command == SSP_CHIP_SELECT)
461*4882a593Smuzhiyun 		tmp &= ~BIT(pl022->cur_cs);
462*4882a593Smuzhiyun 	else
463*4882a593Smuzhiyun 		tmp |= BIT(pl022->cur_cs);
464*4882a593Smuzhiyun 	writew(tmp, SSP_CSR(pl022->virtbase));
465*4882a593Smuzhiyun }
466*4882a593Smuzhiyun 
/*
 * pl022_cs_control - route a chip select request to the right mechanism:
 * the controller's internal CS register if the vendor supports it, else a
 * GPIO line if one is mapped for this chip select, else the client's own
 * cs_control() callback (possibly the dummy one).
 */
static void pl022_cs_control(struct pl022 *pl022, u32 command)
{
	if (pl022->vendor->internal_cs_ctrl) {
		internal_cs_control(pl022, command);
		return;
	}

	if (gpio_is_valid(pl022->cur_cs)) {
		gpio_set_value(pl022->cur_cs, command);
		return;
	}

	pl022->cur_chip->cs_control(command);
}
476*4882a593Smuzhiyun 
/**
 * giveback - current spi_message is over, schedule next message and call
 * callback of this message. Assumes that caller already
 * set message->status; dma and pio irqs are blocked
 * @pl022: SSP driver private data structure
 *
 * The chip select decision must be made here, before
 * spi_finalize_current_message() runs the completion callback, because
 * after that callback the client driver may be unloaded and its
 * cs_control() callback invalidated.
 */
static void giveback(struct pl022 *pl022)
{
	struct spi_transfer *last_transfer;
	/* Assume CS will go inactive; may be flipped back on below. */
	pl022->next_msg_cs_active = false;

	last_transfer = list_last_entry(&pl022->cur_msg->transfers,
					struct spi_transfer, transfer_list);

	/* Delay if requested before any change in chip select */
	/*
	 * FIXME: This runs in interrupt context.
	 * Is this really smart?
	 */
	spi_transfer_delay_exec(last_transfer);

	if (!last_transfer->cs_change) {
		struct spi_message *next_msg;

		/*
		 * cs_change was not set. We can keep the chip select
		 * enabled if there is message in the queue and it is
		 * for the same spi device.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */
		/* get a pointer to the next message, if any */
		next_msg = spi_get_next_queued_message(pl022->master);

		/*
		 * see if the next and current messages point
		 * to the same spi device.
		 */
		if (next_msg && next_msg->spi != pl022->cur_msg->spi)
			next_msg = NULL;
		/* On error always drop CS, even when the next message matches */
		if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		else
			pl022->next_msg_cs_active = true;

	}

	/* Clear all per-message state before handing back to the core */
	pl022->cur_msg = NULL;
	pl022->cur_transfer = NULL;
	pl022->cur_chip = NULL;

	/* disable the SPI/SSP operation */
	writew((readw(SSP_CR1(pl022->virtbase)) &
		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));

	spi_finalize_current_message(pl022->master);
}
537*4882a593Smuzhiyun 
/**
 * flush - flush the FIFO to reach a clean state
 * @pl022: SSP driver private data structure
 *
 * Drains the receive FIFO while the controller reports it non-empty and
 * spins until the busy flag clears, bounded by a loop budget so a stuck
 * controller cannot hang us forever.
 *
 * Returns the remaining loop budget. NOTE(review): if the budget is
 * exhausted while BSY is still set, the post-decrement wraps the unsigned
 * counter, so the int return value is then -1 rather than 0 — callers
 * should only rely on zero/non-zero-ish semantics here.
 */
static int flush(struct pl022 *pl022)
{
	unsigned long limit = loops_per_jiffy << 1;

	dev_dbg(&pl022->adev->dev, "flush\n");
	do {
		/* Read and discard everything currently in the RX FIFO */
		while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
			readw(SSP_DR(pl022->virtbase));
	} while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);

	/* FIFO is empty now, so no outstanding entries are expected */
	pl022->exp_fifo_level = 0;

	return limit;
}
556*4882a593Smuzhiyun 
/**
 * restore_state - Load configuration of current chip
 * @pl022: SSP driver private data structure
 *
 * Writes the per-chip shadow values (cr0/cr1/dmacr/cpsr) back into the
 * hardware, then masks and clears all interrupts so the transfer starts
 * from a known quiescent state.
 */
static void restore_state(struct pl022 *pl022)
{
	struct chip_data *chip = pl022->cur_chip;

	/* ST variants have a 32-bit CR0; the vanilla PL022 only 16 bits */
	if (pl022->vendor->extended_cr)
		writel(chip->cr0, SSP_CR0(pl022->virtbase));
	else
		writew(chip->cr0, SSP_CR0(pl022->virtbase));
	writew(chip->cr1, SSP_CR1(pl022->virtbase));
	writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
	writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
	/* Mask every interrupt source, then clear any latched ones */
	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
}
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun /*
577*4882a593Smuzhiyun  * Default SSP Register Values
578*4882a593Smuzhiyun  */
579*4882a593Smuzhiyun #define DEFAULT_SSP_REG_CR0 ( \
580*4882a593Smuzhiyun 	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0)	| \
581*4882a593Smuzhiyun 	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
582*4882a593Smuzhiyun 	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
583*4882a593Smuzhiyun 	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
584*4882a593Smuzhiyun 	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
585*4882a593Smuzhiyun )
586*4882a593Smuzhiyun 
/* ST versions have slightly different bit layout */
#define DEFAULT_SSP_REG_CR0_ST ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
	GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16)	| \
	GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
)

/*
 * The PL023 version is slightly different again: no half-duplex, CSS or
 * FRF fields in CR0 (compare with the ST variant above).
 */
#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0)	| \
	GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
	GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
)

/* Default CR1: loopback off, SSP disabled, master mode, TX output driven */
#define DEFAULT_SSP_REG_CR1 ( \
	GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
)

/* ST versions extend this register to use all 16 bits */
#define DEFAULT_SSP_REG_CR1_ST ( \
	DEFAULT_SSP_REG_CR1 | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
)

/*
 * The PL023 variant has further differences: no loopback mode, no microwire
 * support, and a new clock feedback delay setting.
 */
#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
	GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
	GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
	GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
	GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
	GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
	GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
	GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
	GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
)

/* Default clock prescale register value */
#define DEFAULT_SSP_REG_CPSR ( \
	GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
)

/* Default DMA control register: both RX and TX DMA requests disabled */
#define DEFAULT_SSP_REG_DMACR (\
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
	GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
)
646*4882a593Smuzhiyun 
647*4882a593Smuzhiyun /**
648*4882a593Smuzhiyun  * load_ssp_default_config - Load default configuration for SSP
649*4882a593Smuzhiyun  * @pl022: SSP driver private data structure
650*4882a593Smuzhiyun  */
load_ssp_default_config(struct pl022 * pl022)651*4882a593Smuzhiyun static void load_ssp_default_config(struct pl022 *pl022)
652*4882a593Smuzhiyun {
653*4882a593Smuzhiyun 	if (pl022->vendor->pl023) {
654*4882a593Smuzhiyun 		writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
655*4882a593Smuzhiyun 		writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
656*4882a593Smuzhiyun 	} else if (pl022->vendor->extended_cr) {
657*4882a593Smuzhiyun 		writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
658*4882a593Smuzhiyun 		writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
659*4882a593Smuzhiyun 	} else {
660*4882a593Smuzhiyun 		writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
661*4882a593Smuzhiyun 		writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
662*4882a593Smuzhiyun 	}
663*4882a593Smuzhiyun 	writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
664*4882a593Smuzhiyun 	writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
665*4882a593Smuzhiyun 	writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
666*4882a593Smuzhiyun 	writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
667*4882a593Smuzhiyun }
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun /*
670*4882a593Smuzhiyun  * This will write to TX and read from RX according to the parameters
671*4882a593Smuzhiyun  * set in pl022.
672*4882a593Smuzhiyun  */
readwriter(struct pl022 * pl022)673*4882a593Smuzhiyun static void readwriter(struct pl022 *pl022)
674*4882a593Smuzhiyun {
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	/*
677*4882a593Smuzhiyun 	 * The FIFO depth is different between primecell variants.
678*4882a593Smuzhiyun 	 * I believe filling in too much in the FIFO might cause
679*4882a593Smuzhiyun 	 * errons in 8bit wide transfers on ARM variants (just 8 words
680*4882a593Smuzhiyun 	 * FIFO, means only 8x8 = 64 bits in FIFO) at least.
681*4882a593Smuzhiyun 	 *
682*4882a593Smuzhiyun 	 * To prevent this issue, the TX FIFO is only filled to the
683*4882a593Smuzhiyun 	 * unused RX FIFO fill length, regardless of what the TX
684*4882a593Smuzhiyun 	 * FIFO status flag indicates.
685*4882a593Smuzhiyun 	 */
686*4882a593Smuzhiyun 	dev_dbg(&pl022->adev->dev,
687*4882a593Smuzhiyun 		"%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
688*4882a593Smuzhiyun 		__func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	/* Read as much as you can */
691*4882a593Smuzhiyun 	while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
692*4882a593Smuzhiyun 	       && (pl022->rx < pl022->rx_end)) {
693*4882a593Smuzhiyun 		switch (pl022->read) {
694*4882a593Smuzhiyun 		case READING_NULL:
695*4882a593Smuzhiyun 			readw(SSP_DR(pl022->virtbase));
696*4882a593Smuzhiyun 			break;
697*4882a593Smuzhiyun 		case READING_U8:
698*4882a593Smuzhiyun 			*(u8 *) (pl022->rx) =
699*4882a593Smuzhiyun 				readw(SSP_DR(pl022->virtbase)) & 0xFFU;
700*4882a593Smuzhiyun 			break;
701*4882a593Smuzhiyun 		case READING_U16:
702*4882a593Smuzhiyun 			*(u16 *) (pl022->rx) =
703*4882a593Smuzhiyun 				(u16) readw(SSP_DR(pl022->virtbase));
704*4882a593Smuzhiyun 			break;
705*4882a593Smuzhiyun 		case READING_U32:
706*4882a593Smuzhiyun 			*(u32 *) (pl022->rx) =
707*4882a593Smuzhiyun 				readl(SSP_DR(pl022->virtbase));
708*4882a593Smuzhiyun 			break;
709*4882a593Smuzhiyun 		}
710*4882a593Smuzhiyun 		pl022->rx += (pl022->cur_chip->n_bytes);
711*4882a593Smuzhiyun 		pl022->exp_fifo_level--;
712*4882a593Smuzhiyun 	}
713*4882a593Smuzhiyun 	/*
714*4882a593Smuzhiyun 	 * Write as much as possible up to the RX FIFO size
715*4882a593Smuzhiyun 	 */
716*4882a593Smuzhiyun 	while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
717*4882a593Smuzhiyun 	       && (pl022->tx < pl022->tx_end)) {
718*4882a593Smuzhiyun 		switch (pl022->write) {
719*4882a593Smuzhiyun 		case WRITING_NULL:
720*4882a593Smuzhiyun 			writew(0x0, SSP_DR(pl022->virtbase));
721*4882a593Smuzhiyun 			break;
722*4882a593Smuzhiyun 		case WRITING_U8:
723*4882a593Smuzhiyun 			writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
724*4882a593Smuzhiyun 			break;
725*4882a593Smuzhiyun 		case WRITING_U16:
726*4882a593Smuzhiyun 			writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
727*4882a593Smuzhiyun 			break;
728*4882a593Smuzhiyun 		case WRITING_U32:
729*4882a593Smuzhiyun 			writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
730*4882a593Smuzhiyun 			break;
731*4882a593Smuzhiyun 		}
732*4882a593Smuzhiyun 		pl022->tx += (pl022->cur_chip->n_bytes);
733*4882a593Smuzhiyun 		pl022->exp_fifo_level++;
734*4882a593Smuzhiyun 		/*
735*4882a593Smuzhiyun 		 * This inner reader takes care of things appearing in the RX
736*4882a593Smuzhiyun 		 * FIFO as we're transmitting. This will happen a lot since the
737*4882a593Smuzhiyun 		 * clock starts running when you put things into the TX FIFO,
738*4882a593Smuzhiyun 		 * and then things are continuously clocked into the RX FIFO.
739*4882a593Smuzhiyun 		 */
740*4882a593Smuzhiyun 		while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
741*4882a593Smuzhiyun 		       && (pl022->rx < pl022->rx_end)) {
742*4882a593Smuzhiyun 			switch (pl022->read) {
743*4882a593Smuzhiyun 			case READING_NULL:
744*4882a593Smuzhiyun 				readw(SSP_DR(pl022->virtbase));
745*4882a593Smuzhiyun 				break;
746*4882a593Smuzhiyun 			case READING_U8:
747*4882a593Smuzhiyun 				*(u8 *) (pl022->rx) =
748*4882a593Smuzhiyun 					readw(SSP_DR(pl022->virtbase)) & 0xFFU;
749*4882a593Smuzhiyun 				break;
750*4882a593Smuzhiyun 			case READING_U16:
751*4882a593Smuzhiyun 				*(u16 *) (pl022->rx) =
752*4882a593Smuzhiyun 					(u16) readw(SSP_DR(pl022->virtbase));
753*4882a593Smuzhiyun 				break;
754*4882a593Smuzhiyun 			case READING_U32:
755*4882a593Smuzhiyun 				*(u32 *) (pl022->rx) =
756*4882a593Smuzhiyun 					readl(SSP_DR(pl022->virtbase));
757*4882a593Smuzhiyun 				break;
758*4882a593Smuzhiyun 			}
759*4882a593Smuzhiyun 			pl022->rx += (pl022->cur_chip->n_bytes);
760*4882a593Smuzhiyun 			pl022->exp_fifo_level--;
761*4882a593Smuzhiyun 		}
762*4882a593Smuzhiyun 	}
763*4882a593Smuzhiyun 	/*
764*4882a593Smuzhiyun 	 * When we exit here the TX FIFO should be full and the RX FIFO
765*4882a593Smuzhiyun 	 * should be empty
766*4882a593Smuzhiyun 	 */
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun /**
770*4882a593Smuzhiyun  * next_transfer - Move to the Next transfer in the current spi message
771*4882a593Smuzhiyun  * @pl022: SSP driver private data structure
772*4882a593Smuzhiyun  *
 * This function moves through the linked list of spi transfers in the
 * current spi message and returns with the state of the current spi
 * message, i.e. whether its last transfer is done (STATE_DONE) or the
 * next transfer is ready (STATE_RUNNING).
777*4882a593Smuzhiyun  */
next_transfer(struct pl022 * pl022)778*4882a593Smuzhiyun static void *next_transfer(struct pl022 *pl022)
779*4882a593Smuzhiyun {
780*4882a593Smuzhiyun 	struct spi_message *msg = pl022->cur_msg;
781*4882a593Smuzhiyun 	struct spi_transfer *trans = pl022->cur_transfer;
782*4882a593Smuzhiyun 
783*4882a593Smuzhiyun 	/* Move to next transfer */
784*4882a593Smuzhiyun 	if (trans->transfer_list.next != &msg->transfers) {
785*4882a593Smuzhiyun 		pl022->cur_transfer =
786*4882a593Smuzhiyun 		    list_entry(trans->transfer_list.next,
787*4882a593Smuzhiyun 			       struct spi_transfer, transfer_list);
788*4882a593Smuzhiyun 		return STATE_RUNNING;
789*4882a593Smuzhiyun 	}
790*4882a593Smuzhiyun 	return STATE_DONE;
791*4882a593Smuzhiyun }
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun /*
794*4882a593Smuzhiyun  * This DMA functionality is only compiled in if we have
795*4882a593Smuzhiyun  * access to the generic DMA devices/DMA engine.
796*4882a593Smuzhiyun  */
797*4882a593Smuzhiyun #ifdef CONFIG_DMA_ENGINE
unmap_free_dma_scatter(struct pl022 * pl022)798*4882a593Smuzhiyun static void unmap_free_dma_scatter(struct pl022 *pl022)
799*4882a593Smuzhiyun {
800*4882a593Smuzhiyun 	/* Unmap and free the SG tables */
801*4882a593Smuzhiyun 	dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
802*4882a593Smuzhiyun 		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
803*4882a593Smuzhiyun 	dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
804*4882a593Smuzhiyun 		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
805*4882a593Smuzhiyun 	sg_free_table(&pl022->sgt_rx);
806*4882a593Smuzhiyun 	sg_free_table(&pl022->sgt_tx);
807*4882a593Smuzhiyun }
808*4882a593Smuzhiyun 
/*
 * dma_callback - completion callback attached to the RX DMA descriptor
 * (the RX side finishes last, see configure_dma()). Unmaps the
 * scatterlists, updates the message bookkeeping and schedules the
 * pump_transfers tasklet to advance the message state machine.
 */
static void dma_callback(void *data)
{
	struct pl022 *pl022 = data;
	struct spi_message *msg = pl022->cur_msg;

	BUG_ON(!pl022->sgt_rx.sgl);

#ifdef VERBOSE_DEBUG
	/*
	 * Optionally dump out buffers to inspect contents, this is
	 * good if you want to convince yourself that the loopback
	 * read/write contents are the same, when adopting to a new
	 * DMA engine.
	 */
	{
		struct scatterlist *sg;
		unsigned int i;

		/* Make the DMA-written RX data visible to the CPU first */
		dma_sync_sg_for_cpu(&pl022->adev->dev,
				    pl022->sgt_rx.sgl,
				    pl022->sgt_rx.nents,
				    DMA_FROM_DEVICE);

		for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI RX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
		for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
			dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
			print_hex_dump(KERN_ERR, "SPI TX: ",
				       DUMP_PREFIX_OFFSET,
				       16,
				       1,
				       sg_virt(sg),
				       sg_dma_len(sg),
				       1);
		}
	}
#endif

	unmap_free_dma_scatter(pl022);

	/* Update total bytes transferred */
	msg->actual_length += pl022->cur_transfer->len;
	/* Move to next transfer */
	msg->state = next_transfer(pl022);
	/* Drop chip select between transfers when the transfer asks for it */
	if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
		pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	tasklet_schedule(&pl022->pump_transfers);
}
865*4882a593Smuzhiyun 
setup_dma_scatter(struct pl022 * pl022,void * buffer,unsigned int length,struct sg_table * sgtab)866*4882a593Smuzhiyun static void setup_dma_scatter(struct pl022 *pl022,
867*4882a593Smuzhiyun 			      void *buffer,
868*4882a593Smuzhiyun 			      unsigned int length,
869*4882a593Smuzhiyun 			      struct sg_table *sgtab)
870*4882a593Smuzhiyun {
871*4882a593Smuzhiyun 	struct scatterlist *sg;
872*4882a593Smuzhiyun 	int bytesleft = length;
873*4882a593Smuzhiyun 	void *bufp = buffer;
874*4882a593Smuzhiyun 	int mapbytes;
875*4882a593Smuzhiyun 	int i;
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	if (buffer) {
878*4882a593Smuzhiyun 		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
879*4882a593Smuzhiyun 			/*
880*4882a593Smuzhiyun 			 * If there are less bytes left than what fits
881*4882a593Smuzhiyun 			 * in the current page (plus page alignment offset)
882*4882a593Smuzhiyun 			 * we just feed in this, else we stuff in as much
883*4882a593Smuzhiyun 			 * as we can.
884*4882a593Smuzhiyun 			 */
885*4882a593Smuzhiyun 			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
886*4882a593Smuzhiyun 				mapbytes = bytesleft;
887*4882a593Smuzhiyun 			else
888*4882a593Smuzhiyun 				mapbytes = PAGE_SIZE - offset_in_page(bufp);
889*4882a593Smuzhiyun 			sg_set_page(sg, virt_to_page(bufp),
890*4882a593Smuzhiyun 				    mapbytes, offset_in_page(bufp));
891*4882a593Smuzhiyun 			bufp += mapbytes;
892*4882a593Smuzhiyun 			bytesleft -= mapbytes;
893*4882a593Smuzhiyun 			dev_dbg(&pl022->adev->dev,
894*4882a593Smuzhiyun 				"set RX/TX target page @ %p, %d bytes, %d left\n",
895*4882a593Smuzhiyun 				bufp, mapbytes, bytesleft);
896*4882a593Smuzhiyun 		}
897*4882a593Smuzhiyun 	} else {
898*4882a593Smuzhiyun 		/* Map the dummy buffer on every page */
899*4882a593Smuzhiyun 		for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
900*4882a593Smuzhiyun 			if (bytesleft < PAGE_SIZE)
901*4882a593Smuzhiyun 				mapbytes = bytesleft;
902*4882a593Smuzhiyun 			else
903*4882a593Smuzhiyun 				mapbytes = PAGE_SIZE;
904*4882a593Smuzhiyun 			sg_set_page(sg, virt_to_page(pl022->dummypage),
905*4882a593Smuzhiyun 				    mapbytes, 0);
906*4882a593Smuzhiyun 			bytesleft -= mapbytes;
907*4882a593Smuzhiyun 			dev_dbg(&pl022->adev->dev,
908*4882a593Smuzhiyun 				"set RX/TX to dummy page %d bytes, %d left\n",
909*4882a593Smuzhiyun 				mapbytes, bytesleft);
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 		}
912*4882a593Smuzhiyun 	}
913*4882a593Smuzhiyun 	BUG_ON(bytesleft);
914*4882a593Smuzhiyun }
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun /**
917*4882a593Smuzhiyun  * configure_dma - configures the channels for the next transfer
918*4882a593Smuzhiyun  * @pl022: SSP driver's private data structure
919*4882a593Smuzhiyun  */
static int configure_dma(struct pl022 *pl022)
{
	/* RX pulls from the SSP data register into memory */
	struct dma_slave_config rx_conf = {
		.src_addr = SSP_DR(pl022->phybase),
		.direction = DMA_DEV_TO_MEM,
		.device_fc = false,
	};
	/* TX pushes from memory into the SSP data register */
	struct dma_slave_config tx_conf = {
		.dst_addr = SSP_DR(pl022->phybase),
		.direction = DMA_MEM_TO_DEV,
		.device_fc = false,
	};
	unsigned int pages;
	int ret;
	int rx_sglen, tx_sglen;
	struct dma_chan *rxchan = pl022->dma_rx_channel;
	struct dma_chan *txchan = pl022->dma_tx_channel;
	struct dma_async_tx_descriptor *rxdesc;
	struct dma_async_tx_descriptor *txdesc;

	/* Check that the channels are available */
	if (!rxchan || !txchan)
		return -ENODEV;

	/*
	 * If supplied, the DMA burstsize should equal the FIFO trigger level.
	 * Notice that the DMA engine uses one-to-one mapping. Since we can
	 * not trigger on 2 elements this needs explicit mapping rather than
	 * calculation.
	 */
	switch (pl022->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
		rx_conf.src_maxburst = 1;
		break;
	case SSP_RX_4_OR_MORE_ELEM:
		rx_conf.src_maxburst = 4;
		break;
	case SSP_RX_8_OR_MORE_ELEM:
		rx_conf.src_maxburst = 8;
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		rx_conf.src_maxburst = 16;
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		rx_conf.src_maxburst = 32;
		break;
	default:
		/* Fall back to half the FIFO depth */
		rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	switch (pl022->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 1;
		break;
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 4;
		break;
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 8;
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 16;
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		tx_conf.dst_maxburst = 32;
		break;
	default:
		/* Fall back to half the FIFO depth */
		tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
		break;
	}

	/* Map the configured word width to the DMA bus width */
	switch (pl022->read) {
	case READING_NULL:
		/* Use the same as for writing */
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case READING_U8:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case READING_U16:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case READING_U32:
		rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	switch (pl022->write) {
	case WRITING_NULL:
		/* Use the same as for reading */
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
		break;
	case WRITING_U8:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case WRITING_U16:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	case WRITING_U32:
		tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	/* SPI peculiarity: we need to read and write the same width */
	if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		rx_conf.src_addr_width = tx_conf.dst_addr_width;
	if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		tx_conf.dst_addr_width = rx_conf.src_addr_width;
	BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);

	dmaengine_slave_config(rxchan, &rx_conf);
	dmaengine_slave_config(txchan, &tx_conf);

	/* Create sglists for the transfers */
	pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
	dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);

	/* GFP_ATOMIC: may be called from tasklet context */
	ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_rx_sg;

	ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
	if (ret)
		goto err_alloc_tx_sg;

	/* Fill in the scatterlists for the RX+TX buffers */
	setup_dma_scatter(pl022, pl022->rx,
			  pl022->cur_transfer->len, &pl022->sgt_rx);
	setup_dma_scatter(pl022, pl022->tx,
			  pl022->cur_transfer->len, &pl022->sgt_tx);

	/* Map DMA buffers */
	rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
			   pl022->sgt_rx.nents, DMA_FROM_DEVICE);
	if (!rx_sglen)
		goto err_rx_sgmap;

	tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
			   pl022->sgt_tx.nents, DMA_TO_DEVICE);
	if (!tx_sglen)
		goto err_tx_sgmap;

	/* Send both scatterlists */
	rxdesc = dmaengine_prep_slave_sg(rxchan,
				      pl022->sgt_rx.sgl,
				      rx_sglen,
				      DMA_DEV_TO_MEM,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc)
		goto err_rxdesc;

	txdesc = dmaengine_prep_slave_sg(txchan,
				      pl022->sgt_tx.sgl,
				      tx_sglen,
				      DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc)
		goto err_txdesc;

	/* Put the callback on the RX transfer only, that should finish last */
	rxdesc->callback = dma_callback;
	rxdesc->callback_param = pl022;

	/* Submit and fire RX and TX with TX last so we're ready to read! */
	dmaengine_submit(rxdesc);
	dmaengine_submit(txdesc);
	dma_async_issue_pending(rxchan);
	dma_async_issue_pending(txchan);
	pl022->dma_running = true;

	return 0;

	/* Error unwinding: each label undoes everything set up before it */
err_txdesc:
	dmaengine_terminate_all(txchan);
err_rxdesc:
	dmaengine_terminate_all(rxchan);
	dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
		     pl022->sgt_tx.nents, DMA_TO_DEVICE);
err_tx_sgmap:
	dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
		     pl022->sgt_rx.nents, DMA_FROM_DEVICE);
err_rx_sgmap:
	sg_free_table(&pl022->sgt_tx);
err_alloc_tx_sg:
	sg_free_table(&pl022->sgt_rx);
err_alloc_rx_sg:
	return -ENOMEM;
}
1109*4882a593Smuzhiyun 
pl022_dma_probe(struct pl022 * pl022)1110*4882a593Smuzhiyun static int pl022_dma_probe(struct pl022 *pl022)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	dma_cap_mask_t mask;
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 	/* Try to acquire a generic DMA engine slave channel */
1115*4882a593Smuzhiyun 	dma_cap_zero(mask);
1116*4882a593Smuzhiyun 	dma_cap_set(DMA_SLAVE, mask);
1117*4882a593Smuzhiyun 	/*
1118*4882a593Smuzhiyun 	 * We need both RX and TX channels to do DMA, else do none
1119*4882a593Smuzhiyun 	 * of them.
1120*4882a593Smuzhiyun 	 */
1121*4882a593Smuzhiyun 	pl022->dma_rx_channel = dma_request_channel(mask,
1122*4882a593Smuzhiyun 					    pl022->master_info->dma_filter,
1123*4882a593Smuzhiyun 					    pl022->master_info->dma_rx_param);
1124*4882a593Smuzhiyun 	if (!pl022->dma_rx_channel) {
1125*4882a593Smuzhiyun 		dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
1126*4882a593Smuzhiyun 		goto err_no_rxchan;
1127*4882a593Smuzhiyun 	}
1128*4882a593Smuzhiyun 
1129*4882a593Smuzhiyun 	pl022->dma_tx_channel = dma_request_channel(mask,
1130*4882a593Smuzhiyun 					    pl022->master_info->dma_filter,
1131*4882a593Smuzhiyun 					    pl022->master_info->dma_tx_param);
1132*4882a593Smuzhiyun 	if (!pl022->dma_tx_channel) {
1133*4882a593Smuzhiyun 		dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
1134*4882a593Smuzhiyun 		goto err_no_txchan;
1135*4882a593Smuzhiyun 	}
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1138*4882a593Smuzhiyun 	if (!pl022->dummypage)
1139*4882a593Smuzhiyun 		goto err_no_dummypage;
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 	dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
1142*4882a593Smuzhiyun 		 dma_chan_name(pl022->dma_rx_channel),
1143*4882a593Smuzhiyun 		 dma_chan_name(pl022->dma_tx_channel));
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	return 0;
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun err_no_dummypage:
1148*4882a593Smuzhiyun 	dma_release_channel(pl022->dma_tx_channel);
1149*4882a593Smuzhiyun err_no_txchan:
1150*4882a593Smuzhiyun 	dma_release_channel(pl022->dma_rx_channel);
1151*4882a593Smuzhiyun 	pl022->dma_rx_channel = NULL;
1152*4882a593Smuzhiyun err_no_rxchan:
1153*4882a593Smuzhiyun 	dev_err(&pl022->adev->dev,
1154*4882a593Smuzhiyun 			"Failed to work in dma mode, work without dma!\n");
1155*4882a593Smuzhiyun 	return -ENODEV;
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun 
pl022_dma_autoprobe(struct pl022 * pl022)1158*4882a593Smuzhiyun static int pl022_dma_autoprobe(struct pl022 *pl022)
1159*4882a593Smuzhiyun {
1160*4882a593Smuzhiyun 	struct device *dev = &pl022->adev->dev;
1161*4882a593Smuzhiyun 	struct dma_chan *chan;
1162*4882a593Smuzhiyun 	int err;
1163*4882a593Smuzhiyun 
1164*4882a593Smuzhiyun 	/* automatically configure DMA channels from platform, normally using DT */
1165*4882a593Smuzhiyun 	chan = dma_request_chan(dev, "rx");
1166*4882a593Smuzhiyun 	if (IS_ERR(chan)) {
1167*4882a593Smuzhiyun 		err = PTR_ERR(chan);
1168*4882a593Smuzhiyun 		goto err_no_rxchan;
1169*4882a593Smuzhiyun 	}
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	pl022->dma_rx_channel = chan;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	chan = dma_request_chan(dev, "tx");
1174*4882a593Smuzhiyun 	if (IS_ERR(chan)) {
1175*4882a593Smuzhiyun 		err = PTR_ERR(chan);
1176*4882a593Smuzhiyun 		goto err_no_txchan;
1177*4882a593Smuzhiyun 	}
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	pl022->dma_tx_channel = chan;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1182*4882a593Smuzhiyun 	if (!pl022->dummypage) {
1183*4882a593Smuzhiyun 		err = -ENOMEM;
1184*4882a593Smuzhiyun 		goto err_no_dummypage;
1185*4882a593Smuzhiyun 	}
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	return 0;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun err_no_dummypage:
1190*4882a593Smuzhiyun 	dma_release_channel(pl022->dma_tx_channel);
1191*4882a593Smuzhiyun 	pl022->dma_tx_channel = NULL;
1192*4882a593Smuzhiyun err_no_txchan:
1193*4882a593Smuzhiyun 	dma_release_channel(pl022->dma_rx_channel);
1194*4882a593Smuzhiyun 	pl022->dma_rx_channel = NULL;
1195*4882a593Smuzhiyun err_no_rxchan:
1196*4882a593Smuzhiyun 	return err;
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun 
terminate_dma(struct pl022 * pl022)1199*4882a593Smuzhiyun static void terminate_dma(struct pl022 *pl022)
1200*4882a593Smuzhiyun {
1201*4882a593Smuzhiyun 	struct dma_chan *rxchan = pl022->dma_rx_channel;
1202*4882a593Smuzhiyun 	struct dma_chan *txchan = pl022->dma_tx_channel;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	dmaengine_terminate_all(rxchan);
1205*4882a593Smuzhiyun 	dmaengine_terminate_all(txchan);
1206*4882a593Smuzhiyun 	unmap_free_dma_scatter(pl022);
1207*4882a593Smuzhiyun 	pl022->dma_running = false;
1208*4882a593Smuzhiyun }
1209*4882a593Smuzhiyun 
pl022_dma_remove(struct pl022 * pl022)1210*4882a593Smuzhiyun static void pl022_dma_remove(struct pl022 *pl022)
1211*4882a593Smuzhiyun {
1212*4882a593Smuzhiyun 	if (pl022->dma_running)
1213*4882a593Smuzhiyun 		terminate_dma(pl022);
1214*4882a593Smuzhiyun 	if (pl022->dma_tx_channel)
1215*4882a593Smuzhiyun 		dma_release_channel(pl022->dma_tx_channel);
1216*4882a593Smuzhiyun 	if (pl022->dma_rx_channel)
1217*4882a593Smuzhiyun 		dma_release_channel(pl022->dma_rx_channel);
1218*4882a593Smuzhiyun 	kfree(pl022->dummypage);
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun #else
/* Stub when CONFIG_DMA_ENGINE is off: DMA transfers can never be set up */
static inline int configure_dma(struct pl022 *pl022)
{
	return -ENODEV;
}
1226*4882a593Smuzhiyun 
/* Stub: without a DMA engine there is nothing to probe; not an error */
static inline int pl022_dma_autoprobe(struct pl022 *pl022)
{
	return 0;
}
1231*4882a593Smuzhiyun 
/* Stub: without a DMA engine there is nothing to probe; not an error */
static inline int pl022_dma_probe(struct pl022 *pl022)
{
	return 0;
}
1236*4882a593Smuzhiyun 
/* Stub: no DMA resources were ever allocated, so nothing to release */
static inline void pl022_dma_remove(struct pl022 *pl022)
{
}
1240*4882a593Smuzhiyun #endif
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun /**
1243*4882a593Smuzhiyun  * pl022_interrupt_handler - Interrupt handler for SSP controller
1244*4882a593Smuzhiyun  * @irq: IRQ number
1245*4882a593Smuzhiyun  * @dev_id: Local device data
1246*4882a593Smuzhiyun  *
1247*4882a593Smuzhiyun  * This function handles interrupts generated for an interrupt based transfer.
1248*4882a593Smuzhiyun  * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the
1249*4882a593Smuzhiyun  * current message's state as STATE_ERROR and schedule the tasklet
1250*4882a593Smuzhiyun  * pump_transfers which will do the postprocessing of the current message by
1251*4882a593Smuzhiyun  * calling giveback(). Otherwise it reads data from RX FIFO till there is no
1252*4882a593Smuzhiyun  * more data, and writes data in TX FIFO till it is not full. If we complete
1253*4882a593Smuzhiyun  * the transfer we move to the next transfer and schedule the tasklet.
1254*4882a593Smuzhiyun  */
static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
{
	struct pl022 *pl022 = dev_id;
	struct spi_message *msg = pl022->cur_msg;
	u16 irq_status = 0;

	/* An IRQ with no message in flight indicates a driver state bug */
	if (unlikely(!msg)) {
		dev_err(&pl022->adev->dev,
			"bad message state in interrupt handler");
		/* Never fail */
		return IRQ_HANDLED;
	}

	/* Read the Interrupt Status Register */
	irq_status = readw(SSP_MIS(pl022->virtbase));

	/* No status bits set: the interrupt was not ours */
	if (unlikely(!irq_status))
		return IRQ_NONE;

	/*
	 * This handles the FIFO interrupts, the timeout
	 * interrupts are flatly ignored, they cannot be
	 * trusted.
	 */
	if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
		/*
		 * Overrun interrupt - bail out since our Data has been
		 * corrupted
		 */
		dev_err(&pl022->adev->dev, "FIFO overrun\n");
		if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
			dev_err(&pl022->adev->dev,
				"RXFIFO is full\n");

		/*
		 * Disable and clear interrupts, disable SSP,
		 * mark message with bad status so it can be
		 * retried.
		 */
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		writew((readw(SSP_CR1(pl022->virtbase)) &
			(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
		msg->state = STATE_ERROR;

		/* Schedule message queue handler */
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	/* Service the FIFOs: drain RX and refill TX as far as possible */
	readwriter(pl022);

	if (pl022->tx == pl022->tx_end) {
		/* Disable Transmit interrupt, enable receive interrupt */
		writew((readw(SSP_IMSC(pl022->virtbase)) &
		       ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
		       SSP_IMSC(pl022->virtbase));
	}

	/*
	 * Since all transactions must write as much as shall be read,
	 * we can conclude the entire transaction once RX is complete.
	 * At this point, all TX will always be finished.
	 */
	if (pl022->rx >= pl022->rx_end) {
		writew(DISABLE_ALL_INTERRUPTS,
		       SSP_IMSC(pl022->virtbase));
		writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
		if (unlikely(pl022->rx > pl022->rx_end)) {
			dev_warn(&pl022->adev->dev, "read %u surplus "
				 "bytes (did you request an odd "
				 "number of bytes on a 16bit bus?)\n",
				 (u32) (pl022->rx - pl022->rx_end));
		}
		/* Update total bytes transferred */
		msg->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		msg->state = next_transfer(pl022);
		if (msg->state != STATE_DONE && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
		/* Hand post-processing of the message to the tasklet */
		tasklet_schedule(&pl022->pump_transfers);
		return IRQ_HANDLED;
	}

	return IRQ_HANDLED;
}
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun /*
1344*4882a593Smuzhiyun  * This sets up the pointers to memory for the next message to
1345*4882a593Smuzhiyun  * send out on the SPI bus.
1346*4882a593Smuzhiyun  */
set_up_next_transfer(struct pl022 * pl022,struct spi_transfer * transfer)1347*4882a593Smuzhiyun static int set_up_next_transfer(struct pl022 *pl022,
1348*4882a593Smuzhiyun 				struct spi_transfer *transfer)
1349*4882a593Smuzhiyun {
1350*4882a593Smuzhiyun 	int residue;
1351*4882a593Smuzhiyun 
1352*4882a593Smuzhiyun 	/* Sanity check the message for this bus width */
1353*4882a593Smuzhiyun 	residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
1354*4882a593Smuzhiyun 	if (unlikely(residue != 0)) {
1355*4882a593Smuzhiyun 		dev_err(&pl022->adev->dev,
1356*4882a593Smuzhiyun 			"message of %u bytes to transmit but the current "
1357*4882a593Smuzhiyun 			"chip bus has a data width of %u bytes!\n",
1358*4882a593Smuzhiyun 			pl022->cur_transfer->len,
1359*4882a593Smuzhiyun 			pl022->cur_chip->n_bytes);
1360*4882a593Smuzhiyun 		dev_err(&pl022->adev->dev, "skipping this message\n");
1361*4882a593Smuzhiyun 		return -EIO;
1362*4882a593Smuzhiyun 	}
1363*4882a593Smuzhiyun 	pl022->tx = (void *)transfer->tx_buf;
1364*4882a593Smuzhiyun 	pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
1365*4882a593Smuzhiyun 	pl022->rx = (void *)transfer->rx_buf;
1366*4882a593Smuzhiyun 	pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
1367*4882a593Smuzhiyun 	pl022->write =
1368*4882a593Smuzhiyun 	    pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
1369*4882a593Smuzhiyun 	pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
1370*4882a593Smuzhiyun 	return 0;
1371*4882a593Smuzhiyun }
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun /**
1374*4882a593Smuzhiyun  * pump_transfers - Tasklet function which schedules next transfer
1375*4882a593Smuzhiyun  * when running in interrupt or DMA transfer mode.
1376*4882a593Smuzhiyun  * @data: SSP driver private data structure
1377*4882a593Smuzhiyun  *
1378*4882a593Smuzhiyun  */
static void pump_transfers(unsigned long data)
{
	struct pl022 *pl022 = (struct pl022 *) data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;

	/* Get current state information */
	message = pl022->cur_msg;
	transfer = pl022->cur_transfer;

	/* Handle for abort */
	if (message->state == STATE_ERROR) {
		message->status = -EIO;
		giveback(pl022);
		return;
	}

	/* Handle end of message */
	if (message->state == STATE_DONE) {
		message->status = 0;
		giveback(pl022);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == STATE_RUNNING) {
		/* Not the first transfer: consult the one that just finished */
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		/*
		 * FIXME: This runs in interrupt context.
		 * Is this really smart?
		 */
		spi_transfer_delay_exec(previous);

		/* Reselect chip select only if cs_change was requested */
		if (previous->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_SELECT);
	} else {
		/* STATE_START */
		message->state = STATE_RUNNING;
	}

	if (set_up_next_transfer(pl022, transfer)) {
		message->state = STATE_ERROR;
		message->status = -EIO;
		giveback(pl022);
		return;
	}
	/* Flush the FIFOs and let's go! */
	flush(pl022);

	if (pl022->cur_chip->enable_dma) {
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* DMA is set up for this transfer; nothing more to do here */
		return;
	}

	/* Interrupt mode also falls through to this label */
err_config_dma:
	/* enable all interrupts except RX */
	writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
1445*4882a593Smuzhiyun 
/*
 * do_interrupt_dma_transfer - kick off the first transfer of a message
 * in interrupt or DMA mode: assert chip select, prime the buffer state,
 * optionally set up DMA, then enable the SSP and unmask interrupts.
 */
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
	/*
	 * Default is to enable all interrupts except RX -
	 * this will be enabled once TX is complete
	 */
	u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);

	/* Enable target chip, if not already active */
	if (!pl022->next_msg_cs_active)
		pl022_cs_control(pl022, SSP_CHIP_SELECT);

	if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
		/* Error path */
		pl022->cur_msg->state = STATE_ERROR;
		pl022->cur_msg->status = -EIO;
		giveback(pl022);
		return;
	}
	/* If we're using DMA, set up DMA here */
	if (pl022->cur_chip->enable_dma) {
		/* Configure DMA transfer */
		if (configure_dma(pl022)) {
			dev_dbg(&pl022->adev->dev,
				"configuration of DMA failed, fall back to interrupt mode\n");
			goto err_config_dma;
		}
		/* Disable interrupts in DMA mode, IRQ from DMA controller */
		irqflags = DISABLE_ALL_INTERRUPTS;
	}
	/*
	 * Reached by fall-through in interrupt mode, or by goto on DMA
	 * setup failure; irqflags still holds the interrupt-mode mask then.
	 */
err_config_dma:
	/* Enable SSP, turn on interrupts */
	writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
	       SSP_CR1(pl022->virtbase));
	writew(irqflags, SSP_IMSC(pl022->virtbase));
}
1482*4882a593Smuzhiyun 
print_current_status(struct pl022 * pl022)1483*4882a593Smuzhiyun static void print_current_status(struct pl022 *pl022)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun 	u32 read_cr0;
1486*4882a593Smuzhiyun 	u16 read_cr1, read_dmacr, read_sr;
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	if (pl022->vendor->extended_cr)
1489*4882a593Smuzhiyun 		read_cr0 = readl(SSP_CR0(pl022->virtbase));
1490*4882a593Smuzhiyun 	else
1491*4882a593Smuzhiyun 		read_cr0 = readw(SSP_CR0(pl022->virtbase));
1492*4882a593Smuzhiyun 	read_cr1 = readw(SSP_CR1(pl022->virtbase));
1493*4882a593Smuzhiyun 	read_dmacr = readw(SSP_DMACR(pl022->virtbase));
1494*4882a593Smuzhiyun 	read_sr = readw(SSP_SR(pl022->virtbase));
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
1497*4882a593Smuzhiyun 	dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
1498*4882a593Smuzhiyun 	dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
1499*4882a593Smuzhiyun 	dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
1500*4882a593Smuzhiyun 	dev_warn(&pl022->adev->dev,
1501*4882a593Smuzhiyun 			"spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
1502*4882a593Smuzhiyun 			pl022->exp_fifo_level,
1503*4882a593Smuzhiyun 			pl022->vendor->fifodepth);
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun 
/*
 * do_polling_transfer - run an entire spi_message by busy-waiting on the
 * FIFOs, one transfer at a time, with a per-transfer timeout. On exit the
 * message status is set (0, -EAGAIN on timeout, -EIO otherwise) and the
 * message is handed back via giveback().
 */
static void do_polling_transfer(struct pl022 *pl022)
{
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	unsigned long time, timeout;

	message = pl022->cur_msg;

	/* Walk the transfer list until the message is done or errors out */
	while (message->state != STATE_DONE) {
		/* Handle for abort */
		if (message->state == STATE_ERROR)
			break;
		transfer = pl022->cur_transfer;

		/* Delay if requested at end of transfer */
		if (message->state == STATE_RUNNING) {
			previous =
			    list_entry(transfer->transfer_list.prev,
				       struct spi_transfer, transfer_list);
			spi_transfer_delay_exec(previous);
			/* Reassert CS only if the finished transfer asked for it */
			if (previous->cs_change)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		} else {
			/* STATE_START */
			message->state = STATE_RUNNING;
			if (!pl022->next_msg_cs_active)
				pl022_cs_control(pl022, SSP_CHIP_SELECT);
		}

		/* Configuration Changing Per Transfer */
		if (set_up_next_transfer(pl022, transfer)) {
			/* Error path */
			message->state = STATE_ERROR;
			break;
		}
		/* Flush FIFOs and enable SSP */
		flush(pl022);
		writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
		       SSP_CR1(pl022->virtbase));

		dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");

		/* Busy-wait the FIFOs; give up after SPI_POLLING_TIMEOUT ms */
		timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
		while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
			time = jiffies;
			readwriter(pl022);
			if (time_after(time, timeout)) {
				dev_warn(&pl022->adev->dev,
				"%s: timeout!\n", __func__);
				message->state = STATE_TIMEOUT;
				print_current_status(pl022);
				goto out;
			}
			cpu_relax();
		}

		/* Update total byte transferred */
		message->actual_length += pl022->cur_transfer->len;
		/* Move to next transfer */
		message->state = next_transfer(pl022);
		if (message->state != STATE_DONE
		    && pl022->cur_transfer->cs_change)
			pl022_cs_control(pl022, SSP_CHIP_DESELECT);
	}
out:
	/* Handle end of message */
	if (message->state == STATE_DONE)
		message->status = 0;
	else if (message->state == STATE_TIMEOUT)
		message->status = -EAGAIN;
	else
		message->status = -EIO;

	giveback(pl022);
	return;
}
1584*4882a593Smuzhiyun 
pl022_transfer_one_message(struct spi_master * master,struct spi_message * msg)1585*4882a593Smuzhiyun static int pl022_transfer_one_message(struct spi_master *master,
1586*4882a593Smuzhiyun 				      struct spi_message *msg)
1587*4882a593Smuzhiyun {
1588*4882a593Smuzhiyun 	struct pl022 *pl022 = spi_master_get_devdata(master);
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun 	/* Initial message state */
1591*4882a593Smuzhiyun 	pl022->cur_msg = msg;
1592*4882a593Smuzhiyun 	msg->state = STATE_START;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 	pl022->cur_transfer = list_entry(msg->transfers.next,
1595*4882a593Smuzhiyun 					 struct spi_transfer, transfer_list);
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	/* Setup the SPI using the per chip configuration */
1598*4882a593Smuzhiyun 	pl022->cur_chip = spi_get_ctldata(msg->spi);
1599*4882a593Smuzhiyun 	pl022->cur_cs = pl022->chipselects[msg->spi->chip_select];
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	restore_state(pl022);
1602*4882a593Smuzhiyun 	flush(pl022);
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
1605*4882a593Smuzhiyun 		do_polling_transfer(pl022);
1606*4882a593Smuzhiyun 	else
1607*4882a593Smuzhiyun 		do_interrupt_dma_transfer(pl022);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	return 0;
1610*4882a593Smuzhiyun }
1611*4882a593Smuzhiyun 
pl022_unprepare_transfer_hardware(struct spi_master * master)1612*4882a593Smuzhiyun static int pl022_unprepare_transfer_hardware(struct spi_master *master)
1613*4882a593Smuzhiyun {
1614*4882a593Smuzhiyun 	struct pl022 *pl022 = spi_master_get_devdata(master);
1615*4882a593Smuzhiyun 
1616*4882a593Smuzhiyun 	/* nothing more to do - disable spi/ssp and power off */
1617*4882a593Smuzhiyun 	writew((readw(SSP_CR1(pl022->virtbase)) &
1618*4882a593Smuzhiyun 		(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	return 0;
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun 
/*
 * verify_controller_parameters - validate a chip configuration against
 * this controller's capabilities. Every check logs a specific error and
 * returns -EINVAL on failure; returns 0 when the configuration is usable.
 */
static int verify_controller_parameters(struct pl022 *pl022,
				struct pl022_config_chip const *chip_info)
{
	if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
	    || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
		dev_err(&pl022->adev->dev,
			"interface is configured incorrectly\n");
		return -EINVAL;
	}
	/* Unidirectional mode is a per-vendor capability */
	if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
	    (!pl022->vendor->unidir)) {
		dev_err(&pl022->adev->dev,
			"unidirectional mode not supported in this "
			"hardware version\n");
		return -EINVAL;
	}
	if ((chip_info->hierarchy != SSP_MASTER)
	    && (chip_info->hierarchy != SSP_SLAVE)) {
		dev_err(&pl022->adev->dev,
			"hierarchy is configured incorrectly\n");
		return -EINVAL;
	}
	if ((chip_info->com_mode != INTERRUPT_TRANSFER)
	    && (chip_info->com_mode != DMA_TRANSFER)
	    && (chip_info->com_mode != POLLING_TRANSFER)) {
		dev_err(&pl022->adev->dev,
			"Communication mode is configured incorrectly\n");
		return -EINVAL;
	}
	/* RX trigger levels above 8 need a deep-enough FIFO on this vendor */
	switch (chip_info->rx_lev_trig) {
	case SSP_RX_1_OR_MORE_ELEM:
	case SSP_RX_4_OR_MORE_ELEM:
	case SSP_RX_8_OR_MORE_ELEM:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_RX_16_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_RX_32_OR_MORE_ELEM:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"RX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	/* Same capability check for the TX trigger levels */
	switch (chip_info->tx_lev_trig) {
	case SSP_TX_1_OR_MORE_EMPTY_LOC:
	case SSP_TX_4_OR_MORE_EMPTY_LOC:
	case SSP_TX_8_OR_MORE_EMPTY_LOC:
		/* These are always OK, all variants can handle this */
		break;
	case SSP_TX_16_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 16) {
			dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	case SSP_TX_32_OR_MORE_EMPTY_LOC:
		if (pl022->vendor->fifodepth < 32) {
			dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err(&pl022->adev->dev,
			"TX FIFO Trigger Level is configured incorrectly\n");
		return -EINVAL;
	}
	/* Extra constraints that only apply to National Microwire mode */
	if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
		if ((chip_info->ctrl_len < SSP_BITS_4)
		    || (chip_info->ctrl_len > SSP_BITS_32)) {
			dev_err(&pl022->adev->dev,
				"CTRL LEN is configured incorrectly\n");
			return -EINVAL;
		}
		if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
		    && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
			dev_err(&pl022->adev->dev,
				"Wait State is configured incorrectly\n");
			return -EINVAL;
		}
		/* Half duplex is only available in the ST Micro version */
		if (pl022->vendor->extended_cr) {
			if ((chip_info->duplex !=
			     SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
			    && (chip_info->duplex !=
				SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
				dev_err(&pl022->adev->dev,
					"Microwire duplex mode is configured incorrectly\n");
				return -EINVAL;
			}
		} else {
			if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
				dev_err(&pl022->adev->dev,
					"Microwire half duplex mode requested,"
					" but this is only available in the"
					" ST version of PL022\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}
1737*4882a593Smuzhiyun 
/*
 * spi_rate - effective SSP bit rate for a given clock rate and the two
 * divider fields: rate / (cpsdvsr * (1 + scr)).
 */
static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
{
	u32 divisor = (u32)cpsdvsr * (scr + 1);

	return rate / divisor;
}
1742*4882a593Smuzhiyun 
/*
 * calculate_effective_freq - find the cpsdvsr/scr divider pair giving the
 * highest achievable rate that does not exceed @freq, and store it in
 * @clk_freq. Returns -EINVAL if @freq is below the minimum achievable
 * rate; 0 otherwise (with a WARN if no divisor pair was found).
 */
static int calculate_effective_freq(struct pl022 *pl022, int freq, struct
				    ssp_clock_params * clk_freq)
{
	/* Lets calculate the frequency parameters */
	u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
	u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
		best_scr = 0, tmp, found = 0;

	rate = clk_get_rate(pl022->clk);
	/* cpsdvscr = 2 & scr 0 */
	max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
	/* cpsdvsr = 254 & scr = 255 */
	min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);

	/* Too fast is merely clamped (with a warning), too slow is an error */
	if (freq > max_tclk)
		dev_warn(&pl022->adev->dev,
			"Max speed that can be programmed is %d Hz, you requested %d\n",
			max_tclk, freq);

	if (freq < min_tclk) {
		dev_err(&pl022->adev->dev,
			"Requested frequency: %d Hz is less than minimum possible %d Hz\n",
			freq, min_tclk);
		return -EINVAL;
	}

	/*
	 * best_freq will give closest possible available rate (<= requested
	 * freq) for all values of scr & cpsdvsr.
	 */
	while ((cpsdvsr <= CPSDVR_MAX) && !found) {
		/* For this cpsdvsr, find the smallest scr with rate <= freq */
		while (scr <= SCR_MAX) {
			tmp = spi_rate(rate, cpsdvsr, scr);

			if (tmp > freq) {
				/* we need lower freq */
				scr++;
				continue;
			}

			/*
			 * If found exact value, mark found and break.
			 * If found more closer value, update and break.
			 */
			if (tmp > best_freq) {
				best_freq = tmp;
				best_cpsdvsr = cpsdvsr;
				best_scr = scr;

				if (tmp == freq)
					found = 1;
			}
			/*
			 * increased scr will give lower rates, which are not
			 * required
			 */
			break;
		}
		/* Step cpsdvsr by two and restart the scr search */
		cpsdvsr += 2;
		scr = SCR_MIN;
	}

	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n",
			freq);

	clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
	clk_freq->scr = (u8) (best_scr & 0xFF);
	dev_dbg(&pl022->adev->dev,
		"SSP Target Frequency is: %u, Effective Frequency is %u\n",
		freq, best_freq);
	dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
		clk_freq->cpsdvsr, clk_freq->scr);

	return 0;
}
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun /*
1820*4882a593Smuzhiyun  * A piece of default chip info unless the platform
1821*4882a593Smuzhiyun  * supplies it.
1822*4882a593Smuzhiyun  */
static const struct pl022_config_chip pl022_default_chip_info = {
	/* Safest default: busy-wait transfers, no IRQ or DMA assumptions */
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	/* NOTE(review): DT setup overrides this to SSP_MASTER in pl022_setup() */
	.hierarchy = SSP_SLAVE,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	/* Lowest trigger levels: valid on every hardware variant */
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	/* Microwire-only fields; unused for Motorola SPI */
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	.cs_control = null_cs_control,
};
1835*4882a593Smuzhiyun 
/**
 * pl022_setup - setup function registered to SPI master framework
 * @spi: spi device which is requesting setup
 *
 * This function is registered to the SPI framework for this SPI master
 * controller. If it is the first time when setup is called by this device,
 * this function will initialize the runtime state for this chip and save
 * the same in the device structure. Else it will update the runtime info
 * with the updated chip info. Nothing is really being written to the
 * controller hardware here, that is not done until the actual transfer
 * commence.
 *
 * Return: 0 on success; -EINVAL, -ENOTSUPP or -ENOMEM on failure. On any
 * error the controller state pointer is cleared and the (possibly newly
 * allocated) runtime state is freed.
 */
static int pl022_setup(struct spi_device *spi)
{
	struct pl022_config_chip const *chip_info;
	struct pl022_config_chip chip_info_dt;
	struct chip_data *chip;
	struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
	int status = 0;
	struct pl022 *pl022 = spi_master_get_devdata(spi->master);
	unsigned int bits = spi->bits_per_word;
	u32 tmp;
	struct device_node *np = spi->dev.of_node;

	/* A maximum speed is mandatory: the clock divisors derive from it */
	if (!spi->max_speed_hz)
		return -EINVAL;

	/* Get controller_state if one is supplied */
	chip = spi_get_ctldata(spi);

	if (chip == NULL) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		dev_dbg(&spi->dev,
			"allocated memory for controller's runtime state\n");
	}

	/* Get controller data if one is supplied */
	chip_info = spi->controller_data;

	if (chip_info == NULL) {
		if (np) {
			/*
			 * No board-file config: start from the defaults and
			 * let optional DT properties override single fields.
			 */
			chip_info_dt = pl022_default_chip_info;

			chip_info_dt.hierarchy = SSP_MASTER;
			of_property_read_u32(np, "pl022,interface",
				&chip_info_dt.iface);
			of_property_read_u32(np, "pl022,com-mode",
				&chip_info_dt.com_mode);
			of_property_read_u32(np, "pl022,rx-level-trig",
				&chip_info_dt.rx_lev_trig);
			of_property_read_u32(np, "pl022,tx-level-trig",
				&chip_info_dt.tx_lev_trig);
			of_property_read_u32(np, "pl022,ctrl-len",
				&chip_info_dt.ctrl_len);
			of_property_read_u32(np, "pl022,wait-state",
				&chip_info_dt.wait_state);
			of_property_read_u32(np, "pl022,duplex",
				&chip_info_dt.duplex);

			chip_info = &chip_info_dt;
		} else {
			chip_info = &pl022_default_chip_info;
			/* spi_board_info.controller_data is not supplied */
			dev_dbg(&spi->dev,
				"using default controller_data settings\n");
		}
	} else
		dev_dbg(&spi->dev,
			"using user supplied controller_data settings\n");

	/*
	 * We can override with custom divisors, else we use the board
	 * frequency setting
	 */
	if ((0 == chip_info->clk_freq.cpsdvsr)
	    && (0 == chip_info->clk_freq.scr)) {
		status = calculate_effective_freq(pl022,
						  spi->max_speed_hz,
						  &clk_freq);
		if (status < 0)
			goto err_config_params;
	} else {
		memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
		/* Only even cpsdvsr values are used: round odd values down */
		if ((clk_freq.cpsdvsr % 2) != 0)
			clk_freq.cpsdvsr =
				clk_freq.cpsdvsr - 1;
	}
	if ((clk_freq.cpsdvsr < CPSDVR_MIN)
	    || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
		status = -EINVAL;
		dev_err(&spi->dev,
			"cpsdvsr is configured incorrectly\n");
		goto err_config_params;
	}

	/* Reject combinations this vendor variant cannot do */
	status = verify_controller_parameters(pl022, chip_info);
	if (status) {
		dev_err(&spi->dev, "controller data is incorrect");
		goto err_config_params;
	}

	pl022->rx_lev_trig = chip_info->rx_lev_trig;
	pl022->tx_lev_trig = chip_info->tx_lev_trig;

	/* Now set controller state based on controller data */
	chip->xfer_type = chip_info->com_mode;
	if (!chip_info->cs_control) {
		/* Fall back to the no-op CS hook; warn if no GPIO backs it */
		chip->cs_control = null_cs_control;
		if (!gpio_is_valid(pl022->chipselects[spi->chip_select]))
			dev_warn(&spi->dev,
				 "invalid chip select\n");
	} else
		chip->cs_control = chip_info->cs_control;

	/* Check bits per word with vendor specific range */
	if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
		status = -ENOTSUPP;
		dev_err(&spi->dev, "illegal data size for this controller!\n");
		dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
				pl022->vendor->max_bpw);
		goto err_config_params;
	} else if (bits <= 8) {
		dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
		chip->n_bytes = 1;
		chip->read = READING_U8;
		chip->write = WRITING_U8;
	} else if (bits <= 16) {
		dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
		chip->n_bytes = 2;
		chip->read = READING_U16;
		chip->write = WRITING_U16;
	} else {
		dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
		chip->n_bytes = 4;
		chip->read = READING_U32;
		chip->write = WRITING_U32;
	}

	/* Now Initialize all register settings required for this chip */
	chip->cr0 = 0;
	chip->cr1 = 0;
	chip->dmacr = 0;
	chip->cpsr = 0;
	/* DMA needs both the per-transfer request and platform enablement */
	if ((chip_info->com_mode == DMA_TRANSFER)
	    && ((pl022->master_info)->enable_dma)) {
		chip->enable_dma = true;
		dev_dbg(&spi->dev, "DMA mode set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	} else {
		chip->enable_dma = false;
		dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_RXDMAE, 0);
		SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
			       SSP_DMACR_MASK_TXDMAE, 1);
	}

	chip->cpsr = clk_freq.cpsdvsr;

	/* Special setup for the ST micro extended control registers */
	if (pl022->vendor->extended_cr) {
		u32 etx;

		if (pl022->vendor->pl023) {
			/* These bits are only in the PL023 */
			SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
				       SSP_CR1_MASK_FBCLKDEL_ST, 13);
		} else {
			/* These bits are in the PL022 but not PL023 */
			SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
				       SSP_CR0_MASK_HALFDUP_ST, 5);
			SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
				       SSP_CR0_MASK_CSS_ST, 16);
			SSP_WRITE_BITS(chip->cr0, chip_info->iface,
				       SSP_CR0_MASK_FRF_ST, 21);
			SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
				       SSP_CR1_MASK_MWAIT_ST, 6);
		}
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS_ST, 0);

		/* RX/TX endianness follow the SPI_LSB_FIRST mode flag */
		if (spi->mode & SPI_LSB_FIRST) {
			tmp = SSP_RX_LSB;
			etx = SSP_TX_LSB;
		} else {
			tmp = SSP_RX_MSB;
			etx = SSP_TX_MSB;
		}
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
		SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
		SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
			       SSP_CR1_MASK_RXIFLSEL_ST, 7);
		SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
			       SSP_CR1_MASK_TXIFLSEL_ST, 10);
	} else {
		SSP_WRITE_BITS(chip->cr0, bits - 1,
			       SSP_CR0_MASK_DSS, 0);
		SSP_WRITE_BITS(chip->cr0, chip_info->iface,
			       SSP_CR0_MASK_FRF, 4);
	}

	/* Stuff that is common for all versions */
	if (spi->mode & SPI_CPOL)
		tmp = SSP_CLK_POL_IDLE_HIGH;
	else
		tmp = SSP_CLK_POL_IDLE_LOW;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);

	if (spi->mode & SPI_CPHA)
		tmp = SSP_CLK_SECOND_EDGE;
	else
		tmp = SSP_CLK_FIRST_EDGE;
	SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);

	SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
	/* Loopback is available on all versions except PL023 */
	if (pl022->vendor->loopback) {
		if (spi->mode & SPI_LOOP)
			tmp = LOOPBACK_ENABLED;
		else
			tmp = LOOPBACK_DISABLED;
		SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
	}
	/* Leave the SSP disabled; it is enabled when a transfer starts */
	SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
	SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
	SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
		3);

	/* Save controller_state */
	spi_set_ctldata(spi, chip);
	return status;
 err_config_params:
	/* Clear the pointer before freeing so stale state cannot be used */
	spi_set_ctldata(spi, NULL);
	kfree(chip);
	return status;
}
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun /**
2079*4882a593Smuzhiyun  * pl022_cleanup - cleanup function registered to SPI master framework
2080*4882a593Smuzhiyun  * @spi: spi device which is requesting cleanup
2081*4882a593Smuzhiyun  *
2082*4882a593Smuzhiyun  * This function is registered to the SPI framework for this SPI master
2083*4882a593Smuzhiyun  * controller. It will free the runtime state of chip.
2084*4882a593Smuzhiyun  */
pl022_cleanup(struct spi_device * spi)2085*4882a593Smuzhiyun static void pl022_cleanup(struct spi_device *spi)
2086*4882a593Smuzhiyun {
2087*4882a593Smuzhiyun 	struct chip_data *chip = spi_get_ctldata(spi);
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	spi_set_ctldata(spi, NULL);
2090*4882a593Smuzhiyun 	kfree(chip);
2091*4882a593Smuzhiyun }
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun static struct pl022_ssp_controller *
pl022_platform_data_dt_get(struct device * dev)2094*4882a593Smuzhiyun pl022_platform_data_dt_get(struct device *dev)
2095*4882a593Smuzhiyun {
2096*4882a593Smuzhiyun 	struct device_node *np = dev->of_node;
2097*4882a593Smuzhiyun 	struct pl022_ssp_controller *pd;
2098*4882a593Smuzhiyun 	u32 tmp = 0;
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun 	if (!np) {
2101*4882a593Smuzhiyun 		dev_err(dev, "no dt node defined\n");
2102*4882a593Smuzhiyun 		return NULL;
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
2106*4882a593Smuzhiyun 	if (!pd)
2107*4882a593Smuzhiyun 		return NULL;
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	pd->bus_id = -1;
2110*4882a593Smuzhiyun 	pd->enable_dma = 1;
2111*4882a593Smuzhiyun 	of_property_read_u32(np, "num-cs", &tmp);
2112*4882a593Smuzhiyun 	pd->num_chipselect = tmp;
2113*4882a593Smuzhiyun 	of_property_read_u32(np, "pl022,autosuspend-delay",
2114*4882a593Smuzhiyun 			     &pd->autosuspend_delay);
2115*4882a593Smuzhiyun 	pd->rt = of_property_read_bool(np, "pl022,rt");
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun 	return pd;
2118*4882a593Smuzhiyun }
2119*4882a593Smuzhiyun 
/*
 * pl022_probe - AMBA probe for a PL022/PL023 SSP controller
 * @adev: the AMBA device being probed
 * @id: matched pl022_ids entry; ->data points at the vendor_data variant
 *
 * Allocates an SPI master, resolves chip selects, maps registers, claims
 * the bus clock and IRQ, probes DMA channels and registers the master
 * with the SPI core. The goto chain at the bottom unwinds exactly the
 * resources acquired before the point of failure.
 */
static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct device *dev = &adev->dev;
	struct pl022_ssp_controller *platform_info =
			dev_get_platdata(&adev->dev);
	struct spi_master *master;
	struct pl022 *pl022 = NULL;	/*Data for this driver */
	struct device_node *np = adev->dev.of_node;
	int status = 0, i, num_cs;

	dev_info(&adev->dev,
		 "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
	/* Board-file platform data wins; otherwise synthesize it from DT */
	if (!platform_info && IS_ENABLED(CONFIG_OF))
		platform_info = pl022_platform_data_dt_get(dev);

	if (!platform_info) {
		dev_err(dev, "probe: no platform data defined\n");
		return -ENODEV;
	}

	if (platform_info->num_chipselect) {
		num_cs = platform_info->num_chipselect;
	} else {
		dev_err(dev, "probe: no chip select defined\n");
		return -ENODEV;
	}

	/* Allocate master with space for data */
	master = spi_alloc_master(dev, sizeof(struct pl022));
	if (master == NULL) {
		dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
		return -ENOMEM;
	}

	pl022 = spi_master_get_devdata(master);
	pl022->master = master;
	pl022->master_info = platform_info;
	pl022->adev = adev;
	pl022->vendor = id->data;
	pl022->chipselects = devm_kcalloc(dev, num_cs, sizeof(int),
					  GFP_KERNEL);
	if (!pl022->chipselects) {
		status = -ENOMEM;
		goto err_no_mem;
	}

	/*
	 * Bus Number Which has been Assigned to this SSP controller
	 * on this board
	 */
	master->bus_num = platform_info->bus_id;
	master->num_chipselect = num_cs;
	master->cleanup = pl022_cleanup;
	master->setup = pl022_setup;
	master->auto_runtime_pm = true;
	master->transfer_one_message = pl022_transfer_one_message;
	master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
	master->rt = platform_info->rt;
	master->dev.of_node = dev->of_node;

	/*
	 * Chip-select resolution, in priority order: platform-supplied
	 * table, the controller's internal CS register, then DT cs-gpios.
	 */
	if (platform_info->num_chipselect && platform_info->chipselects) {
		for (i = 0; i < num_cs; i++)
			pl022->chipselects[i] = platform_info->chipselects[i];
	} else if (pl022->vendor->internal_cs_ctrl) {
		for (i = 0; i < num_cs; i++)
			pl022->chipselects[i] = i;
	} else if (IS_ENABLED(CONFIG_OF)) {
		for (i = 0; i < num_cs; i++) {
			int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);

			if (cs_gpio == -EPROBE_DEFER) {
				status = -EPROBE_DEFER;
				goto err_no_gpio;
			}

			pl022->chipselects[i] = cs_gpio;

			/* GPIO request/config failures are logged, not fatal */
			if (gpio_is_valid(cs_gpio)) {
				if (devm_gpio_request(dev, cs_gpio, "ssp-pl022"))
					dev_err(&adev->dev,
						"could not request %d gpio\n",
						cs_gpio);
				else if (gpio_direction_output(cs_gpio, 1))
					dev_err(&adev->dev,
						"could not set gpio %d as output\n",
						cs_gpio);
			}
		}
	}

	/*
	 * Supports mode 0-3, loopback, and active low CS. Transfers are
	 * always MS bit first on the original pl022.
	 */
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
	if (pl022->vendor->extended_cr)
		master->mode_bits |= SPI_LSB_FIRST;

	dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);

	status = amba_request_regions(adev, NULL);
	if (status)
		goto err_no_ioregion;

	pl022->phybase = adev->res.start;
	pl022->virtbase = devm_ioremap(dev, adev->res.start,
				       resource_size(&adev->res));
	if (pl022->virtbase == NULL) {
		status = -ENOMEM;
		goto err_no_ioremap;
	}
	dev_info(&adev->dev, "mapped registers from %pa to %p\n",
		&adev->res.start, pl022->virtbase);

	pl022->clk = devm_clk_get(&adev->dev, NULL);
	if (IS_ERR(pl022->clk)) {
		status = PTR_ERR(pl022->clk);
		dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
		goto err_no_clk;
	}

	status = clk_prepare_enable(pl022->clk);
	if (status) {
		dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
		goto err_no_clk_en;
	}

	/* Initialize transfer pump */
	tasklet_init(&pl022->pump_transfers, pump_transfers,
		     (unsigned long)pl022);

	/* Disable SSP */
	writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
	       SSP_CR1(pl022->virtbase));
	load_ssp_default_config(pl022);

	status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
				  0, "pl022", pl022);
	if (status < 0) {
		dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
		goto err_no_irq;
	}

	/* Get DMA channels, try autoconfiguration first */
	status = pl022_dma_autoprobe(pl022);
	if (status == -EPROBE_DEFER) {
		dev_dbg(dev, "deferring probe to get DMA channel\n");
		goto err_no_irq;
	}

	/* If that failed, use channels from platform_info */
	if (status == 0)
		platform_info->enable_dma = 1;
	else if (platform_info->enable_dma) {
		status = pl022_dma_probe(pl022);
		if (status != 0)
			platform_info->enable_dma = 0;
	}

	/* Register with the SPI framework */
	amba_set_drvdata(adev, pl022);
	status = devm_spi_register_master(&adev->dev, master);
	if (status != 0) {
		dev_err(&adev->dev,
			"probe - problem registering spi master\n");
		goto err_spi_register;
	}
	dev_dbg(dev, "probe succeeded\n");

	/* let runtime pm put suspend */
	if (platform_info->autosuspend_delay > 0) {
		dev_info(&adev->dev,
			"will use autosuspend for runtime pm, delay %dms\n",
			platform_info->autosuspend_delay);
		pm_runtime_set_autosuspend_delay(dev,
			platform_info->autosuspend_delay);
		pm_runtime_use_autosuspend(dev);
	}
	/* Balanced by pm_runtime_get_noresume() in pl022_remove() */
	pm_runtime_put(dev);

	return 0;

 err_spi_register:
	if (platform_info->enable_dma)
		pl022_dma_remove(pl022);
 err_no_irq:
	clk_disable_unprepare(pl022->clk);
 err_no_clk_en:
 err_no_clk:
 err_no_ioremap:
	amba_release_regions(adev);
 err_no_ioregion:
 err_no_gpio:
 err_no_mem:
	spi_master_put(master);
	return status;
}
2317*4882a593Smuzhiyun 
/*
 * pl022_remove - AMBA remove callback; undoes pl022_probe() in reverse.
 * Resources claimed with devm_* (IRQ, ioremap, clock handle, chipselect
 * array) are released automatically by the driver core.
 */
static void
pl022_remove(struct amba_device *adev)
{
	struct pl022 *pl022 = amba_get_drvdata(adev);

	if (!pl022)
		return;

	/*
	 * undo pm_runtime_put() in probe.  I assume that we're not
	 * accessing the primecell here.
	 */
	pm_runtime_get_noresume(&adev->dev);

	/* Put the block back into its reset configuration */
	load_ssp_default_config(pl022);
	if (pl022->master_info->enable_dma)
		pl022_dma_remove(pl022);

	clk_disable_unprepare(pl022->clk);
	amba_release_regions(adev);
	tasklet_disable(&pl022->pump_transfers);
}
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/*
 * pl022_suspend - system-sleep suspend hook.
 * Quiesces the SPI message queue first, then force-suspends the device;
 * if the latter fails the queue suspend is rolled back so the controller
 * keeps working.
 */
static int pl022_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);
	int ret;

	ret = spi_master_suspend(pl022->master);
	if (ret)
		return ret;

	ret = pm_runtime_force_suspend(dev);
	if (ret) {
		/* Undo the queue suspend so the master stays usable */
		spi_master_resume(pl022->master);
		return ret;
	}

	/* Park the pins only after the device is fully suspended */
	pinctrl_pm_select_sleep_state(dev);

	dev_dbg(dev, "suspended\n");
	return 0;
}
2362*4882a593Smuzhiyun 
pl022_resume(struct device * dev)2363*4882a593Smuzhiyun static int pl022_resume(struct device *dev)
2364*4882a593Smuzhiyun {
2365*4882a593Smuzhiyun 	struct pl022 *pl022 = dev_get_drvdata(dev);
2366*4882a593Smuzhiyun 	int ret;
2367*4882a593Smuzhiyun 
2368*4882a593Smuzhiyun 	ret = pm_runtime_force_resume(dev);
2369*4882a593Smuzhiyun 	if (ret)
2370*4882a593Smuzhiyun 		dev_err(dev, "problem resuming\n");
2371*4882a593Smuzhiyun 
2372*4882a593Smuzhiyun 	/* Start the queue running */
2373*4882a593Smuzhiyun 	ret = spi_master_resume(pl022->master);
2374*4882a593Smuzhiyun 	if (!ret)
2375*4882a593Smuzhiyun 		dev_dbg(dev, "resumed\n");
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	return ret;
2378*4882a593Smuzhiyun }
2379*4882a593Smuzhiyun #endif
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun #ifdef CONFIG_PM
/* Runtime suspend: gate the bus clock, then move pins to their idle state. */
static int pl022_runtime_suspend(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	clk_disable_unprepare(pl022->clk);
	pinctrl_pm_select_idle_state(dev);

	return 0;
}
2391*4882a593Smuzhiyun 
/* Runtime resume: restore the default pin state before ungating the clock. */
static int pl022_runtime_resume(struct device *dev)
{
	struct pl022 *pl022 = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);
	/* NOTE(review): clk_prepare_enable() result is ignored here */
	clk_prepare_enable(pl022->clk);

	return 0;
}
2401*4882a593Smuzhiyun #endif
2402*4882a593Smuzhiyun 
/* System sleep and runtime PM callbacks wired into the driver below. */
static const struct dev_pm_ops pl022_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
	SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
2407*4882a593Smuzhiyun 
/* Original ARM PL022: 8-deep FIFO, max 16 bits per word. */
static struct vendor_data vendor_arm = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};

/* ST Micro derivative: 32-deep FIFO, 32 bpw, extended CR0/CR1. */
static struct vendor_data vendor_st = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = false,
};

/* ST-Ericsson "PL023": SPI-only stripped variant, no loopback. */
static struct vendor_data vendor_st_pl023 = {
	.fifodepth = 32,
	.max_bpw = 32,
	.unidir = false,
	.extended_cr = true,
	.pl023 = true,
	.loopback = false,
	.internal_cs_ctrl = false,
};

/* LSI variant: like ARM PL022 but with internal chip-select control. */
static struct vendor_data vendor_lsi = {
	.fifodepth = 8,
	.max_bpw = 16,
	.unidir = false,
	.extended_cr = false,
	.pl023 = false,
	.loopback = true,
	.internal_cs_ctrl = true,
};
2447*4882a593Smuzhiyun 
/* AMBA peripheral-ID match table; ->data selects the vendor_data above. */
static const struct amba_id pl022_ids[] = {
	{
		/*
		 * ARM PL022 variant, this has a 16bit wide
		 * and 8 locations deep TX/RX FIFO
		 */
		.id	= 0x00041022,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/*
		 * ST Micro derivative, this has 32bit wide
		 * and 32 locations deep TX/RX FIFO
		 */
		.id	= 0x01080022,
		.mask	= 0xffffffff,
		.data	= &vendor_st,
	},
	{
		/*
		 * ST-Ericsson derivative "PL023" (this is not
		 * an official ARM number), this is a PL022 SSP block
		 * stripped to SPI mode only, it has 32bit wide
		 * and 32 locations deep TX/RX FIFO but no extended
		 * CR0/CR1 register
		 */
		.id	= 0x00080023,
		.mask	= 0xffffffff,
		.data	= &vendor_st_pl023,
	},
	{
		/*
		 * PL022 variant that has a chip select control register which
		 * allows control of 5 output signals nCS[0:4].
		 */
		.id	= 0x000b6022,
		.mask	= 0x000fffff,
		.data	= &vendor_lsi,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl022_ids);
2492*4882a593Smuzhiyun 
/* AMBA bus driver glue: matching table, probe/remove and PM ops. */
static struct amba_driver pl022_driver = {
	.drv = {
		.name	= "ssp-pl022",
		.pm	= &pl022_dev_pm_ops,
	},
	.id_table	= pl022_ids,
	.probe		= pl022_probe,
	.remove		= pl022_remove,
};
2502*4882a593Smuzhiyun 
/* Register the AMBA driver; hooked at subsys_initcall rather than
 * module_init so the SPI bus master comes up early in boot. */
static int __init pl022_init(void)
{
	return amba_driver_register(&pl022_driver);
}
subsys_initcall(pl022_init);
2508*4882a593Smuzhiyun 
/* Module unload: unregister the AMBA driver. */
static void __exit pl022_exit(void)
{
	amba_driver_unregister(&pl022_driver);
}
module_exit(pl022_exit);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
2516*4882a593Smuzhiyun MODULE_DESCRIPTION("PL022 SSP Controller Driver");
2517*4882a593Smuzhiyun MODULE_LICENSE("GPL");
2518