// SPDX-License-Identifier: GPL-2.0
//
// STMicroelectronics STM32 SPI Controller driver (master mode only)
//
// Copyright (C) 2017, STMicroelectronics - All Rights Reserved
// Author(s): Amelie Delaunay <amelie.delaunay@st.com> for STMicroelectronics.
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/debugfs.h>
9*4882a593Smuzhiyun #include <linux/clk.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/dmaengine.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/iopoll.h>
14*4882a593Smuzhiyun #include <linux/module.h>
15*4882a593Smuzhiyun #include <linux/of_platform.h>
16*4882a593Smuzhiyun #include <linux/pinctrl/consumer.h>
17*4882a593Smuzhiyun #include <linux/pm_runtime.h>
18*4882a593Smuzhiyun #include <linux/reset.h>
19*4882a593Smuzhiyun #include <linux/spi/spi.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #define DRIVER_NAME "spi_stm32"
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /* STM32F4 SPI registers */
24*4882a593Smuzhiyun #define STM32F4_SPI_CR1 0x00
25*4882a593Smuzhiyun #define STM32F4_SPI_CR2 0x04
26*4882a593Smuzhiyun #define STM32F4_SPI_SR 0x08
27*4882a593Smuzhiyun #define STM32F4_SPI_DR 0x0C
28*4882a593Smuzhiyun #define STM32F4_SPI_I2SCFGR 0x1C
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun /* STM32F4_SPI_CR1 bit fields */
31*4882a593Smuzhiyun #define STM32F4_SPI_CR1_CPHA BIT(0)
32*4882a593Smuzhiyun #define STM32F4_SPI_CR1_CPOL BIT(1)
33*4882a593Smuzhiyun #define STM32F4_SPI_CR1_MSTR BIT(2)
34*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BR_SHIFT 3
35*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BR GENMASK(5, 3)
36*4882a593Smuzhiyun #define STM32F4_SPI_CR1_SPE BIT(6)
37*4882a593Smuzhiyun #define STM32F4_SPI_CR1_LSBFRST BIT(7)
38*4882a593Smuzhiyun #define STM32F4_SPI_CR1_SSI BIT(8)
39*4882a593Smuzhiyun #define STM32F4_SPI_CR1_SSM BIT(9)
40*4882a593Smuzhiyun #define STM32F4_SPI_CR1_RXONLY BIT(10)
41*4882a593Smuzhiyun #define STM32F4_SPI_CR1_DFF BIT(11)
42*4882a593Smuzhiyun #define STM32F4_SPI_CR1_CRCNEXT BIT(12)
43*4882a593Smuzhiyun #define STM32F4_SPI_CR1_CRCEN BIT(13)
44*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BIDIOE BIT(14)
45*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BIDIMODE BIT(15)
46*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BR_MIN 0
47*4882a593Smuzhiyun #define STM32F4_SPI_CR1_BR_MAX (GENMASK(5, 3) >> 3)
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun /* STM32F4_SPI_CR2 bit fields */
50*4882a593Smuzhiyun #define STM32F4_SPI_CR2_RXDMAEN BIT(0)
51*4882a593Smuzhiyun #define STM32F4_SPI_CR2_TXDMAEN BIT(1)
52*4882a593Smuzhiyun #define STM32F4_SPI_CR2_SSOE BIT(2)
53*4882a593Smuzhiyun #define STM32F4_SPI_CR2_FRF BIT(4)
54*4882a593Smuzhiyun #define STM32F4_SPI_CR2_ERRIE BIT(5)
55*4882a593Smuzhiyun #define STM32F4_SPI_CR2_RXNEIE BIT(6)
56*4882a593Smuzhiyun #define STM32F4_SPI_CR2_TXEIE BIT(7)
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun /* STM32F4_SPI_SR bit fields */
59*4882a593Smuzhiyun #define STM32F4_SPI_SR_RXNE BIT(0)
60*4882a593Smuzhiyun #define STM32F4_SPI_SR_TXE BIT(1)
61*4882a593Smuzhiyun #define STM32F4_SPI_SR_CHSIDE BIT(2)
62*4882a593Smuzhiyun #define STM32F4_SPI_SR_UDR BIT(3)
63*4882a593Smuzhiyun #define STM32F4_SPI_SR_CRCERR BIT(4)
64*4882a593Smuzhiyun #define STM32F4_SPI_SR_MODF BIT(5)
65*4882a593Smuzhiyun #define STM32F4_SPI_SR_OVR BIT(6)
66*4882a593Smuzhiyun #define STM32F4_SPI_SR_BSY BIT(7)
67*4882a593Smuzhiyun #define STM32F4_SPI_SR_FRE BIT(8)
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun /* STM32F4_SPI_I2SCFGR bit fields */
70*4882a593Smuzhiyun #define STM32F4_SPI_I2SCFGR_I2SMOD BIT(11)
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /* STM32F4 SPI Baud Rate min/max divisor */
73*4882a593Smuzhiyun #define STM32F4_SPI_BR_DIV_MIN (2 << STM32F4_SPI_CR1_BR_MIN)
74*4882a593Smuzhiyun #define STM32F4_SPI_BR_DIV_MAX (2 << STM32F4_SPI_CR1_BR_MAX)
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /* STM32H7 SPI registers */
77*4882a593Smuzhiyun #define STM32H7_SPI_CR1 0x00
78*4882a593Smuzhiyun #define STM32H7_SPI_CR2 0x04
79*4882a593Smuzhiyun #define STM32H7_SPI_CFG1 0x08
80*4882a593Smuzhiyun #define STM32H7_SPI_CFG2 0x0C
81*4882a593Smuzhiyun #define STM32H7_SPI_IER 0x10
82*4882a593Smuzhiyun #define STM32H7_SPI_SR 0x14
83*4882a593Smuzhiyun #define STM32H7_SPI_IFCR 0x18
84*4882a593Smuzhiyun #define STM32H7_SPI_TXDR 0x20
85*4882a593Smuzhiyun #define STM32H7_SPI_RXDR 0x30
86*4882a593Smuzhiyun #define STM32H7_SPI_I2SCFGR 0x50
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun /* STM32H7_SPI_CR1 bit fields */
89*4882a593Smuzhiyun #define STM32H7_SPI_CR1_SPE BIT(0)
90*4882a593Smuzhiyun #define STM32H7_SPI_CR1_MASRX BIT(8)
91*4882a593Smuzhiyun #define STM32H7_SPI_CR1_CSTART BIT(9)
92*4882a593Smuzhiyun #define STM32H7_SPI_CR1_CSUSP BIT(10)
93*4882a593Smuzhiyun #define STM32H7_SPI_CR1_HDDIR BIT(11)
94*4882a593Smuzhiyun #define STM32H7_SPI_CR1_SSI BIT(12)
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /* STM32H7_SPI_CR2 bit fields */
97*4882a593Smuzhiyun #define STM32H7_SPI_CR2_TSIZE_SHIFT 0
98*4882a593Smuzhiyun #define STM32H7_SPI_CR2_TSIZE GENMASK(15, 0)
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun /* STM32H7_SPI_CFG1 bit fields */
101*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_DSIZE_SHIFT 0
102*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_DSIZE GENMASK(4, 0)
103*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_FTHLV_SHIFT 5
104*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_FTHLV GENMASK(8, 5)
105*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_RXDMAEN BIT(14)
106*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_TXDMAEN BIT(15)
107*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_MBR_SHIFT 28
108*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_MBR GENMASK(30, 28)
109*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_MBR_MIN 0
110*4882a593Smuzhiyun #define STM32H7_SPI_CFG1_MBR_MAX (GENMASK(30, 28) >> 28)
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /* STM32H7_SPI_CFG2 bit fields */
113*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_MIDI_SHIFT 4
114*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_MIDI GENMASK(7, 4)
115*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_COMM_SHIFT 17
116*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_COMM GENMASK(18, 17)
117*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_SP_SHIFT 19
118*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_SP GENMASK(21, 19)
119*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_MASTER BIT(22)
120*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_LSBFRST BIT(23)
121*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_CPHA BIT(24)
122*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_CPOL BIT(25)
123*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_SSM BIT(26)
124*4882a593Smuzhiyun #define STM32H7_SPI_CFG2_AFCNTR BIT(31)
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun /* STM32H7_SPI_IER bit fields */
127*4882a593Smuzhiyun #define STM32H7_SPI_IER_RXPIE BIT(0)
128*4882a593Smuzhiyun #define STM32H7_SPI_IER_TXPIE BIT(1)
129*4882a593Smuzhiyun #define STM32H7_SPI_IER_DXPIE BIT(2)
130*4882a593Smuzhiyun #define STM32H7_SPI_IER_EOTIE BIT(3)
131*4882a593Smuzhiyun #define STM32H7_SPI_IER_TXTFIE BIT(4)
132*4882a593Smuzhiyun #define STM32H7_SPI_IER_OVRIE BIT(6)
133*4882a593Smuzhiyun #define STM32H7_SPI_IER_MODFIE BIT(9)
134*4882a593Smuzhiyun #define STM32H7_SPI_IER_ALL GENMASK(10, 0)
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun /* STM32H7_SPI_SR bit fields */
137*4882a593Smuzhiyun #define STM32H7_SPI_SR_RXP BIT(0)
138*4882a593Smuzhiyun #define STM32H7_SPI_SR_TXP BIT(1)
139*4882a593Smuzhiyun #define STM32H7_SPI_SR_EOT BIT(3)
140*4882a593Smuzhiyun #define STM32H7_SPI_SR_OVR BIT(6)
141*4882a593Smuzhiyun #define STM32H7_SPI_SR_MODF BIT(9)
142*4882a593Smuzhiyun #define STM32H7_SPI_SR_SUSP BIT(11)
143*4882a593Smuzhiyun #define STM32H7_SPI_SR_RXPLVL_SHIFT 13
144*4882a593Smuzhiyun #define STM32H7_SPI_SR_RXPLVL GENMASK(14, 13)
145*4882a593Smuzhiyun #define STM32H7_SPI_SR_RXWNE BIT(15)
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun /* STM32H7_SPI_IFCR bit fields */
148*4882a593Smuzhiyun #define STM32H7_SPI_IFCR_ALL GENMASK(11, 3)
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun /* STM32H7_SPI_I2SCFGR bit fields */
151*4882a593Smuzhiyun #define STM32H7_SPI_I2SCFGR_I2SMOD BIT(0)
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun /* STM32H7 SPI Master Baud Rate min/max divisor */
154*4882a593Smuzhiyun #define STM32H7_SPI_MBR_DIV_MIN (2 << STM32H7_SPI_CFG1_MBR_MIN)
155*4882a593Smuzhiyun #define STM32H7_SPI_MBR_DIV_MAX (2 << STM32H7_SPI_CFG1_MBR_MAX)
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun /* STM32H7 SPI Communication mode */
158*4882a593Smuzhiyun #define STM32H7_SPI_FULL_DUPLEX 0
159*4882a593Smuzhiyun #define STM32H7_SPI_SIMPLEX_TX 1
160*4882a593Smuzhiyun #define STM32H7_SPI_SIMPLEX_RX 2
161*4882a593Smuzhiyun #define STM32H7_SPI_HALF_DUPLEX 3
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun /* SPI Communication type */
164*4882a593Smuzhiyun #define SPI_FULL_DUPLEX 0
165*4882a593Smuzhiyun #define SPI_SIMPLEX_TX 1
166*4882a593Smuzhiyun #define SPI_SIMPLEX_RX 2
167*4882a593Smuzhiyun #define SPI_3WIRE_TX 3
168*4882a593Smuzhiyun #define SPI_3WIRE_RX 4
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun #define SPI_1HZ_NS 1000000000
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun /*
173*4882a593Smuzhiyun * use PIO for small transfers, avoiding DMA setup/teardown overhead for drivers
174*4882a593Smuzhiyun * without fifo buffers.
175*4882a593Smuzhiyun */
176*4882a593Smuzhiyun #define SPI_DMA_MIN_BYTES 16
177*4882a593Smuzhiyun
/**
 * struct stm32_spi_reg - stm32 SPI register & bitfield desc
 * @reg: register offset
 * @mask: bitfield mask
 * @shift: left shift
 *
 * Describes one register/bitfield pair so that compatible-specific code can
 * address the same logical field at different offsets on F4 and H7.
 */
struct stm32_spi_reg {
	int reg;	/* register offset from the SPI instance base */
	int mask;	/* bitfield mask within that register */
	int shift;	/* left shift to position a value in the field */
};
189*4882a593Smuzhiyun
/**
 * struct stm32_spi_regspec - stm32 registers definition, compatible dependent data
 * @en: enable register and SPI enable bit
 * @dma_rx_en: SPI DMA RX enable register and SPI DMA RX enable bit
 * @dma_tx_en: SPI DMA TX enable register and SPI DMA TX enable bit
 * @cpol: clock polarity register and polarity bit
 * @cpha: clock phase register and phase bit
 * @lsb_first: LSB transmitted first register and bit
 * @br: baud rate register and bitfields
 * @rx: SPI RX data register
 * @tx: SPI TX data register
 */
struct stm32_spi_regspec {
	const struct stm32_spi_reg en;
	const struct stm32_spi_reg dma_rx_en;
	const struct stm32_spi_reg dma_tx_en;
	const struct stm32_spi_reg cpol;
	const struct stm32_spi_reg cpha;
	const struct stm32_spi_reg lsb_first;
	const struct stm32_spi_reg br;
	const struct stm32_spi_reg rx;
	const struct stm32_spi_reg tx;
};
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun struct stm32_spi;
215*4882a593Smuzhiyun
/**
 * struct stm32_spi_cfg - stm32 compatible configuration data
 * @regs: registers descriptions
 * @get_fifo_size: routine to get fifo size
 * @get_bpw_mask: routine to get bits per word mask
 * @disable: routine to disable controller
 * @config: routine to configure controller as SPI Master
 * @set_bpw: routine to configure registers to for bits per word
 * @set_mode: routine to configure registers to desired mode
 * @set_data_idleness: optional routine to configure registers to desired idle
 * time between frames (if driver has this functionality)
 * @set_number_of_data: optional routine to configure registers to desired
 * number of data (if driver has this functionality)
 * @can_dma: routine to determine if the transfer is eligible for DMA use
 * @transfer_one_dma_start: routine to start transfer a single spi_transfer
 * using DMA
 * @dma_rx_cb: routine to call after DMA RX channel operation is complete
 * @dma_tx_cb: routine to call after DMA TX channel operation is complete
 * @transfer_one_irq: routine to configure interrupts for driver
 * @irq_handler_event: Interrupt handler for SPI controller events
 * @irq_handler_thread: thread of interrupt handler for SPI controller
 * @baud_rate_div_min: minimum baud rate divisor
 * @baud_rate_div_max: maximum baud rate divisor
 * @has_fifo: boolean to know if fifo is used for driver
 */
struct stm32_spi_cfg {
	const struct stm32_spi_regspec *regs;
	int (*get_fifo_size)(struct stm32_spi *spi);
	int (*get_bpw_mask)(struct stm32_spi *spi);
	void (*disable)(struct stm32_spi *spi);
	int (*config)(struct stm32_spi *spi);
	void (*set_bpw)(struct stm32_spi *spi);
	int (*set_mode)(struct stm32_spi *spi, unsigned int comm_type);
	void (*set_data_idleness)(struct stm32_spi *spi, u32 length);
	int (*set_number_of_data)(struct stm32_spi *spi, u32 length);
	void (*transfer_one_dma_start)(struct stm32_spi *spi);
	void (*dma_rx_cb)(void *data);
	void (*dma_tx_cb)(void *data);
	int (*transfer_one_irq)(struct stm32_spi *spi);
	irqreturn_t (*irq_handler_event)(int irq, void *dev_id);
	irqreturn_t (*irq_handler_thread)(int irq, void *dev_id);
	unsigned int baud_rate_div_min;
	unsigned int baud_rate_div_max;
	bool has_fifo;
};
262*4882a593Smuzhiyun
/**
 * struct stm32_spi - private data of the SPI controller
 * @dev: driver model representation of the controller
 * @master: controller master interface
 * @cfg: compatible configuration data
 * @base: virtual memory area
 * @clk: hw kernel clock feeding the SPI clock generator
 * @clk_rate: rate of the hw kernel clock feeding the SPI clock generator
 * @rst: SPI controller reset line
 * @lock: prevent I/O concurrent access
 * @irq: SPI controller interrupt line
 * @fifo_size: size of the embedded fifo in bytes
 * @cur_midi: master inter-data idleness in ns
 * @cur_speed: speed configured in Hz
 * @cur_bpw: number of bits in a single SPI data frame
 * @cur_fthlv: fifo threshold level (data frames in a single data packet)
 * @cur_comm: SPI communication mode
 * @cur_xferlen: current transfer length in bytes
 * @cur_usedma: boolean to know if dma is used in current transfer
 * @tx_buf: data to be written, or NULL
 * @rx_buf: data to be read, or NULL
 * @tx_len: number of data to be written in bytes
 * @rx_len: number of data to be read in bytes
 * @dma_tx: dma channel for TX transfer
 * @dma_rx: dma channel for RX transfer
 * @phys_addr: SPI registers physical base address
 */
struct stm32_spi {
	struct device *dev;
	struct spi_master *master;
	const struct stm32_spi_cfg *cfg;
	void __iomem *base;
	struct clk *clk;
	u32 clk_rate;
	struct reset_control *rst;
	spinlock_t lock; /* prevent I/O concurrent access */
	int irq;
	unsigned int fifo_size;

	/* "cur_*" fields describe the transfer currently in flight */
	unsigned int cur_midi;
	unsigned int cur_speed;
	unsigned int cur_bpw;
	unsigned int cur_fthlv;
	unsigned int cur_comm;
	unsigned int cur_xferlen;
	bool cur_usedma;

	const void *tx_buf;
	void *rx_buf;
	int tx_len;
	int rx_len;
	struct dma_chan *dma_tx;
	struct dma_chan *dma_rx;
	dma_addr_t phys_addr;
};
318*4882a593Smuzhiyun
/* STM32F4: all control fields live in CR1/CR2; a single DR is used for RX/TX */
static const struct stm32_spi_regspec stm32f4_spi_regspec = {
	.en = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE },

	.dma_rx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_RXDMAEN },
	.dma_tx_en = { STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN },

	.cpol = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPOL },
	.cpha = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_CPHA },
	.lsb_first = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_LSBFRST },
	.br = { STM32F4_SPI_CR1, STM32F4_SPI_CR1_BR, STM32F4_SPI_CR1_BR_SHIFT },

	.rx = { STM32F4_SPI_DR },
	.tx = { STM32F4_SPI_DR },
};
333*4882a593Smuzhiyun
static const struct stm32_spi_regspec stm32h7_spi_regspec = {
	/* SPI data transfer is enabled but spi_ker_ck is idle.
	 * CFG1 and CFG2 registers are write protected when SPE is enabled.
	 */
	.en = { STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE },

	.dma_rx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_RXDMAEN },
	.dma_tx_en = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN },

	.cpol = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPOL },
	.cpha = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_CPHA },
	.lsb_first = { STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_LSBFRST },
	.br = { STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_MBR,
		STM32H7_SPI_CFG1_MBR_SHIFT },

	/* H7 has distinct RX and TX data registers, unlike F4's shared DR */
	.rx = { STM32H7_SPI_RXDR },
	.tx = { STM32H7_SPI_TXDR },
};
352*4882a593Smuzhiyun
/* Read-modify-write helper: set @bits in the register at @offset */
static inline void stm32_spi_set_bits(struct stm32_spi *spi,
				      u32 offset, u32 bits)
{
	u32 val = readl_relaxed(spi->base + offset);

	val |= bits;
	writel_relaxed(val, spi->base + offset);
}
359*4882a593Smuzhiyun
/* Read-modify-write helper: clear @bits in the register at @offset */
static inline void stm32_spi_clr_bits(struct stm32_spi *spi,
				      u32 offset, u32 bits)
{
	u32 val = readl_relaxed(spi->base + offset);

	val &= ~bits;
	writel_relaxed(val, spi->base + offset);
}
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun /**
368*4882a593Smuzhiyun * stm32h7_spi_get_fifo_size - Return fifo size
369*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
370*4882a593Smuzhiyun */
stm32h7_spi_get_fifo_size(struct stm32_spi * spi)371*4882a593Smuzhiyun static int stm32h7_spi_get_fifo_size(struct stm32_spi *spi)
372*4882a593Smuzhiyun {
373*4882a593Smuzhiyun unsigned long flags;
374*4882a593Smuzhiyun u32 count = 0;
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun while (readl_relaxed(spi->base + STM32H7_SPI_SR) & STM32H7_SPI_SR_TXP)
381*4882a593Smuzhiyun writeb_relaxed(++count, spi->base + STM32H7_SPI_TXDR);
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);
384*4882a593Smuzhiyun
385*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun dev_dbg(spi->dev, "%d x 8-bit fifo size\n", count);
388*4882a593Smuzhiyun
389*4882a593Smuzhiyun return count;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /**
393*4882a593Smuzhiyun * stm32f4_spi_get_bpw_mask - Return bits per word mask
394*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
395*4882a593Smuzhiyun */
stm32f4_spi_get_bpw_mask(struct stm32_spi * spi)396*4882a593Smuzhiyun static int stm32f4_spi_get_bpw_mask(struct stm32_spi *spi)
397*4882a593Smuzhiyun {
398*4882a593Smuzhiyun dev_dbg(spi->dev, "8-bit or 16-bit data frame supported\n");
399*4882a593Smuzhiyun return SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun /**
403*4882a593Smuzhiyun * stm32h7_spi_get_bpw_mask - Return bits per word mask
404*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
405*4882a593Smuzhiyun */
stm32h7_spi_get_bpw_mask(struct stm32_spi * spi)406*4882a593Smuzhiyun static int stm32h7_spi_get_bpw_mask(struct stm32_spi *spi)
407*4882a593Smuzhiyun {
408*4882a593Smuzhiyun unsigned long flags;
409*4882a593Smuzhiyun u32 cfg1, max_bpw;
410*4882a593Smuzhiyun
411*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun /*
414*4882a593Smuzhiyun * The most significant bit at DSIZE bit field is reserved when the
415*4882a593Smuzhiyun * maximum data size of periperal instances is limited to 16-bit
416*4882a593Smuzhiyun */
417*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_DSIZE);
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun cfg1 = readl_relaxed(spi->base + STM32H7_SPI_CFG1);
420*4882a593Smuzhiyun max_bpw = (cfg1 & STM32H7_SPI_CFG1_DSIZE) >>
421*4882a593Smuzhiyun STM32H7_SPI_CFG1_DSIZE_SHIFT;
422*4882a593Smuzhiyun max_bpw += 1;
423*4882a593Smuzhiyun
424*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun dev_dbg(spi->dev, "%d-bit maximum data frame\n", max_bpw);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun return SPI_BPW_RANGE_MASK(4, max_bpw);
429*4882a593Smuzhiyun }
430*4882a593Smuzhiyun
431*4882a593Smuzhiyun /**
432*4882a593Smuzhiyun * stm32_spi_prepare_mbr - Determine baud rate divisor value
433*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
434*4882a593Smuzhiyun * @speed_hz: requested speed
435*4882a593Smuzhiyun * @min_div: minimum baud rate divisor
436*4882a593Smuzhiyun * @max_div: maximum baud rate divisor
437*4882a593Smuzhiyun *
438*4882a593Smuzhiyun * Return baud rate divisor value in case of success or -EINVAL
439*4882a593Smuzhiyun */
stm32_spi_prepare_mbr(struct stm32_spi * spi,u32 speed_hz,u32 min_div,u32 max_div)440*4882a593Smuzhiyun static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
441*4882a593Smuzhiyun u32 min_div, u32 max_div)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun u32 div, mbrdiv;
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun /* Ensure spi->clk_rate is even */
446*4882a593Smuzhiyun div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz);
447*4882a593Smuzhiyun
448*4882a593Smuzhiyun /*
449*4882a593Smuzhiyun * SPI framework set xfer->speed_hz to master->max_speed_hz if
450*4882a593Smuzhiyun * xfer->speed_hz is greater than master->max_speed_hz, and it returns
451*4882a593Smuzhiyun * an error when xfer->speed_hz is lower than master->min_speed_hz, so
452*4882a593Smuzhiyun * no need to check it there.
453*4882a593Smuzhiyun * However, we need to ensure the following calculations.
454*4882a593Smuzhiyun */
455*4882a593Smuzhiyun if ((div < min_div) || (div > max_div))
456*4882a593Smuzhiyun return -EINVAL;
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun /* Determine the first power of 2 greater than or equal to div */
459*4882a593Smuzhiyun if (div & (div - 1))
460*4882a593Smuzhiyun mbrdiv = fls(div);
461*4882a593Smuzhiyun else
462*4882a593Smuzhiyun mbrdiv = fls(div) - 1;
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun spi->cur_speed = spi->clk_rate / (1 << mbrdiv);
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun return mbrdiv - 1;
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun /**
470*4882a593Smuzhiyun * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
471*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
472*4882a593Smuzhiyun * @xfer_len: length of the message to be transferred
473*4882a593Smuzhiyun */
stm32h7_spi_prepare_fthlv(struct stm32_spi * spi,u32 xfer_len)474*4882a593Smuzhiyun static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun u32 fthlv, half_fifo, packet;
477*4882a593Smuzhiyun
478*4882a593Smuzhiyun /* data packet should not exceed 1/2 of fifo space */
479*4882a593Smuzhiyun half_fifo = (spi->fifo_size / 2);
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun /* data_packet should not exceed transfer length */
482*4882a593Smuzhiyun if (half_fifo > xfer_len)
483*4882a593Smuzhiyun packet = xfer_len;
484*4882a593Smuzhiyun else
485*4882a593Smuzhiyun packet = half_fifo;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun if (spi->cur_bpw <= 8)
488*4882a593Smuzhiyun fthlv = packet;
489*4882a593Smuzhiyun else if (spi->cur_bpw <= 16)
490*4882a593Smuzhiyun fthlv = packet / 2;
491*4882a593Smuzhiyun else
492*4882a593Smuzhiyun fthlv = packet / 4;
493*4882a593Smuzhiyun
494*4882a593Smuzhiyun /* align packet size with data registers access */
495*4882a593Smuzhiyun if (spi->cur_bpw > 8)
496*4882a593Smuzhiyun fthlv += (fthlv % 2) ? 1 : 0;
497*4882a593Smuzhiyun else
498*4882a593Smuzhiyun fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
499*4882a593Smuzhiyun
500*4882a593Smuzhiyun if (!fthlv)
501*4882a593Smuzhiyun fthlv = 1;
502*4882a593Smuzhiyun
503*4882a593Smuzhiyun return fthlv;
504*4882a593Smuzhiyun }
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun /**
507*4882a593Smuzhiyun * stm32f4_spi_write_tx - Write bytes to Transmit Data Register
508*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
509*4882a593Smuzhiyun *
510*4882a593Smuzhiyun * Read from tx_buf depends on remaining bytes to avoid to read beyond
511*4882a593Smuzhiyun * tx_buf end.
512*4882a593Smuzhiyun */
stm32f4_spi_write_tx(struct stm32_spi * spi)513*4882a593Smuzhiyun static void stm32f4_spi_write_tx(struct stm32_spi *spi)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun if ((spi->tx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
516*4882a593Smuzhiyun STM32F4_SPI_SR_TXE)) {
517*4882a593Smuzhiyun u32 offs = spi->cur_xferlen - spi->tx_len;
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun if (spi->cur_bpw == 16) {
520*4882a593Smuzhiyun const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
521*4882a593Smuzhiyun
522*4882a593Smuzhiyun writew_relaxed(*tx_buf16, spi->base + STM32F4_SPI_DR);
523*4882a593Smuzhiyun spi->tx_len -= sizeof(u16);
524*4882a593Smuzhiyun } else {
525*4882a593Smuzhiyun const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun writeb_relaxed(*tx_buf8, spi->base + STM32F4_SPI_DR);
528*4882a593Smuzhiyun spi->tx_len -= sizeof(u8);
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
533*4882a593Smuzhiyun }
534*4882a593Smuzhiyun
535*4882a593Smuzhiyun /**
536*4882a593Smuzhiyun * stm32h7_spi_write_txfifo - Write bytes in Transmit Data Register
537*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
538*4882a593Smuzhiyun *
539*4882a593Smuzhiyun * Read from tx_buf depends on remaining bytes to avoid to read beyond
540*4882a593Smuzhiyun * tx_buf end.
541*4882a593Smuzhiyun */
stm32h7_spi_write_txfifo(struct stm32_spi * spi)542*4882a593Smuzhiyun static void stm32h7_spi_write_txfifo(struct stm32_spi *spi)
543*4882a593Smuzhiyun {
544*4882a593Smuzhiyun while ((spi->tx_len > 0) &&
545*4882a593Smuzhiyun (readl_relaxed(spi->base + STM32H7_SPI_SR) &
546*4882a593Smuzhiyun STM32H7_SPI_SR_TXP)) {
547*4882a593Smuzhiyun u32 offs = spi->cur_xferlen - spi->tx_len;
548*4882a593Smuzhiyun
549*4882a593Smuzhiyun if (spi->tx_len >= sizeof(u32)) {
550*4882a593Smuzhiyun const u32 *tx_buf32 = (const u32 *)(spi->tx_buf + offs);
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun writel_relaxed(*tx_buf32, spi->base + STM32H7_SPI_TXDR);
553*4882a593Smuzhiyun spi->tx_len -= sizeof(u32);
554*4882a593Smuzhiyun } else if (spi->tx_len >= sizeof(u16)) {
555*4882a593Smuzhiyun const u16 *tx_buf16 = (const u16 *)(spi->tx_buf + offs);
556*4882a593Smuzhiyun
557*4882a593Smuzhiyun writew_relaxed(*tx_buf16, spi->base + STM32H7_SPI_TXDR);
558*4882a593Smuzhiyun spi->tx_len -= sizeof(u16);
559*4882a593Smuzhiyun } else {
560*4882a593Smuzhiyun const u8 *tx_buf8 = (const u8 *)(spi->tx_buf + offs);
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun writeb_relaxed(*tx_buf8, spi->base + STM32H7_SPI_TXDR);
563*4882a593Smuzhiyun spi->tx_len -= sizeof(u8);
564*4882a593Smuzhiyun }
565*4882a593Smuzhiyun }
566*4882a593Smuzhiyun
567*4882a593Smuzhiyun dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->tx_len);
568*4882a593Smuzhiyun }
569*4882a593Smuzhiyun
570*4882a593Smuzhiyun /**
571*4882a593Smuzhiyun * stm32f4_spi_read_rx - Read bytes from Receive Data Register
572*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
573*4882a593Smuzhiyun *
574*4882a593Smuzhiyun * Write in rx_buf depends on remaining bytes to avoid to write beyond
575*4882a593Smuzhiyun * rx_buf end.
576*4882a593Smuzhiyun */
stm32f4_spi_read_rx(struct stm32_spi * spi)577*4882a593Smuzhiyun static void stm32f4_spi_read_rx(struct stm32_spi *spi)
578*4882a593Smuzhiyun {
579*4882a593Smuzhiyun if ((spi->rx_len > 0) && (readl_relaxed(spi->base + STM32F4_SPI_SR) &
580*4882a593Smuzhiyun STM32F4_SPI_SR_RXNE)) {
581*4882a593Smuzhiyun u32 offs = spi->cur_xferlen - spi->rx_len;
582*4882a593Smuzhiyun
583*4882a593Smuzhiyun if (spi->cur_bpw == 16) {
584*4882a593Smuzhiyun u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);
585*4882a593Smuzhiyun
586*4882a593Smuzhiyun *rx_buf16 = readw_relaxed(spi->base + STM32F4_SPI_DR);
587*4882a593Smuzhiyun spi->rx_len -= sizeof(u16);
588*4882a593Smuzhiyun } else {
589*4882a593Smuzhiyun u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun *rx_buf8 = readb_relaxed(spi->base + STM32F4_SPI_DR);
592*4882a593Smuzhiyun spi->rx_len -= sizeof(u8);
593*4882a593Smuzhiyun }
594*4882a593Smuzhiyun }
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun dev_dbg(spi->dev, "%s: %d bytes left\n", __func__, spi->rx_len);
597*4882a593Smuzhiyun }
598*4882a593Smuzhiyun
/**
 * stm32h7_spi_read_rxfifo - Read bytes in Receive Data Register
 * @spi: pointer to the spi controller data structure
 * @flush: boolean indicating that FIFO should be flushed
 *
 * Write in rx_buf depends on remaining bytes to avoid to write beyond
 * rx_buf end.
 *
 * Drains the RX FIFO using the widest access that fits both the remaining
 * byte count and what the FIFO reports as available (RXWNE = a full 32-bit
 * word is present, RXPLVL = number of frames below a word).
 */
static void stm32h7_spi_read_rxfifo(struct stm32_spi *spi, bool flush)
{
	u32 sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
	u32 rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
		     STM32H7_SPI_SR_RXPLVL_SHIFT;

	/*
	 * Normal path: read while RXP signals data. In flush mode also pull
	 * out the residue indicated by RXWNE/RXPLVL even when RXP is clear.
	 */
	while ((spi->rx_len > 0) &&
	       ((sr & STM32H7_SPI_SR_RXP) ||
		(flush && ((sr & STM32H7_SPI_SR_RXWNE) || (rxplvl > 0))))) {
		u32 offs = spi->cur_xferlen - spi->rx_len;

		/* Prefer a 32-bit pop when 4+ bytes remain or a word is ready */
		if ((spi->rx_len >= sizeof(u32)) ||
		    (flush && (sr & STM32H7_SPI_SR_RXWNE))) {
			u32 *rx_buf32 = (u32 *)(spi->rx_buf + offs);

			*rx_buf32 = readl_relaxed(spi->base + STM32H7_SPI_RXDR);
			spi->rx_len -= sizeof(u32);
		} else if ((spi->rx_len >= sizeof(u16)) ||
			   (flush && (rxplvl >= 2 || spi->cur_bpw > 8))) {
			u16 *rx_buf16 = (u16 *)(spi->rx_buf + offs);

			*rx_buf16 = readw_relaxed(spi->base + STM32H7_SPI_RXDR);
			spi->rx_len -= sizeof(u16);
		} else {
			u8 *rx_buf8 = (u8 *)(spi->rx_buf + offs);

			*rx_buf8 = readb_relaxed(spi->base + STM32H7_SPI_RXDR);
			spi->rx_len -= sizeof(u8);
		}

		/* Refresh FIFO status after each pop */
		sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
		rxplvl = (sr & STM32H7_SPI_SR_RXPLVL) >>
			 STM32H7_SPI_SR_RXPLVL_SHIFT;
	}

	dev_dbg(spi->dev, "%s%s: %d bytes left\n", __func__,
		flush ? "(flush)" : "", spi->rx_len);
}
645*4882a593Smuzhiyun
646*4882a593Smuzhiyun /**
647*4882a593Smuzhiyun * stm32_spi_enable - Enable SPI controller
648*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
649*4882a593Smuzhiyun */
stm32_spi_enable(struct stm32_spi * spi)650*4882a593Smuzhiyun static void stm32_spi_enable(struct stm32_spi *spi)
651*4882a593Smuzhiyun {
652*4882a593Smuzhiyun dev_dbg(spi->dev, "enable controller\n");
653*4882a593Smuzhiyun
654*4882a593Smuzhiyun stm32_spi_set_bits(spi, spi->cfg->regs->en.reg,
655*4882a593Smuzhiyun spi->cfg->regs->en.mask);
656*4882a593Smuzhiyun }
657*4882a593Smuzhiyun
/**
 * stm32f4_spi_disable - Disable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * Masks interrupts, waits for the controller to go idle (BSY = 0),
 * terminates any in-flight DMA, clears SPE and the DMA enables, then
 * clears a possibly pending OVR flag. All under the controller lock.
 */
static void stm32f4_spi_disable(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 sr;

	dev_dbg(spi->dev, "disable controller\n");

	spin_lock_irqsave(&spi->lock, flags);

	/* Nothing to do if the controller is already disabled */
	if (!(readl_relaxed(spi->base + STM32F4_SPI_CR1) &
	      STM32F4_SPI_CR1_SPE)) {
		spin_unlock_irqrestore(&spi->lock, flags);
		return;
	}

	/* Disable interrupts */
	stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXEIE |
						 STM32F4_SPI_CR2_RXNEIE |
						 STM32F4_SPI_CR2_ERRIE);

	/* Wait until BSY = 0 */
	if (readl_relaxed_poll_timeout_atomic(spi->base + STM32F4_SPI_SR,
					      sr, !(sr & STM32F4_SPI_SR_BSY),
					      10, 100000) < 0) {
		dev_warn(spi->dev, "disabling condition timeout\n");
	}

	/* Stop any DMA still running for this transfer */
	if (spi->cur_usedma && spi->dma_tx)
		dmaengine_terminate_all(spi->dma_tx);
	if (spi->cur_usedma && spi->dma_rx)
		dmaengine_terminate_all(spi->dma_rx);

	stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SPE);

	stm32_spi_clr_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_TXDMAEN |
						 STM32F4_SPI_CR2_RXDMAEN);

	/* Sequence to clear OVR flag */
	readl_relaxed(spi->base + STM32F4_SPI_DR);
	readl_relaxed(spi->base + STM32F4_SPI_SR);

	spin_unlock_irqrestore(&spi->lock, flags);
}
705*4882a593Smuzhiyun
/**
 * stm32h7_spi_disable - Disable SPI controller
 * @spi: pointer to the spi controller data structure
 *
 * RX-Fifo is flushed when SPI controller is disabled. To prevent any data
 * loss, use stm32h7_spi_read_rxfifo(flush) to read the remaining bytes in
 * RX-Fifo.
 * Normally, if TSIZE has been configured, we should relax the hardware at the
 * reception of the EOT interrupt. But in case of error, EOT will not be
 * raised. So the subsystem unprepare_message call allows us to properly
 * complete the transfer from an hardware point of view.
 */
static void stm32h7_spi_disable(struct stm32_spi *spi)
{
	unsigned long flags;
	u32 cr1, sr;

	dev_dbg(spi->dev, "disable controller\n");

	spin_lock_irqsave(&spi->lock, flags);

	cr1 = readl_relaxed(spi->base + STM32H7_SPI_CR1);

	/* Nothing to do if the controller is already disabled */
	if (!(cr1 & STM32H7_SPI_CR1_SPE)) {
		spin_unlock_irqrestore(&spi->lock, flags);
		return;
	}

	/*
	 * Wait on EOT or suspend the flow: poll until EOT is cleared; on
	 * timeout, request a suspend (CSUSP) of any started transfer
	 * (CSTART) and wait for SUSP acknowledgment.
	 */
	if (readl_relaxed_poll_timeout_atomic(spi->base + STM32H7_SPI_SR,
					      sr, !(sr & STM32H7_SPI_SR_EOT),
					      10, 100000) < 0) {
		if (cr1 & STM32H7_SPI_CR1_CSTART) {
			writel_relaxed(cr1 | STM32H7_SPI_CR1_CSUSP,
				       spi->base + STM32H7_SPI_CR1);
			if (readl_relaxed_poll_timeout_atomic(
						spi->base + STM32H7_SPI_SR,
						sr, !(sr & STM32H7_SPI_SR_SUSP),
						10, 100000) < 0)
				dev_warn(spi->dev,
					 "Suspend request timeout\n");
		}
	}

	/* Rescue any bytes still in the RX FIFO before SPE clears it */
	if (!spi->cur_usedma && spi->rx_buf && (spi->rx_len > 0))
		stm32h7_spi_read_rxfifo(spi, true);

	if (spi->cur_usedma && spi->dma_tx)
		dmaengine_terminate_all(spi->dma_tx);
	if (spi->cur_usedma && spi->dma_rx)
		dmaengine_terminate_all(spi->dma_rx);

	stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SPE);

	stm32_spi_clr_bits(spi, STM32H7_SPI_CFG1, STM32H7_SPI_CFG1_TXDMAEN |
						  STM32H7_SPI_CFG1_RXDMAEN);

	/* Disable interrupts and clear status flags */
	writel_relaxed(0, spi->base + STM32H7_SPI_IER);
	writel_relaxed(STM32H7_SPI_IFCR_ALL, spi->base + STM32H7_SPI_IFCR);

	spin_unlock_irqrestore(&spi->lock, flags);
}
769*4882a593Smuzhiyun
770*4882a593Smuzhiyun /**
771*4882a593Smuzhiyun * stm32_spi_can_dma - Determine if the transfer is eligible for DMA use
772*4882a593Smuzhiyun * @master: controller master interface
773*4882a593Smuzhiyun * @spi_dev: pointer to the spi device
774*4882a593Smuzhiyun * @transfer: pointer to spi transfer
775*4882a593Smuzhiyun *
776*4882a593Smuzhiyun * If driver has fifo and the current transfer size is greater than fifo size,
777*4882a593Smuzhiyun * use DMA. Otherwise use DMA for transfer longer than defined DMA min bytes.
778*4882a593Smuzhiyun */
stm32_spi_can_dma(struct spi_master * master,struct spi_device * spi_dev,struct spi_transfer * transfer)779*4882a593Smuzhiyun static bool stm32_spi_can_dma(struct spi_master *master,
780*4882a593Smuzhiyun struct spi_device *spi_dev,
781*4882a593Smuzhiyun struct spi_transfer *transfer)
782*4882a593Smuzhiyun {
783*4882a593Smuzhiyun unsigned int dma_size;
784*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
785*4882a593Smuzhiyun
786*4882a593Smuzhiyun if (spi->cfg->has_fifo)
787*4882a593Smuzhiyun dma_size = spi->fifo_size;
788*4882a593Smuzhiyun else
789*4882a593Smuzhiyun dma_size = SPI_DMA_MIN_BYTES;
790*4882a593Smuzhiyun
791*4882a593Smuzhiyun dev_dbg(spi->dev, "%s: %s\n", __func__,
792*4882a593Smuzhiyun (transfer->len > dma_size) ? "true" : "false");
793*4882a593Smuzhiyun
794*4882a593Smuzhiyun return (transfer->len > dma_size);
795*4882a593Smuzhiyun }
796*4882a593Smuzhiyun
797*4882a593Smuzhiyun /**
798*4882a593Smuzhiyun * stm32f4_spi_irq_event - Interrupt handler for SPI controller events
799*4882a593Smuzhiyun * @irq: interrupt line
800*4882a593Smuzhiyun * @dev_id: SPI controller master interface
801*4882a593Smuzhiyun */
stm32f4_spi_irq_event(int irq,void * dev_id)802*4882a593Smuzhiyun static irqreturn_t stm32f4_spi_irq_event(int irq, void *dev_id)
803*4882a593Smuzhiyun {
804*4882a593Smuzhiyun struct spi_master *master = dev_id;
805*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
806*4882a593Smuzhiyun u32 sr, mask = 0;
807*4882a593Smuzhiyun bool end = false;
808*4882a593Smuzhiyun
809*4882a593Smuzhiyun spin_lock(&spi->lock);
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun sr = readl_relaxed(spi->base + STM32F4_SPI_SR);
812*4882a593Smuzhiyun /*
813*4882a593Smuzhiyun * BSY flag is not handled in interrupt but it is normal behavior when
814*4882a593Smuzhiyun * this flag is set.
815*4882a593Smuzhiyun */
816*4882a593Smuzhiyun sr &= ~STM32F4_SPI_SR_BSY;
817*4882a593Smuzhiyun
818*4882a593Smuzhiyun if (!spi->cur_usedma && (spi->cur_comm == SPI_SIMPLEX_TX ||
819*4882a593Smuzhiyun spi->cur_comm == SPI_3WIRE_TX)) {
820*4882a593Smuzhiyun /* OVR flag shouldn't be handled for TX only mode */
821*4882a593Smuzhiyun sr &= ~STM32F4_SPI_SR_OVR | STM32F4_SPI_SR_RXNE;
822*4882a593Smuzhiyun mask |= STM32F4_SPI_SR_TXE;
823*4882a593Smuzhiyun }
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun if (!spi->cur_usedma && (spi->cur_comm == SPI_FULL_DUPLEX ||
826*4882a593Smuzhiyun spi->cur_comm == SPI_SIMPLEX_RX ||
827*4882a593Smuzhiyun spi->cur_comm == SPI_3WIRE_RX)) {
828*4882a593Smuzhiyun /* TXE flag is set and is handled when RXNE flag occurs */
829*4882a593Smuzhiyun sr &= ~STM32F4_SPI_SR_TXE;
830*4882a593Smuzhiyun mask |= STM32F4_SPI_SR_RXNE | STM32F4_SPI_SR_OVR;
831*4882a593Smuzhiyun }
832*4882a593Smuzhiyun
833*4882a593Smuzhiyun if (!(sr & mask)) {
834*4882a593Smuzhiyun dev_dbg(spi->dev, "spurious IT (sr=0x%08x)\n", sr);
835*4882a593Smuzhiyun spin_unlock(&spi->lock);
836*4882a593Smuzhiyun return IRQ_NONE;
837*4882a593Smuzhiyun }
838*4882a593Smuzhiyun
839*4882a593Smuzhiyun if (sr & STM32F4_SPI_SR_OVR) {
840*4882a593Smuzhiyun dev_warn(spi->dev, "Overrun: received value discarded\n");
841*4882a593Smuzhiyun
842*4882a593Smuzhiyun /* Sequence to clear OVR flag */
843*4882a593Smuzhiyun readl_relaxed(spi->base + STM32F4_SPI_DR);
844*4882a593Smuzhiyun readl_relaxed(spi->base + STM32F4_SPI_SR);
845*4882a593Smuzhiyun
846*4882a593Smuzhiyun /*
847*4882a593Smuzhiyun * If overrun is detected, it means that something went wrong,
848*4882a593Smuzhiyun * so stop the current transfer. Transfer can wait for next
849*4882a593Smuzhiyun * RXNE but DR is already read and end never happens.
850*4882a593Smuzhiyun */
851*4882a593Smuzhiyun end = true;
852*4882a593Smuzhiyun goto end_irq;
853*4882a593Smuzhiyun }
854*4882a593Smuzhiyun
855*4882a593Smuzhiyun if (sr & STM32F4_SPI_SR_TXE) {
856*4882a593Smuzhiyun if (spi->tx_buf)
857*4882a593Smuzhiyun stm32f4_spi_write_tx(spi);
858*4882a593Smuzhiyun if (spi->tx_len == 0)
859*4882a593Smuzhiyun end = true;
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun
862*4882a593Smuzhiyun if (sr & STM32F4_SPI_SR_RXNE) {
863*4882a593Smuzhiyun stm32f4_spi_read_rx(spi);
864*4882a593Smuzhiyun if (spi->rx_len == 0)
865*4882a593Smuzhiyun end = true;
866*4882a593Smuzhiyun else if (spi->tx_buf)/* Load data for discontinuous mode */
867*4882a593Smuzhiyun stm32f4_spi_write_tx(spi);
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun
870*4882a593Smuzhiyun end_irq:
871*4882a593Smuzhiyun if (end) {
872*4882a593Smuzhiyun /* Immediately disable interrupts to do not generate new one */
873*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32F4_SPI_CR2,
874*4882a593Smuzhiyun STM32F4_SPI_CR2_TXEIE |
875*4882a593Smuzhiyun STM32F4_SPI_CR2_RXNEIE |
876*4882a593Smuzhiyun STM32F4_SPI_CR2_ERRIE);
877*4882a593Smuzhiyun spin_unlock(&spi->lock);
878*4882a593Smuzhiyun return IRQ_WAKE_THREAD;
879*4882a593Smuzhiyun }
880*4882a593Smuzhiyun
881*4882a593Smuzhiyun spin_unlock(&spi->lock);
882*4882a593Smuzhiyun return IRQ_HANDLED;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun /**
886*4882a593Smuzhiyun * stm32f4_spi_irq_thread - Thread of interrupt handler for SPI controller
887*4882a593Smuzhiyun * @irq: interrupt line
888*4882a593Smuzhiyun * @dev_id: SPI controller master interface
889*4882a593Smuzhiyun */
stm32f4_spi_irq_thread(int irq,void * dev_id)890*4882a593Smuzhiyun static irqreturn_t stm32f4_spi_irq_thread(int irq, void *dev_id)
891*4882a593Smuzhiyun {
892*4882a593Smuzhiyun struct spi_master *master = dev_id;
893*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
894*4882a593Smuzhiyun
895*4882a593Smuzhiyun spi_finalize_current_transfer(master);
896*4882a593Smuzhiyun stm32f4_spi_disable(spi);
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun return IRQ_HANDLED;
899*4882a593Smuzhiyun }
900*4882a593Smuzhiyun
/**
 * stm32h7_spi_irq_thread - Thread of interrupt handler for SPI controller
 * @irq: interrupt line
 * @dev_id: SPI controller master interface
 *
 * Handles SUSP/MODF/OVR/EOT/TXP/RXP events, acknowledges the serviced
 * flags in IFCR and, on end of transfer or error, disables the controller
 * and finalizes the transfer.
 */
static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct stm32_spi *spi = spi_master_get_devdata(master);
	u32 sr, ier, mask;
	unsigned long flags;
	bool end = false;

	spin_lock_irqsave(&spi->lock, flags);

	sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
	ier = readl_relaxed(spi->base + STM32H7_SPI_IER);

	mask = ier;
	/*
	 * EOTIE enables irq from EOT, SUSP and TXC events. We need to set
	 * SUSP to acknowledge it later. TXC is automatically cleared
	 */

	mask |= STM32H7_SPI_SR_SUSP;
	/*
	 * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP
	 * are set. So in case of Full-Duplex, need to poll TXP and RXP event.
	 */
	if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
		mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;

	if (!(sr & mask)) {
		dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
			 sr, ier);
		spin_unlock_irqrestore(&spi->lock, flags);
		return IRQ_NONE;
	}

	if (sr & STM32H7_SPI_SR_SUSP) {
		/* Rate-limit the message: SUSP can fire repeatedly */
		static DEFINE_RATELIMIT_STATE(rs,
					      DEFAULT_RATELIMIT_INTERVAL * 10,
					      1);
		ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE);
		if (__ratelimit(&rs))
			dev_dbg_ratelimited(spi->dev, "Communication suspended\n");
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, false);
		/*
		 * If communication is suspended while using DMA, it means
		 * that something went wrong, so stop the current transfer
		 */
		if (spi->cur_usedma)
			end = true;
	}

	if (sr & STM32H7_SPI_SR_MODF) {
		dev_warn(spi->dev, "Mode fault: transfer aborted\n");
		end = true;
	}

	if (sr & STM32H7_SPI_SR_OVR) {
		dev_err(spi->dev, "Overrun: RX data lost\n");
		end = true;
	}

	if (sr & STM32H7_SPI_SR_EOT) {
		/* Flush any residue left in the RX FIFO at end of transfer */
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, true);
		end = true;
	}

	if (sr & STM32H7_SPI_SR_TXP)
		if (!spi->cur_usedma && (spi->tx_buf && (spi->tx_len > 0)))
			stm32h7_spi_write_txfifo(spi);

	if (sr & STM32H7_SPI_SR_RXP)
		if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
			stm32h7_spi_read_rxfifo(spi, false);

	/* Acknowledge only the flags serviced in this pass */
	writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);

	spin_unlock_irqrestore(&spi->lock, flags);

	if (end) {
		stm32h7_spi_disable(spi);
		spi_finalize_current_transfer(master);
	}

	return IRQ_HANDLED;
}
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun /**
994*4882a593Smuzhiyun * stm32_spi_prepare_msg - set up the controller to transfer a single message
995*4882a593Smuzhiyun * @master: controller master interface
996*4882a593Smuzhiyun * @msg: pointer to spi message
997*4882a593Smuzhiyun */
stm32_spi_prepare_msg(struct spi_master * master,struct spi_message * msg)998*4882a593Smuzhiyun static int stm32_spi_prepare_msg(struct spi_master *master,
999*4882a593Smuzhiyun struct spi_message *msg)
1000*4882a593Smuzhiyun {
1001*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
1002*4882a593Smuzhiyun struct spi_device *spi_dev = msg->spi;
1003*4882a593Smuzhiyun struct device_node *np = spi_dev->dev.of_node;
1004*4882a593Smuzhiyun unsigned long flags;
1005*4882a593Smuzhiyun u32 clrb = 0, setb = 0;
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun /* SPI slave device may need time between data frames */
1008*4882a593Smuzhiyun spi->cur_midi = 0;
1009*4882a593Smuzhiyun if (np && !of_property_read_u32(np, "st,spi-midi-ns", &spi->cur_midi))
1010*4882a593Smuzhiyun dev_dbg(spi->dev, "%dns inter-data idleness\n", spi->cur_midi);
1011*4882a593Smuzhiyun
1012*4882a593Smuzhiyun if (spi_dev->mode & SPI_CPOL)
1013*4882a593Smuzhiyun setb |= spi->cfg->regs->cpol.mask;
1014*4882a593Smuzhiyun else
1015*4882a593Smuzhiyun clrb |= spi->cfg->regs->cpol.mask;
1016*4882a593Smuzhiyun
1017*4882a593Smuzhiyun if (spi_dev->mode & SPI_CPHA)
1018*4882a593Smuzhiyun setb |= spi->cfg->regs->cpha.mask;
1019*4882a593Smuzhiyun else
1020*4882a593Smuzhiyun clrb |= spi->cfg->regs->cpha.mask;
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun if (spi_dev->mode & SPI_LSB_FIRST)
1023*4882a593Smuzhiyun setb |= spi->cfg->regs->lsb_first.mask;
1024*4882a593Smuzhiyun else
1025*4882a593Smuzhiyun clrb |= spi->cfg->regs->lsb_first.mask;
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun dev_dbg(spi->dev, "cpol=%d cpha=%d lsb_first=%d cs_high=%d\n",
1028*4882a593Smuzhiyun spi_dev->mode & SPI_CPOL,
1029*4882a593Smuzhiyun spi_dev->mode & SPI_CPHA,
1030*4882a593Smuzhiyun spi_dev->mode & SPI_LSB_FIRST,
1031*4882a593Smuzhiyun spi_dev->mode & SPI_CS_HIGH);
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun /* CPOL, CPHA and LSB FIRST bits have common register */
1036*4882a593Smuzhiyun if (clrb || setb)
1037*4882a593Smuzhiyun writel_relaxed(
1038*4882a593Smuzhiyun (readl_relaxed(spi->base + spi->cfg->regs->cpol.reg) &
1039*4882a593Smuzhiyun ~clrb) | setb,
1040*4882a593Smuzhiyun spi->base + spi->cfg->regs->cpol.reg);
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun return 0;
1045*4882a593Smuzhiyun }
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun /**
1048*4882a593Smuzhiyun * stm32f4_spi_dma_tx_cb - dma callback
1049*4882a593Smuzhiyun * @data: pointer to the spi controller data structure
1050*4882a593Smuzhiyun *
1051*4882a593Smuzhiyun * DMA callback is called when the transfer is complete for DMA TX channel.
1052*4882a593Smuzhiyun */
stm32f4_spi_dma_tx_cb(void * data)1053*4882a593Smuzhiyun static void stm32f4_spi_dma_tx_cb(void *data)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun struct stm32_spi *spi = data;
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
1058*4882a593Smuzhiyun spi_finalize_current_transfer(spi->master);
1059*4882a593Smuzhiyun stm32f4_spi_disable(spi);
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun }
1062*4882a593Smuzhiyun
1063*4882a593Smuzhiyun /**
1064*4882a593Smuzhiyun * stm32f4_spi_dma_rx_cb - dma callback
1065*4882a593Smuzhiyun * @data: pointer to the spi controller data structure
1066*4882a593Smuzhiyun *
1067*4882a593Smuzhiyun * DMA callback is called when the transfer is complete for DMA RX channel.
1068*4882a593Smuzhiyun */
stm32f4_spi_dma_rx_cb(void * data)1069*4882a593Smuzhiyun static void stm32f4_spi_dma_rx_cb(void *data)
1070*4882a593Smuzhiyun {
1071*4882a593Smuzhiyun struct stm32_spi *spi = data;
1072*4882a593Smuzhiyun
1073*4882a593Smuzhiyun spi_finalize_current_transfer(spi->master);
1074*4882a593Smuzhiyun stm32f4_spi_disable(spi);
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun
1077*4882a593Smuzhiyun /**
1078*4882a593Smuzhiyun * stm32h7_spi_dma_cb - dma callback
1079*4882a593Smuzhiyun * @data: pointer to the spi controller data structure
1080*4882a593Smuzhiyun *
1081*4882a593Smuzhiyun * DMA callback is called when the transfer is complete or when an error
1082*4882a593Smuzhiyun * occurs. If the transfer is complete, EOT flag is raised.
1083*4882a593Smuzhiyun */
stm32h7_spi_dma_cb(void * data)1084*4882a593Smuzhiyun static void stm32h7_spi_dma_cb(void *data)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun struct stm32_spi *spi = data;
1087*4882a593Smuzhiyun unsigned long flags;
1088*4882a593Smuzhiyun u32 sr;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun sr = readl_relaxed(spi->base + STM32H7_SPI_SR);
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun if (!(sr & STM32H7_SPI_SR_EOT))
1097*4882a593Smuzhiyun dev_warn(spi->dev, "DMA error (sr=0x%08x)\n", sr);
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun /* Now wait for EOT, or SUSP or OVR in case of error */
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun /**
1103*4882a593Smuzhiyun * stm32_spi_dma_config - configure dma slave channel depending on current
1104*4882a593Smuzhiyun * transfer bits_per_word.
1105*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1106*4882a593Smuzhiyun * @dma_conf: pointer to the dma_slave_config structure
1107*4882a593Smuzhiyun * @dir: direction of the dma transfer
1108*4882a593Smuzhiyun */
stm32_spi_dma_config(struct stm32_spi * spi,struct dma_slave_config * dma_conf,enum dma_transfer_direction dir)1109*4882a593Smuzhiyun static void stm32_spi_dma_config(struct stm32_spi *spi,
1110*4882a593Smuzhiyun struct dma_slave_config *dma_conf,
1111*4882a593Smuzhiyun enum dma_transfer_direction dir)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun enum dma_slave_buswidth buswidth;
1114*4882a593Smuzhiyun u32 maxburst;
1115*4882a593Smuzhiyun
1116*4882a593Smuzhiyun if (spi->cur_bpw <= 8)
1117*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
1118*4882a593Smuzhiyun else if (spi->cur_bpw <= 16)
1119*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
1120*4882a593Smuzhiyun else
1121*4882a593Smuzhiyun buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun if (spi->cfg->has_fifo) {
1124*4882a593Smuzhiyun /* Valid for DMA Half or Full Fifo threshold */
1125*4882a593Smuzhiyun if (spi->cur_fthlv == 2)
1126*4882a593Smuzhiyun maxburst = 1;
1127*4882a593Smuzhiyun else
1128*4882a593Smuzhiyun maxburst = spi->cur_fthlv;
1129*4882a593Smuzhiyun } else {
1130*4882a593Smuzhiyun maxburst = 1;
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun memset(dma_conf, 0, sizeof(struct dma_slave_config));
1134*4882a593Smuzhiyun dma_conf->direction = dir;
1135*4882a593Smuzhiyun if (dma_conf->direction == DMA_DEV_TO_MEM) { /* RX */
1136*4882a593Smuzhiyun dma_conf->src_addr = spi->phys_addr + spi->cfg->regs->rx.reg;
1137*4882a593Smuzhiyun dma_conf->src_addr_width = buswidth;
1138*4882a593Smuzhiyun dma_conf->src_maxburst = maxburst;
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun dev_dbg(spi->dev, "Rx DMA config buswidth=%d, maxburst=%d\n",
1141*4882a593Smuzhiyun buswidth, maxburst);
1142*4882a593Smuzhiyun } else if (dma_conf->direction == DMA_MEM_TO_DEV) { /* TX */
1143*4882a593Smuzhiyun dma_conf->dst_addr = spi->phys_addr + spi->cfg->regs->tx.reg;
1144*4882a593Smuzhiyun dma_conf->dst_addr_width = buswidth;
1145*4882a593Smuzhiyun dma_conf->dst_maxburst = maxburst;
1146*4882a593Smuzhiyun
1147*4882a593Smuzhiyun dev_dbg(spi->dev, "Tx DMA config buswidth=%d, maxburst=%d\n",
1148*4882a593Smuzhiyun buswidth, maxburst);
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun
1152*4882a593Smuzhiyun /**
1153*4882a593Smuzhiyun * stm32f4_spi_transfer_one_irq - transfer a single spi_transfer using
1154*4882a593Smuzhiyun * interrupts
1155*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1156*4882a593Smuzhiyun *
1157*4882a593Smuzhiyun * It must returns 0 if the transfer is finished or 1 if the transfer is still
1158*4882a593Smuzhiyun * in progress.
1159*4882a593Smuzhiyun */
stm32f4_spi_transfer_one_irq(struct stm32_spi * spi)1160*4882a593Smuzhiyun static int stm32f4_spi_transfer_one_irq(struct stm32_spi *spi)
1161*4882a593Smuzhiyun {
1162*4882a593Smuzhiyun unsigned long flags;
1163*4882a593Smuzhiyun u32 cr2 = 0;
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun /* Enable the interrupts relative to the current communication mode */
1166*4882a593Smuzhiyun if (spi->cur_comm == SPI_SIMPLEX_TX || spi->cur_comm == SPI_3WIRE_TX) {
1167*4882a593Smuzhiyun cr2 |= STM32F4_SPI_CR2_TXEIE;
1168*4882a593Smuzhiyun } else if (spi->cur_comm == SPI_FULL_DUPLEX ||
1169*4882a593Smuzhiyun spi->cur_comm == SPI_SIMPLEX_RX ||
1170*4882a593Smuzhiyun spi->cur_comm == SPI_3WIRE_RX) {
1171*4882a593Smuzhiyun /* In transmit-only mode, the OVR flag is set in the SR register
1172*4882a593Smuzhiyun * since the received data are never read. Therefore set OVR
1173*4882a593Smuzhiyun * interrupt only when rx buffer is available.
1174*4882a593Smuzhiyun */
1175*4882a593Smuzhiyun cr2 |= STM32F4_SPI_CR2_RXNEIE | STM32F4_SPI_CR2_ERRIE;
1176*4882a593Smuzhiyun } else {
1177*4882a593Smuzhiyun return -EINVAL;
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun
1180*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32F4_SPI_CR2, cr2);
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun stm32_spi_enable(spi);
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun /* starting data transfer when buffer is loaded */
1187*4882a593Smuzhiyun if (spi->tx_buf)
1188*4882a593Smuzhiyun stm32f4_spi_write_tx(spi);
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun return 1;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun /**
1196*4882a593Smuzhiyun * stm32h7_spi_transfer_one_irq - transfer a single spi_transfer using
1197*4882a593Smuzhiyun * interrupts
1198*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1199*4882a593Smuzhiyun *
1200*4882a593Smuzhiyun * It must returns 0 if the transfer is finished or 1 if the transfer is still
1201*4882a593Smuzhiyun * in progress.
1202*4882a593Smuzhiyun */
stm32h7_spi_transfer_one_irq(struct stm32_spi * spi)1203*4882a593Smuzhiyun static int stm32h7_spi_transfer_one_irq(struct stm32_spi *spi)
1204*4882a593Smuzhiyun {
1205*4882a593Smuzhiyun unsigned long flags;
1206*4882a593Smuzhiyun u32 ier = 0;
1207*4882a593Smuzhiyun
1208*4882a593Smuzhiyun /* Enable the interrupts relative to the current communication mode */
1209*4882a593Smuzhiyun if (spi->tx_buf && spi->rx_buf) /* Full Duplex */
1210*4882a593Smuzhiyun ier |= STM32H7_SPI_IER_DXPIE;
1211*4882a593Smuzhiyun else if (spi->tx_buf) /* Half-Duplex TX dir or Simplex TX */
1212*4882a593Smuzhiyun ier |= STM32H7_SPI_IER_TXPIE;
1213*4882a593Smuzhiyun else if (spi->rx_buf) /* Half-Duplex RX dir or Simplex RX */
1214*4882a593Smuzhiyun ier |= STM32H7_SPI_IER_RXPIE;
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun /* Enable the interrupts relative to the end of transfer */
1217*4882a593Smuzhiyun ier |= STM32H7_SPI_IER_EOTIE | STM32H7_SPI_IER_TXTFIE |
1218*4882a593Smuzhiyun STM32H7_SPI_IER_OVRIE | STM32H7_SPI_IER_MODFIE;
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun stm32_spi_enable(spi);
1223*4882a593Smuzhiyun
1224*4882a593Smuzhiyun /* Be sure to have data in fifo before starting data transfer */
1225*4882a593Smuzhiyun if (spi->tx_buf)
1226*4882a593Smuzhiyun stm32h7_spi_write_txfifo(spi);
1227*4882a593Smuzhiyun
1228*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun writel_relaxed(ier, spi->base + STM32H7_SPI_IER);
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun return 1;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun /**
1238*4882a593Smuzhiyun * stm32f4_spi_transfer_one_dma_start - Set SPI driver registers to start
1239*4882a593Smuzhiyun * transfer using DMA
1240*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1241*4882a593Smuzhiyun */
stm32f4_spi_transfer_one_dma_start(struct stm32_spi * spi)1242*4882a593Smuzhiyun static void stm32f4_spi_transfer_one_dma_start(struct stm32_spi *spi)
1243*4882a593Smuzhiyun {
1244*4882a593Smuzhiyun /* In DMA mode end of transfer is handled by DMA TX or RX callback. */
1245*4882a593Smuzhiyun if (spi->cur_comm == SPI_SIMPLEX_RX || spi->cur_comm == SPI_3WIRE_RX ||
1246*4882a593Smuzhiyun spi->cur_comm == SPI_FULL_DUPLEX) {
1247*4882a593Smuzhiyun /*
1248*4882a593Smuzhiyun * In transmit-only mode, the OVR flag is set in the SR register
1249*4882a593Smuzhiyun * since the received data are never read. Therefore set OVR
1250*4882a593Smuzhiyun * interrupt only when rx buffer is available.
1251*4882a593Smuzhiyun */
1252*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32F4_SPI_CR2, STM32F4_SPI_CR2_ERRIE);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun stm32_spi_enable(spi);
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun /**
1259*4882a593Smuzhiyun * stm32h7_spi_transfer_one_dma_start - Set SPI driver registers to start
1260*4882a593Smuzhiyun * transfer using DMA
1261*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1262*4882a593Smuzhiyun */
stm32h7_spi_transfer_one_dma_start(struct stm32_spi * spi)1263*4882a593Smuzhiyun static void stm32h7_spi_transfer_one_dma_start(struct stm32_spi *spi)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun /* Enable the interrupts relative to the end of transfer */
1266*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_IER, STM32H7_SPI_IER_EOTIE |
1267*4882a593Smuzhiyun STM32H7_SPI_IER_TXTFIE |
1268*4882a593Smuzhiyun STM32H7_SPI_IER_OVRIE |
1269*4882a593Smuzhiyun STM32H7_SPI_IER_MODFIE);
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun stm32_spi_enable(spi);
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_CSTART);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
/**
 * stm32_spi_transfer_one_dma - transfer a single spi_transfer using DMA
 * @spi: pointer to the spi controller data structure
 * @xfer: pointer to the spi_transfer structure
 *
 * Prepares and submits the Rx and/or Tx DMA descriptors, enables the
 * matching DMA requests in the controller and starts the transfer.
 * On any DMA preparation or submit failure it falls back to interrupt mode.
 *
 * It must return 0 if the transfer is finished or 1 if the transfer is still
 * in progress.
 */
static int stm32_spi_transfer_one_dma(struct stm32_spi *spi,
				      struct spi_transfer *xfer)
{
	struct dma_slave_config tx_dma_conf, rx_dma_conf;
	struct dma_async_tx_descriptor *tx_dma_desc, *rx_dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	rx_dma_desc = NULL;
	if (spi->rx_buf && spi->dma_rx) {
		stm32_spi_dma_config(spi, &rx_dma_conf, DMA_DEV_TO_MEM);
		dmaengine_slave_config(spi->dma_rx, &rx_dma_conf);

		/* Enable Rx DMA request */
		stm32_spi_set_bits(spi, spi->cfg->regs->dma_rx_en.reg,
				   spi->cfg->regs->dma_rx_en.mask);

		rx_dma_desc = dmaengine_prep_slave_sg(
					spi->dma_rx, xfer->rx_sg.sgl,
					xfer->rx_sg.nents,
					rx_dma_conf.direction,
					DMA_PREP_INTERRUPT);
	}

	tx_dma_desc = NULL;
	if (spi->tx_buf && spi->dma_tx) {
		stm32_spi_dma_config(spi, &tx_dma_conf, DMA_MEM_TO_DEV);
		dmaengine_slave_config(spi->dma_tx, &tx_dma_conf);

		tx_dma_desc = dmaengine_prep_slave_sg(
					spi->dma_tx, xfer->tx_sg.sgl,
					xfer->tx_sg.nents,
					tx_dma_conf.direction,
					DMA_PREP_INTERRUPT);
	}

	/* A direction with both a buffer and a channel needs a descriptor */
	if ((spi->tx_buf && spi->dma_tx && !tx_dma_desc) ||
	    (spi->rx_buf && spi->dma_rx && !rx_dma_desc))
		goto dma_desc_error;

	/* Full duplex cannot run half over DMA: both descriptors or none */
	if (spi->cur_comm == SPI_FULL_DUPLEX && (!tx_dma_desc || !rx_dma_desc))
		goto dma_desc_error;

	if (rx_dma_desc) {
		rx_dma_desc->callback = spi->cfg->dma_rx_cb;
		rx_dma_desc->callback_param = spi;

		if (dma_submit_error(dmaengine_submit(rx_dma_desc))) {
			dev_err(spi->dev, "Rx DMA submit failed\n");
			goto dma_desc_error;
		}
		/* Enable Rx DMA channel */
		dma_async_issue_pending(spi->dma_rx);
	}

	if (tx_dma_desc) {
		if (spi->cur_comm == SPI_SIMPLEX_TX ||
		    spi->cur_comm == SPI_3WIRE_TX) {
			/*
			 * Completion is reported by the Tx callback only in
			 * transmit-only modes; in the other modes the Rx
			 * callback (set above) signals the end of transfer.
			 */
			tx_dma_desc->callback = spi->cfg->dma_tx_cb;
			tx_dma_desc->callback_param = spi;
		}

		if (dma_submit_error(dmaengine_submit(tx_dma_desc))) {
			dev_err(spi->dev, "Tx DMA submit failed\n");
			goto dma_submit_error;
		}
		/* Enable Tx DMA channel */
		dma_async_issue_pending(spi->dma_tx);

		/* Enable Tx DMA request */
		stm32_spi_set_bits(spi, spi->cfg->regs->dma_tx_en.reg,
				   spi->cfg->regs->dma_tx_en.mask);
	}

	spi->cfg->transfer_one_dma_start(spi);

	spin_unlock_irqrestore(&spi->lock, flags);

	return 1;

dma_submit_error:
	/* Tx submit failed after Rx was already issued: stop the Rx channel */
	if (spi->dma_rx)
		dmaengine_terminate_all(spi->dma_rx);

dma_desc_error:
	stm32_spi_clr_bits(spi, spi->cfg->regs->dma_rx_en.reg,
			   spi->cfg->regs->dma_rx_en.mask);

	spin_unlock_irqrestore(&spi->lock, flags);

	dev_info(spi->dev, "DMA issue: fall back to irq transfer\n");

	spi->cur_usedma = false;
	return spi->cfg->transfer_one_irq(spi);
}
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun /**
1382*4882a593Smuzhiyun * stm32f4_spi_set_bpw - Configure bits per word
1383*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1384*4882a593Smuzhiyun */
stm32f4_spi_set_bpw(struct stm32_spi * spi)1385*4882a593Smuzhiyun static void stm32f4_spi_set_bpw(struct stm32_spi *spi)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun if (spi->cur_bpw == 16)
1388*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
1389*4882a593Smuzhiyun else
1390*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_DFF);
1391*4882a593Smuzhiyun }
1392*4882a593Smuzhiyun
1393*4882a593Smuzhiyun /**
1394*4882a593Smuzhiyun * stm32h7_spi_set_bpw - configure bits per word
1395*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1396*4882a593Smuzhiyun */
stm32h7_spi_set_bpw(struct stm32_spi * spi)1397*4882a593Smuzhiyun static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun u32 bpw, fthlv;
1400*4882a593Smuzhiyun u32 cfg1_clrb = 0, cfg1_setb = 0;
1401*4882a593Smuzhiyun
1402*4882a593Smuzhiyun bpw = spi->cur_bpw - 1;
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun cfg1_clrb |= STM32H7_SPI_CFG1_DSIZE;
1405*4882a593Smuzhiyun cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
1406*4882a593Smuzhiyun STM32H7_SPI_CFG1_DSIZE;
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
1409*4882a593Smuzhiyun fthlv = spi->cur_fthlv - 1;
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
1412*4882a593Smuzhiyun cfg1_setb |= (fthlv << STM32H7_SPI_CFG1_FTHLV_SHIFT) &
1413*4882a593Smuzhiyun STM32H7_SPI_CFG1_FTHLV;
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun writel_relaxed(
1416*4882a593Smuzhiyun (readl_relaxed(spi->base + STM32H7_SPI_CFG1) &
1417*4882a593Smuzhiyun ~cfg1_clrb) | cfg1_setb,
1418*4882a593Smuzhiyun spi->base + STM32H7_SPI_CFG1);
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun /**
1422*4882a593Smuzhiyun * stm32_spi_set_mbr - Configure baud rate divisor in master mode
1423*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1424*4882a593Smuzhiyun * @mbrdiv: baud rate divisor value
1425*4882a593Smuzhiyun */
stm32_spi_set_mbr(struct stm32_spi * spi,u32 mbrdiv)1426*4882a593Smuzhiyun static void stm32_spi_set_mbr(struct stm32_spi *spi, u32 mbrdiv)
1427*4882a593Smuzhiyun {
1428*4882a593Smuzhiyun u32 clrb = 0, setb = 0;
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun clrb |= spi->cfg->regs->br.mask;
1431*4882a593Smuzhiyun setb |= ((u32)mbrdiv << spi->cfg->regs->br.shift) &
1432*4882a593Smuzhiyun spi->cfg->regs->br.mask;
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun writel_relaxed((readl_relaxed(spi->base + spi->cfg->regs->br.reg) &
1435*4882a593Smuzhiyun ~clrb) | setb,
1436*4882a593Smuzhiyun spi->base + spi->cfg->regs->br.reg);
1437*4882a593Smuzhiyun }
1438*4882a593Smuzhiyun
1439*4882a593Smuzhiyun /**
1440*4882a593Smuzhiyun * stm32_spi_communication_type - return transfer communication type
1441*4882a593Smuzhiyun * @spi_dev: pointer to the spi device
1442*4882a593Smuzhiyun * @transfer: pointer to spi transfer
1443*4882a593Smuzhiyun */
stm32_spi_communication_type(struct spi_device * spi_dev,struct spi_transfer * transfer)1444*4882a593Smuzhiyun static unsigned int stm32_spi_communication_type(struct spi_device *spi_dev,
1445*4882a593Smuzhiyun struct spi_transfer *transfer)
1446*4882a593Smuzhiyun {
1447*4882a593Smuzhiyun unsigned int type = SPI_FULL_DUPLEX;
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun if (spi_dev->mode & SPI_3WIRE) { /* MISO/MOSI signals shared */
1450*4882a593Smuzhiyun /*
1451*4882a593Smuzhiyun * SPI_3WIRE and xfer->tx_buf != NULL and xfer->rx_buf != NULL
1452*4882a593Smuzhiyun * is forbidden and unvalidated by SPI subsystem so depending
1453*4882a593Smuzhiyun * on the valid buffer, we can determine the direction of the
1454*4882a593Smuzhiyun * transfer.
1455*4882a593Smuzhiyun */
1456*4882a593Smuzhiyun if (!transfer->tx_buf)
1457*4882a593Smuzhiyun type = SPI_3WIRE_RX;
1458*4882a593Smuzhiyun else
1459*4882a593Smuzhiyun type = SPI_3WIRE_TX;
1460*4882a593Smuzhiyun } else {
1461*4882a593Smuzhiyun if (!transfer->tx_buf)
1462*4882a593Smuzhiyun type = SPI_SIMPLEX_RX;
1463*4882a593Smuzhiyun else if (!transfer->rx_buf)
1464*4882a593Smuzhiyun type = SPI_SIMPLEX_TX;
1465*4882a593Smuzhiyun }
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun return type;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /**
1471*4882a593Smuzhiyun * stm32f4_spi_set_mode - configure communication mode
1472*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1473*4882a593Smuzhiyun * @comm_type: type of communication to configure
1474*4882a593Smuzhiyun */
stm32f4_spi_set_mode(struct stm32_spi * spi,unsigned int comm_type)1475*4882a593Smuzhiyun static int stm32f4_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
1476*4882a593Smuzhiyun {
1477*4882a593Smuzhiyun if (comm_type == SPI_3WIRE_TX || comm_type == SPI_SIMPLEX_TX) {
1478*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
1479*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIMODE |
1480*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIOE);
1481*4882a593Smuzhiyun } else if (comm_type == SPI_FULL_DUPLEX ||
1482*4882a593Smuzhiyun comm_type == SPI_SIMPLEX_RX) {
1483*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
1484*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIMODE |
1485*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIOE);
1486*4882a593Smuzhiyun } else if (comm_type == SPI_3WIRE_RX) {
1487*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32F4_SPI_CR1,
1488*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIMODE);
1489*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32F4_SPI_CR1,
1490*4882a593Smuzhiyun STM32F4_SPI_CR1_BIDIOE);
1491*4882a593Smuzhiyun } else {
1492*4882a593Smuzhiyun return -EINVAL;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun return 0;
1496*4882a593Smuzhiyun }
1497*4882a593Smuzhiyun
1498*4882a593Smuzhiyun /**
1499*4882a593Smuzhiyun * stm32h7_spi_set_mode - configure communication mode
1500*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1501*4882a593Smuzhiyun * @comm_type: type of communication to configure
1502*4882a593Smuzhiyun */
stm32h7_spi_set_mode(struct stm32_spi * spi,unsigned int comm_type)1503*4882a593Smuzhiyun static int stm32h7_spi_set_mode(struct stm32_spi *spi, unsigned int comm_type)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun u32 mode;
1506*4882a593Smuzhiyun u32 cfg2_clrb = 0, cfg2_setb = 0;
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun if (comm_type == SPI_3WIRE_RX) {
1509*4882a593Smuzhiyun mode = STM32H7_SPI_HALF_DUPLEX;
1510*4882a593Smuzhiyun stm32_spi_clr_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
1511*4882a593Smuzhiyun } else if (comm_type == SPI_3WIRE_TX) {
1512*4882a593Smuzhiyun mode = STM32H7_SPI_HALF_DUPLEX;
1513*4882a593Smuzhiyun stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_HDDIR);
1514*4882a593Smuzhiyun } else if (comm_type == SPI_SIMPLEX_RX) {
1515*4882a593Smuzhiyun mode = STM32H7_SPI_SIMPLEX_RX;
1516*4882a593Smuzhiyun } else if (comm_type == SPI_SIMPLEX_TX) {
1517*4882a593Smuzhiyun mode = STM32H7_SPI_SIMPLEX_TX;
1518*4882a593Smuzhiyun } else {
1519*4882a593Smuzhiyun mode = STM32H7_SPI_FULL_DUPLEX;
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun cfg2_clrb |= STM32H7_SPI_CFG2_COMM;
1523*4882a593Smuzhiyun cfg2_setb |= (mode << STM32H7_SPI_CFG2_COMM_SHIFT) &
1524*4882a593Smuzhiyun STM32H7_SPI_CFG2_COMM;
1525*4882a593Smuzhiyun
1526*4882a593Smuzhiyun writel_relaxed(
1527*4882a593Smuzhiyun (readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
1528*4882a593Smuzhiyun ~cfg2_clrb) | cfg2_setb,
1529*4882a593Smuzhiyun spi->base + STM32H7_SPI_CFG2);
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun return 0;
1532*4882a593Smuzhiyun }
1533*4882a593Smuzhiyun
/**
 * stm32h7_spi_data_idleness - configure minimum time delay inserted between two
 *			       consecutive data frames in master mode
 * @spi: pointer to the spi controller data structure
 * @len: transfer len
 *
 * Translates the requested inter-data idleness (spi->cur_midi, in ns) into
 * SCK cycles and programs it into the CFG2 MIDI field. The field is always
 * cleared first so a transfer without delay does not inherit a stale value.
 */
static void stm32h7_spi_data_idleness(struct stm32_spi *spi, u32 len)
{
	u32 cfg2_clrb = 0, cfg2_setb = 0;

	cfg2_clrb |= STM32H7_SPI_CFG2_MIDI;
	/* A delay only makes sense between frames, i.e. for len > 1 */
	if ((len > 1) && (spi->cur_midi > 0)) {
		/* One SCK period in ns, rounded up from the current speed */
		u32 sck_period_ns = DIV_ROUND_UP(SPI_1HZ_NS, spi->cur_speed);
		/* Clamp the cycle count to the width of the MIDI field */
		u32 midi = min((u32)DIV_ROUND_UP(spi->cur_midi, sck_period_ns),
			       (u32)STM32H7_SPI_CFG2_MIDI >>
			       STM32H7_SPI_CFG2_MIDI_SHIFT);

		dev_dbg(spi->dev, "period=%dns, midi=%d(=%dns)\n",
			sck_period_ns, midi, midi * sck_period_ns);
		cfg2_setb |= (midi << STM32H7_SPI_CFG2_MIDI_SHIFT) &
			     STM32H7_SPI_CFG2_MIDI;
	}

	writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CFG2) &
			~cfg2_clrb) | cfg2_setb,
		       spi->base + STM32H7_SPI_CFG2);
}
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /**
1563*4882a593Smuzhiyun * stm32h7_spi_number_of_data - configure number of data at current transfer
1564*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1565*4882a593Smuzhiyun * @nb_words: transfer length (in words)
1566*4882a593Smuzhiyun */
stm32h7_spi_number_of_data(struct stm32_spi * spi,u32 nb_words)1567*4882a593Smuzhiyun static int stm32h7_spi_number_of_data(struct stm32_spi *spi, u32 nb_words)
1568*4882a593Smuzhiyun {
1569*4882a593Smuzhiyun u32 cr2_clrb = 0, cr2_setb = 0;
1570*4882a593Smuzhiyun
1571*4882a593Smuzhiyun if (nb_words <= (STM32H7_SPI_CR2_TSIZE >>
1572*4882a593Smuzhiyun STM32H7_SPI_CR2_TSIZE_SHIFT)) {
1573*4882a593Smuzhiyun cr2_clrb |= STM32H7_SPI_CR2_TSIZE;
1574*4882a593Smuzhiyun cr2_setb = nb_words << STM32H7_SPI_CR2_TSIZE_SHIFT;
1575*4882a593Smuzhiyun writel_relaxed((readl_relaxed(spi->base + STM32H7_SPI_CR2) &
1576*4882a593Smuzhiyun ~cr2_clrb) | cr2_setb,
1577*4882a593Smuzhiyun spi->base + STM32H7_SPI_CR2);
1578*4882a593Smuzhiyun } else {
1579*4882a593Smuzhiyun return -EMSGSIZE;
1580*4882a593Smuzhiyun }
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun return 0;
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun
1585*4882a593Smuzhiyun /**
1586*4882a593Smuzhiyun * stm32_spi_transfer_one_setup - common setup to transfer a single
1587*4882a593Smuzhiyun * spi_transfer either using DMA or
1588*4882a593Smuzhiyun * interrupts.
1589*4882a593Smuzhiyun * @spi: pointer to the spi controller data structure
1590*4882a593Smuzhiyun * @spi_dev: pointer to the spi device
1591*4882a593Smuzhiyun * @transfer: pointer to spi transfer
1592*4882a593Smuzhiyun */
stm32_spi_transfer_one_setup(struct stm32_spi * spi,struct spi_device * spi_dev,struct spi_transfer * transfer)1593*4882a593Smuzhiyun static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
1594*4882a593Smuzhiyun struct spi_device *spi_dev,
1595*4882a593Smuzhiyun struct spi_transfer *transfer)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun unsigned long flags;
1598*4882a593Smuzhiyun unsigned int comm_type;
1599*4882a593Smuzhiyun int nb_words, ret = 0;
1600*4882a593Smuzhiyun int mbr;
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun spin_lock_irqsave(&spi->lock, flags);
1603*4882a593Smuzhiyun
1604*4882a593Smuzhiyun spi->cur_xferlen = transfer->len;
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun spi->cur_bpw = transfer->bits_per_word;
1607*4882a593Smuzhiyun spi->cfg->set_bpw(spi);
1608*4882a593Smuzhiyun
1609*4882a593Smuzhiyun /* Update spi->cur_speed with real clock speed */
1610*4882a593Smuzhiyun mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
1611*4882a593Smuzhiyun spi->cfg->baud_rate_div_min,
1612*4882a593Smuzhiyun spi->cfg->baud_rate_div_max);
1613*4882a593Smuzhiyun if (mbr < 0) {
1614*4882a593Smuzhiyun ret = mbr;
1615*4882a593Smuzhiyun goto out;
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun
1618*4882a593Smuzhiyun transfer->speed_hz = spi->cur_speed;
1619*4882a593Smuzhiyun stm32_spi_set_mbr(spi, mbr);
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun comm_type = stm32_spi_communication_type(spi_dev, transfer);
1622*4882a593Smuzhiyun ret = spi->cfg->set_mode(spi, comm_type);
1623*4882a593Smuzhiyun if (ret < 0)
1624*4882a593Smuzhiyun goto out;
1625*4882a593Smuzhiyun
1626*4882a593Smuzhiyun spi->cur_comm = comm_type;
1627*4882a593Smuzhiyun
1628*4882a593Smuzhiyun if (spi->cfg->set_data_idleness)
1629*4882a593Smuzhiyun spi->cfg->set_data_idleness(spi, transfer->len);
1630*4882a593Smuzhiyun
1631*4882a593Smuzhiyun if (spi->cur_bpw <= 8)
1632*4882a593Smuzhiyun nb_words = transfer->len;
1633*4882a593Smuzhiyun else if (spi->cur_bpw <= 16)
1634*4882a593Smuzhiyun nb_words = DIV_ROUND_UP(transfer->len * 8, 16);
1635*4882a593Smuzhiyun else
1636*4882a593Smuzhiyun nb_words = DIV_ROUND_UP(transfer->len * 8, 32);
1637*4882a593Smuzhiyun
1638*4882a593Smuzhiyun if (spi->cfg->set_number_of_data) {
1639*4882a593Smuzhiyun ret = spi->cfg->set_number_of_data(spi, nb_words);
1640*4882a593Smuzhiyun if (ret < 0)
1641*4882a593Smuzhiyun goto out;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun dev_dbg(spi->dev, "transfer communication mode set to %d\n",
1645*4882a593Smuzhiyun spi->cur_comm);
1646*4882a593Smuzhiyun dev_dbg(spi->dev,
1647*4882a593Smuzhiyun "data frame of %d-bit, data packet of %d data frames\n",
1648*4882a593Smuzhiyun spi->cur_bpw, spi->cur_fthlv);
1649*4882a593Smuzhiyun dev_dbg(spi->dev, "speed set to %dHz\n", spi->cur_speed);
1650*4882a593Smuzhiyun dev_dbg(spi->dev, "transfer of %d bytes (%d data frames)\n",
1651*4882a593Smuzhiyun spi->cur_xferlen, nb_words);
1652*4882a593Smuzhiyun dev_dbg(spi->dev, "dma %s\n",
1653*4882a593Smuzhiyun (spi->cur_usedma) ? "enabled" : "disabled");
1654*4882a593Smuzhiyun
1655*4882a593Smuzhiyun out:
1656*4882a593Smuzhiyun spin_unlock_irqrestore(&spi->lock, flags);
1657*4882a593Smuzhiyun
1658*4882a593Smuzhiyun return ret;
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun /**
1662*4882a593Smuzhiyun * stm32_spi_transfer_one - transfer a single spi_transfer
1663*4882a593Smuzhiyun * @master: controller master interface
1664*4882a593Smuzhiyun * @spi_dev: pointer to the spi device
1665*4882a593Smuzhiyun * @transfer: pointer to spi transfer
1666*4882a593Smuzhiyun *
1667*4882a593Smuzhiyun * It must return 0 if the transfer is finished or 1 if the transfer is still
1668*4882a593Smuzhiyun * in progress.
1669*4882a593Smuzhiyun */
stm32_spi_transfer_one(struct spi_master * master,struct spi_device * spi_dev,struct spi_transfer * transfer)1670*4882a593Smuzhiyun static int stm32_spi_transfer_one(struct spi_master *master,
1671*4882a593Smuzhiyun struct spi_device *spi_dev,
1672*4882a593Smuzhiyun struct spi_transfer *transfer)
1673*4882a593Smuzhiyun {
1674*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
1675*4882a593Smuzhiyun int ret;
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun /* Don't do anything on 0 bytes transfers */
1678*4882a593Smuzhiyun if (transfer->len == 0)
1679*4882a593Smuzhiyun return 0;
1680*4882a593Smuzhiyun
1681*4882a593Smuzhiyun spi->tx_buf = transfer->tx_buf;
1682*4882a593Smuzhiyun spi->rx_buf = transfer->rx_buf;
1683*4882a593Smuzhiyun spi->tx_len = spi->tx_buf ? transfer->len : 0;
1684*4882a593Smuzhiyun spi->rx_len = spi->rx_buf ? transfer->len : 0;
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun spi->cur_usedma = (master->can_dma &&
1687*4882a593Smuzhiyun master->can_dma(master, spi_dev, transfer));
1688*4882a593Smuzhiyun
1689*4882a593Smuzhiyun ret = stm32_spi_transfer_one_setup(spi, spi_dev, transfer);
1690*4882a593Smuzhiyun if (ret) {
1691*4882a593Smuzhiyun dev_err(spi->dev, "SPI transfer setup failed\n");
1692*4882a593Smuzhiyun return ret;
1693*4882a593Smuzhiyun }
1694*4882a593Smuzhiyun
1695*4882a593Smuzhiyun if (spi->cur_usedma)
1696*4882a593Smuzhiyun return stm32_spi_transfer_one_dma(spi, transfer);
1697*4882a593Smuzhiyun else
1698*4882a593Smuzhiyun return spi->cfg->transfer_one_irq(spi);
1699*4882a593Smuzhiyun }
1700*4882a593Smuzhiyun
/**
 * stm32_spi_unprepare_msg - relax the hardware
 * @master: controller master interface
 * @msg: pointer to the spi message
 *
 * Disables the controller once the message has been handled.
 *
 * Return: always 0.
 */
static int stm32_spi_unprepare_msg(struct spi_master *master,
				   struct spi_message *msg)
{
	struct stm32_spi *spi = spi_master_get_devdata(master);

	/* Quiesce the controller between messages */
	spi->cfg->disable(spi);

	return 0;
}
1715*4882a593Smuzhiyun
/**
 * stm32f4_spi_config - Configure SPI controller as SPI master
 * @spi: pointer to the spi controller data structure
 *
 * One-time controller setup done at probe, under the controller lock.
 *
 * Return: always 0.
 */
static int stm32f4_spi_config(struct stm32_spi *spi)
{
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	/* Ensure I2SMOD bit is kept cleared */
	stm32_spi_clr_bits(spi, STM32F4_SPI_I2SCFGR,
			   STM32F4_SPI_I2SCFGR_I2SMOD);

	/*
	 * - SS input value high
	 * - transmitter half duplex direction
	 * - Set the master mode (default Motorola mode)
	 * - Consider 1 master/n slaves configuration and
	 *   SS input value is determined by the SSI bit
	 */
	stm32_spi_set_bits(spi, STM32F4_SPI_CR1, STM32F4_SPI_CR1_SSI |
						 STM32F4_SPI_CR1_BIDIOE |
						 STM32F4_SPI_CR1_MSTR |
						 STM32F4_SPI_CR1_SSM);

	spin_unlock_irqrestore(&spi->lock, flags);

	return 0;
}
1746*4882a593Smuzhiyun
/**
 * stm32h7_spi_config - Configure SPI controller as SPI master
 * @spi: pointer to the spi controller data structure
 *
 * One-time controller setup done at probe, under the controller lock.
 *
 * Return: always 0.
 */
static int stm32h7_spi_config(struct stm32_spi *spi)
{
	unsigned long flags;

	spin_lock_irqsave(&spi->lock, flags);

	/* Ensure I2SMOD bit is kept cleared */
	stm32_spi_clr_bits(spi, STM32H7_SPI_I2SCFGR,
			   STM32H7_SPI_I2SCFGR_I2SMOD);

	/*
	 * - SS input value high
	 * - transmitter half duplex direction
	 * - automatic communication suspend when RX-Fifo is full
	 */
	stm32_spi_set_bits(spi, STM32H7_SPI_CR1, STM32H7_SPI_CR1_SSI |
						 STM32H7_SPI_CR1_HDDIR |
						 STM32H7_SPI_CR1_MASRX);

	/*
	 * - Set the master mode (default Motorola mode)
	 * - Consider 1 master/n slaves configuration and
	 *   SS input value is determined by the SSI bit
	 * - keep control of all associated GPIOs
	 */
	stm32_spi_set_bits(spi, STM32H7_SPI_CFG2, STM32H7_SPI_CFG2_MASTER |
						  STM32H7_SPI_CFG2_SSM |
						  STM32H7_SPI_CFG2_AFCNTR);

	spin_unlock_irqrestore(&spi->lock, flags);

	return 0;
}
1784*4882a593Smuzhiyun
/* Variant operations and limits for the STM32F4 controller (no FIFO) */
static const struct stm32_spi_cfg stm32f4_spi_cfg = {
	.regs = &stm32f4_spi_regspec,
	.get_bpw_mask = stm32f4_spi_get_bpw_mask,
	.disable = stm32f4_spi_disable,
	.config = stm32f4_spi_config,
	.set_bpw = stm32f4_spi_set_bpw,
	.set_mode = stm32f4_spi_set_mode,
	.transfer_one_dma_start = stm32f4_spi_transfer_one_dma_start,
	.dma_tx_cb = stm32f4_spi_dma_tx_cb,
	.dma_rx_cb = stm32f4_spi_dma_rx_cb,
	.transfer_one_irq = stm32f4_spi_transfer_one_irq,
	.irq_handler_event = stm32f4_spi_irq_event,
	.irq_handler_thread = stm32f4_spi_irq_thread,
	.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
	.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
	.has_fifo = false,
};
1802*4882a593Smuzhiyun
/*
 * Variant operations and limits for the STM32H7 controller: has a FIFO,
 * supports inter-data idleness (MIDI) and a programmable transfer size
 * (TSIZE), and uses one threaded IRQ handler for both Tx and Rx.
 */
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
	.regs = &stm32h7_spi_regspec,
	.get_fifo_size = stm32h7_spi_get_fifo_size,
	.get_bpw_mask = stm32h7_spi_get_bpw_mask,
	.disable = stm32h7_spi_disable,
	.config = stm32h7_spi_config,
	.set_bpw = stm32h7_spi_set_bpw,
	.set_mode = stm32h7_spi_set_mode,
	.set_data_idleness = stm32h7_spi_data_idleness,
	.set_number_of_data = stm32h7_spi_number_of_data,
	.transfer_one_dma_start = stm32h7_spi_transfer_one_dma_start,
	.dma_rx_cb = stm32h7_spi_dma_cb,
	.dma_tx_cb = stm32h7_spi_dma_cb,
	.transfer_one_irq = stm32h7_spi_transfer_one_irq,
	.irq_handler_thread = stm32h7_spi_irq_thread,
	.baud_rate_div_min = STM32H7_SPI_MBR_DIV_MIN,
	.baud_rate_div_max = STM32H7_SPI_MBR_DIV_MAX,
	.has_fifo = true,
};
1822*4882a593Smuzhiyun
/* Device-tree match table: selects the per-variant configuration above */
static const struct of_device_id stm32_spi_of_match[] = {
	{ .compatible = "st,stm32h7-spi", .data = (void *)&stm32h7_spi_cfg },
	{ .compatible = "st,stm32f4-spi", .data = (void *)&stm32f4_spi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_spi_of_match);
1829*4882a593Smuzhiyun
stm32_spi_probe(struct platform_device * pdev)1830*4882a593Smuzhiyun static int stm32_spi_probe(struct platform_device *pdev)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun struct spi_master *master;
1833*4882a593Smuzhiyun struct stm32_spi *spi;
1834*4882a593Smuzhiyun struct resource *res;
1835*4882a593Smuzhiyun int ret;
1836*4882a593Smuzhiyun
1837*4882a593Smuzhiyun master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
1838*4882a593Smuzhiyun if (!master) {
1839*4882a593Smuzhiyun dev_err(&pdev->dev, "spi master allocation failed\n");
1840*4882a593Smuzhiyun return -ENOMEM;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun platform_set_drvdata(pdev, master);
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun spi = spi_master_get_devdata(master);
1845*4882a593Smuzhiyun spi->dev = &pdev->dev;
1846*4882a593Smuzhiyun spi->master = master;
1847*4882a593Smuzhiyun spin_lock_init(&spi->lock);
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun spi->cfg = (const struct stm32_spi_cfg *)
1850*4882a593Smuzhiyun of_match_device(pdev->dev.driver->of_match_table,
1851*4882a593Smuzhiyun &pdev->dev)->data;
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1854*4882a593Smuzhiyun spi->base = devm_ioremap_resource(&pdev->dev, res);
1855*4882a593Smuzhiyun if (IS_ERR(spi->base))
1856*4882a593Smuzhiyun return PTR_ERR(spi->base);
1857*4882a593Smuzhiyun
1858*4882a593Smuzhiyun spi->phys_addr = (dma_addr_t)res->start;
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun spi->irq = platform_get_irq(pdev, 0);
1861*4882a593Smuzhiyun if (spi->irq <= 0)
1862*4882a593Smuzhiyun return dev_err_probe(&pdev->dev, spi->irq,
1863*4882a593Smuzhiyun "failed to get irq\n");
1864*4882a593Smuzhiyun
1865*4882a593Smuzhiyun ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
1866*4882a593Smuzhiyun spi->cfg->irq_handler_event,
1867*4882a593Smuzhiyun spi->cfg->irq_handler_thread,
1868*4882a593Smuzhiyun IRQF_ONESHOT, pdev->name, master);
1869*4882a593Smuzhiyun if (ret) {
1870*4882a593Smuzhiyun dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
1871*4882a593Smuzhiyun ret);
1872*4882a593Smuzhiyun return ret;
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun spi->clk = devm_clk_get(&pdev->dev, NULL);
1876*4882a593Smuzhiyun if (IS_ERR(spi->clk)) {
1877*4882a593Smuzhiyun ret = PTR_ERR(spi->clk);
1878*4882a593Smuzhiyun dev_err(&pdev->dev, "clk get failed: %d\n", ret);
1879*4882a593Smuzhiyun return ret;
1880*4882a593Smuzhiyun }
1881*4882a593Smuzhiyun
1882*4882a593Smuzhiyun ret = clk_prepare_enable(spi->clk);
1883*4882a593Smuzhiyun if (ret) {
1884*4882a593Smuzhiyun dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
1885*4882a593Smuzhiyun return ret;
1886*4882a593Smuzhiyun }
1887*4882a593Smuzhiyun spi->clk_rate = clk_get_rate(spi->clk);
1888*4882a593Smuzhiyun if (!spi->clk_rate) {
1889*4882a593Smuzhiyun dev_err(&pdev->dev, "clk rate = 0\n");
1890*4882a593Smuzhiyun ret = -EINVAL;
1891*4882a593Smuzhiyun goto err_clk_disable;
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun
1894*4882a593Smuzhiyun spi->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
1895*4882a593Smuzhiyun if (!IS_ERR(spi->rst)) {
1896*4882a593Smuzhiyun reset_control_assert(spi->rst);
1897*4882a593Smuzhiyun udelay(2);
1898*4882a593Smuzhiyun reset_control_deassert(spi->rst);
1899*4882a593Smuzhiyun }
1900*4882a593Smuzhiyun
1901*4882a593Smuzhiyun if (spi->cfg->has_fifo)
1902*4882a593Smuzhiyun spi->fifo_size = spi->cfg->get_fifo_size(spi);
1903*4882a593Smuzhiyun
1904*4882a593Smuzhiyun ret = spi->cfg->config(spi);
1905*4882a593Smuzhiyun if (ret) {
1906*4882a593Smuzhiyun dev_err(&pdev->dev, "controller configuration failed: %d\n",
1907*4882a593Smuzhiyun ret);
1908*4882a593Smuzhiyun goto err_clk_disable;
1909*4882a593Smuzhiyun }
1910*4882a593Smuzhiyun
1911*4882a593Smuzhiyun master->dev.of_node = pdev->dev.of_node;
1912*4882a593Smuzhiyun master->auto_runtime_pm = true;
1913*4882a593Smuzhiyun master->bus_num = pdev->id;
1914*4882a593Smuzhiyun master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST |
1915*4882a593Smuzhiyun SPI_3WIRE;
1916*4882a593Smuzhiyun master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi);
1917*4882a593Smuzhiyun master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min;
1918*4882a593Smuzhiyun master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max;
1919*4882a593Smuzhiyun master->use_gpio_descriptors = true;
1920*4882a593Smuzhiyun master->prepare_message = stm32_spi_prepare_msg;
1921*4882a593Smuzhiyun master->transfer_one = stm32_spi_transfer_one;
1922*4882a593Smuzhiyun master->unprepare_message = stm32_spi_unprepare_msg;
1923*4882a593Smuzhiyun master->flags = SPI_MASTER_MUST_TX;
1924*4882a593Smuzhiyun
1925*4882a593Smuzhiyun spi->dma_tx = dma_request_chan(spi->dev, "tx");
1926*4882a593Smuzhiyun if (IS_ERR(spi->dma_tx)) {
1927*4882a593Smuzhiyun ret = PTR_ERR(spi->dma_tx);
1928*4882a593Smuzhiyun spi->dma_tx = NULL;
1929*4882a593Smuzhiyun if (ret == -EPROBE_DEFER)
1930*4882a593Smuzhiyun goto err_clk_disable;
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyun dev_warn(&pdev->dev, "failed to request tx dma channel\n");
1933*4882a593Smuzhiyun } else {
1934*4882a593Smuzhiyun master->dma_tx = spi->dma_tx;
1935*4882a593Smuzhiyun }
1936*4882a593Smuzhiyun
1937*4882a593Smuzhiyun spi->dma_rx = dma_request_chan(spi->dev, "rx");
1938*4882a593Smuzhiyun if (IS_ERR(spi->dma_rx)) {
1939*4882a593Smuzhiyun ret = PTR_ERR(spi->dma_rx);
1940*4882a593Smuzhiyun spi->dma_rx = NULL;
1941*4882a593Smuzhiyun if (ret == -EPROBE_DEFER)
1942*4882a593Smuzhiyun goto err_dma_release;
1943*4882a593Smuzhiyun
1944*4882a593Smuzhiyun dev_warn(&pdev->dev, "failed to request rx dma channel\n");
1945*4882a593Smuzhiyun } else {
1946*4882a593Smuzhiyun master->dma_rx = spi->dma_rx;
1947*4882a593Smuzhiyun }
1948*4882a593Smuzhiyun
1949*4882a593Smuzhiyun if (spi->dma_tx || spi->dma_rx)
1950*4882a593Smuzhiyun master->can_dma = stm32_spi_can_dma;
1951*4882a593Smuzhiyun
1952*4882a593Smuzhiyun pm_runtime_set_active(&pdev->dev);
1953*4882a593Smuzhiyun pm_runtime_get_noresume(&pdev->dev);
1954*4882a593Smuzhiyun pm_runtime_enable(&pdev->dev);
1955*4882a593Smuzhiyun
1956*4882a593Smuzhiyun ret = spi_register_master(master);
1957*4882a593Smuzhiyun if (ret) {
1958*4882a593Smuzhiyun dev_err(&pdev->dev, "spi master registration failed: %d\n",
1959*4882a593Smuzhiyun ret);
1960*4882a593Smuzhiyun goto err_pm_disable;
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun if (!master->cs_gpiods) {
1964*4882a593Smuzhiyun dev_err(&pdev->dev, "no CS gpios available\n");
1965*4882a593Smuzhiyun ret = -EINVAL;
1966*4882a593Smuzhiyun goto err_pm_disable;
1967*4882a593Smuzhiyun }
1968*4882a593Smuzhiyun
1969*4882a593Smuzhiyun dev_info(&pdev->dev, "driver initialized\n");
1970*4882a593Smuzhiyun
1971*4882a593Smuzhiyun return 0;
1972*4882a593Smuzhiyun
1973*4882a593Smuzhiyun err_pm_disable:
1974*4882a593Smuzhiyun pm_runtime_disable(&pdev->dev);
1975*4882a593Smuzhiyun pm_runtime_put_noidle(&pdev->dev);
1976*4882a593Smuzhiyun pm_runtime_set_suspended(&pdev->dev);
1977*4882a593Smuzhiyun err_dma_release:
1978*4882a593Smuzhiyun if (spi->dma_tx)
1979*4882a593Smuzhiyun dma_release_channel(spi->dma_tx);
1980*4882a593Smuzhiyun if (spi->dma_rx)
1981*4882a593Smuzhiyun dma_release_channel(spi->dma_rx);
1982*4882a593Smuzhiyun err_clk_disable:
1983*4882a593Smuzhiyun clk_disable_unprepare(spi->clk);
1984*4882a593Smuzhiyun
1985*4882a593Smuzhiyun return ret;
1986*4882a593Smuzhiyun }
1987*4882a593Smuzhiyun
/*
 * stm32_spi_remove - unbind the STM32 SPI controller.
 *
 * Teardown order matters here and mirrors probe in reverse:
 * resume the device, unregister the controller (flushes in-flight
 * messages), quiesce the IP, tear down runtime PM, release the DMA
 * channels, gate the clock and finally park the pins.
 *
 * Return: always 0.
 */
static int stm32_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct stm32_spi *spi = spi_master_get_devdata(master);

	/* Make sure the device is powered before touching registers. */
	pm_runtime_get_sync(&pdev->dev);

	spi_unregister_master(master);
	/* Disable the IP only after no transfer can be queued anymore. */
	spi->cfg->disable(spi);

	pm_runtime_disable(&pdev->dev);
	/* Balance the get_sync() above now that runtime PM is disabled. */
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	if (master->dma_tx)
		dma_release_channel(master->dma_tx);
	if (master->dma_rx)
		dma_release_channel(master->dma_rx);

	clk_disable_unprepare(spi->clk);


	pinctrl_pm_select_sleep_state(&pdev->dev);

	return 0;
}
2013*4882a593Smuzhiyun
2014*4882a593Smuzhiyun #ifdef CONFIG_PM
stm32_spi_runtime_suspend(struct device * dev)2015*4882a593Smuzhiyun static int stm32_spi_runtime_suspend(struct device *dev)
2016*4882a593Smuzhiyun {
2017*4882a593Smuzhiyun struct spi_master *master = dev_get_drvdata(dev);
2018*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
2019*4882a593Smuzhiyun
2020*4882a593Smuzhiyun clk_disable_unprepare(spi->clk);
2021*4882a593Smuzhiyun
2022*4882a593Smuzhiyun return pinctrl_pm_select_sleep_state(dev);
2023*4882a593Smuzhiyun }
2024*4882a593Smuzhiyun
stm32_spi_runtime_resume(struct device * dev)2025*4882a593Smuzhiyun static int stm32_spi_runtime_resume(struct device *dev)
2026*4882a593Smuzhiyun {
2027*4882a593Smuzhiyun struct spi_master *master = dev_get_drvdata(dev);
2028*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
2029*4882a593Smuzhiyun int ret;
2030*4882a593Smuzhiyun
2031*4882a593Smuzhiyun ret = pinctrl_pm_select_default_state(dev);
2032*4882a593Smuzhiyun if (ret)
2033*4882a593Smuzhiyun return ret;
2034*4882a593Smuzhiyun
2035*4882a593Smuzhiyun return clk_prepare_enable(spi->clk);
2036*4882a593Smuzhiyun }
2037*4882a593Smuzhiyun #endif
2038*4882a593Smuzhiyun
2039*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/*
 * stm32_spi_suspend - system sleep suspend hook.
 *
 * Quiesces the SPI core queue first, then forces a runtime suspend so
 * clock and pinctrl are handled by stm32_spi_runtime_suspend().
 */
static int stm32_spi_suspend(struct device *dev)
{
	int ret = spi_master_suspend(dev_get_drvdata(dev));

	return ret ? ret : pm_runtime_force_suspend(dev);
}
2051*4882a593Smuzhiyun
stm32_spi_resume(struct device * dev)2052*4882a593Smuzhiyun static int stm32_spi_resume(struct device *dev)
2053*4882a593Smuzhiyun {
2054*4882a593Smuzhiyun struct spi_master *master = dev_get_drvdata(dev);
2055*4882a593Smuzhiyun struct stm32_spi *spi = spi_master_get_devdata(master);
2056*4882a593Smuzhiyun int ret;
2057*4882a593Smuzhiyun
2058*4882a593Smuzhiyun ret = pm_runtime_force_resume(dev);
2059*4882a593Smuzhiyun if (ret)
2060*4882a593Smuzhiyun return ret;
2061*4882a593Smuzhiyun
2062*4882a593Smuzhiyun ret = spi_master_resume(master);
2063*4882a593Smuzhiyun if (ret) {
2064*4882a593Smuzhiyun clk_disable_unprepare(spi->clk);
2065*4882a593Smuzhiyun return ret;
2066*4882a593Smuzhiyun }
2067*4882a593Smuzhiyun
2068*4882a593Smuzhiyun ret = pm_runtime_get_sync(dev);
2069*4882a593Smuzhiyun if (ret < 0) {
2070*4882a593Smuzhiyun pm_runtime_put_noidle(dev);
2071*4882a593Smuzhiyun dev_err(dev, "Unable to power device:%d\n", ret);
2072*4882a593Smuzhiyun return ret;
2073*4882a593Smuzhiyun }
2074*4882a593Smuzhiyun
2075*4882a593Smuzhiyun spi->cfg->config(spi);
2076*4882a593Smuzhiyun
2077*4882a593Smuzhiyun pm_runtime_mark_last_busy(dev);
2078*4882a593Smuzhiyun pm_runtime_put_autosuspend(dev);
2079*4882a593Smuzhiyun
2080*4882a593Smuzhiyun return 0;
2081*4882a593Smuzhiyun }
2082*4882a593Smuzhiyun #endif
2083*4882a593Smuzhiyun
/*
 * PM callbacks: system sleep goes through the spi core queue handlers,
 * runtime PM only gates/ungates clock and pinctrl states.
 */
static const struct dev_pm_ops stm32_spi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_spi_suspend, stm32_spi_resume)
	SET_RUNTIME_PM_OPS(stm32_spi_runtime_suspend,
			   stm32_spi_runtime_resume, NULL)
};
2089*4882a593Smuzhiyun
/* Platform driver glue: matched against the DT compatibles above. */
static struct platform_driver stm32_spi_driver = {
	.probe = stm32_spi_probe,
	.remove = stm32_spi_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &stm32_spi_pm_ops,
		.of_match_table = stm32_spi_of_match,
	},
};
2099*4882a593Smuzhiyun
/* Standard module registration and metadata. */
module_platform_driver(stm32_spi_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 SPI Controller driver");
MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>");
MODULE_LICENSE("GPL v2");
2106