xref: /OK3568_Linux_fs/kernel/drivers/spi/spi-mpc52xx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * MPC52xx SPI bus driver.
 *
 * Copyright (C) 2008 Secret Lab Technologies Ltd.
 *
 * This is the driver for the MPC5200's dedicated SPI controller.
 *
 * Note: this driver does not support the MPC5200 PSC in SPI mode.  For
 * that driver see drivers/spi/mpc52xx_psc_spi.c
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <asm/time.h>
#include <asm/mpc52xx.h>

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
MODULE_LICENSE("GPL");

/* Register offsets */
#define SPI_CTRL1	0x00
#define SPI_CTRL1_SPIE		(1 << 7)
#define SPI_CTRL1_SPE		(1 << 6)
#define SPI_CTRL1_MSTR		(1 << 4)
#define SPI_CTRL1_CPOL		(1 << 3)
#define SPI_CTRL1_CPHA		(1 << 2)
#define SPI_CTRL1_SSOE		(1 << 1)
#define SPI_CTRL1_LSBFE		(1 << 0)

#define SPI_CTRL2	0x01
#define SPI_BRR		0x04

#define SPI_STATUS	0x05
#define SPI_STATUS_SPIF		(1 << 7)
#define SPI_STATUS_WCOL		(1 << 6)
#define SPI_STATUS_MODF		(1 << 4)

#define SPI_DATA	0x09
#define SPI_PORTDATA	0x0d
#define SPI_DATADIR	0x10

/* FSM state return values */
#define FSM_STOP	0	/* Nothing more for the state machine to */
				/* do.  If something interesting happens */
				/* then an IRQ will be received */
#define FSM_POLL	1	/* need to poll for completion, an IRQ is */
				/* not expected */
#define FSM_CONTINUE	2	/* Keep iterating the state machine */
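
/*
 * Rough flow of the state machine built from the handlers below:
 *
 *   idle --> transfer --> wait --> (next transfer, or back to idle)
 *
 * idle picks the next queued message and programs the controller,
 * transfer clocks one byte per iteration, and wait handles the
 * inter-transfer delay before starting the next transfer or completing
 * the message.  Each handler returns one of the FSM_* codes above so
 * that mpc52xx_spi_fsm_process() knows whether to keep iterating, stop
 * and wait for an interrupt, or fall back to polling from the workqueue.
 */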

/* Driver internal data */
struct mpc52xx_spi {
	struct spi_master *master;
	void __iomem *regs;
	int irq0;	/* MODF irq */
	int irq1;	/* SPIF irq */
	unsigned int ipb_freq;

	/* Statistics; not used now, but will be reintroduced for debugfs */
	int msg_count;
	int wcol_count;
	int wcol_ticks;
	u32 wcol_tx_timestamp;
	int modf_count;
	int byte_count;

	struct list_head queue;		/* queue of pending messages */
	spinlock_t lock;
	struct work_struct work;

	/* Details of current transfer (length, and buffer pointers) */
	struct spi_message *message;	/* current message */
	struct spi_transfer *transfer;	/* current transfer */
	int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
	int len;
	int timestamp;
	u8 *rx_buf;
	const u8 *tx_buf;
	int cs_change;
	int gpio_cs_count;
	unsigned int *gpio_cs;
};

/*
 * Chip select control.
 *
 * A @value of '1' asserts the (active low) chip select for the device
 * owning the current message; '0' deasserts it.  Either a dedicated GPIO
 * or the controller's own /SS line (PORTDATA bit 3) is driven.
 */
static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
{
	int cs;

	if (ms->gpio_cs_count > 0) {
		cs = ms->message->spi->chip_select;
		gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
	} else
		out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
}

/*
 * Start a new transfer.  This is called both by the idle state
 * for the first transfer in a message, and by the wait state when the
 * previous transfer in a message is complete.
 */
static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
{
	ms->rx_buf = ms->transfer->rx_buf;
	ms->tx_buf = ms->transfer->tx_buf;
	ms->len = ms->transfer->len;

	/* Activate the chip select */
	if (ms->cs_change)
		mpc52xx_spi_chipsel(ms, 1);
	ms->cs_change = ms->transfer->cs_change;

	/* Write out the first byte */
	ms->wcol_tx_timestamp = get_tbl();
	if (ms->tx_buf)
		out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
	else
		out_8(ms->regs + SPI_DATA, 0);
}

/* Forward declaration of state handlers */
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
					 u8 status, u8 data);
static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
				     u8 status, u8 data);

/*
 * IDLE state
 *
 * No transfers are in progress; if another transfer is pending then retrieve
 * it and kick it off.  Otherwise, stop processing the state machine
 */
static int
mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
	struct spi_device *spi;
	int spr, sppr;
	u8 ctrl1;

	if (status && (irq != NO_IRQ))
		dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
			status);

	/* Check if there is another transfer waiting. */
	if (list_empty(&ms->queue))
		return FSM_STOP;

	/* get the head of the queue */
	ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
	list_del_init(&ms->message->queue);

	/* Setup the controller parameters */
	ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
	spi = ms->message->spi;
	if (spi->mode & SPI_CPHA)
		ctrl1 |= SPI_CTRL1_CPHA;
	if (spi->mode & SPI_CPOL)
		ctrl1 |= SPI_CTRL1_CPOL;
	if (spi->mode & SPI_LSB_FIRST)
		ctrl1 |= SPI_CTRL1_LSBFE;
	out_8(ms->regs + SPI_CTRL1, ctrl1);

	/* Setup the controller speed */
	/* minimum divider is '2'.  Also, add '1' to force rounding the
	 * divider up. */
	sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
	spr = 0;
	if (sppr < 1)
		sppr = 1;
	while (((sppr - 1) & ~0x7) != 0) {
		sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
		spr++;
	}
	sppr--;		/* sppr quantity in register is offset by 1 */
	if (spr > 7) {
		/* Don't overrun limits of SPI baudrate register */
		spr = 7;
		sppr = 7;
	}
	out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
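
	/*
	 * Worked example (illustrative values only): with a 132 MHz IPB
	 * clock and spi->max_speed_hz = 1 MHz, the code above starts from
	 * sppr = (132 + 1) >> 1 = 66 and halves it four times
	 * (66 -> 33 -> 17 -> 9 -> 5, spr = 4), then subtracts one, so
	 * SPI_BRR is written with sppr = 4, spr = 4 (0x44).  Assuming the
	 * usual (SPPR + 1) * 2^(SPR + 1) divisor of this SPI block, that
	 * yields 132 MHz / (5 * 32) ~= 825 kHz, the highest rate that does
	 * not exceed the requested 1 MHz.
	 */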

	ms->cs_change = 1;
	ms->transfer = container_of(ms->message->transfers.next,
				    struct spi_transfer, transfer_list);

	mpc52xx_spi_start_transfer(ms);
	ms->state = mpc52xx_spi_fsmstate_transfer;

	return FSM_CONTINUE;
}

/*
 * TRANSFER state
 *
 * In the middle of a transfer.  If the SPI core has completed processing
 * a byte, then read out the received data and write out the next byte
 * (unless this transfer is finished; in which case go on to the wait
 * state)
 */
static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
					 u8 status, u8 data)
{
	/* No byte has completed yet; if interrupts are in use just wait
	 * for the SPIF interrupt, otherwise ask to be polled again. */
	if (!status)
		return ms->irq0 ? FSM_STOP : FSM_POLL;

	if (status & SPI_STATUS_WCOL) {
		/* The SPI controller is stoopid.  At slower speeds, it may
		 * raise the SPIF flag before the state machine is actually
		 * finished, which causes a collision (internal to the state
		 * machine only).  The manual recommends inserting a delay
		 * between receiving the interrupt and sending the next byte,
		 * but it can also be worked around simply by retrying the
		 * transfer which is what we do here. */
		ms->wcol_count++;
		ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
		ms->wcol_tx_timestamp = get_tbl();
		data = 0;
		if (ms->tx_buf)
			data = *(ms->tx_buf - 1);
		out_8(ms->regs + SPI_DATA, data); /* try again */
		return FSM_CONTINUE;
	} else if (status & SPI_STATUS_MODF) {
		ms->modf_count++;
		dev_err(&ms->master->dev, "mode fault\n");
		mpc52xx_spi_chipsel(ms, 0);
		ms->message->status = -EIO;
		if (ms->message->complete)
			ms->message->complete(ms->message->context);
		ms->state = mpc52xx_spi_fsmstate_idle;
		return FSM_CONTINUE;
	}

	/* Read data out of the spi device */
	ms->byte_count++;
	if (ms->rx_buf)
		*ms->rx_buf++ = data;

	/* Is the transfer complete? */
	ms->len--;
	if (ms->len == 0) {
		ms->timestamp = get_tbl();
		ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
		ms->state = mpc52xx_spi_fsmstate_wait;
		return FSM_CONTINUE;
	}

	/* Write out the next byte */
	ms->wcol_tx_timestamp = get_tbl();
	if (ms->tx_buf)
		out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
	else
		out_8(ms->regs + SPI_DATA, 0);

	return FSM_CONTINUE;
}

/*
 * WAIT state
 *
 * A transfer has completed; need to wait for the delay period to complete
 * before starting the next transfer
 */
static int
mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
{
	if (status && irq)
		dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
			status);

	if (((int)get_tbl()) - ms->timestamp < 0)
		return FSM_POLL;

	ms->message->actual_length += ms->transfer->len;

	/* Check if there is another transfer in this message.  If there
	 * isn't one then deactivate CS, notify the sender, and drop back
	 * to idle to start the next message. */
	if (ms->transfer->transfer_list.next == &ms->message->transfers) {
		ms->msg_count++;
		mpc52xx_spi_chipsel(ms, 0);
		ms->message->status = 0;
		if (ms->message->complete)
			ms->message->complete(ms->message->context);
		ms->state = mpc52xx_spi_fsmstate_idle;
		return FSM_CONTINUE;
	}

	/* There is another transfer; kick it off */

	if (ms->cs_change)
		mpc52xx_spi_chipsel(ms, 0);

	ms->transfer = container_of(ms->transfer->transfer_list.next,
				    struct spi_transfer, transfer_list);
	mpc52xx_spi_start_transfer(ms);
	ms->state = mpc52xx_spi_fsmstate_transfer;
	return FSM_CONTINUE;
}

/**
 * mpc52xx_spi_fsm_process - Finite State Machine iteration function
 * @irq: irq number that triggered the FSM or 0 for polling
 * @ms: pointer to mpc52xx_spi driver data
 */
static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
{
	int rc = FSM_CONTINUE;
	u8 status, data;

	while (rc == FSM_CONTINUE) {
		/* Interrupt cleared by read of STATUS followed by
		 * read of DATA registers */
		status = in_8(ms->regs + SPI_STATUS);
		data = in_8(ms->regs + SPI_DATA);
		rc = ms->state(irq, ms, status, data);
	}

	if (rc == FSM_POLL)
		schedule_work(&ms->work);
}

/**
 * mpc52xx_spi_irq - IRQ handler
 * @irq: irq number (MODF or SPIF interrupt)
 * @_ms: pointer to mpc52xx_spi driver data
 */
static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
{
	struct mpc52xx_spi *ms = _ms;
	spin_lock(&ms->lock);
	mpc52xx_spi_fsm_process(irq, ms);
	spin_unlock(&ms->lock);
	return IRQ_HANDLED;
}

/**
 * mpc52xx_spi_wq - Workqueue function for polling the state machine
 * @work: pointer to the work_struct embedded in mpc52xx_spi driver data
 */
static void mpc52xx_spi_wq(struct work_struct *work)
{
	struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	mpc52xx_spi_fsm_process(0, ms);
	spin_unlock_irqrestore(&ms->lock, flags);
}

/*
 * spi_master ops
 */

static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
	unsigned long flags;

	m->actual_length = 0;
	m->status = -EINPROGRESS;

	spin_lock_irqsave(&ms->lock, flags);
	list_add_tail(&m->queue, &ms->queue);
	spin_unlock_irqrestore(&ms->lock, flags);
	schedule_work(&ms->work);

	return 0;
}
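
/*
 * Note: ->transfer above only queues the message; the bytes are actually
 * clocked out later by the state machine, driven either from the
 * interrupt handler or from the polling workqueue scheduled here.
 */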

/*
 * OF Platform Bus Binding
 */
static int mpc52xx_spi_probe(struct platform_device *op)
{
	struct spi_master *master;
	struct mpc52xx_spi *ms;
	void __iomem *regs;
	u8 ctrl1;
	int rc, i = 0;
	int gpio_cs;

	/* MMIO registers */
	dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
	regs = of_iomap(op->dev.of_node, 0);
	if (!regs)
		return -ENODEV;

	/* initialize the device */
	ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
	out_8(regs + SPI_CTRL1, ctrl1);
	out_8(regs + SPI_CTRL2, 0x0);
	out_8(regs + SPI_DATADIR, 0xe);	/* Set output pins */
	out_8(regs + SPI_PORTDATA, 0x8);	/* Deassert /SS signal */

	/* Clear the status register and re-read it to check for a MODF
	 * failure.  This driver cannot currently handle multiple masters
	 * on the SPI bus.  This fault will also occur if the SPI signals
	 * are not connected to any pins (port_config setting) */
	in_8(regs + SPI_STATUS);
	out_8(regs + SPI_CTRL1, ctrl1);

	in_8(regs + SPI_DATA);
	if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
		dev_err(&op->dev, "mode fault; is port_config correct?\n");
		rc = -EIO;
		goto err_init;
	}

	dev_dbg(&op->dev, "allocating spi_master struct\n");
	master = spi_alloc_master(&op->dev, sizeof *ms);
	if (!master) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	master->transfer = mpc52xx_spi_transfer;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->dev.of_node = op->dev.of_node;

	platform_set_drvdata(op, master);

	ms = spi_master_get_devdata(master);
	ms->master = master;
	ms->regs = regs;
	ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
	ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
	ms->state = mpc52xx_spi_fsmstate_idle;
	ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
	ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
	if (ms->gpio_cs_count > 0) {
		master->num_chipselect = ms->gpio_cs_count;
		ms->gpio_cs = kmalloc_array(ms->gpio_cs_count,
					    sizeof(*ms->gpio_cs),
					    GFP_KERNEL);
		if (!ms->gpio_cs) {
			rc = -ENOMEM;
			goto err_alloc_gpio;
		}

		for (i = 0; i < ms->gpio_cs_count; i++) {
			gpio_cs = of_get_gpio(op->dev.of_node, i);
			if (!gpio_is_valid(gpio_cs)) {
				dev_err(&op->dev,
					"could not parse the gpio field in oftree\n");
				rc = -ENODEV;
				goto err_gpio;
			}

			rc = gpio_request(gpio_cs, dev_name(&op->dev));
			if (rc) {
				dev_err(&op->dev,
					"can't request spi cs gpio #%d on gpio line %d\n",
					i, gpio_cs);
				goto err_gpio;
			}

			gpio_direction_output(gpio_cs, 1);
			ms->gpio_cs[i] = gpio_cs;
		}
	}

	spin_lock_init(&ms->lock);
	INIT_LIST_HEAD(&ms->queue);
	INIT_WORK(&ms->work, mpc52xx_spi_wq);

	/* Decide if interrupts can be used */
	if (ms->irq0 && ms->irq1) {
		rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
				  "mpc5200-spi-modf", ms);
		rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
				  "mpc5200-spi-spif", ms);
		if (rc) {
			free_irq(ms->irq0, ms);
			free_irq(ms->irq1, ms);
			ms->irq0 = ms->irq1 = 0;
		}
	} else {
		/* operate in polled mode */
		ms->irq0 = ms->irq1 = 0;
	}

	if (!ms->irq0)
		dev_info(&op->dev, "using polled mode\n");

	dev_dbg(&op->dev, "registering spi_master struct\n");
	rc = spi_register_master(master);
	if (rc)
		goto err_register;

	dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");

	return rc;

 err_register:
	dev_err(&ms->master->dev, "initialization failed\n");
 err_gpio:
	while (i-- > 0)
		gpio_free(ms->gpio_cs[i]);

	kfree(ms->gpio_cs);
 err_alloc_gpio:
	spi_master_put(master);
 err_alloc:
 err_init:
	iounmap(regs);
	return rc;
}

static int mpc52xx_spi_remove(struct platform_device *op)
{
	struct spi_master *master = spi_master_get(platform_get_drvdata(op));
	struct mpc52xx_spi *ms = spi_master_get_devdata(master);
	int i;

	free_irq(ms->irq0, ms);
	free_irq(ms->irq1, ms);

	for (i = 0; i < ms->gpio_cs_count; i++)
		gpio_free(ms->gpio_cs[i]);

	kfree(ms->gpio_cs);
	spi_unregister_master(master);
	iounmap(ms->regs);
	spi_master_put(master);

	return 0;
}

static const struct of_device_id mpc52xx_spi_match[] = {
	{ .compatible = "fsl,mpc5200-spi", },
	{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
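
/*
 * Illustration only: a device tree node that binds against this driver
 * needs the "fsl,mpc5200-spi" compatible string, a register window, two
 * interrupts (index 0 = MODF, index 1 = SPIF, matching the
 * irq_of_parse_and_map() calls in probe) and, optionally, one GPIO per
 * chip select in a "gpios" property.  The reg/interrupt/gpio specifiers
 * below are hypothetical placeholders; real values are SoC and board
 * specific:
 *
 *	spi@f00 {
 *		compatible = "fsl,mpc5200-spi";
 *		reg = <0xf00 0x20>;
 *		interrupts = <2 13 0  2 14 0>;
 *		gpios = <&gpio_simple 28 0>;
 *	};
 */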

static struct platform_driver mpc52xx_spi_of_driver = {
	.driver = {
		.name = "mpc52xx-spi",
		.of_match_table = mpc52xx_spi_match,
	},
	.probe = mpc52xx_spi_probe,
	.remove = mpc52xx_spi_remove,
};
module_platform_driver(mpc52xx_spi_of_driver);
551