// SPDX-License-Identifier: GPL-2.0-only
/*
 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
 *
 * Copyright (C) 2009 Secret Lab Technologies Ltd.
 *
 * Todo:
 * - Add support for multiple requests to be queued.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/gen_bd.h>

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
MODULE_LICENSE("GPL");

#define LPBFIFO_REG_PACKET_SIZE		(0x00)
#define LPBFIFO_REG_START_ADDRESS	(0x04)
#define LPBFIFO_REG_CONTROL		(0x08)
#define LPBFIFO_REG_ENABLE		(0x0C)
#define LPBFIFO_REG_BYTES_DONE_STATUS	(0x14)
#define LPBFIFO_REG_FIFO_DATA		(0x40)
#define LPBFIFO_REG_FIFO_STATUS		(0x44)
#define LPBFIFO_REG_FIFO_CONTROL	(0x48)
#define LPBFIFO_REG_FIFO_ALARM		(0x4C)

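/*
 * Bits in LPBFIFO_REG_ENABLE, as inferred from how this driver uses the
 * register (an inference from the code below, not a statement from the
 * reference manual): bit 0 enables the master, bit 8 unmasks the
 * completion irq, bit 9 unmasks the error irq, and the bits in
 * 0x01010000 are the reset bits that get set and cleared around each
 * transfer.
 */
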
struct mpc52xx_lpbfifo {
	struct device *dev;
	phys_addr_t regs_phys;
	void __iomem *regs;
	int irq;
	spinlock_t lock;

	struct bcom_task *bcom_tx_task;
	struct bcom_task *bcom_rx_task;
	struct bcom_task *bcom_cur_task;

	/* Current state data */
	struct mpc52xx_lpbfifo_request *req;
	int dma_irqs_enabled;
};

/* The MPC5200 has only one FIFO, so only one instance structure is needed */
static struct mpc52xx_lpbfifo lpbfifo;

/**
 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
 * @req: transfer request to (re)start
 */
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
	size_t transfer_size = req->size - req->pos;
	struct bcom_bd *bd;
	void __iomem *reg;
	u32 *data;
	int i;
	int bit_fields;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	/* Set and clear the reset bits; the User Manual recommends this */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* set master enable bit */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
	if (!dma) {
		/* While the FIFO can be set up for transfer sizes as large
		 * as 16M-1, the FIFO itself is only 512 bytes deep and it
		 * does not generate interrupts for FIFO full events (only
		 * transfer complete will raise an IRQ). Therefore when not
		 * using Bestcomm to drive the FIFO it needs to either be
		 * polled, or transfers need to be constrained to the size
		 * of the FIFO.
		 *
		 * This driver restricts the transfer size to the FIFO depth.
		 */
		if (transfer_size > 512)
			transfer_size = 512;

		/* Load the FIFO with data */
		if (write) {
			reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
			data = req->data + req->pos;
			for (i = 0; i < transfer_size; i += 4)
				out_be32(reg, *data++);
		}

		/* Unmask both error and completion irqs */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
	} else {
		/* Choose the correct direction.
		 *
		 * Configure the watermarks so DMA will always complete
		 * correctly. It may be worth experimenting with the ALARM
		 * value to see if there is a performance impact. However,
		 * if it is wrong there is a risk of DMA not transferring
		 * the last chunk of data.
		 */
		if (write) {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
		} else {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;

			if (poll_dma) {
				if (lpbfifo.dma_irqs_enabled) {
					disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 0;
				}
			} else {
				if (!lpbfifo.dma_irqs_enabled) {
					enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 1;
				}
			}
		}
		bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
		bd->status = transfer_size;
		if (!write) {
			/*
			 * In the DMA read case, the DMA doesn't complete,
			 * possibly due to incorrect watermarks in the ALARM
			 * and CONTROL regs. For now instead of trying to
			 * determine the right watermarks that will make this
			 * work, just increase the number of bytes the FIFO is
			 * expecting.
			 *
			 * When submitting another operation, the FIFO will get
			 * reset, so the condition of the FIFO waiting for a
			 * non-existent 4 bytes will get cleared.
			 */
			transfer_size += 4; /* BLECH! */
		}
		bd->data[0] = req->data_phys + req->pos;
		bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);

		/* error irq & master enabled bit */
		bit_fields = 0x00000201;

		/* Unmask irqs */
		if (write && (!poll_dma))
			bit_fields |= 0x00000100; /* completion irq too */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
	}

	/* Set transfer size, width, chip select and READ mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
		 req->offset + req->pos);
	out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);

	bit_fields = req->cs << 24 | 0x000008;
	if (!write)
		bit_fields |= 0x010000; /* read mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);

	/* Kick it off */
	if (!lpbfifo.req->defer_xfer_start)
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	if (dma)
		bcom_enable(lpbfifo.bcom_cur_task);
}

/**
 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
 * @irq: IRQ number (0 when called from the polled path)
 * @dev_id: device cookie registered with request_irq()
 *
 * On transmit, the dma completion irq triggers before the fifo completion
 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
 * task completion irq because everything is not really done until the LPB FIFO
 * completion irq triggers.
 *
 * In other words:
 * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
 * transmit, the fifo completion irq is the "Fat Lady". The opera (or in this
 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
 *
 * Reasons for entering this routine:
 * 1) PIO mode rx and tx completion irq
 * 2) DMA interrupt mode tx completion irq
 * 3) DMA polled mode tx
 *
 * Exit conditions:
 * 1) Transfer aborted
 * 2) FIFO complete without DMA; more data to do
 * 3) FIFO complete without DMA; all data transferred
 * 4) FIFO complete using DMA
 *
 * Condition 1 can occur regardless of whether or not DMA is used.
 * It requires executing the callback to report the error and exiting
 * immediately.
 *
 * Condition 2 requires programming the FIFO with the next block of data.
 *
 * Condition 3 requires executing the callback to report completion.
 *
 * Condition 4 means the same as 3, except that we also retrieve the bcom
 * buffer so DMA doesn't get clogged up.
 *
 * To make things trickier, the spinlock must be dropped before
 * executing the callback, otherwise we could end up with a deadlock
 * or nested spinlock condition. The out path is non-trivial, so
 * extra fiddling is done to make sure all paths lead to the same
 * outbound code.
 */
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	void __iomem *reg;
	u32 *data;
	int count, i;
	int do_callback = 0;
	u32 ts;
	unsigned long flags;
	int dma, write, poll_dma;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = get_tbl();

	req = lpbfifo.req;
	if (!req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ\n");
		return IRQ_HANDLED;
	}

	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	if (dma && !write) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
		return IRQ_HANDLED;
	}

	if ((status & 0x01) == 0)
		goto out;

	/* check abort bit */
	if (status & 0x10) {
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		do_callback = 1;
		goto out;
	}

	/* Read result from hardware */
	count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	count &= 0x00ffffff;

	if (!dma && !write) {
		/* copy the data out of the FIFO */
		reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
		data = req->data + req->pos;
		for (i = 0; i < count; i += 4)
			*data++ = in_be32(reg);
	}

	/* Update transfer position and count */
	req->pos += count;

	/* Decide what to do next */
	if (req->size - req->pos)
		mpc52xx_lpbfifo_kick(req); /* more work to do */
	else
		do_callback = 1;

 out:
	/* Clear the IRQ */
	out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);

	if (dma && (status & 0x11)) {
		/*
		 * Count the DMA as complete only when the FIFO completion
		 * status or abort bits are set.
		 *
		 * (status & 0x01) should always be the case except sometimes
		 * when using polled DMA.
		 *
		 * (status & 0x10) {transfer aborted}: This case needs more
		 * testing.
		 */
		bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
	}
	req->last_byte = ((u8 *)req->data)[req->size - 1];

	/* When the do_callback flag is set, the transfer is finished, so
	 * mark the FIFO as idle */
	if (do_callback)
		lpbfifo.req = NULL;

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	req->irq_ticks += get_tbl() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Spinlock is released; it is now safe to call the callback */
	if (do_callback && req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
 * @irq: IRQ number (0 when called from the polled path)
 * @dev_id: device cookie registered with request_irq()
 *
 * Only used when receiving data.
 */
static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	unsigned long flags;
	u32 status;
	u32 ts;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = get_tbl();

	req = lpbfifo.req;
	if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return IRQ_HANDLED;
	}

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		req->buffer_not_done_cnt++;
		if ((req->buffer_not_done_cnt % 1000) == 0)
			pr_err("transfer stalled\n");

		return IRQ_HANDLED;
	}

	bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);

	req->last_byte = ((u8 *)req->data)[req->size - 1];

	req->pos = status & 0x00ffffff;

	/* Mark the FIFO as idle */
	lpbfifo.req = NULL;

	/* Release the lock before calling out to the callback. */
	req->irq_ticks += get_tbl() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_poll - Poll for DMA completion
 */
void mpc52xx_lpbfifo_poll(void)
{
	struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;

	/*
	 * For more information, see comments on the "Fat Lady"
	 */
	if (dma && write)
		mpc52xx_lpbfifo_irq(0, NULL);
	else
		mpc52xx_lpbfifo_bcom_irq(0, NULL);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);

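/*
 * Polled-completion sketch (illustrative only; "done" and "poll_cb" are
 * hypothetical). Submitting with MPC52XX_LPBFIFO_FLAG_POLL_DMA keeps the
 * Bestcomm rx irq disabled, so the caller must drive completion by
 * calling mpc52xx_lpbfifo_poll() until the callback fires:
 *
 *	static volatile int done;
 *	static void poll_cb(struct mpc52xx_lpbfifo_request *r) { done = 1; }
 *	...
 *	req.flags |= MPC52XX_LPBFIFO_FLAG_POLL_DMA;
 *	req.callback = poll_cb;
 *	done = 0;
 *	if (mpc52xx_lpbfifo_submit(&req) == 0)
 *		while (!done)
 *			mpc52xx_lpbfifo_poll();
 */
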
/**
 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
 * @req: Pointer to request structure
 *
 * Returns 0 on success, -ENODEV if the FIFO is not available, or
 * -EBUSY if another transfer is already in progress.
 */
int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* If the req pointer is already set, then a transfer is in progress */
	if (lpbfifo.req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/* Setup the transfer */
	lpbfifo.req = req;
	req->irq_count = 0;
	req->irq_ticks = 0;
	req->buffer_not_done_cnt = 0;
	req->pos = 0;

	mpc52xx_lpbfifo_kick(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
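
/*
 * Minimal usage sketch (illustrative only; "buf", "buf_phys", "count",
 * and "my_done_cb" are hypothetical names, and a real caller must
 * supply a DMA-able buffer so that data and data_phys describe the
 * same memory):
 *
 *	static struct mpc52xx_lpbfifo_request req;
 *
 *	req.cs = 1;                         (LocalPlus chip select)
 *	req.offset = 0;                     (address within that chip select)
 *	req.data = buf;                     (kernel virtual address)
 *	req.data_phys = buf_phys;           (matching physical address)
 *	req.size = count;                   (bytes to transfer)
 *	req.flags = MPC52XX_LPBFIFO_FLAG_WRITE;
 *	req.callback = my_done_cb;          (runs when the transfer finishes)
 *	if (mpc52xx_lpbfifo_submit(&req))
 *		... FIFO is missing or busy ...
 */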

/**
 * mpc52xx_lpbfifo_start_xfer - Start a previously submitted transfer
 * @req: Pointer to request structure
 *
 * Starts a transfer that was submitted with defer_xfer_start set.
 * Returns 0 on success, -ENODEV if the FIFO is not available, or
 * -EBUSY if a different transfer that was started on submit is in
 * progress.
 */
int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/*
	 * If the req pointer is already set and a transfer was
	 * started on submit, then this transfer is in progress
	 */
	if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/*
	 * If the req was previously submitted but not
	 * started, start it now
	 */
	if (lpbfifo.req && lpbfifo.req == req &&
	    lpbfifo.req->defer_xfer_start) {
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	}

	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
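
/*
 * Deferred-start sketch (illustrative only): with defer_xfer_start set,
 * mpc52xx_lpbfifo_submit() programs the FIFO but does not trigger it;
 * the caller kicks off the transfer later, e.g. after some
 * caller-specific device setup:
 *
 *	req.defer_xfer_start = 1;
 *	rc = mpc52xx_lpbfifo_submit(&req);
 *	...
 *	if (!rc)
 *		rc = mpc52xx_lpbfifo_start_xfer(&req);
 */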

/**
 * mpc52xx_lpbfifo_abort - Abort a submitted LPB FIFO transfer request
 * @req: Pointer to request structure
 */
void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	if (lpbfifo.req == req) {
		/* Put it into reset and clear the state */
		bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
		bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		lpbfifo.req = NULL;
	}
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);

static int mpc52xx_lpbfifo_probe(struct platform_device *op)
{
	struct resource res;
	int rc = -ENOMEM;

	if (lpbfifo.dev != NULL)
		return -ENOSPC;

	lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
	if (!lpbfifo.irq)
		return -ENODEV;

	if (of_address_to_resource(op->dev.of_node, 0, &res))
		return -ENODEV;
	lpbfifo.regs_phys = res.start;
	lpbfifo.regs = of_iomap(op->dev.of_node, 0);
	if (!lpbfifo.regs)
		return -ENOMEM;

	spin_lock_init(&lpbfifo.lock);

	/* Put FIFO into reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Register the interrupt handler */
	rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
			 "mpc52xx-lpbfifo", &lpbfifo);
	if (rc)
		goto err_irq;

	/* Request the Bestcomm receive (fifo --> memory) task and IRQ */
	lpbfifo.bcom_rx_task =
		bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
				    16*1024*1024);
	if (!lpbfifo.bcom_rx_task)
		goto err_bcom_rx;

	rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
			 mpc52xx_lpbfifo_bcom_irq, 0,
			 "mpc52xx-lpbfifo-rx", &lpbfifo);
	if (rc)
		goto err_bcom_rx_irq;

	lpbfifo.dma_irqs_enabled = 1;

	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
	lpbfifo.bcom_tx_task =
		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
	if (!lpbfifo.bcom_tx_task)
		goto err_bcom_tx;

	lpbfifo.dev = &op->dev;
	return 0;

 err_bcom_tx:
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
 err_bcom_rx_irq:
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
 err_bcom_rx:
	free_irq(lpbfifo.irq, &lpbfifo); /* don't leak the FIFO irq */
 err_irq:
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;

	dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
	return -ENODEV;
}

static int mpc52xx_lpbfifo_remove(struct platform_device *op)
{
	if (lpbfifo.dev != &op->dev)
		return 0;

	/* Put FIFO in reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Release the bestcomm transmit task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
	bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);

	/* Release the bestcomm receive task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);

	free_irq(lpbfifo.irq, &lpbfifo);
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;
	lpbfifo.dev = NULL;

	return 0;
}

static const struct of_device_id mpc52xx_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc5200-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match);
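
/*
 * Sketch of a device tree node this driver binds against. Only the
 * compatible string comes from the match table above; the node name,
 * reg, and interrupts values are illustrative assumptions, not taken
 * from a real board file:
 *
 *	sclpc@3c00 {
 *		compatible = "fsl,mpc5200-lpbfifo";
 *		reg = <0x3c00 0x60>;
 *		interrupts = <2 23 0>;
 *	};
 */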

static struct platform_driver mpc52xx_lpbfifo_driver = {
	.driver = {
		.name = "mpc52xx-lpbfifo",
		.of_match_table = mpc52xx_lpbfifo_match,
	},
	.probe = mpc52xx_lpbfifo_probe,
	.remove = mpc52xx_lpbfifo_remove,
};
module_platform_driver(mpc52xx_lpbfifo_driver);