xref: /OK3568_Linux_fs/kernel/arch/powerpc/platforms/512x/mpc512x_lpbfifo.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * The driver for Freescale MPC512x LocalPlus Bus FIFO
4*4882a593Smuzhiyun  * (called SCLPC in the Reference Manual).
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Copyright (C) 2013-2015 Alexander Popov <alex.popov@linux.com>.
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/of.h>
13*4882a593Smuzhiyun #include <linux/of_platform.h>
14*4882a593Smuzhiyun #include <linux/of_address.h>
15*4882a593Smuzhiyun #include <linux/of_irq.h>
16*4882a593Smuzhiyun #include <asm/mpc5121.h>
17*4882a593Smuzhiyun #include <asm/io.h>
18*4882a593Smuzhiyun #include <linux/spinlock.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/dmaengine.h>
21*4882a593Smuzhiyun #include <linux/dma-direction.h>
22*4882a593Smuzhiyun #include <linux/dma-mapping.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define DRV_NAME "mpc512x_lpbfifo"
25*4882a593Smuzhiyun 
/*
 * One entry of the localbus "ranges" property:
 * <csnum 0x0 cpu-addr window-size>.
 *
 * NOTE: the field order must match the four u32 cells of the property,
 * because get_cs_ranges() reads "ranges" directly into an array of
 * this struct via of_property_read_u32_array().
 */
struct cs_range {
	u32 csnum; /* chip select number */
	u32 base; /* offset inside the chip select; must be zero */
	u32 addr; /* CPU (physical) address of the access window */
	u32 size; /* size of the access window */
};
32*4882a593Smuzhiyun 
/* Driver-wide state; the SoC has a single LPB FIFO (SCLPC) block */
static struct lpbfifo_data {
	spinlock_t lock; /* for protecting lpbfifo_data */
	phys_addr_t regs_phys;	/* physical base of the SCLPC registers */
	resource_size_t regs_size;
	struct mpc512x_lpbfifo __iomem *regs;	/* mapped registers; NULL after remove() */
	int irq;
	struct cs_range *cs_ranges;	/* localbus chip-select windows from DT */
	size_t cs_n;	/* number of entries in cs_ranges */
	struct dma_chan *chan;	/* DMA channel feeding/draining the FIFO data word */
	struct mpc512x_lpbfifo_request *req;	/* in-flight request; NULL when FIFO is idle */
	dma_addr_t ram_bus_addr;	/* DMA mapping of req->ram_virt_addr, for unmapping */
	bool wait_lpbfifo_irq;	/* still waiting for the FIFO success IRQ */
	bool wait_lpbfifo_callback;	/* still waiting for the DMA completion callback */
} lpbfifo;
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun /*
49*4882a593Smuzhiyun  * A data transfer from RAM to some device on LPB is finished
50*4882a593Smuzhiyun  * when both mpc512x_lpbfifo_irq() and mpc512x_lpbfifo_callback()
51*4882a593Smuzhiyun  * have been called. We execute the callback registered in
52*4882a593Smuzhiyun  * mpc512x_lpbfifo_request just after that.
53*4882a593Smuzhiyun  * But for a data transfer from some device on LPB to RAM we don't enable
54*4882a593Smuzhiyun  * LPBFIFO interrupt because clearing MPC512X_SCLPC_SUCCESS interrupt flag
55*4882a593Smuzhiyun  * automatically disables LPBFIFO reading request to the DMA controller
56*4882a593Smuzhiyun  * and the data transfer hangs. So the callback registered in
57*4882a593Smuzhiyun  * mpc512x_lpbfifo_request is executed at the end of mpc512x_lpbfifo_callback().
58*4882a593Smuzhiyun  */
59*4882a593Smuzhiyun 
/*
 * mpc512x_lpbfifo_irq - IRQ handler for LPB FIFO
 *
 * Only write transfers (RAM -> LPB device) enable the "normal" FIFO
 * interrupt (see mpc512x_lpbfifo_kick()), so an IRQ arriving for a
 * read request is reported as bogus.
 */
static irqreturn_t mpc512x_lpbfifo_irq(int irq, void *param)
{
	struct device *dev = (struct device *)param;
	struct mpc512x_lpbfifo_request *req = NULL;
	unsigned long flags;
	u32 status;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* The device may already be torn down (see mpc512x_lpbfifo_remove()) */
	if (!lpbfifo.regs)
		goto end;

	req = lpbfifo.req;
	if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) {
		dev_err(dev, "bogus LPBFIFO IRQ\n");
		goto end;
	}

	status = in_be32(&lpbfifo.regs->status);
	if (status != MPC512X_SCLPC_SUCCESS) {
		dev_err(dev, "DMA transfer from RAM to peripheral failed\n");
		/* Reset the FIFO on failure; the request stays pending */
		out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
		goto end;
	}
	/* Clear the interrupt flag */
	out_be32(&lpbfifo.regs->status, MPC512X_SCLPC_SUCCESS);

	lpbfifo.wait_lpbfifo_irq = false;

	/* The DMA callback has not run yet; it will finish the transfer */
	if (lpbfifo.wait_lpbfifo_callback)
		goto end;

	/* Transfer is finished, set the FIFO as idle */
	lpbfifo.req = NULL;

	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Run the client's completion callback outside the spinlock */
	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;

 end:
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return IRQ_HANDLED;
}
110*4882a593Smuzhiyun 
/*
 * mpc512x_lpbfifo_callback is called by DMA driver when
 * DMA transaction is finished.
 *
 * It releases the DMA mapping of the RAM buffer and, if the FIFO side
 * is already done (or was never waited for, i.e. read transfers),
 * marks the FIFO idle and invokes the client's completion callback.
 */
static void mpc512x_lpbfifo_callback(void *param)
{
	unsigned long flags;
	struct mpc512x_lpbfifo_request *req = NULL;
	enum dma_data_direction dir;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* The device may already be torn down (see mpc512x_lpbfifo_remove()) */
	if (!lpbfifo.regs) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	req = lpbfifo.req;
	if (!req) {
		pr_err("bogus LPBFIFO callback\n");
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return;
	}

	/* Release the mapping */
	if (req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;
	dma_unmap_single(lpbfifo.chan->device->dev,
			lpbfifo.ram_bus_addr, req->size, dir);

	lpbfifo.wait_lpbfifo_callback = false;

	if (!lpbfifo.wait_lpbfifo_irq) {
		/* Transfer is finished, set the FIFO as idle */
		lpbfifo.req = NULL;

		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		/* Run the client's completion callback outside the spinlock */
		if (req->callback)
			req->callback(req);
	} else {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
	}
}
157*4882a593Smuzhiyun 
mpc512x_lpbfifo_kick(void)158*4882a593Smuzhiyun static int mpc512x_lpbfifo_kick(void)
159*4882a593Smuzhiyun {
160*4882a593Smuzhiyun 	u32 bits;
161*4882a593Smuzhiyun 	bool no_incr = false;
162*4882a593Smuzhiyun 	u32 bpt = 32; /* max bytes per LPBFIFO transaction involving DMA */
163*4882a593Smuzhiyun 	u32 cs = 0;
164*4882a593Smuzhiyun 	size_t i;
165*4882a593Smuzhiyun 	struct dma_device *dma_dev = NULL;
166*4882a593Smuzhiyun 	struct scatterlist sg;
167*4882a593Smuzhiyun 	enum dma_data_direction dir;
168*4882a593Smuzhiyun 	struct dma_slave_config dma_conf = {};
169*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *dma_tx = NULL;
170*4882a593Smuzhiyun 	dma_cookie_t cookie;
171*4882a593Smuzhiyun 	int ret;
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun 	/*
174*4882a593Smuzhiyun 	 * 1. Fit the requirements:
175*4882a593Smuzhiyun 	 * - the packet size must be a multiple of 4 since FIFO Data Word
176*4882a593Smuzhiyun 	 *    Register allows only full-word access according the Reference
177*4882a593Smuzhiyun 	 *    Manual;
178*4882a593Smuzhiyun 	 * - the physical address of the device on LPB and the packet size
179*4882a593Smuzhiyun 	 *    must be aligned on BPT (bytes per transaction) or 8-bytes
180*4882a593Smuzhiyun 	 *    boundary according the Reference Manual;
181*4882a593Smuzhiyun 	 * - but we choose DMA maxburst equal (or very close to) BPT to prevent
182*4882a593Smuzhiyun 	 *    DMA controller from overtaking FIFO and causing FIFO underflow
183*4882a593Smuzhiyun 	 *    error. So we force the packet size to be aligned on BPT boundary
184*4882a593Smuzhiyun 	 *    not to confuse DMA driver which requires the packet size to be
185*4882a593Smuzhiyun 	 *    aligned on maxburst boundary;
186*4882a593Smuzhiyun 	 * - BPT should be set to the LPB device port size for operation with
187*4882a593Smuzhiyun 	 *    disabled auto-incrementing according Reference Manual.
188*4882a593Smuzhiyun 	 */
189*4882a593Smuzhiyun 	if (lpbfifo.req->size == 0 || !IS_ALIGNED(lpbfifo.req->size, 4))
190*4882a593Smuzhiyun 		return -EINVAL;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	if (lpbfifo.req->portsize != LPB_DEV_PORTSIZE_UNDEFINED) {
193*4882a593Smuzhiyun 		bpt = lpbfifo.req->portsize;
194*4882a593Smuzhiyun 		no_incr = true;
195*4882a593Smuzhiyun 	}
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	while (bpt > 1) {
198*4882a593Smuzhiyun 		if (IS_ALIGNED(lpbfifo.req->dev_phys_addr, min(bpt, 0x8u)) &&
199*4882a593Smuzhiyun 					IS_ALIGNED(lpbfifo.req->size, bpt)) {
200*4882a593Smuzhiyun 			break;
201*4882a593Smuzhiyun 		}
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 		if (no_incr)
204*4882a593Smuzhiyun 			return -EINVAL;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 		bpt >>= 1;
207*4882a593Smuzhiyun 	}
208*4882a593Smuzhiyun 	dma_conf.dst_maxburst = max(bpt, 0x4u) / 4;
209*4882a593Smuzhiyun 	dma_conf.src_maxburst = max(bpt, 0x4u) / 4;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	for (i = 0; i < lpbfifo.cs_n; i++) {
212*4882a593Smuzhiyun 		phys_addr_t cs_start = lpbfifo.cs_ranges[i].addr;
213*4882a593Smuzhiyun 		phys_addr_t cs_end = cs_start + lpbfifo.cs_ranges[i].size;
214*4882a593Smuzhiyun 		phys_addr_t access_start = lpbfifo.req->dev_phys_addr;
215*4882a593Smuzhiyun 		phys_addr_t access_end = access_start + lpbfifo.req->size;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 		if (access_start >= cs_start && access_end <= cs_end) {
218*4882a593Smuzhiyun 			cs = lpbfifo.cs_ranges[i].csnum;
219*4882a593Smuzhiyun 			break;
220*4882a593Smuzhiyun 		}
221*4882a593Smuzhiyun 	}
222*4882a593Smuzhiyun 	if (i == lpbfifo.cs_n)
223*4882a593Smuzhiyun 		return -EFAULT;
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	/* 2. Prepare DMA */
226*4882a593Smuzhiyun 	dma_dev = lpbfifo.chan->device;
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE) {
229*4882a593Smuzhiyun 		dir = DMA_TO_DEVICE;
230*4882a593Smuzhiyun 		dma_conf.direction = DMA_MEM_TO_DEV;
231*4882a593Smuzhiyun 		dma_conf.dst_addr = lpbfifo.regs_phys +
232*4882a593Smuzhiyun 				offsetof(struct mpc512x_lpbfifo, data_word);
233*4882a593Smuzhiyun 	} else {
234*4882a593Smuzhiyun 		dir = DMA_FROM_DEVICE;
235*4882a593Smuzhiyun 		dma_conf.direction = DMA_DEV_TO_MEM;
236*4882a593Smuzhiyun 		dma_conf.src_addr = lpbfifo.regs_phys +
237*4882a593Smuzhiyun 				offsetof(struct mpc512x_lpbfifo, data_word);
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun 	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
240*4882a593Smuzhiyun 	dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	/* Make DMA channel work with LPB FIFO data register */
243*4882a593Smuzhiyun 	if (dma_dev->device_config(lpbfifo.chan, &dma_conf)) {
244*4882a593Smuzhiyun 		ret = -EINVAL;
245*4882a593Smuzhiyun 		goto err_dma_prep;
246*4882a593Smuzhiyun 	}
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	sg_init_table(&sg, 1);
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	sg_dma_address(&sg) = dma_map_single(dma_dev->dev,
251*4882a593Smuzhiyun 			lpbfifo.req->ram_virt_addr, lpbfifo.req->size, dir);
252*4882a593Smuzhiyun 	if (dma_mapping_error(dma_dev->dev, sg_dma_address(&sg)))
253*4882a593Smuzhiyun 		return -EFAULT;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	lpbfifo.ram_bus_addr = sg_dma_address(&sg); /* For freeing later */
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	sg_dma_len(&sg) = lpbfifo.req->size;
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	dma_tx = dmaengine_prep_slave_sg(lpbfifo.chan, &sg,
260*4882a593Smuzhiyun 						1, dma_conf.direction, 0);
261*4882a593Smuzhiyun 	if (!dma_tx) {
262*4882a593Smuzhiyun 		ret = -ENOSPC;
263*4882a593Smuzhiyun 		goto err_dma_prep;
264*4882a593Smuzhiyun 	}
265*4882a593Smuzhiyun 	dma_tx->callback = mpc512x_lpbfifo_callback;
266*4882a593Smuzhiyun 	dma_tx->callback_param = NULL;
267*4882a593Smuzhiyun 
268*4882a593Smuzhiyun 	/* 3. Prepare FIFO */
269*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->enable,
270*4882a593Smuzhiyun 				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
271*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->enable, 0x0);
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	/*
274*4882a593Smuzhiyun 	 * Configure the watermarks for write operation (RAM->DMA->FIFO->dev):
275*4882a593Smuzhiyun 	 * - high watermark 7 words according the Reference Manual,
276*4882a593Smuzhiyun 	 * - low watermark 512 bytes (half of the FIFO).
277*4882a593Smuzhiyun 	 * These watermarks don't work for read operation since the
278*4882a593Smuzhiyun 	 * MPC512X_SCLPC_FLUSH bit is set (according the Reference Manual).
279*4882a593Smuzhiyun 	 */
280*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->fifo_ctrl, MPC512X_SCLPC_FIFO_CTRL(0x7));
281*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->fifo_alarm, MPC512X_SCLPC_FIFO_ALARM(0x200));
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun 	/*
284*4882a593Smuzhiyun 	 * Start address is a physical address of the region which belongs
285*4882a593Smuzhiyun 	 * to the device on the LocalPlus Bus
286*4882a593Smuzhiyun 	 */
287*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->start_addr, lpbfifo.req->dev_phys_addr);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	/*
290*4882a593Smuzhiyun 	 * Configure chip select, transfer direction, address increment option
291*4882a593Smuzhiyun 	 * and bytes per transaction option
292*4882a593Smuzhiyun 	 */
293*4882a593Smuzhiyun 	bits = MPC512X_SCLPC_CS(cs);
294*4882a593Smuzhiyun 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_READ)
295*4882a593Smuzhiyun 		bits |= MPC512X_SCLPC_READ | MPC512X_SCLPC_FLUSH;
296*4882a593Smuzhiyun 	if (no_incr)
297*4882a593Smuzhiyun 		bits |= MPC512X_SCLPC_DAI;
298*4882a593Smuzhiyun 	bits |= MPC512X_SCLPC_BPT(bpt);
299*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->ctrl, bits);
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	/* Unmask irqs */
302*4882a593Smuzhiyun 	bits = MPC512X_SCLPC_ENABLE | MPC512X_SCLPC_ABORT_INT_ENABLE;
303*4882a593Smuzhiyun 	if (lpbfifo.req->dir == MPC512X_LPBFIFO_REQ_DIR_WRITE)
304*4882a593Smuzhiyun 		bits |= MPC512X_SCLPC_NORM_INT_ENABLE;
305*4882a593Smuzhiyun 	else
306*4882a593Smuzhiyun 		lpbfifo.wait_lpbfifo_irq = false;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->enable, bits);
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	/* 4. Set packet size and kick FIFO off */
311*4882a593Smuzhiyun 	bits = lpbfifo.req->size | MPC512X_SCLPC_START;
312*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->pkt_size, bits);
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	/* 5. Finally kick DMA off */
315*4882a593Smuzhiyun 	cookie = dma_tx->tx_submit(dma_tx);
316*4882a593Smuzhiyun 	if (dma_submit_error(cookie)) {
317*4882a593Smuzhiyun 		ret = -ENOSPC;
318*4882a593Smuzhiyun 		goto err_dma_submit;
319*4882a593Smuzhiyun 	}
320*4882a593Smuzhiyun 
321*4882a593Smuzhiyun 	return 0;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun  err_dma_submit:
324*4882a593Smuzhiyun 	out_be32(&lpbfifo.regs->enable,
325*4882a593Smuzhiyun 				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);
326*4882a593Smuzhiyun  err_dma_prep:
327*4882a593Smuzhiyun 	dma_unmap_single(dma_dev->dev, sg_dma_address(&sg),
328*4882a593Smuzhiyun 						lpbfifo.req->size, dir);
329*4882a593Smuzhiyun 	return ret;
330*4882a593Smuzhiyun }
331*4882a593Smuzhiyun 
mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request * req)332*4882a593Smuzhiyun static int mpc512x_lpbfifo_submit_locked(struct mpc512x_lpbfifo_request *req)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	int ret = 0;
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	if (!lpbfifo.regs)
337*4882a593Smuzhiyun 		return -ENODEV;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	/* Check whether a transfer is in progress */
340*4882a593Smuzhiyun 	if (lpbfifo.req)
341*4882a593Smuzhiyun 		return -EBUSY;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	lpbfifo.wait_lpbfifo_irq = true;
344*4882a593Smuzhiyun 	lpbfifo.wait_lpbfifo_callback = true;
345*4882a593Smuzhiyun 	lpbfifo.req = req;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	ret = mpc512x_lpbfifo_kick();
348*4882a593Smuzhiyun 	if (ret != 0)
349*4882a593Smuzhiyun 		lpbfifo.req = NULL; /* Set the FIFO as idle */
350*4882a593Smuzhiyun 
351*4882a593Smuzhiyun 	return ret;
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun 
mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request * req)354*4882a593Smuzhiyun int mpc512x_lpbfifo_submit(struct mpc512x_lpbfifo_request *req)
355*4882a593Smuzhiyun {
356*4882a593Smuzhiyun 	unsigned long flags;
357*4882a593Smuzhiyun 	int ret = 0;
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 	spin_lock_irqsave(&lpbfifo.lock, flags);
360*4882a593Smuzhiyun 	ret = mpc512x_lpbfifo_submit_locked(req);
361*4882a593Smuzhiyun 	spin_unlock_irqrestore(&lpbfifo.lock, flags);
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	return ret;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun EXPORT_SYMBOL(mpc512x_lpbfifo_submit);
366*4882a593Smuzhiyun 
/*
 * LPBFIFO driver uses "ranges" property of "localbus" device tree node
 * for being able to determine the chip select number of a client device
 * ordering a DMA transfer.
 *
 * On success, fills lpbfifo.cs_ranges / lpbfifo.cs_n and returns 0;
 * returns -ENODEV on any device tree problem. The cs_ranges array is
 * devm-allocated against @dev.
 */
static int get_cs_ranges(struct device *dev)
{
	int ret = -ENODEV;
	struct device_node *lb_node;
	const u32 *addr_cells_p;
	const u32 *size_cells_p;
	int proplen;
	size_t i;

	lb_node = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-localbus");
	if (!lb_node)
		return ret;

	/*
	 * The node defined as compatible with 'fsl,mpc5121-localbus'
	 * should have two address cells and one size cell.
	 * Every item of its ranges property should consist of:
	 * - the first address cell which is the chipselect number;
	 * - the second address cell which is the offset in the chipselect,
	 *    must be zero.
	 * - CPU address of the beginning of an access window;
	 * - the only size cell which is the size of an access window.
	 */
	addr_cells_p = of_get_property(lb_node, "#address-cells", NULL);
	size_cells_p = of_get_property(lb_node, "#size-cells", NULL);
	if (addr_cells_p == NULL || *addr_cells_p != 2 ||
				size_cells_p == NULL ||	*size_cells_p != 1) {
		goto end;
	}

	/* "ranges" must be a whole number of 4-cell entries */
	proplen = of_property_count_u32_elems(lb_node, "ranges");
	if (proplen <= 0 || proplen % 4 != 0)
		goto end;

	lpbfifo.cs_n = proplen / 4;
	lpbfifo.cs_ranges = devm_kcalloc(dev, lpbfifo.cs_n,
					sizeof(struct cs_range), GFP_KERNEL);
	if (!lpbfifo.cs_ranges)
		goto end;

	/* Relies on struct cs_range matching the 4-cell entry layout */
	if (of_property_read_u32_array(lb_node, "ranges",
				(u32 *)lpbfifo.cs_ranges, proplen) != 0) {
		goto end;
	}

	/* The offset inside each chip select must be zero */
	for (i = 0; i < lpbfifo.cs_n; i++) {
		if (lpbfifo.cs_ranges[i].base != 0)
			goto end;
	}

	ret = 0;

 end:
	of_node_put(lb_node);
	return ret;
}
428*4882a593Smuzhiyun 
/*
 * Probe: acquire the DMA channel, map the SCLPC registers, reset the
 * FIFO, parse the localbus chip-select windows and install the IRQ
 * handler. Devm-managed resources (mem region, ioremap, cs_ranges)
 * are released automatically on failure; the DMA channel and the IRQ
 * mapping are unwound manually below.
 */
static int mpc512x_lpbfifo_probe(struct platform_device *pdev)
{
	struct resource r;
	int ret = 0;

	memset(&lpbfifo, 0, sizeof(struct lpbfifo_data));
	spin_lock_init(&lpbfifo.lock);

	lpbfifo.chan = dma_request_chan(&pdev->dev, "rx-tx");
	if (IS_ERR(lpbfifo.chan))
		return PTR_ERR(lpbfifo.chan);

	if (of_address_to_resource(pdev->dev.of_node, 0, &r) != 0) {
		dev_err(&pdev->dev, "bad 'reg' in 'sclpc' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.regs_phys = r.start;
	lpbfifo.regs_size = resource_size(&r);

	if (!devm_request_mem_region(&pdev->dev, lpbfifo.regs_phys,
					lpbfifo.regs_size, DRV_NAME)) {
		dev_err(&pdev->dev, "unable to request region\n");
		ret = -EBUSY;
		goto err0;
	}

	lpbfifo.regs = devm_ioremap(&pdev->dev,
					lpbfifo.regs_phys, lpbfifo.regs_size);
	if (!lpbfifo.regs) {
		dev_err(&pdev->dev, "mapping registers failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	/* Put the FIFO into a known (reset) state before use */
	out_be32(&lpbfifo.regs->enable,
				MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	if (get_cs_ranges(&pdev->dev) != 0) {
		dev_err(&pdev->dev, "bad '/localbus' device tree node\n");
		ret = -ENODEV;
		goto err0;
	}

	lpbfifo.irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!lpbfifo.irq) {
		dev_err(&pdev->dev, "mapping irq failed\n");
		ret = -ENODEV;
		goto err0;
	}

	if (request_irq(lpbfifo.irq, mpc512x_lpbfifo_irq, 0,
						DRV_NAME, &pdev->dev) != 0) {
		dev_err(&pdev->dev, "requesting irq failed\n");
		ret = -ENODEV;
		goto err1;
	}

	dev_info(&pdev->dev, "probe succeeded\n");
	return 0;

 err1:
	irq_dispose_mapping(lpbfifo.irq);
 err0:
	dma_release_channel(lpbfifo.chan);
	return ret;
}
497*4882a593Smuzhiyun 
/*
 * Remove: clear lpbfifo.regs under the lock first, so a concurrently
 * running IRQ handler or DMA callback sees NULL and bails out, then
 * stop the DMA channel, reset the FIFO via the saved register pointer
 * and release the IRQ and the DMA channel.
 */
static int mpc512x_lpbfifo_remove(struct platform_device *pdev)
{
	unsigned long flags;
	struct dma_device *dma_dev = lpbfifo.chan->device;
	struct mpc512x_lpbfifo __iomem *regs = NULL;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	regs = lpbfifo.regs;
	lpbfifo.regs = NULL;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	dma_dev->device_terminate_all(lpbfifo.chan);
	out_be32(&regs->enable, MPC512X_SCLPC_RESET | MPC512X_SCLPC_FIFO_RESET);

	free_irq(lpbfifo.irq, &pdev->dev);
	irq_dispose_mapping(lpbfifo.irq);
	dma_release_channel(lpbfifo.chan);

	return 0;
}
518*4882a593Smuzhiyun 
/* Device tree match table for the SCLPC ("lpbfifo") node */
static const struct of_device_id mpc512x_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc512x-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc512x_lpbfifo_match);

static struct platform_driver mpc512x_lpbfifo_driver = {
	.probe = mpc512x_lpbfifo_probe,
	.remove = mpc512x_lpbfifo_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table = mpc512x_lpbfifo_match,
	},
};

module_platform_driver(mpc512x_lpbfifo_driver);

MODULE_AUTHOR("Alexander Popov <alex.popov@linux.com>");
MODULE_DESCRIPTION("MPC512x LocalPlus Bus FIFO device driver");
MODULE_LICENSE("GPL v2");
539