// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Common functions for DMA access on PA Semi PWRficient
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/sched.h>

#include <asm/pasemi_dma.h>
#define MAX_TXCH 64
#define MAX_RXCH 64
#define MAX_FLAGS 64
#define MAX_FUN 8

static struct pasdma_status *dma_status;

static void __iomem *iob_regs;
static void __iomem *mac_regs[6];
static void __iomem *dma_regs;

static int base_hw_irq;

static int num_txch, num_rxch;

static struct pci_dev *dma_pdev;

/* Bitmaps to handle allocation of channels */

static DECLARE_BITMAP(txch_free, MAX_TXCH);
static DECLARE_BITMAP(rxch_free, MAX_RXCH);
static DECLARE_BITMAP(flags_free, MAX_FLAGS);
static DECLARE_BITMAP(fun_free, MAX_FUN);

/* pasemi_read_iob_reg - read IOB register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_iob_reg(unsigned int reg)
{
	return in_le32(iob_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_iob_reg);

/* pasemi_write_iob_reg - write IOB register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_iob_reg(unsigned int reg, unsigned int val)
{
	out_le32(iob_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_iob_reg);

/* pasemi_read_mac_reg - read MAC register
 * @intf: MAC interface
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_mac_reg(int intf, unsigned int reg)
{
	return in_le32(mac_regs[intf]+reg);
}
EXPORT_SYMBOL(pasemi_read_mac_reg);

/* pasemi_write_mac_reg - write MAC register
 * @intf: MAC interface
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_mac_reg(int intf, unsigned int reg, unsigned int val)
{
	out_le32(mac_regs[intf]+reg, val);
}
EXPORT_SYMBOL(pasemi_write_mac_reg);

/* pasemi_read_dma_reg - read DMA register
 * @reg: Register to read (offset into PCI CFG space)
 */
unsigned int pasemi_read_dma_reg(unsigned int reg)
{
	return in_le32(dma_regs+reg);
}
EXPORT_SYMBOL(pasemi_read_dma_reg);

/* pasemi_write_dma_reg - write DMA register
 * @reg: Register to write to (offset into PCI CFG space)
 * @val: Value to write
 */
void pasemi_write_dma_reg(unsigned int reg, unsigned int val)
{
	out_le32(dma_regs+reg, val);
}
EXPORT_SYMBOL(pasemi_write_dma_reg);

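/*
 * Usage sketch (illustrative only): these accessors are typically paired
 * for a read-modify-write update of a register, as pasemi_dma_init() below
 * does for PAS_DMA_COM_CFG:
 *
 *	u32 tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
 *	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);
 */
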
static int pasemi_alloc_tx_chan(enum pasemi_dmachan_type type)
{
	int bit;
	int start, limit;

	switch (type & (TXCHAN_EVT0|TXCHAN_EVT1)) {
	case TXCHAN_EVT0:
		start = 0;
		limit = 10;
		break;
	case TXCHAN_EVT1:
		start = 10;
		limit = MAX_TXCH;
		break;
	default:
		start = 0;
		limit = MAX_TXCH;
		break;
	}
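	/*
	 * Lock-free allocation: find_next_bit() alone would be racy, so
	 * the channel belongs to whoever wins test_and_clear_bit(); a
	 * loser simply rescans the bitmap.
	 */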
retry:
	bit = find_next_bit(txch_free, MAX_TXCH, start);
	if (bit >= limit)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, txch_free))
		goto retry;

	return bit;
}

static void pasemi_free_tx_chan(int chan)
{
	BUG_ON(test_bit(chan, txch_free));
	set_bit(chan, txch_free);
}

static int pasemi_alloc_rx_chan(void)
{
	int bit;
retry:
	bit = find_first_bit(rxch_free, MAX_RXCH);
	if (bit >= MAX_RXCH)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, rxch_free))
		goto retry;

	return bit;
}

static void pasemi_free_rx_chan(int chan)
{
	BUG_ON(test_bit(chan, rxch_free));
	set_bit(chan, rxch_free);
}

/* pasemi_dma_alloc_chan - Allocate a DMA channel
 * @type: Type of channel to allocate
 * @total_size: Total size of structure to allocate (to allow for more
 *		room behind the structure to be used by the client)
 * @offset: Offset in bytes from start of the total structure to the beginning
 *	    of struct pasemi_dmachan. Needed when struct pasemi_dmachan is
 *	    not the first member of the client structure.
 *
 * pasemi_dma_alloc_chan allocates a DMA channel for use by a client. The
 * type argument specifies whether it's an RX or TX channel, and in the case
 * of TX channels which event group it needs to belong to (if any).
 *
 * Returns a pointer to the struct pasemi_dmachan embedded in the allocated
 * structure (i.e. @offset bytes into it) on success, NULL on failure.
 */
void *pasemi_dma_alloc_chan(enum pasemi_dmachan_type type,
			    int total_size, int offset)
{
	void *buf;
	struct pasemi_dmachan *chan;
	int chno;

	BUG_ON(total_size < sizeof(struct pasemi_dmachan));

	buf = kzalloc(total_size, GFP_KERNEL);

	if (!buf)
		return NULL;
	chan = buf + offset;

	chan->priv = buf;

	switch (type & (TXCHAN|RXCHAN)) {
	case RXCHAN:
		chno = pasemi_alloc_rx_chan();
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL,
					       base_hw_irq + num_txch + chno);
		chan->status = &dma_status->rx_sta[chno];
		break;
	case TXCHAN:
		chno = pasemi_alloc_tx_chan(type);
		chan->chno = chno;
		chan->irq = irq_create_mapping(NULL, base_hw_irq + chno);
		chan->status = &dma_status->tx_sta[chno];
		break;
	}

	chan->chan_type = type;

	return chan;
}
EXPORT_SYMBOL(pasemi_dma_alloc_chan);

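/*
 * Usage sketch (illustrative, modeled on how clients such as the
 * pasemi_mac driver use this): the client embeds struct pasemi_dmachan in
 * its own per-channel state and passes that member's offset. The names
 * below are hypothetical:
 *
 *	struct client_txring {
 *		struct pasemi_dmachan chan;	(keep first so offset is 0)
 *		unsigned int next_to_fill;
 *	};
 *
 *	struct client_txring *ring;
 *
 *	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct client_txring),
 *				     offsetof(struct client_txring, chan));
 *	if (!ring)
 *		return -ENOMEM;
 */
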
/* pasemi_dma_free_chan - Free a previously allocated channel
 * @chan: Channel to free
 *
 * Frees a previously allocated channel. It will also deallocate any
 * descriptor ring associated with the channel, if allocated.
 */
void pasemi_dma_free_chan(struct pasemi_dmachan *chan)
{
	if (chan->ring_virt)
		pasemi_dma_free_ring(chan);

	switch (chan->chan_type & (RXCHAN|TXCHAN)) {
	case RXCHAN:
		pasemi_free_rx_chan(chan->chno);
		break;
	case TXCHAN:
		pasemi_free_tx_chan(chan->chno);
		break;
	}

	kfree(chan->priv);
}
EXPORT_SYMBOL(pasemi_dma_free_chan);

/* pasemi_dma_alloc_ring - Allocate descriptor ring for a channel
 * @chan: Channel for which to allocate
 * @ring_size: Ring size in 64-bit (8-byte) words
 *
 * Allocate a descriptor ring for a channel. Returns 0 on success, errno
 * on failure. The passed in struct pasemi_dmachan is updated with the
 * virtual and DMA addresses of the ring.
 */
int pasemi_dma_alloc_ring(struct pasemi_dmachan *chan, int ring_size)
{
	BUG_ON(chan->ring_virt);

	chan->ring_size = ring_size;

	chan->ring_virt = dma_alloc_coherent(&dma_pdev->dev,
					     ring_size * sizeof(u64),
					     &chan->ring_dma, GFP_KERNEL);

	if (!chan->ring_virt)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_alloc_ring);

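/*
 * Usage sketch (illustrative): ring_size counts 64-bit words, not
 * descriptors, so a ring of N two-word descriptors needs 2*N words.
 * CLIENT_RING_SIZE is a hypothetical client constant:
 *
 *	if (pasemi_dma_alloc_ring(&ring->chan, 2 * CLIENT_RING_SIZE))
 *		return -ENOMEM;
 *
 * The ring is then accessed through chan->ring_virt, and its bus address
 * is programmed into the hardware from chan->ring_dma.
 */
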
/* pasemi_dma_free_ring - Free an allocated descriptor ring for a channel
 * @chan: Channel for which to free the descriptor ring
 *
 * Frees a previously allocated descriptor ring for a channel.
 */
void pasemi_dma_free_ring(struct pasemi_dmachan *chan)
{
	BUG_ON(!chan->ring_virt);

	dma_free_coherent(&dma_pdev->dev, chan->ring_size * sizeof(u64),
			  chan->ring_virt, chan->ring_dma);
	chan->ring_virt = NULL;
	chan->ring_size = 0;
	chan->ring_dma = 0;
}
EXPORT_SYMBOL(pasemi_dma_free_ring);

/* pasemi_dma_start_chan - Start a DMA channel
 * @chan: Channel to start
 * @cmdsta: Additional CCMDSTA/TCMDSTA bits to write
 *
 * Enables (starts) a DMA channel with optional additional arguments.
 */
void pasemi_dma_start_chan(const struct pasemi_dmachan *chan, const u32 cmdsta)
{
	if (chan->chan_type == RXCHAN)
		pasemi_write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_RXCHAN_CCMDSTA_EN);
	else
		pasemi_write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno),
				     cmdsta | PAS_DMA_TXCHAN_TCMDSTA_EN);
}
EXPORT_SYMBOL(pasemi_dma_start_chan);

/* pasemi_dma_stop_chan - Stop a DMA channel
 * @chan: Channel to stop
 *
 * Stops (disables) a DMA channel. This is done by setting the ST bit in the
 * CMDSTA register and waiting for the ACT (active) bit to clear, then
 * finally disabling the whole channel.
 *
 * This function only tries for a short while to stop the channel; if the
 * channel doesn't stop in time, it returns failure.
 *
 * Returns 1 on success, 0 on failure.
 */
#define MAX_RETRIES 5000
int pasemi_dma_stop_chan(const struct pasemi_dmachan *chan)
{
	int reg, retries;
	u32 sta;

	if (chan->chan_type == RXCHAN) {
		reg = PAS_DMA_RXCHAN_CCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_RXCHAN_CCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	} else {
		reg = PAS_DMA_TXCHAN_TCMDSTA(chan->chno);
		pasemi_write_dma_reg(reg, PAS_DMA_TXCHAN_TCMDSTA_ST);
		for (retries = 0; retries < MAX_RETRIES; retries++) {
			sta = pasemi_read_dma_reg(reg);
			if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)) {
				pasemi_write_dma_reg(reg, 0);
				return 1;
			}
			cond_resched();
		}
	}

	return 0;
}
EXPORT_SYMBOL(pasemi_dma_stop_chan);

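/*
 * Usage sketch (illustrative): bring a channel up once its ring is
 * programmed, and stop it before freeing. Extra CCMDSTA/TCMDSTA bits can
 * be ORed in via @cmdsta; 0 just sets the enable bit:
 *
 *	pasemi_dma_start_chan(&ring->chan, 0);
 *	...
 *	if (!pasemi_dma_stop_chan(&ring->chan))
 *		pr_err("channel %d did not stop\n", ring->chan.chno);
 */
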
/* pasemi_dma_alloc_buf - Allocate a buffer to use for DMA
 * @chan: Channel to allocate for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Allocate a buffer to be used by the DMA engine for read/write,
 * similar to dma_alloc_coherent().
 *
 * Returns the virtual address of the buffer, or NULL in case of failure.
 */
void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size,
			   dma_addr_t *handle)
{
	return dma_alloc_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_alloc_buf);

/* pasemi_dma_free_buf - Free a buffer used for DMA
 * @chan: Channel the buffer was allocated for
 * @size: Size of buffer in bytes
 * @handle: DMA handle
 *
 * Frees a previously allocated buffer.
 */
void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size,
			 dma_addr_t *handle)
{
	dma_free_coherent(&dma_pdev->dev, size, handle, GFP_KERNEL);
}
EXPORT_SYMBOL(pasemi_dma_free_buf);

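/*
 * Usage sketch (illustrative): allocate and release a coherent buffer for
 * a channel; the size passed to the free must match the allocation:
 *
 *	dma_addr_t buf_dma;
 *	void *buf = pasemi_dma_alloc_buf(&ring->chan, 4096, &buf_dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	pasemi_dma_free_buf(&ring->chan, 4096, &buf_dma);
 */
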
/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization
 *
 * Allocates a flag for use with channel synchronization (event descriptors).
 * Returns allocated flag (0-63), < 0 on error.
 */
int pasemi_dma_alloc_flag(void)
{
	int bit;

retry:
	bit = find_next_bit(flags_free, MAX_FLAGS, 0);
	if (bit >= MAX_FLAGS)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, flags_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_flag);

/* pasemi_dma_free_flag - Deallocates a flag (event)
 * @flag: Flag number to deallocate
 *
 * Frees up a flag so it can be reused for other purposes.
 */
void pasemi_dma_free_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	BUG_ON(test_bit(flag, flags_free));
	set_bit(flag, flags_free);
}
EXPORT_SYMBOL(pasemi_dma_free_flag);

/* pasemi_dma_set_flag - Sets a flag (event) to 1
 * @flag: Flag number to set active
 *
 * Sets the flag provided to 1.
 */
void pasemi_dma_set_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_set_flag);

/* pasemi_dma_clear_flag - Sets a flag (event) to 0
 * @flag: Flag number to set inactive
 *
 * Sets the flag provided to 0.
 */
void pasemi_dma_clear_flag(int flag)
{
	BUG_ON(flag >= MAX_FLAGS);
	if (flag < 32)
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag);
	else
		pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << (flag - 32));
}
EXPORT_SYMBOL(pasemi_dma_clear_flag);

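/*
 * Usage sketch (illustrative): a flag's lifecycle is allocate, drive it
 * from event descriptors or manually via set/clear, then free:
 *
 *	int flag = pasemi_dma_alloc_flag();
 *
 *	if (flag < 0)
 *		return flag;
 *	pasemi_dma_clear_flag(flag);	(start deasserted)
 *	...
 *	pasemi_dma_set_flag(flag);
 *	pasemi_dma_free_flag(flag);
 */
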
/* pasemi_dma_alloc_fun - Allocate a function engine
 *
 * Allocates a function engine to use for crypto/checksum offload.
 * Returns allocated engine (0-7), < 0 on error.
 */
int pasemi_dma_alloc_fun(void)
{
	int bit;

retry:
	bit = find_next_bit(fun_free, MAX_FUN, 0);
	if (bit >= MAX_FUN)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, fun_free))
		goto retry;

	return bit;
}
EXPORT_SYMBOL(pasemi_dma_alloc_fun);

/* pasemi_dma_free_fun - Deallocates a function engine
 * @fun: Engine number to deallocate
 *
 * Frees up a function engine so it can be used for other purposes.
 */
void pasemi_dma_free_fun(int fun)
{
	BUG_ON(fun >= MAX_FUN);
	BUG_ON(test_bit(fun, fun_free));
	set_bit(fun, fun_free);
}
EXPORT_SYMBOL(pasemi_dma_free_fun);

static void *map_onedev(struct pci_dev *p, int index)
{
	struct device_node *dn;
	void __iomem *ret;

	dn = pci_device_to_OF_node(p);
	if (!dn)
		goto fallback;

	ret = of_iomap(dn, index);
	if (!ret)
		goto fallback;

	return ret;
fallback:
	/* This is hardcoded and ugly, but we have some firmware versions
	 * that don't provide the register space in the device tree. Luckily
	 * they are at well-known locations so we can just do the math here.
	 */
	return ioremap(0xe0000000 + (p->devfn << 12), 0x2000);
}

/* pasemi_dma_init - Initialize the PA Semi DMA library
 *
 * This function initializes the DMA library. It must be called before
 * any other function in the library.
 *
 * Returns 0 on success, errno on failure.
 */
int pasemi_dma_init(void)
{
	static DEFINE_SPINLOCK(init_lock);
	struct pci_dev *iob_pdev;
	struct pci_dev *pdev;
	struct resource res;
	struct device_node *dn;
	int i, intf, err = 0;
	unsigned long timeout;
	u32 tmp;

	if (!machine_is(pasemi))
		return -ENODEV;

	spin_lock(&init_lock);

	/* Make sure we haven't already initialized */
	if (dma_pdev)
		goto out;

	iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!iob_pdev) {
		BUG();
		pr_warn("Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}
	iob_regs = map_onedev(iob_pdev, 0);

	dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!dma_pdev) {
		BUG();
		pr_warn("Can't find DMA controller\n");
		err = -ENODEV;
		goto out;
	}
	dma_regs = map_onedev(dma_pdev, 0);
	base_hw_irq = virq_to_hw(dma_pdev->irq);

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_TXCH, &tmp);
	num_txch = (tmp & PAS_DMA_CAP_TXCH_TCHN_M) >> PAS_DMA_CAP_TXCH_TCHN_S;

	pci_read_config_dword(dma_pdev, PAS_DMA_CAP_RXCH, &tmp);
	num_rxch = (tmp & PAS_DMA_CAP_RXCH_RCHN_M) >> PAS_DMA_CAP_RXCH_RCHN_S;

	intf = 0;
	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa006, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	for (pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, NULL);
	     pdev;
	     pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa005, pdev))
		mac_regs[intf++] = map_onedev(pdev, 0);

	pci_dev_put(pdev);

	dn = pci_device_to_OF_node(iob_pdev);
	if (dn)
		err = of_address_to_resource(dn, 1, &res);
	if (!dn || err) {
		/* Fallback for old firmware */
		res.start = 0xfd800000;
		res.end = res.start + 0x1000;
	}
	dma_status = ioremap_cache(res.start, resource_size(&res));
	pci_dev_put(iob_pdev);

	for (i = 0; i < MAX_TXCH; i++)
		__set_bit(i, txch_free);

	for (i = 0; i < MAX_RXCH; i++)
		__set_bit(i, rxch_free);

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable RX section\n");
			break;
		}
	}

	timeout = jiffies + HZ;
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0);
	while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) {
		if (time_after(jiffies, timeout)) {
			pr_warn("Warning: Could not disable TX section\n");
			break;
		}
	}

	/* setup resource allocations for the different DMA sections */
	tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG);
	pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000);

	/* enable tx section */
	pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	/* enable rx section */
	pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	for (i = 0; i < MAX_FLAGS; i++)
		__set_bit(i, flags_free);

	for (i = 0; i < MAX_FUN; i++)
		__set_bit(i, fun_free);

	/* clear all status flags */
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff);
	pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff);

	pr_info("PA Semi PWRficient DMA library initialized (%d tx, %d rx channels)\n",
		num_txch, num_rxch);

out:
	spin_unlock(&init_lock);
	return err;
}
EXPORT_SYMBOL(pasemi_dma_init);
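
/*
 * Usage sketch (illustrative): a client driver calls pasemi_dma_init()
 * once, before any other function in this library, typically from its
 * probe path. Repeated calls are safe and return 0:
 *
 *	err = pasemi_dma_init();
 *	if (err)
 *		return err;
 */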