/*
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define REG_MASK			0xffffffff

#define DMA_LOOPBACK			BIT(31)
#define DMA_ENABLE			BIT(31)
#define DMA_TEARDOWN			BIT(30)

#define DMA_TX_FILT_PSWORDS		BIT(29)
#define DMA_TX_FILT_EINFO		BIT(30)
#define DMA_TX_PRIO_SHIFT		0
#define DMA_RX_PRIO_SHIFT		16
#define DMA_PRIO_MASK			GENMASK(3, 0)
#define DMA_PRIO_DEFAULT		0
#define DMA_RX_TIMEOUT_DEFAULT		17500 /* cycles */
#define DMA_RX_TIMEOUT_MASK		GENMASK(16, 0)
#define DMA_RX_TIMEOUT_SHIFT		0

#define CHAN_HAS_EPIB			BIT(30)
#define CHAN_HAS_PSINFO			BIT(29)
#define CHAN_ERR_RETRY			BIT(28)
#define CHAN_PSINFO_AT_SOP		BIT(25)
#define CHAN_SOP_OFF_SHIFT		16
#define CHAN_SOP_OFF_MASK		GENMASK(9, 0)
#define DESC_TYPE_SHIFT			26
#define DESC_TYPE_MASK			GENMASK(2, 0)

/*
 * QMGR & QNUM together make up 14 bits with QMGR as the 2 MSBs in the
 * logical navigator cloud mapping scheme.
 * Using the 14-bit physical queue numbers directly maps into this scheme.
 */
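/*
 * Illustration only (derived from the mapping described above, not from
 * additional hardware documentation): a physical queue number of 0x2000
 * maps to QMGR 2 (bits 13:12) and QNUM 0 (bits 11:0).
 */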
#define CHAN_QNUM_MASK			GENMASK(14, 0)
#define DMA_MAX_QMS			4
#define DMA_TIMEOUT			1	/* msecs */
#define DMA_INVALID_ID			0xffff

struct reg_global {
	u32	revision;
	u32	perf_control;
	u32	emulation_control;
	u32	priority_control;
	u32	qm_base_address[DMA_MAX_QMS];
};

struct reg_chan {
	u32	control;
	u32	mode;
	u32	__rsvd[6];
};

struct reg_tx_sched {
	u32	prio;
};

struct reg_rx_flow {
	u32	control;
	u32	tags;
	u32	tag_sel;
	u32	fdq_sel[2];
	u32	thresh[3];
};

struct knav_dma_pool_device {
	struct device			*dev;
	struct list_head		list;
};

struct knav_dma_device {
	bool				loopback, enable_all;
	unsigned			tx_priority, rx_priority, rx_timeout;
	unsigned			logical_queue_managers;
	unsigned			qm_base_address[DMA_MAX_QMS];
	struct reg_global __iomem	*reg_global;
	struct reg_chan __iomem		*reg_tx_chan;
	struct reg_rx_flow __iomem	*reg_rx_flow;
	struct reg_chan __iomem		*reg_rx_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	unsigned			max_rx_chan, max_tx_chan;
	unsigned			max_rx_flow;
	char				name[32];
	atomic_t			ref_count;
	struct list_head		list;
	struct list_head		chan_list;
	spinlock_t			lock;
};

struct knav_dma_chan {
	enum dma_transfer_direction	direction;
	struct knav_dma_device		*dma;
	atomic_t			ref_count;

	/* registers */
	struct reg_chan __iomem		*reg_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	struct reg_rx_flow __iomem	*reg_rx_flow;

	/* configuration stuff */
	unsigned			channel, flow;
	struct knav_dma_cfg		cfg;
	struct list_head		list;
	spinlock_t			lock;
};

#define chan_number(ch)	((ch->direction == DMA_MEM_TO_DEV) ? \
			ch->channel : ch->flow)

static struct knav_dma_pool_device *kdev;

static bool device_ready;
bool knav_dma_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_dma_device_ready);
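
/*
 * A typical client pattern (illustrative only, not taken from this file):
 * drivers that depend on the packet DMA can defer their own probe until
 * this driver has finished registering, e.g.:
 *
 *	if (!knav_dma_device_ready())
 *		return -EPROBE_DEFER;
 */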

static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
{
	if (!memcmp(&chan->cfg, cfg, sizeof(*cfg)))
		return true;
	else
		return false;
}

static int chan_start(struct knav_dma_chan *chan,
		      struct knav_dma_cfg *cfg)
{
	u32 v = 0;

	spin_lock(&chan->lock);
	if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
		if (cfg->u.tx.filt_pswords)
			v |= DMA_TX_FILT_PSWORDS;
		if (cfg->u.tx.filt_einfo)
			v |= DMA_TX_FILT_EINFO;
		writel_relaxed(v, &chan->reg_chan->mode);
		writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
	}

	if (chan->reg_tx_sched)
		writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

	if (chan->reg_rx_flow) {
		v = 0;

		if (cfg->u.rx.einfo_present)
			v |= CHAN_HAS_EPIB;
		if (cfg->u.rx.psinfo_present)
			v |= CHAN_HAS_PSINFO;
		if (cfg->u.rx.err_mode == DMA_RETRY)
			v |= CHAN_ERR_RETRY;
		v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
		if (cfg->u.rx.psinfo_at_sop)
			v |= CHAN_PSINFO_AT_SOP;
		v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
			<< CHAN_SOP_OFF_SHIFT;
		v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

		writel_relaxed(v, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);

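		/*
		 * Each fdq_sel register packs two free-descriptor queue
		 * selections: fdq[0]/fdq[2] in the upper 16 bits and
		 * fdq[1]/fdq[3] in the lower 16 bits.
		 */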
		v = cfg->u.rx.fdq[0] << 16;
		v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

		v = cfg->u.rx.fdq[2] << 16;
		v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* Keep a copy of the cfg */
	memcpy(&chan->cfg, cfg, sizeof(*cfg));
	spin_unlock(&chan->lock);

	return 0;
}

static int chan_teardown(struct knav_dma_chan *chan)
{
	unsigned long end, value;

	if (!chan->reg_chan)
		return 0;

	/* indicate teardown */
	writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

	/* wait for the dma to shut itself down */
	end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
	do {
		value = readl_relaxed(&chan->reg_chan->control);
		if ((value & DMA_ENABLE) == 0)
			break;
	} while (time_after(end, jiffies));

	if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
		dev_err(kdev->dev, "timeout waiting for teardown\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void chan_stop(struct knav_dma_chan *chan)
{
	spin_lock(&chan->lock);
	if (chan->reg_rx_flow) {
		/* first detach fdqs, starve out the flow */
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* teardown the dma channel */
	chan_teardown(chan);

	/* then disconnect the completion side */
	if (chan->reg_rx_flow) {
		writel_relaxed(0, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
	}

	memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
}

static void dma_hw_enable_all(struct knav_dma_device *dma)
{
	int i;

	for (i = 0; i < dma->max_tx_chan; i++) {
		writel_relaxed(0, &dma->reg_tx_chan[i].mode);
		writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
	}
}


static void knav_dma_hw_init(struct knav_dma_device *dma)
{
	unsigned v;
	int i;

	spin_lock(&dma->lock);
	v = dma->loopback ? DMA_LOOPBACK : 0;
	writel_relaxed(v, &dma->reg_global->emulation_control);

	v = readl_relaxed(&dma->reg_global->perf_control);
	v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
	writel_relaxed(v, &dma->reg_global->perf_control);

	v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
	     (dma->rx_priority << DMA_RX_PRIO_SHIFT));

	writel_relaxed(v, &dma->reg_global->priority_control);

	/* Always enable all Rx channels. Rx paths are managed using flows */
	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->logical_queue_managers; i++)
		writel_relaxed(dma->qm_base_address[i],
			       &dma->reg_global->qm_base_address[i]);
	spin_unlock(&dma->lock);
}

static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
	int i;
	unsigned v;

	spin_lock(&dma->lock);
	v = ~DMA_ENABLE & REG_MASK;

	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(v, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->max_tx_chan; i++)
		writel_relaxed(v, &dma->reg_tx_chan[i].control);
	spin_unlock(&dma->lock);
}

static void dma_debug_show_channels(struct seq_file *s,
				    struct knav_dma_chan *chan)
{
	int i;

	seq_printf(s, "\t%s %d:\t",
		((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
		chan_number(chan));

	if (chan->direction == DMA_MEM_TO_DEV) {
		seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
			chan->cfg.u.tx.filt_einfo,
			chan->cfg.u.tx.filt_pswords,
			chan->cfg.u.tx.priority);
	} else {
		seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
			chan->cfg.u.rx.einfo_present,
			chan->cfg.u.rx.psinfo_present,
			chan->cfg.u.rx.desc_type);
		seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
			chan->cfg.u.rx.dst_q,
			chan->cfg.u.rx.thresh);
		for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
		seq_printf(s, "\n");
	}
}

static void dma_debug_show_devices(struct seq_file *s,
				   struct knav_dma_device *dma)
{
	struct knav_dma_chan *chan;

	list_for_each_entry(chan, &dma->chan_list, list) {
		if (atomic_read(&chan->ref_count))
			dma_debug_show_channels(s, chan);
	}
}

static int knav_dma_debug_show(struct seq_file *s, void *v)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
				   dma->name, dma->max_tx_chan, dma->max_rx_flow);
			dma_debug_show_devices(s, dma);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);

static int of_channel_match_helper(struct device_node *np, const char *name,
				   const char **dma_instance)
{
	struct of_phandle_args args;
	struct device_node *dma_node;
	int index;

	dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
	if (!dma_node)
		return -ENODEV;

	*dma_instance = dma_node->name;
	index = of_property_match_string(np, "ti,navigator-dma-names", name);
	if (index < 0) {
		dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
		return -ENODEV;
	}

	if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
					     1, index, &args)) {
		dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
		return -ENODEV;
	}

	if (args.args[0] < 0) {
		dev_err(kdev->dev, "Missing args for %s\n", name);
		return -ENODEV;
	}

	return args.args[0];
}

/**
 * knav_dma_open_channel() - try to setup an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 * @config: dma configuration parameters
 *
 * Returns pointer to appropriate DMA channel on success or error.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
			    struct knav_dma_cfg *config)
{
	struct knav_dma_chan *chan;
	struct knav_dma_device *dma;
	bool found = false;
	int chan_num = -1;
	const char *instance;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return (void *)-EINVAL;
	}

	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
		return (void *)-EINVAL;
	}

	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive" :
		"unknown", chan_num, instance);

	if (config->direction != DMA_MEM_TO_DEV &&
	    config->direction != DMA_DEV_TO_MEM) {
		dev_err(kdev->dev, "bad direction\n");
		return (void *)-EINVAL;
	}

	/* Look for correct dma instance */
	list_for_each_entry(dma, &kdev->list, list) {
		if (!strcmp(dma->name, instance)) {
			found = true;
			break;
		}
	}
	if (!found) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
	found = false;
	list_for_each_entry(chan, &dma->chan_list, list) {
		if (config->direction == DMA_MEM_TO_DEV) {
			if (chan->channel == chan_num) {
				found = true;
				break;
			}
		} else {
			if (chan->flow == chan_num) {
				found = true;
				break;
			}
		}
	}
	if (!found) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
			chan_num, instance);
		return (void *)-EINVAL;
	}

	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config mismatch\n",
				chan_num);
			return (void *)-EINVAL;
		}
	}

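	/*
	 * First reference on the DMA instance brings up the hardware;
	 * first reference on the channel programs the channel/flow itself.
	 * Subsequent opens just reuse the existing (matching) configuration.
	 */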
	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);

	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
		chan_num, instance);

	return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);
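
/*
 * Illustrative usage sketch (not taken from this driver; the "netrx0" name
 * and the queue numbers are hypothetical and would normally come from the
 * client's "ti,navigator-dma-names" binding and its queue allocation):
 *
 *	struct knav_dma_cfg cfg = { 0 };
 *	void *rx_chan;
 *
 *	cfg.direction		= DMA_DEV_TO_MEM;
 *	cfg.u.rx.einfo_present	= true;
 *	cfg.u.rx.psinfo_present	= true;
 *	cfg.u.rx.err_mode	= DMA_RETRY;
 *	cfg.u.rx.dst_q		= 8704;
 *	cfg.u.rx.fdq[0]		= 8705;
 *
 *	rx_chan = knav_dma_open_channel(dev, "netrx0", &cfg);
 *	if (IS_ERR(rx_chan))
 *		return PTR_ERR(rx_chan);
 *
 * and knav_dma_close_channel(rx_chan) undoes this on teardown.
 */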

/**
 * knav_dma_close_channel() - Destroy a dma channel
 * @channel: dma channel handle
 */
void knav_dma_close_channel(void *channel)
{
	struct knav_dma_chan *chan = channel;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return;
	}

	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);

	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
		chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);

static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
				     struct device_node *node,
				     unsigned index, resource_size_t *_size)
{
	struct device *dev = kdev->dev;
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	if (_size)
		*_size = resource_size(&res);

	return regs;
}

static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
{
	struct knav_dma_device *dma = chan->dma;

	chan->flow = flow;
	chan->reg_rx_flow = dma->reg_rx_flow + flow;
	chan->channel = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);

	return 0;
}

static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
{
	struct knav_dma_device *dma = chan->dma;

	chan->channel = channel;
	chan->reg_chan = dma->reg_tx_chan + channel;
	chan->reg_tx_sched = dma->reg_tx_sched + channel;
	chan->flow = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);

	return 0;
}

static int pktdma_init_chan(struct knav_dma_device *dma,
			    enum dma_transfer_direction dir,
			    unsigned chan_num)
{
	struct device *dev = kdev->dev;
	struct knav_dma_chan *chan;
	int ret = -EINVAL;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&chan->list);
	chan->dma = dma;
	chan->direction = DMA_TRANS_NONE;
	atomic_set(&chan->ref_count, 0);
	spin_lock_init(&chan->lock);

	if (dir == DMA_MEM_TO_DEV) {
		chan->direction = dir;
		ret = pktdma_init_tx_chan(chan, chan_num);
	} else if (dir == DMA_DEV_TO_MEM) {
		chan->direction = dir;
		ret = pktdma_init_rx_chan(chan, chan_num);
	} else {
		dev_err(dev, "channel(%d) direction unknown\n", chan_num);
	}

	list_add_tail(&chan->list, &dma->chan_list);

	return ret;
}

static int dma_init(struct device_node *cloud, struct device_node *dma_node)
{
	unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
	struct device_node *node = dma_node;
	struct knav_dma_device *dma;
	int ret, len, num_chan = 0;
	resource_size_t size;
	u32 timeout;
	u32 i;

	dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		dev_err(kdev->dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&dma->list);
	INIT_LIST_HEAD(&dma->chan_list);

	if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
		dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->logical_queue_managers = len / sizeof(u32);
	if (dma->logical_queue_managers > DMA_MAX_QMS) {
		dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
			 dma->logical_queue_managers);
		dma->logical_queue_managers = DMA_MAX_QMS;
	}

	ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
					 dma->qm_base_address,
					 dma->logical_queue_managers);
	if (ret) {
		dev_err(kdev->dev, "invalid navigator cloud addresses\n");
		return -ENODEV;
	}

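	/*
	 * Register regions, in the order they appear in the DMA node's
	 * "reg" property: global config, tx channel config, rx channel
	 * config, tx scheduler and rx flow config.
	 */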
	dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
	if (IS_ERR(dma->reg_global))
		return PTR_ERR(dma->reg_global);
	if (size < sizeof(struct reg_global)) {
		dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
		return -ENODEV;
	}

	dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
	if (IS_ERR(dma->reg_tx_chan))
		return PTR_ERR(dma->reg_tx_chan);

	max_tx_chan = size / sizeof(struct reg_chan);
	dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
	if (IS_ERR(dma->reg_rx_chan))
		return PTR_ERR(dma->reg_rx_chan);

	max_rx_chan = size / sizeof(struct reg_chan);
	dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
	if (IS_ERR(dma->reg_tx_sched))
		return PTR_ERR(dma->reg_tx_sched);

	max_tx_sched = size / sizeof(struct reg_tx_sched);
	dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
	if (IS_ERR(dma->reg_rx_flow))
		return PTR_ERR(dma->reg_rx_flow);

	max_rx_flow = size / sizeof(struct reg_rx_flow);
	dma->rx_priority = DMA_PRIO_DEFAULT;
	dma->tx_priority = DMA_PRIO_DEFAULT;

	dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
	dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);

	ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
	if (ret < 0) {
		dev_dbg(kdev->dev, "unspecified rx timeout using value %d\n",
			DMA_RX_TIMEOUT_DEFAULT);
		timeout = DMA_RX_TIMEOUT_DEFAULT;
	}

	dma->rx_timeout = timeout;
	dma->max_rx_chan = max_rx_chan;
	dma->max_rx_flow = max_rx_flow;
	dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
	atomic_set(&dma->ref_count, 0);
	strcpy(dma->name, node->name);
	spin_lock_init(&dma->lock);

	for (i = 0; i < dma->max_tx_chan; i++) {
		if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
			num_chan++;
	}

	for (i = 0; i < dma->max_rx_flow; i++) {
		if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
			num_chan++;
	}

	list_add_tail(&dma->list, &kdev->list);

	/*
	 * For DSP software usecases or userspace transport software, setup all
	 * the DMA hardware resources.
	 */
	if (dma->enable_all) {
		atomic_inc(&dma->ref_count);
		knav_dma_hw_init(dma);
		dma_hw_enable_all(dma);
	}

	dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
		 dma->name, num_chan, dma->max_rx_flow,
		 dma->max_tx_chan, dma->max_rx_chan,
		 dma->loopback ? ", loopback" : "");

	return 0;
}

static int knav_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	int ret = 0;

	if (!node) {
		dev_err(&pdev->dev, "could not find device info\n");
		return -EINVAL;
	}

	kdev = devm_kzalloc(dev,
			    sizeof(struct knav_dma_pool_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}

	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->list);

	pm_runtime_enable(kdev->dev);
	ret = pm_runtime_get_sync(kdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(kdev->dev);
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
		goto err_pm_disable;
	}

	/* Initialise all packet dmas */
	for_each_child_of_node(node, child) {
		ret = dma_init(node, child);
		if (ret) {
			dev_err(&pdev->dev, "init failed with %d\n", ret);
			break;
		}
	}

	if (list_empty(&kdev->list)) {
		dev_err(dev, "no valid dma instance\n");
		ret = -ENODEV;
		goto err_put_sync;
	}

	debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_dma_debug_fops);

	device_ready = true;
	return ret;

err_put_sync:
	pm_runtime_put_sync(kdev->dev);
err_pm_disable:
	pm_runtime_disable(kdev->dev);

	return ret;
}

static int knav_dma_remove(struct platform_device *pdev)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-navigator-dma", },
	{},
};

MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver knav_dma_driver = {
	.probe	= knav_dma_probe,
	.remove	= knav_dma_remove,
	.driver = {
		.name		= "keystone-navigator-dma",
		.of_match_table	= of_match,
	},
};
module_platform_driver(knav_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");