// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#define TI_XBAR_DRA7		0
#define TI_XBAR_AM335X		1
static const u32 ti_xbar_type[] = {
	[TI_XBAR_DRA7] = TI_XBAR_DRA7,
	[TI_XBAR_AM335X] = TI_XBAR_AM335X,
};
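
/*
 * The identity table above exists to give each crossbar type a u32
 * with a stable address that the of_device_id .data pointers below can
 * reference; ti_dma_xbar_probe() dereferences match->data as a u32.
 */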

static const struct of_device_id ti_dma_xbar_match[] = {
	{
		.compatible = "ti,dra7-dma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_DRA7],
	},
	{
		.compatible = "ti,am335x-edma-crossbar",
		.data = &ti_xbar_type[TI_XBAR_AM335X],
	},
	{},
};

/* Crossbar on AM335x/AM437x family */
#define TI_AM335X_XBAR_LINES	64

struct ti_am335x_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;

	u32 xbar_events; /* maximum number of events to select in xbar */
	u32 dma_requests; /* number of DMA requests on eDMA */
};

struct ti_am335x_xbar_map {
	u16 dma_line;
	u8 mux_val;
};

static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
{
	/*
	 * The TPCC_EVT_MUX_60_63 register layout differs from the rest:
	 * event 63 maps to the lowest byte and event 60 to the highest,
	 * so handle it separately.
	 */
	if (event >= 60 && event <= 63)
		writeb_relaxed(val, iomem + (63 - event % 4));
	else
		writeb_relaxed(val, iomem + event);
}

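/*
 * Worked example of the mapping above, derived from the code: events
 * 0..59 map to byte offset == event number; within the reversed
 * TPCC_EVT_MUX_60_63 register, event 60 -> offset 63,
 * event 61 -> offset 62, event 62 -> offset 61, event 63 -> offset 60.
 */
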
static void ti_am335x_xbar_free(struct device *dev, void *route_data)
{
	struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_am335x_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
	kfree(map);
}

static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_am335x_xbar_map *map;

	if (dma_spec->args_count != 3) {
		/* Drop the reference taken by of_find_device_by_node() */
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[2] >= xbar->xbar_events) {
		dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
			dma_spec->args[2]);
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	if (dma_spec->args[0] >= xbar->dma_requests) {
		dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
			dma_spec->args[0]);
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() for the node is done by the core */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}

	map->dma_line = (u16)dma_spec->args[0];
	map->mux_val = (u8)dma_spec->args[2];

	dma_spec->args[2] = 0;
	dma_spec->args_count = 2;

	dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
		map->mux_val, map->dma_line);

	ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);

	return map;
}

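/*
 * Illustrative consumer binding for this crossbar (the numbers are
 * hypothetical); the cells are the eDMA request line, a second cell
 * passed through unchanged to the eDMA controller, and the crossbar
 * event to mux onto that request line:
 *
 *	dmas = <&edma_xbar 12 0 1>;
 *	dma-names = "tx";
 */
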
static const struct of_device_id ti_am335x_master_match[] = {
	{ .compatible = "ti,edma3-tpcc", },
	{},
};

static int ti_am335x_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_am335x_xbar_data *xbar;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_am335x_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->dma_requests = TI_AM335X_XBAR_LINES;
	}
	of_node_put(dma_node);

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_AM335X_XBAR_LINES);
		xbar->xbar_events = TI_AM335X_XBAR_LINES;
	}

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_am335x_xbar_free;

	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++)
		ti_am335x_xbar_write(xbar->iomem, i, 0);

	ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
				     &xbar->dmarouter);

	return ret;
}

/* Crossbar on DRA7xx family */
#define TI_DRA7_XBAR_OUTPUTS	127
#define TI_DRA7_XBAR_INPUTS	256

struct ti_dra7_xbar_data {
	void __iomem *iomem;

	struct dma_router dmarouter;
	struct mutex mutex;
	unsigned long *dma_inuse;

	u16 safe_val; /* Value to reset the crossbar lines to */
	u32 xbar_requests; /* number of DMA requests connected to XBAR */
	u32 dma_requests; /* number of DMA requests forwarded to DMA */
	u32 dma_offset;
};

struct ti_dra7_xbar_map {
	u16 xbar_in;
	int xbar_out;
};

static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
{
	writew_relaxed(val, iomem + (xbar * 2));
}

static void ti_dra7_xbar_free(struct device *dev, void *route_data)
{
	struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
	struct ti_dra7_xbar_map *map = route_data;

	dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
	mutex_lock(&xbar->mutex);
	clear_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);
	kfree(map);
}

static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
	struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
	struct ti_dra7_xbar_map *map;

	if (dma_spec->args[0] >= xbar->xbar_requests) {
		dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
			dma_spec->args[0]);
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	/* The of_node_put() for the node is done by the core */
	dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
	if (!dma_spec->np) {
		dev_err(&pdev->dev, "Can't get DMA master\n");
		put_device(&pdev->dev);
		return ERR_PTR(-EINVAL);
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&xbar->mutex);
	map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
					    xbar->dma_requests);
	if (map->xbar_out == xbar->dma_requests) {
		mutex_unlock(&xbar->mutex);
		dev_err(&pdev->dev, "Ran out of free DMA requests\n");
		kfree(map);
		of_node_put(dma_spec->np);
		put_device(&pdev->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_bit(map->xbar_out, xbar->dma_inuse);
	mutex_unlock(&xbar->mutex);

	map->xbar_in = (u16)dma_spec->args[0];

	dma_spec->args[0] = map->xbar_out + xbar->dma_offset;

	dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
		map->xbar_in, map->xbar_out);

	ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);

	return map;
}

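/*
 * Illustrative consumer binding for the DRA7 crossbar (the request
 * number is hypothetical); the single cell selects the crossbar input,
 * and the router allocates a free controller request line at runtime:
 *
 *	dmas = <&sdma_xbar 49>;
 *	dma-names = "tx";
 */
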
#define TI_XBAR_EDMA_OFFSET	0
#define TI_XBAR_SDMA_OFFSET	1
static const u32 ti_dma_offset[] = {
	[TI_XBAR_EDMA_OFFSET] = 0,
	[TI_XBAR_SDMA_OFFSET] = 1,
};
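
/*
 * A note on the values above (an inference; not stated in this file):
 * eDMA request lines are numbered from 0, while the sDMA binding
 * numbers its request lines from 1, hence the +1 bias applied to the
 * crossbar output before it is handed to sDMA.
 */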

static const struct of_device_id ti_dra7_master_match[] = {
	{
		.compatible = "ti,omap4430-sdma",
		.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
	},
	{},
};

static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

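/*
 * Example, for illustration: ti_dra7_xbar_reserve(10, 3, map) sets
 * bits 10, 11 and 12, i.e. it marks "len" crossbar outputs starting at
 * "offset" as in use so the allocator will skip them.
 */
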
static int ti_dra7_xbar_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const struct of_device_id *match;
	struct device_node *dma_node;
	struct ti_dra7_xbar_data *xbar;
	struct property *prop;
	u32 safe_val;
	int sz;
	void __iomem *iomem;
	int i, ret;

	if (!node)
		return -ENODEV;

	xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
	if (!xbar)
		return -ENOMEM;

	dma_node = of_parse_phandle(node, "dma-masters", 0);
	if (!dma_node) {
		dev_err(&pdev->dev, "Can't get DMA master node\n");
		return -ENODEV;
	}

	match = of_match_node(ti_dra7_master_match, dma_node);
	if (!match) {
		dev_err(&pdev->dev, "DMA master is not supported\n");
		of_node_put(dma_node);
		return -EINVAL;
	}

	if (of_property_read_u32(dma_node, "dma-requests",
				 &xbar->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR output information, using %u.\n",
			 TI_DRA7_XBAR_OUTPUTS);
		xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
	}
	of_node_put(dma_node);

	xbar->dma_inuse = devm_kcalloc(&pdev->dev,
				       BITS_TO_LONGS(xbar->dma_requests),
				       sizeof(unsigned long), GFP_KERNEL);
	if (!xbar->dma_inuse)
		return -ENOMEM;

	if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
		dev_info(&pdev->dev,
			 "Missing XBAR input information, using %u.\n",
			 TI_DRA7_XBAR_INPUTS);
		xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
	}

	if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
		xbar->safe_val = (u16)safe_val;

	prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
	if (prop) {
		const char pname[] = "ti,reserved-dma-request-ranges";
		u32 (*rsv_events)[2];
		size_t nelm = sz / sizeof(*rsv_events);
		int i;

		if (!nelm)
			return -EINVAL;

		rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
		if (!rsv_events)
			return -ENOMEM;

		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
						 nelm * 2);
		if (ret) {
			kfree(rsv_events);
			return ret;
		}

		for (i = 0; i < nelm; i++) {
			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
					     xbar->dma_inuse);
		}
		kfree(rsv_events);
	}
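
	/*
	 * Illustrative shape of the property consumed above (the values
	 * are hypothetical): each <offset len> pair reserves "len"
	 * crossbar outputs starting at "offset":
	 *
	 *	ti,reserved-dma-request-ranges = <0 2>, <14 3>, <127 1>;
	 */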

	iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(iomem))
		return PTR_ERR(iomem);

	xbar->iomem = iomem;

	xbar->dmarouter.dev = &pdev->dev;
	xbar->dmarouter.route_free = ti_dra7_xbar_free;
	xbar->dma_offset = *(u32 *)match->data;

	mutex_init(&xbar->mutex);
	platform_set_drvdata(pdev, xbar);

	/* Reset the crossbar */
	for (i = 0; i < xbar->dma_requests; i++) {
		if (!test_bit(i, xbar->dma_inuse))
			ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
	}

	ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
				     &xbar->dmarouter);
	if (ret) {
		/* Restore the defaults for the crossbar */
		for (i = 0; i < xbar->dma_requests; i++) {
			if (!test_bit(i, xbar->dma_inuse))
				ti_dra7_xbar_write(xbar->iomem, i, i);
		}
	}

	return ret;
}


static int ti_dma_xbar_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	int ret;

	match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
	if (unlikely(!match))
		return -EINVAL;

	switch (*(u32 *)match->data) {
	case TI_XBAR_DRA7:
		ret = ti_dra7_xbar_probe(pdev);
		break;
	case TI_XBAR_AM335X:
		ret = ti_am335x_xbar_probe(pdev);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported crossbar\n");
		ret = -ENODEV;
		break;
	}

	return ret;
}

static struct platform_driver ti_dma_xbar_driver = {
	.driver = {
		.name = "ti-dma-crossbar",
		.of_match_table = of_match_ptr(ti_dma_xbar_match),
	},
	.probe = ti_dma_xbar_probe,
};

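/*
 * Registered at arch_initcall time, earlier than the default device
 * initcall level, presumably so the crossbar router is in place before
 * the DMA controllers and their client drivers probe.
 */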
static int omap_dmaxbar_init(void)
{
	return platform_driver_register(&ti_dma_xbar_driver);
}
arch_initcall(omap_dmaxbar_init);