// SPDX-License-Identifier: GPL-2.0-only
/*
 *  drivers/irqchip/irq-crossbar.c
 *
 *  Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
 *  Author: Sricharan R <r.sricharan@ti.com>
 */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

#define IRQ_FREE	-1
#define IRQ_RESERVED	-2
#define IRQ_SKIP	-3
#define GIC_IRQ_START	32

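/*
 * irq_map[] tracks, per GIC SPI line, the crossbar input currently routed
 * to it. IRQ_FREE lines can be allocated, IRQ_RESERVED lines have no
 * crossbar register behind them, and IRQ_SKIP lines are hardwired to
 * bypass the crossbar.
 */
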
/**
 * struct crossbar_device - crossbar device description
 * @lock: spinlock serializing access to @irq_map
 * @int_max: maximum number of supported interrupts
 * @safe_map: safe default value to initialize the crossbar
 * @max_crossbar_sources: maximum number of crossbar sources
 * @irq_map: array mapping each interrupt to its crossbar input number
 * @crossbar_base: crossbar base address
 * @register_offsets: register offset for each irq number
 * @write: register write function pointer
 */
struct crossbar_device {
	raw_spinlock_t lock;
	uint int_max;
	uint safe_map;
	uint max_crossbar_sources;
	uint *irq_map;
	void __iomem *crossbar_base;
	int *register_offsets;
	void (*write)(int, int);
};

static struct crossbar_device *cb;

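/*
 * Crossbar registers are accessed through cb->write(); the access width
 * (8, 16 or 32 bit) is chosen at init time from the "ti,reg-size" property.
 */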
static void crossbar_writel(int irq_no, int cb_no)
{
	writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writew(int irq_no, int cb_no)
{
	writew(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

static void crossbar_writeb(int irq_no, int cb_no)
{
	writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]);
}

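/*
 * The crossbar itself provides no mask/ack logic; every interrupt control
 * operation is forwarded to the parent (GIC) irqchip.
 */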
static struct irq_chip crossbar_chip = {
	.name			= "CBAR",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= irq_chip_set_type_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SKIP_SET_WAKE,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};

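/*
 * Grab a free GIC SPI, scanning down from the highest supported line,
 * allocate it in the parent domain and program the crossbar so that the
 * requested input (hwirq) is routed to it.
 */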
static int allocate_gic_irq(struct irq_domain *domain, unsigned virq,
			    irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	int i;
	int err;

	if (!irq_domain_get_of_node(domain->parent))
		return -EINVAL;

	raw_spin_lock(&cb->lock);
	for (i = cb->int_max - 1; i >= 0; i--) {
		if (cb->irq_map[i] == IRQ_FREE) {
			cb->irq_map[i] = hwirq;
			break;
		}
	}
	raw_spin_unlock(&cb->lock);

	if (i < 0)
		return -ENODEV;

	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = 0;	/* SPI */
	fwspec.param[1] = i;
	fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		cb->irq_map[i] = IRQ_FREE;
	else
		cb->write(i, hwirq);

	return err;
}

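/*
 * The crossbar uses three-cell GIC-style specifiers: cell 0 must be 0 (SPI),
 * cell 1 is the crossbar input number and cell 2 the trigger type. Each
 * requested input is then routed to a dynamically chosen free SPI by
 * allocate_gic_irq().
 */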
static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq,
				 unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];
	if ((hwirq + nr_irqs) > cb->max_crossbar_sources)
		return -EINVAL;	/* Can't deal with this */

	for (i = 0; i < nr_irqs; i++) {
		int err = allocate_gic_irq(d, virq + i, hwirq + i);

		if (err)
			return err;

		irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i,
					      &crossbar_chip, NULL);
	}

	return 0;
}

/**
 * crossbar_domain_free - unmap/free a crossbar<->irq connection
 * @domain: domain of irq to unmap
 * @virq: virq number
 * @nr_irqs: number of irqs to free
 *
 * We do not maintain a use count of the total number of map/unmap
 * calls for a particular irq to find out if an irq can really be
 * unmapped. This is because unmap is called during irq_dispose_mapping(irq),
 * after which the irq is unusable anyway. So an explicit map has to be
 * called after that.
 */
static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	int i;

	raw_spin_lock(&cb->lock);
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_domain_reset_irq_data(d);
		cb->irq_map[d->hwirq] = IRQ_FREE;
		cb->write(d->hwirq, cb->safe_map);
	}
	raw_spin_unlock(&cb->lock);
}

static int crossbar_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops crossbar_domain_ops = {
	.alloc		= crossbar_domain_alloc,
	.free		= crossbar_domain_free,
	.translate	= crossbar_domain_translate,
};

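/*
 * Parse the crossbar DT node: map the registers, size irq_map from
 * "ti,max-irqs", mark the "ti,irqs-reserved" and "ti,irqs-skip" entries,
 * pick the register access width from "ti,reg-size", precompute the per-irq
 * register offsets and park every routable line on the "ti,irqs-safe-map"
 * input.
 */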
static int __init crossbar_of_init(struct device_node *node)
{
	u32 max = 0, entry, reg_size;
	int i, size, reserved = 0;
	const __be32 *irqsr;
	int ret = -ENOMEM;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);

	if (!cb)
		return ret;

	cb->crossbar_base = of_iomap(node, 0);
	if (!cb->crossbar_base)
		goto err_cb;

	of_property_read_u32(node, "ti,max-crossbar-sources",
			     &cb->max_crossbar_sources);
	if (!cb->max_crossbar_sources) {
		pr_err("missing 'ti,max-crossbar-sources' property\n");
		ret = -EINVAL;
		goto err_base;
	}

	of_property_read_u32(node, "ti,max-irqs", &max);
	if (!max) {
		pr_err("missing 'ti,max-irqs' property\n");
		ret = -EINVAL;
		goto err_base;
	}
	cb->irq_map = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->irq_map)
		goto err_base;

	cb->int_max = max;

	for (i = 0; i < max; i++)
		cb->irq_map[i] = IRQ_FREE;

	/* Get and mark reserved irqs */
	irqsr = of_get_property(node, "ti,irqs-reserved", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-reserved",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid reserved entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_RESERVED;
		}
	}

	/* Skip irqs hardwired to bypass the crossbar */
	irqsr = of_get_property(node, "ti,irqs-skip", &size);
	if (irqsr) {
		size /= sizeof(__be32);

		for (i = 0; i < size; i++) {
			of_property_read_u32_index(node,
						   "ti,irqs-skip",
						   i, &entry);
			if (entry >= max) {
				pr_err("Invalid skip entry\n");
				ret = -EINVAL;
				goto err_irq_map;
			}
			cb->irq_map[entry] = IRQ_SKIP;
		}
	}

	cb->register_offsets = kcalloc(max, sizeof(int), GFP_KERNEL);
	if (!cb->register_offsets)
		goto err_irq_map;

	of_property_read_u32(node, "ti,reg-size", &reg_size);

	switch (reg_size) {
	case 1:
		cb->write = crossbar_writeb;
		break;
	case 2:
		cb->write = crossbar_writew;
		break;
	case 4:
		cb->write = crossbar_writel;
		break;
	default:
		pr_err("Invalid reg-size property\n");
		ret = -EINVAL;
		goto err_reg_offset;
	}

	/*
	 * Register offsets are not linear because of the
	 * reserved irqs, so find and store the offsets once.
	 */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED)
			continue;

		cb->register_offsets[i] = reserved;
		reserved += reg_size;
	}

	of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
	/* Initialize the crossbar with safe map to start with */
	for (i = 0; i < max; i++) {
		if (cb->irq_map[i] == IRQ_RESERVED ||
		    cb->irq_map[i] == IRQ_SKIP)
			continue;

		cb->write(i, cb->safe_map);
	}

	raw_spin_lock_init(&cb->lock);

	return 0;

err_reg_offset:
	kfree(cb->register_offsets);
err_irq_map:
	kfree(cb->irq_map);
err_base:
	iounmap(cb->crossbar_base);
err_cb:
	kfree(cb);

	cb = NULL;
	return ret;
}

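/*
 * Probe path: set up the crossbar from DT and stack it as a hierarchical
 * irq domain on top of the parent GIC domain.
 */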
static int __init irqcrossbar_init(struct device_node *node,
				   struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;
	int err;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	err = crossbar_of_init(node);
	if (err)
		return err;

	domain = irq_domain_add_hierarchy(parent_domain, 0,
					  cb->max_crossbar_sources,
					  node, &crossbar_domain_ops,
					  NULL);
	if (!domain) {
		pr_err("%pOF: failed to allocate domain\n", node);
		return -ENOMEM;
	}

	return 0;
}

IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init);