/*
 * Marvell Orion SoCs IRQ chip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>

/*
 * Orion SoC main interrupt controller
 */
#define ORION_IRQS_PER_CHIP	32

#define ORION_IRQ_CAUSE		0x00
#define ORION_IRQ_MASK		0x04
#define ORION_IRQ_FIQ_MASK	0x08
#define ORION_IRQ_ENDP_MASK	0x0c

static struct irq_domain *orion_irq_domain;

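/*
 * Worked example for the dispatch loop below (illustrative values):
 * with gc->mask_cache = 0x00000006 and CAUSE reading 0x00000007,
 * stat is 0x6; __fls() returns the highest set bit first, so hwirq 2
 * is handled, then hwirq 1, while the masked bit 0 is never
 * dispatched even though its cause bit is set.
 */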
static void
__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
{
	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
	int n, base = 0;

	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);
		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
			   gc->mask_cache;
		while (stat) {
			u32 hwirq = __fls(stat);

			handle_domain_irq(orion_irq_domain,
					  gc->irq_base + hwirq, regs);
			stat &= ~(1 << hwirq);
		}
	}
}

static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%pOFn: unable to add irq domain\n", np);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->full_name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%pOFn: unable to alloc irq domain gc\n", np);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%pOFn: unable to request mem region %d",
			      np, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%pOFn: unable to map resource %d", np, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
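/*
 * Illustrative device tree node for the main controller (addresses are
 * examples only, not taken from any particular board): each "reg" tuple
 * describes one 32-interrupt chip, which is how orion_irq_init() above
 * counts num_chips.
 *
 *	intc: interrupt-controller {
 *		compatible = "marvell,orion-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0x20200 0x08>, <0x20210 0x08>;
 *	};
 */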

/*
 * Orion SoC bridge interrupt controller
 */
#define ORION_BRIDGE_IRQ_CAUSE	0x00
#define ORION_BRIDGE_IRQ_MASK	0x04

static void orion_bridge_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *d = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
		   gc->mask_cache;

	while (stat) {
		u32 hwirq = __fls(stat);

		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
		stat &= ~(1 << hwirq);
	}
}

/*
 * Bridge IRQ_CAUSE is asserted regardless of the IRQ_MASK register.
 * To avoid interrupt events on stale irqs, we clear them before unmask.
 */
static unsigned int orion_bridge_irq_startup(struct irq_data *d)
{
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	ct->chip.irq_ack(d);
	ct->chip.irq_unmask(d);
	return 0;
}

static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%pOFn: unable to add irq domain\n", np);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%pOFn: unable to alloc irq domain gc\n", np);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%pOFn: unable to get resource\n", np);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%pOFn: unable to request mem region\n", np);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%pOFn: unable to parse irq\n", np);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%pOFn: unable to map resource\n", np);
		return -ENOMEM;
	}

	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
					 domain);

	return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
		"marvell,orion-bridge-intc", orion_bridge_irq_init);
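/*
 * Illustrative device tree node for the bridge controller (register
 * address and interrupt numbers are examples only): "interrupts" names
 * the parent interrupt used for the chained handler, and the optional
 * "marvell,#interrupts" overrides the default of 32 sources.
 *
 *	bridge_intc: interrupt-controller {
 *		compatible = "marvell,orion-bridge-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0x20110 0x08>;
 *		interrupts = <0>;
 *		marvell,#interrupts = <5>;
 *	};
 */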