1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Support for C64x+ Megamodule Interrupt Controller
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2010, 2011 Texas Instruments Incorporated
6*4882a593Smuzhiyun * Contributed by: Mark Salter <msalter@redhat.com>
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun #include <linux/module.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/io.h>
11*4882a593Smuzhiyun #include <linux/of.h>
12*4882a593Smuzhiyun #include <linux/of_irq.h>
13*4882a593Smuzhiyun #include <linux/of_address.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <asm/soc.h>
16*4882a593Smuzhiyun #include <asm/megamod-pic.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #define NR_COMBINERS 4
19*4882a593Smuzhiyun #define NR_MUX_OUTPUTS 12
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #define IRQ_UNMAPPED 0xffff
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun /*
24*4882a593Smuzhiyun * Megamodule Interrupt Controller register layout
25*4882a593Smuzhiyun */
struct megamod_regs {
	u32 evtflag[8];		/* raw event flags */
	u32 evtset[8];
	u32 evtclr[8];		/* write 1 to clear (ack) an event; see cascade handler */
	u32 reserved0[8];
	u32 evtmask[8];		/* 1 = masked; RMW'd under pic->lock in (un)mask_megamod */
	u32 mevtflag[8];	/* masked (enabled) pending events; polled by cascade */
	u32 expmask[8];
	u32 mexpflag[8];	/* masked exception flags; polled by get_exception() */
	u32 intmux_unused;	/* mux slot for outputs 0-3 is not programmable here */
	u32 intmux[7];		/* 4 x 8-bit source fields per register; see set_megamod_mux */
	u32 reserved1[8];
	u32 aegmux[2];
	u32 reserved2[14];
	u32 intxstat;
	u32 intxclr;
	u32 intdmask;
	u32 reserved3[13];
	u32 evtasrt;		/* write an event number to assert it in software */
};
46*4882a593Smuzhiyun
struct megamod_pic {
	struct irq_domain *irqhost;		/* linear domain, NR_COMBINERS * 32 hwirqs */
	struct megamod_regs __iomem *regs;	/* mapped controller registers */
	raw_spinlock_t lock;			/* serializes evtmask read-modify-write */

	/* hw mux mapping: hwirq routed directly to each core priority output,
	 * or IRQ_UNMAPPED if the output is unused */
	unsigned int output_to_irq[NR_MUX_OUTPUTS];
};

/* The single megamodule PIC instance, set up by megamod_pic_init(). */
static struct megamod_pic *mm_pic;

/* Per-combiner handler data for the cascaded interrupt outputs. */
struct megamod_cascade_data {
	struct megamod_pic *pic;
	int index;	/* combiner number, 0..NR_COMBINERS-1 */
};

static struct megamod_cascade_data cascade_data[NR_COMBINERS];
64*4882a593Smuzhiyun
mask_megamod(struct irq_data * data)65*4882a593Smuzhiyun static void mask_megamod(struct irq_data *data)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
68*4882a593Smuzhiyun irq_hw_number_t src = irqd_to_hwirq(data);
69*4882a593Smuzhiyun u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun raw_spin_lock(&pic->lock);
72*4882a593Smuzhiyun soc_writel(soc_readl(evtmask) | (1 << (src & 31)), evtmask);
73*4882a593Smuzhiyun raw_spin_unlock(&pic->lock);
74*4882a593Smuzhiyun }
75*4882a593Smuzhiyun
unmask_megamod(struct irq_data * data)76*4882a593Smuzhiyun static void unmask_megamod(struct irq_data *data)
77*4882a593Smuzhiyun {
78*4882a593Smuzhiyun struct megamod_pic *pic = irq_data_get_irq_chip_data(data);
79*4882a593Smuzhiyun irq_hw_number_t src = irqd_to_hwirq(data);
80*4882a593Smuzhiyun u32 __iomem *evtmask = &pic->regs->evtmask[src / 32];
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun raw_spin_lock(&pic->lock);
83*4882a593Smuzhiyun soc_writel(soc_readl(evtmask) & ~(1 << (src & 31)), evtmask);
84*4882a593Smuzhiyun raw_spin_unlock(&pic->lock);
85*4882a593Smuzhiyun }
86*4882a593Smuzhiyun
/*
 * irq_chip for megamodule event sources. Only mask/unmask are needed;
 * acking is done by the cascade handler writing evtclr.
 */
static struct irq_chip megamod_chip = {
	.name = "megamod",
	.irq_mask = mask_megamod,
	.irq_unmask = unmask_megamod,
};
92*4882a593Smuzhiyun
/*
 * Chained handler for one combiner's cascaded core interrupt: drain all
 * pending (masked-in) events for that combiner and dispatch each one to
 * the generic IRQ layer.
 */
static void megamod_irq_cascade(struct irq_desc *desc)
{
	struct megamod_cascade_data *cascade;
	struct megamod_pic *pic;
	unsigned int irq;
	u32 events;
	int n, idx;

	cascade = irq_desc_get_handler_data(desc);

	pic = cascade->pic;
	idx = cascade->index;

	/* Re-read MEVTFLAG until no enabled event remains pending. */
	while ((events = soc_readl(&pic->regs->mevtflag[idx])) != 0) {
		n = __ffs(events);

		/* hwirq space is linear: 32 events per combiner */
		irq = irq_linear_revmap(pic->irqhost, idx * 32 + n);

		/*
		 * Clear (ack) the event before handling it, so that a
		 * re-assertion during the handler is picked up by the
		 * next iteration of this loop rather than lost.
		 */
		soc_writel(1 << n, &pic->regs->evtclr[idx]);

		generic_handle_irq(irq);
	}
}
116*4882a593Smuzhiyun
/*
 * irq_domain .map callback: bind a virq to a megamodule event source.
 * Fails for sources already muxed directly to a core priority interrupt,
 * since those are owned by the core controller, not this domain.
 */
static int megamod_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct megamod_pic *pic = h->host_data;
	int i;

	/* We shouldn't see a hwirq which is muxed to core controller */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (pic->output_to_irq[i] == hw)
			return -1;
	}

	irq_set_chip_data(virq, pic);
	irq_set_chip_and_handler(virq, &megamod_chip, handle_level_irq);

	/* Set default irq type */
	irq_set_irq_type(virq, IRQ_TYPE_NONE);

	return 0;
}
136*4882a593Smuzhiyun
/* Linear irq domain over all combiner events; DT cells are one hwirq each. */
static const struct irq_domain_ops megamod_domain_ops = {
	.map = megamod_map,
	.xlate = irq_domain_xlate_onecell,
};
141*4882a593Smuzhiyun
/*
 * Route megamodule event @src to core priority interrupt output @output.
 * An out-of-range @src marks the output as unused (IRQ_UNMAPPED) and
 * leaves the hardware mux untouched.
 *
 * NOTE(review): a valid mapping is not recorded in output_to_irq here;
 * presumably callers rely on init-time state for megamod_map()'s
 * direct-mux check — confirm against init_megamod_pic().
 */
static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
{
	int index, offset;
	u32 val;

	if (src < 0 || src >= (NR_COMBINERS * 32)) {
		pic->output_to_irq[output] = IRQ_UNMAPPED;
		return;
	}

	/* four mappings per mux register, one 8-bit field each */
	index = output / 4;
	offset = (output & 3) * 8;

	/* read-modify-write only this output's field */
	val = soc_readl(&pic->regs->intmux[index]);
	val &= ~(0xff << offset);
	val |= src << offset;
	soc_writel(val, &pic->regs->intmux[index]);
}
161*4882a593Smuzhiyun
/*
 * Parse the MUX mapping, if one exists.
 *
 * The MUX map is an array of up to 12 cells; one for each usable core priority
 * interrupt. The value of a given cell is the megamodule interrupt source
 * which is to be MUXed to the output corresponding to the cell position
 * within the array. The first cell in the array corresponds to priority
 * 4 and the last (12th) cell corresponds to priority 15. The allowed
 * values are 4 - ((NR_COMBINERS * 32) - 1). Note that the combined interrupt
 * sources (0 - 3) are not allowed to be mapped through this property. They
 * are handled through the "interrupts" property. This allows us to use a
 * value of zero as a "do not map" placeholder.
 */
parse_priority_map(struct megamod_pic * pic,int * mapping,int size)175*4882a593Smuzhiyun static void __init parse_priority_map(struct megamod_pic *pic,
176*4882a593Smuzhiyun int *mapping, int size)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun struct device_node *np = irq_domain_get_of_node(pic->irqhost);
179*4882a593Smuzhiyun const __be32 *map;
180*4882a593Smuzhiyun int i, maplen;
181*4882a593Smuzhiyun u32 val;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen);
184*4882a593Smuzhiyun if (map) {
185*4882a593Smuzhiyun maplen /= 4;
186*4882a593Smuzhiyun if (maplen > size)
187*4882a593Smuzhiyun maplen = size;
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun for (i = 0; i < maplen; i++) {
190*4882a593Smuzhiyun val = be32_to_cpup(map);
191*4882a593Smuzhiyun if (val && val >= 4)
192*4882a593Smuzhiyun mapping[i] = val;
193*4882a593Smuzhiyun ++map;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
/*
 * Probe the megamodule PIC described by @np: allocate the driver state,
 * create a linear irq domain covering every combiner event, map the
 * registers, wire each cascaded combiner output to its core priority
 * interrupt, and program the priority MUX from the DT map.
 *
 * Returns the new PIC, or NULL on allocation/mapping failure.
 */
static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
{
	struct megamod_pic *pic;
	int i, irq;
	int mapping[NR_MUX_OUTPUTS];

	pr_info("Initializing C64x+ Megamodule PIC\n");

	pic = kzalloc(sizeof(struct megamod_pic), GFP_KERNEL);
	if (!pic) {
		pr_err("%pOF: Could not alloc PIC structure.\n", np);
		return NULL;
	}

	pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
					     &megamod_domain_ops, pic);
	if (!pic->irqhost) {
		pr_err("%pOF: Could not alloc host.\n", np);
		goto error_free;
	}

	pic->irqhost->host_data = pic;

	raw_spin_lock_init(&pic->lock);

	pic->regs = of_iomap(np, 0);
	if (!pic->regs) {
		pr_err("%pOF: Could not map registers.\n", np);
		/*
		 * NOTE(review): the irq domain allocated above is not torn
		 * down on this path — confirm that is acceptable for a
		 * boot-time-only failure.
		 */
		goto error_free;
	}

	/* Initialize MUX map: all outputs start out unused. */
	for (i = 0; i < ARRAY_SIZE(mapping); i++)
		mapping[i] = IRQ_UNMAPPED;

	parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));

	/*
	 * We can have up to 12 interrupts cascading to the core controller.
	 * These cascades can be from the combined interrupt sources or for
	 * individual interrupt sources. The "interrupts" property only
	 * deals with the cascaded combined interrupts. The individual
	 * interrupts muxed to the core controller use the core controller
	 * as their interrupt parent.
	 */
	for (i = 0; i < NR_COMBINERS; i++) {
		struct irq_data *irq_data;
		irq_hw_number_t hwirq;

		/* virq of combiner i's parent (core) interrupt */
		irq = irq_of_parse_and_map(np, i);
		if (irq == NO_IRQ)
			continue;

		irq_data = irq_get_irq_data(irq);
		if (!irq_data) {
			pr_err("%pOF: combiner-%d no irq_data for virq %d!\n",
			       np, i, irq);
			continue;
		}

		hwirq = irq_data->hwirq;

		/*
		 * Check that device tree provided something in the range
		 * of the core priority interrupts (4 - 15).
		 */
		if (hwirq < 4 || hwirq >= NR_PRIORITY_IRQS) {
			pr_err("%pOF: combiner-%d core irq %ld out of range!\n",
			       np, i, hwirq);
			continue;
		}

		/* record the mapping: priority (hwirq) gets combiner i */
		mapping[hwirq - 4] = i;

		pr_debug("%pOF: combiner-%d cascading to hwirq %ld\n",
			 np, i, hwirq);

		cascade_data[i].pic = pic;
		cascade_data[i].index = i;

		/* mask and clear all events in combiner */
		soc_writel(~0, &pic->regs->evtmask[i]);
		soc_writel(~0, &pic->regs->evtclr[i]);

		irq_set_chained_handler_and_data(irq, megamod_irq_cascade,
						 &cascade_data[i]);
	}

	/* Finally, set up the MUX registers */
	for (i = 0; i < NR_MUX_OUTPUTS; i++) {
		if (mapping[i] != IRQ_UNMAPPED) {
			pr_debug("%pOF: setting mux %d to priority %d\n",
				 np, mapping[i], i + 4);
			set_megamod_mux(pic, mapping[i], i);
		}
	}

	return pic;

error_free:
	kfree(pic);

	return NULL;
}
303*4882a593Smuzhiyun
304*4882a593Smuzhiyun /*
305*4882a593Smuzhiyun * Return next active event after ACK'ing it.
306*4882a593Smuzhiyun * Return -1 if no events active.
307*4882a593Smuzhiyun */
get_exception(void)308*4882a593Smuzhiyun static int get_exception(void)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun int i, bit;
311*4882a593Smuzhiyun u32 mask;
312*4882a593Smuzhiyun
313*4882a593Smuzhiyun for (i = 0; i < NR_COMBINERS; i++) {
314*4882a593Smuzhiyun mask = soc_readl(&mm_pic->regs->mexpflag[i]);
315*4882a593Smuzhiyun if (mask) {
316*4882a593Smuzhiyun bit = __ffs(mask);
317*4882a593Smuzhiyun soc_writel(1 << bit, &mm_pic->regs->evtclr[i]);
318*4882a593Smuzhiyun return (i * 32) + bit;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun }
321*4882a593Smuzhiyun return -1;
322*4882a593Smuzhiyun }
323*4882a593Smuzhiyun
/* Software-assert megamodule event number @val via the EVTASRT register. */
static void assert_event(unsigned int val)
{
	soc_writel(val, &mm_pic->regs->evtasrt);
}
328*4882a593Smuzhiyun
megamod_pic_init(void)329*4882a593Smuzhiyun void __init megamod_pic_init(void)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun struct device_node *np;
332*4882a593Smuzhiyun
333*4882a593Smuzhiyun np = of_find_compatible_node(NULL, NULL, "ti,c64x+megamod-pic");
334*4882a593Smuzhiyun if (!np)
335*4882a593Smuzhiyun return;
336*4882a593Smuzhiyun
337*4882a593Smuzhiyun mm_pic = init_megamod_pic(np);
338*4882a593Smuzhiyun of_node_put(np);
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun soc_ops.get_exception = get_exception;
341*4882a593Smuzhiyun soc_ops.assert_event = assert_event;
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun return;
344*4882a593Smuzhiyun }
345