/*
 * Xtensa MX interrupt distributor
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/of.h>

#include <asm/mxregs.h>

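/*
 * Hardware IRQ layout as used below: the first HW_IRQ_IPI_COUNT
 * interrupts are software interrupts used as IPIs; external interrupts
 * handled by the MX distributor start at HW_IRQ_MX_BASE, and external
 * interrupt numbers from the DT are offset by HW_IRQ_EXTERN_BASE during
 * translation.
 */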
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3

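/*
 * Per-CPU cache of the INTENABLE special register, covering interrupts
 * that are masked locally rather than at the MX distributor.
 */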
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

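/*
 * Map a hwirq to a Linux irq: IPIs get the per-CPU handler; all other
 * interrupts are single-target and fall through to the generic xtensa
 * mapping.
 */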
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	if (hw < HW_IRQ_IPI_COUNT) {
		struct irq_chip *irq_chip = d->host_data;

		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "ipi");
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return xtensa_irq_map(d, irq, hw);
}

/*
 * Device Tree IRQ specifier translation function that works with one- or
 * two-cell bindings. The first cell maps directly to the hwirq number.
 * The second cell, if present, specifies whether the hwirq number is
 * external (1) or internal (0).
 */
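/*
 * Illustrative specifiers (hypothetical device, not from a real DTS):
 *
 *	interrupts = <3>;	// one cell: internal interrupt, hwirq 3
 *	interrupts = <3 0>;	// two cells: internal interrupt, hwirq 3
 *	interrupts = <3 1>;	// two cells: external interrupt 3,
 *				// offset by HW_IRQ_EXTERN_BASE below
 */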
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
			out_hwirq, out_type);
}

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};

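/*
 * Called from the common init path below and, as the name suggests,
 * during secondary CPU bringup: seed the per-CPU mask cache and enable
 * all external edge/level interrupts in INTENABLE.
 */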
void secondary_init_irq(void)
{
	__this_cpu_write(cached_irq_mask,
			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
	xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}

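/*
 * Mask an interrupt: distributor-routed external interrupts are masked
 * via the MIENG register; everything else is masked in the per-CPU
 * INTENABLE cache.
 */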
static void xtensa_mx_irq_mask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
			return;
		}
	}
	mask = __this_cpu_read(cached_irq_mask) & ~mask;
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}

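/*
 * Unmask an interrupt: the mirror image of xtensa_mx_irq_mask(), using
 * MIENGSET for distributor-routed external interrupts.
 */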
static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
			return;
		}
	}
	mask |= __this_cpu_read(cached_irq_mask);
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}

static void xtensa_mx_irq_enable(struct irq_data *d)
{
	xtensa_mx_irq_unmask(d);
}

static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
}

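/* Acknowledge an interrupt by writing its bit to the INTCLEAR register. */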
static void xtensa_mx_irq_ack(struct irq_data *d)
{
	xtensa_set_sr(1 << d->hwirq, intclear);
}

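/*
 * Retriggering is only possible for software interrupts, which can be
 * set pending again by writing INTSET.
 */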
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
		return 0;
	xtensa_set_sr(mask, intset);
	return 1;
}

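/*
 * Route a distributor interrupt to a single online CPU from the
 * requested mask by programming the corresponding MIROUT register.
 */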
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
		const struct cpumask *dest, bool force)
{
	int cpu = cpumask_any_and(dest, cpu_online_mask);
	unsigned int mask = 1u << cpu;

	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return 0;
}

static struct irq_chip xtensa_mx_irq_chip = {
	.name = "xtensa-mx",
	.irq_enable = xtensa_mx_irq_enable,
	.irq_disable = xtensa_mx_irq_disable,
	.irq_mask = xtensa_mx_irq_mask,
	.irq_unmask = xtensa_mx_irq_unmask,
	.irq_ack = xtensa_mx_irq_ack,
	.irq_retrigger = xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};

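/*
 * Common init: make this the default IRQ domain, set up the boot CPU's
 * interrupt state, and route all external interrupts to CPU 0.
 */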
static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
{
	unsigned int i;

	irq_set_default_host(root_domain);
	secondary_init_irq();

	/* Initialize default IRQ routing to CPU 0 */
	for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
		set_er(1, MIROUT(i));
}

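/*
 * Legacy (non-DT) setup: pre-allocate Linux irqs 1..NR_IRQS-1 for
 * hwirqs starting at 0. The DT path below uses a linear domain instead.
 */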
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	xtensa_mx_init_common(root_domain);
	return 0;
}

static int __init xtensa_mx_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);
	xtensa_mx_init_common(root_domain);
	return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);