1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * linux/arch/arm/mach-pxa/irq.c
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Generic PXA IRQ handling
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Author: Nicolas Pitre
8*4882a593Smuzhiyun * Created: Jun 15, 2001
9*4882a593Smuzhiyun * Copyright: MontaVista Software Inc.
10*4882a593Smuzhiyun */
11*4882a593Smuzhiyun #include <linux/bitops.h>
12*4882a593Smuzhiyun #include <linux/init.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/interrupt.h>
15*4882a593Smuzhiyun #include <linux/syscore_ops.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/irq.h>
18*4882a593Smuzhiyun #include <linux/of_address.h>
19*4882a593Smuzhiyun #include <linux/of_irq.h>
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun #include <asm/exception.h>
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun #include <mach/hardware.h>
24*4882a593Smuzhiyun #include <mach/irqs.h>
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #include "generic.h"
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #define ICIP (0x000)
29*4882a593Smuzhiyun #define ICMR (0x004)
30*4882a593Smuzhiyun #define ICLR (0x008)
31*4882a593Smuzhiyun #define ICFR (0x00c)
32*4882a593Smuzhiyun #define ICPR (0x010)
33*4882a593Smuzhiyun #define ICCR (0x014)
34*4882a593Smuzhiyun #define ICHP (0x018)
35*4882a593Smuzhiyun #define IPR(i) (((i) < 32) ? (0x01c + ((i) << 2)) : \
36*4882a593Smuzhiyun ((i) < 64) ? (0x0b0 + (((i) - 32) << 2)) : \
37*4882a593Smuzhiyun (0x144 + (((i) - 64) << 2)))
38*4882a593Smuzhiyun #define ICHP_VAL_IRQ (1 << 31)
39*4882a593Smuzhiyun #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff)
40*4882a593Smuzhiyun #define IPR_VALID (1 << 31)
41*4882a593Smuzhiyun
42*4882a593Smuzhiyun #define MAX_INTERNAL_IRQS 128
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun /*
45*4882a593Smuzhiyun * This is for peripheral IRQs internal to the PXA chip.
46*4882a593Smuzhiyun */
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun static void __iomem *pxa_irq_base;
49*4882a593Smuzhiyun static int pxa_internal_irq_nr;
50*4882a593Smuzhiyun static bool cpu_has_ipr;
51*4882a593Smuzhiyun static struct irq_domain *pxa_irq_domain;
52*4882a593Smuzhiyun
irq_base(int i)53*4882a593Smuzhiyun static inline void __iomem *irq_base(int i)
54*4882a593Smuzhiyun {
55*4882a593Smuzhiyun static unsigned long phys_base_offset[] = {
56*4882a593Smuzhiyun 0x0,
57*4882a593Smuzhiyun 0x9c,
58*4882a593Smuzhiyun 0x130,
59*4882a593Smuzhiyun };
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun return pxa_irq_base + phys_base_offset[i];
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun
pxa_mask_irq(struct irq_data * d)64*4882a593Smuzhiyun void pxa_mask_irq(struct irq_data *d)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun void __iomem *base = irq_data_get_irq_chip_data(d);
67*4882a593Smuzhiyun irq_hw_number_t irq = irqd_to_hwirq(d);
68*4882a593Smuzhiyun uint32_t icmr = __raw_readl(base + ICMR);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun icmr &= ~BIT(irq & 0x1f);
71*4882a593Smuzhiyun __raw_writel(icmr, base + ICMR);
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun
pxa_unmask_irq(struct irq_data * d)74*4882a593Smuzhiyun void pxa_unmask_irq(struct irq_data *d)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun void __iomem *base = irq_data_get_irq_chip_data(d);
77*4882a593Smuzhiyun irq_hw_number_t irq = irqd_to_hwirq(d);
78*4882a593Smuzhiyun uint32_t icmr = __raw_readl(base + ICMR);
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun icmr |= BIT(irq & 0x1f);
81*4882a593Smuzhiyun __raw_writel(icmr, base + ICMR);
82*4882a593Smuzhiyun }
83*4882a593Smuzhiyun
/*
 * irq_chip for the on-SoC peripheral interrupts ("SC" = system controller).
 * There is no dedicated ack register: ack is implemented as mask, and the
 * line is unmasked again when the handler completes (handle_level_irq flow).
 */
static struct irq_chip pxa_internal_irq_chip = {
	.name = "SC",
	.irq_ack = pxa_mask_irq,
	.irq_mask = pxa_mask_irq,
	.irq_unmask = pxa_unmask_irq,
};
90*4882a593Smuzhiyun
icip_handle_irq(struct pt_regs * regs)91*4882a593Smuzhiyun asmlinkage void __exception_irq_entry icip_handle_irq(struct pt_regs *regs)
92*4882a593Smuzhiyun {
93*4882a593Smuzhiyun uint32_t icip, icmr, mask;
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun do {
96*4882a593Smuzhiyun icip = __raw_readl(pxa_irq_base + ICIP);
97*4882a593Smuzhiyun icmr = __raw_readl(pxa_irq_base + ICMR);
98*4882a593Smuzhiyun mask = icip & icmr;
99*4882a593Smuzhiyun
100*4882a593Smuzhiyun if (mask == 0)
101*4882a593Smuzhiyun break;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun handle_IRQ(PXA_IRQ(fls(mask) - 1), regs);
104*4882a593Smuzhiyun } while (1);
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
ichp_handle_irq(struct pt_regs * regs)107*4882a593Smuzhiyun asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun uint32_t ichp;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun do {
112*4882a593Smuzhiyun __asm__ __volatile__("mrc p6, 0, %0, c5, c0, 0\n": "=r"(ichp));
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun if ((ichp & ICHP_VAL_IRQ) == 0)
115*4882a593Smuzhiyun break;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun handle_IRQ(PXA_IRQ(ICHP_IRQ(ichp)), regs);
118*4882a593Smuzhiyun } while (1);
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
/*
 * Domain .map callback: bind a virq to the internal irq_chip as a level
 * interrupt and record the hwirq's bank base as chip_data for the
 * mask/unmask helpers.
 */
static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* initialize interrupt priority */
	if (cpu_has_ipr)
		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));

	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
				 handle_level_irq);
	irq_set_chip_data(virq, irq_base(hw / 32));

	return 0;
}
136*4882a593Smuzhiyun
/* Legacy-domain ops: one-cell DT specifiers, mapping done in pxa_irq_map(). */
static const struct irq_domain_ops pxa_irq_ops = {
	.map = pxa_irq_map,
	.xlate = irq_domain_xlate_onecell,
};
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun static __init void
pxa_init_irq_common(struct device_node * node,int irq_nr,int (* fn)(struct irq_data *,unsigned int))143*4882a593Smuzhiyun pxa_init_irq_common(struct device_node *node, int irq_nr,
144*4882a593Smuzhiyun int (*fn)(struct irq_data *, unsigned int))
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun int n;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun pxa_internal_irq_nr = irq_nr;
149*4882a593Smuzhiyun pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
150*4882a593Smuzhiyun PXA_IRQ(0), 0,
151*4882a593Smuzhiyun &pxa_irq_ops, NULL);
152*4882a593Smuzhiyun if (!pxa_irq_domain)
153*4882a593Smuzhiyun panic("Unable to add PXA IRQ domain\n");
154*4882a593Smuzhiyun irq_set_default_host(pxa_irq_domain);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun for (n = 0; n < irq_nr; n += 32) {
157*4882a593Smuzhiyun void __iomem *base = irq_base(n >> 5);
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun __raw_writel(0, base + ICMR); /* disable all IRQs */
160*4882a593Smuzhiyun __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun /* only unmasked interrupts kick us out of idle */
163*4882a593Smuzhiyun __raw_writel(1, irq_base(0) + ICCR);
164*4882a593Smuzhiyun
165*4882a593Smuzhiyun pxa_internal_irq_chip.irq_set_wake = fn;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun
pxa_init_irq(int irq_nr,int (* fn)(struct irq_data *,unsigned int))168*4882a593Smuzhiyun void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
169*4882a593Smuzhiyun {
170*4882a593Smuzhiyun BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun pxa_irq_base = io_p2v(0x40d00000);
173*4882a593Smuzhiyun cpu_has_ipr = !cpu_is_pxa25x();
174*4882a593Smuzhiyun pxa_init_irq_common(NULL, irq_nr, fn);
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun #ifdef CONFIG_PM
178*4882a593Smuzhiyun static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
179*4882a593Smuzhiyun static unsigned long saved_ipr[MAX_INTERNAL_IRQS];
180*4882a593Smuzhiyun
pxa_irq_suspend(void)181*4882a593Smuzhiyun static int pxa_irq_suspend(void)
182*4882a593Smuzhiyun {
183*4882a593Smuzhiyun int i;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
186*4882a593Smuzhiyun void __iomem *base = irq_base(i);
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun saved_icmr[i] = __raw_readl(base + ICMR);
189*4882a593Smuzhiyun __raw_writel(0, base + ICMR);
190*4882a593Smuzhiyun }
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun if (cpu_has_ipr) {
193*4882a593Smuzhiyun for (i = 0; i < pxa_internal_irq_nr; i++)
194*4882a593Smuzhiyun saved_ipr[i] = __raw_readl(pxa_irq_base + IPR(i));
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun return 0;
198*4882a593Smuzhiyun }
199*4882a593Smuzhiyun
pxa_irq_resume(void)200*4882a593Smuzhiyun static void pxa_irq_resume(void)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun int i;
203*4882a593Smuzhiyun
204*4882a593Smuzhiyun for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
205*4882a593Smuzhiyun void __iomem *base = irq_base(i);
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun __raw_writel(saved_icmr[i], base + ICMR);
208*4882a593Smuzhiyun __raw_writel(0, base + ICLR);
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun if (cpu_has_ipr)
212*4882a593Smuzhiyun for (i = 0; i < pxa_internal_irq_nr; i++)
213*4882a593Smuzhiyun __raw_writel(saved_ipr[i], pxa_irq_base + IPR(i));
214*4882a593Smuzhiyun
215*4882a593Smuzhiyun __raw_writel(1, pxa_irq_base + ICCR);
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun #else
218*4882a593Smuzhiyun #define pxa_irq_suspend NULL
219*4882a593Smuzhiyun #define pxa_irq_resume NULL
220*4882a593Smuzhiyun #endif
221*4882a593Smuzhiyun
/*
 * Registered by platform code; suspend/resume resolve to NULL stubs
 * when CONFIG_PM is not set (see the #else above).
 */
struct syscore_ops pxa_irq_syscore_ops = {
	.suspend = pxa_irq_suspend,
	.resume = pxa_irq_resume,
};
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun #ifdef CONFIG_OF
/* DT match table for the PXA interrupt controller node. */
static const struct of_device_id intc_ids[] __initconst = {
	{ .compatible = "marvell,pxa-intc", },
	{}
};
232*4882a593Smuzhiyun
pxa_dt_irq_init(int (* fn)(struct irq_data *,unsigned int))233*4882a593Smuzhiyun void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun struct device_node *node;
236*4882a593Smuzhiyun struct resource res;
237*4882a593Smuzhiyun int ret;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun node = of_find_matching_node(NULL, intc_ids);
240*4882a593Smuzhiyun if (!node) {
241*4882a593Smuzhiyun pr_err("Failed to find interrupt controller in arch-pxa\n");
242*4882a593Smuzhiyun return;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun ret = of_property_read_u32(node, "marvell,intc-nr-irqs",
246*4882a593Smuzhiyun &pxa_internal_irq_nr);
247*4882a593Smuzhiyun if (ret) {
248*4882a593Smuzhiyun pr_err("Not found marvell,intc-nr-irqs property\n");
249*4882a593Smuzhiyun return;
250*4882a593Smuzhiyun }
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun ret = of_address_to_resource(node, 0, &res);
253*4882a593Smuzhiyun if (ret < 0) {
254*4882a593Smuzhiyun pr_err("No registers defined for node\n");
255*4882a593Smuzhiyun return;
256*4882a593Smuzhiyun }
257*4882a593Smuzhiyun pxa_irq_base = io_p2v(res.start);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun if (of_find_property(node, "marvell,intc-priority", NULL))
260*4882a593Smuzhiyun cpu_has_ipr = 1;
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun ret = irq_alloc_descs(-1, 0, pxa_internal_irq_nr, 0);
263*4882a593Smuzhiyun if (ret < 0) {
264*4882a593Smuzhiyun pr_err("Failed to allocate IRQ numbers\n");
265*4882a593Smuzhiyun return;
266*4882a593Smuzhiyun }
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
269*4882a593Smuzhiyun }
270*4882a593Smuzhiyun #endif /* CONFIG_OF */
271