// SPDX-License-Identifier: GPL-2.0-only
//
// Author: Steve Chen <schen@mvista.com>
// Copyright (C) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
// Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
// Copyright (C) 2019, Texas Instruments
//
// TI Common Platform Interrupt Controller (cp_intc) driver

#include <linux/export.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/irq-davinci-cp-intc.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include <asm/exception.h>

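/*
 * Register offsets into the memory-mapped cp_intc region.  The *_IDX_SET
 * and *_IDX_CLR registers are indexed: writing a system (or host) interrupt
 * number to them acts on that single interrupt.  The macros taking (n)
 * address the n-th 32-bit word of a per-interrupt bit array (SYS_*,
 * HOST_ENABLE) or of the channel map, which holds one byte per interrupt.
 */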
#define DAVINCI_CP_INTC_CTRL			0x04
#define DAVINCI_CP_INTC_HOST_CTRL		0x0c
#define DAVINCI_CP_INTC_GLOBAL_ENABLE		0x10
#define DAVINCI_CP_INTC_SYS_STAT_IDX_CLR	0x24
#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET	0x28
#define DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR	0x2c
#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET	0x34
#define DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR	0x38
#define DAVINCI_CP_INTC_PRIO_IDX		0x80
#define DAVINCI_CP_INTC_SYS_STAT_CLR(n)		(0x0280 + ((n) << 2))
#define DAVINCI_CP_INTC_SYS_ENABLE_CLR(n)	(0x0380 + ((n) << 2))
#define DAVINCI_CP_INTC_CHAN_MAP(n)		(0x0400 + ((n) << 2))
#define DAVINCI_CP_INTC_SYS_POLARITY(n)		(0x0d00 + ((n) << 2))
#define DAVINCI_CP_INTC_SYS_TYPE(n)		(0x0d80 + ((n) << 2))
#define DAVINCI_CP_INTC_HOST_ENABLE(n)		(0x1500 + ((n) << 2))
#define DAVINCI_CP_INTC_PRI_INDX_MASK		GENMASK(9, 0)
#define DAVINCI_CP_INTC_GPIR_NONE		BIT(31)

static void __iomem *davinci_cp_intc_base;
static struct irq_domain *davinci_cp_intc_irq_domain;

static inline unsigned int davinci_cp_intc_read(unsigned int offset)
{
	return readl_relaxed(davinci_cp_intc_base + offset);
}

static inline void davinci_cp_intc_write(unsigned long value,
					  unsigned int offset)
{
	writel_relaxed(value, davinci_cp_intc_base + offset);
}

static void davinci_cp_intc_ack_irq(struct irq_data *d)
{
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_STAT_IDX_CLR);
}

static void davinci_cp_intc_mask_irq(struct irq_data *d)
{
	/* XXX don't know why we need to disable nIRQ here... */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_CLR);
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_CLR);
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);
}

static void davinci_cp_intc_unmask_irq(struct irq_data *d)
{
	davinci_cp_intc_write(d->hwirq, DAVINCI_CP_INTC_SYS_ENABLE_IDX_SET);
}

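/*
 * Each system interrupt has one bit in the SYS_POLARITY registers
 * (1 = rising edge / active high, 0 = falling edge / active low) and one
 * bit in the SYS_TYPE registers (1 = edge-triggered, 0 = level-triggered).
 * Setting the trigger type is a read-modify-write of both words.
 */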
static int davinci_cp_intc_set_irq_type(struct irq_data *d,
					unsigned int flow_type)
{
	unsigned int reg, mask, polarity, type;

	reg = BIT_WORD(d->hwirq);
	mask = BIT_MASK(d->hwirq);
	polarity = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_POLARITY(reg));
	type = davinci_cp_intc_read(DAVINCI_CP_INTC_SYS_TYPE(reg));

	switch (flow_type) {
	case IRQ_TYPE_EDGE_RISING:
		polarity |= mask;
		type |= mask;
		break;
	case IRQ_TYPE_EDGE_FALLING:
		polarity &= ~mask;
		type |= mask;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		polarity |= mask;
		type &= ~mask;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		polarity &= ~mask;
		type &= ~mask;
		break;
	default:
		return -EINVAL;
	}

	davinci_cp_intc_write(polarity, DAVINCI_CP_INTC_SYS_POLARITY(reg));
	davinci_cp_intc_write(type, DAVINCI_CP_INTC_SYS_TYPE(reg));

	return 0;
}

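/*
 * No .irq_set_wake callback is provided; IRQCHIP_SKIP_SET_WAKE lets
 * irq_set_irq_wake() succeed without calling into this chip.
 */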
static struct irq_chip davinci_cp_intc_irq_chip = {
	.name		= "cp_intc",
	.irq_ack	= davinci_cp_intc_ack_irq,
	.irq_mask	= davinci_cp_intc_mask_irq,
	.irq_unmask	= davinci_cp_intc_unmask_irq,
	.irq_set_type	= davinci_cp_intc_set_irq_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE,
};

static asmlinkage void __exception_irq_entry
davinci_cp_intc_handle_irq(struct pt_regs *regs)
{
	int gpir, irqnr, none;

	/*
	 * The interrupt number is in the first ten bits. The NONE field set
	 * to 1 indicates a spurious irq.
	 */
	gpir = davinci_cp_intc_read(DAVINCI_CP_INTC_PRIO_IDX);
	irqnr = gpir & DAVINCI_CP_INTC_PRI_INDX_MASK;
	none = gpir & DAVINCI_CP_INTC_GPIR_NONE;

	if (unlikely(none)) {
		pr_err_once("%s: spurious irq!\n", __func__);
		return;
	}

	handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
}

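/*
 * All mapped interrupts get the edge-triggered flow handler; the actual
 * hardware trigger mode is configured separately through .irq_set_type.
 */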
static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
				    irq_hw_number_t hw)
{
	pr_debug("cp_intc_host_map(%d, 0x%lx)\n", virq, hw);

	irq_set_chip(virq, &davinci_cp_intc_irq_chip);
	irq_set_probe(virq);
	irq_set_handler(virq, handle_edge_irq);

	return 0;
}

static const struct irq_domain_ops davinci_cp_intc_irq_domain_ops = {
	.map = davinci_cp_intc_host_map,
	.xlate = irq_domain_xlate_onetwocell,
};

static int __init
davinci_cp_intc_do_init(const struct davinci_cp_intc_config *config,
			struct device_node *node)
{
	unsigned int num_regs = BITS_TO_LONGS(config->num_irqs);
	int offset, irq_base;
	struct resource *req;

	req = request_mem_region(config->reg.start,
				 resource_size(&config->reg),
				 "davinci-cp-intc");
	if (!req) {
		pr_err("%s: register range busy\n", __func__);
		return -EBUSY;
	}

	davinci_cp_intc_base = ioremap(config->reg.start,
				       resource_size(&config->reg));
	if (!davinci_cp_intc_base) {
		pr_err("%s: unable to ioremap register range\n", __func__);
		return -EINVAL;
	}

	davinci_cp_intc_write(0, DAVINCI_CP_INTC_GLOBAL_ENABLE);

	/* Disable all host interrupts */
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_ENABLE(0));

	/* Disable system interrupts */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(~0,
			DAVINCI_CP_INTC_SYS_ENABLE_CLR(offset));

	/* Set to normal mode, no nesting, no priority hold */
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_CTRL);
	davinci_cp_intc_write(0, DAVINCI_CP_INTC_HOST_CTRL);

	/* Clear system interrupt status */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(~0,
			DAVINCI_CP_INTC_SYS_STAT_CLR(offset));

	/* Enable nIRQ (what about nFIQ?) */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_HOST_ENABLE_IDX_SET);

	/* Default all priorities to channel 7. */
	num_regs = (config->num_irqs + 3) >> 2;	/* 4 channels per register */
	for (offset = 0; offset < num_regs; offset++)
		davinci_cp_intc_write(0x07070707,
			DAVINCI_CP_INTC_CHAN_MAP(offset));

	irq_base = irq_alloc_descs(-1, 0, config->num_irqs, 0);
	if (irq_base < 0) {
		pr_err("%s: unable to allocate interrupt descriptors: %d\n",
		       __func__, irq_base);
		return irq_base;
	}

	davinci_cp_intc_irq_domain = irq_domain_add_legacy(
					node, config->num_irqs, irq_base, 0,
					&davinci_cp_intc_irq_domain_ops, NULL);

	if (!davinci_cp_intc_irq_domain) {
		pr_err("%s: unable to create an interrupt domain\n", __func__);
		return -EINVAL;
	}

	set_handle_irq(davinci_cp_intc_handle_irq);

	/* Enable global interrupt */
	davinci_cp_intc_write(1, DAVINCI_CP_INTC_GLOBAL_ENABLE);

	return 0;
}

int __init davinci_cp_intc_init(const struct davinci_cp_intc_config *config)
{
	return davinci_cp_intc_do_init(config, NULL);
}

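/*
 * Illustrative device-tree node consumed by the parser below; the unit
 * address, reg values and interrupt count are placeholders, not taken
 * from any particular SoC:
 *
 *	intc: interrupt-controller@fffee000 {
 *		compatible = "ti,cp-intc";
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		reg = <0xfffee000 0x2000>;
 *		ti,intc-size = <101>;
 *	};
 */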
static int __init davinci_cp_intc_of_init(struct device_node *node,
					  struct device_node *parent)
{
	struct davinci_cp_intc_config config = { };
	int ret;

	ret = of_address_to_resource(node, 0, &config.reg);
	if (ret) {
		pr_err("%s: unable to get the register range from device-tree\n",
		       __func__);
		return ret;
	}

	ret = of_property_read_u32(node, "ti,intc-size", &config.num_irqs);
	if (ret) {
		pr_err("%s: unable to read the 'ti,intc-size' property\n",
		       __func__);
		return ret;
	}

	return davinci_cp_intc_do_init(&config, node);
}
IRQCHIP_DECLARE(cp_intc, "ti,cp-intc", davinci_cp_intc_of_init);