1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2016 MediaTek Inc.
4*4882a593Smuzhiyun * Author: Youlin.Pei <youlin.pei@mediatek.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/interrupt.h>
8*4882a593Smuzhiyun #include <linux/io.h>
9*4882a593Smuzhiyun #include <linux/irq.h>
10*4882a593Smuzhiyun #include <linux/irqchip.h>
11*4882a593Smuzhiyun #include <linux/irqdomain.h>
12*4882a593Smuzhiyun #include <linux/of.h>
13*4882a593Smuzhiyun #include <linux/of_irq.h>
14*4882a593Smuzhiyun #include <linux/of_address.h>
15*4882a593Smuzhiyun #include <linux/slab.h>
16*4882a593Smuzhiyun #include <linux/syscore_ops.h>
17*4882a593Smuzhiyun
/*
 * CIRQ register map — each of the SET/CLR/ACK banks is a bitmap with one
 * bit per external interrupt, laid out as consecutive 32-bit words
 * (see mtk_cirq_write_mask()).
 */
#define CIRQ_ACK	0x40	/* write 1 to clear a latched pending bit */
#define CIRQ_MASK_SET	0xc0	/* write 1 to mask an interrupt */
#define CIRQ_MASK_CLR	0x100	/* write 1 to unmask an interrupt */
#define CIRQ_SENS_SET	0x180	/* write 1 -> level sensitive */
#define CIRQ_SENS_CLR	0x1c0	/* write 1 -> edge sensitive */
#define CIRQ_POL_SET	0x240	/* write 1 -> active high / rising */
#define CIRQ_POL_CLR	0x280	/* write 1 -> active low / falling */
#define CIRQ_CONTROL	0x300	/* global control register, bits below */

/* CIRQ_CONTROL bits */
#define CIRQ_EN		0x1	/* enable CIRQ event recording */
#define CIRQ_EDGE	0x2	/* record in edge-only mode */
#define CIRQ_FLUSH	0x4	/* replay recorded events to the parent */
30*4882a593Smuzhiyun
/*
 * Per-controller state for the MediaTek CIRQ wakeup interrupt controller.
 * CIRQ latches external interrupts while the parent controller is powered
 * down during suspend and replays them on resume.
 */
struct mtk_cirq_chip_data {
	void __iomem *base;		/* MMIO base of the CIRQ register block */
	unsigned int ext_irq_start;	/* first parent hwirq covered by CIRQ */
	unsigned int ext_irq_end;	/* last parent hwirq covered (inclusive) */
	struct irq_domain *domain;	/* hierarchy domain stacked on the parent */
};

/* Single instance; also used by the syscore suspend/resume callbacks */
static struct mtk_cirq_chip_data *cirq_data;
39*4882a593Smuzhiyun
mtk_cirq_write_mask(struct irq_data * data,unsigned int offset)40*4882a593Smuzhiyun static void mtk_cirq_write_mask(struct irq_data *data, unsigned int offset)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun struct mtk_cirq_chip_data *chip_data = data->chip_data;
43*4882a593Smuzhiyun unsigned int cirq_num = data->hwirq;
44*4882a593Smuzhiyun u32 mask = 1 << (cirq_num % 32);
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun writel_relaxed(mask, chip_data->base + offset + (cirq_num / 32) * 4);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
/* Mask the interrupt at CIRQ level first, then at the parent controller. */
static void mtk_cirq_mask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_SET);
	irq_chip_mask_parent(data);
}
54*4882a593Smuzhiyun
/* Unmask the interrupt at CIRQ level first, then at the parent controller. */
static void mtk_cirq_unmask(struct irq_data *data)
{
	mtk_cirq_write_mask(data, CIRQ_MASK_CLR);
	irq_chip_unmask_parent(data);
}
60*4882a593Smuzhiyun
mtk_cirq_set_type(struct irq_data * data,unsigned int type)61*4882a593Smuzhiyun static int mtk_cirq_set_type(struct irq_data *data, unsigned int type)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun int ret;
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun switch (type & IRQ_TYPE_SENSE_MASK) {
66*4882a593Smuzhiyun case IRQ_TYPE_EDGE_FALLING:
67*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_POL_CLR);
68*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
69*4882a593Smuzhiyun break;
70*4882a593Smuzhiyun case IRQ_TYPE_EDGE_RISING:
71*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_POL_SET);
72*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_SENS_CLR);
73*4882a593Smuzhiyun break;
74*4882a593Smuzhiyun case IRQ_TYPE_LEVEL_LOW:
75*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_POL_CLR);
76*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_SENS_SET);
77*4882a593Smuzhiyun break;
78*4882a593Smuzhiyun case IRQ_TYPE_LEVEL_HIGH:
79*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_POL_SET);
80*4882a593Smuzhiyun mtk_cirq_write_mask(data, CIRQ_SENS_SET);
81*4882a593Smuzhiyun break;
82*4882a593Smuzhiyun default:
83*4882a593Smuzhiyun break;
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun data = data->parent_data;
87*4882a593Smuzhiyun ret = data->chip->irq_set_type(data, type);
88*4882a593Smuzhiyun return ret;
89*4882a593Smuzhiyun }
90*4882a593Smuzhiyun
/*
 * irq_chip for interrupts routed through CIRQ.  Mask/unmask/set_type are
 * intercepted to keep the CIRQ shadow state in sync; everything else is
 * delegated straight to the parent controller.
 */
static struct irq_chip mtk_cirq_chip = {
	.name			= "MT_CIRQ",
	.irq_mask		= mtk_cirq_mask,
	.irq_unmask		= mtk_cirq_unmask,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= mtk_cirq_set_type,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
102*4882a593Smuzhiyun
mtk_cirq_domain_translate(struct irq_domain * d,struct irq_fwspec * fwspec,unsigned long * hwirq,unsigned int * type)103*4882a593Smuzhiyun static int mtk_cirq_domain_translate(struct irq_domain *d,
104*4882a593Smuzhiyun struct irq_fwspec *fwspec,
105*4882a593Smuzhiyun unsigned long *hwirq,
106*4882a593Smuzhiyun unsigned int *type)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun if (is_of_node(fwspec->fwnode)) {
109*4882a593Smuzhiyun if (fwspec->param_count != 3)
110*4882a593Smuzhiyun return -EINVAL;
111*4882a593Smuzhiyun
112*4882a593Smuzhiyun /* No PPI should point to this domain */
113*4882a593Smuzhiyun if (fwspec->param[0] != 0)
114*4882a593Smuzhiyun return -EINVAL;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun /* cirq support irq number check */
117*4882a593Smuzhiyun if (fwspec->param[1] < cirq_data->ext_irq_start ||
118*4882a593Smuzhiyun fwspec->param[1] > cirq_data->ext_irq_end)
119*4882a593Smuzhiyun return -EINVAL;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun *hwirq = fwspec->param[1] - cirq_data->ext_irq_start;
122*4882a593Smuzhiyun *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
123*4882a593Smuzhiyun return 0;
124*4882a593Smuzhiyun }
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun return -EINVAL;
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
mtk_cirq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * arg)129*4882a593Smuzhiyun static int mtk_cirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
130*4882a593Smuzhiyun unsigned int nr_irqs, void *arg)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun int ret;
133*4882a593Smuzhiyun irq_hw_number_t hwirq;
134*4882a593Smuzhiyun unsigned int type;
135*4882a593Smuzhiyun struct irq_fwspec *fwspec = arg;
136*4882a593Smuzhiyun struct irq_fwspec parent_fwspec = *fwspec;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun ret = mtk_cirq_domain_translate(domain, fwspec, &hwirq, &type);
139*4882a593Smuzhiyun if (ret)
140*4882a593Smuzhiyun return ret;
141*4882a593Smuzhiyun
142*4882a593Smuzhiyun if (WARN_ON(nr_irqs != 1))
143*4882a593Smuzhiyun return -EINVAL;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
146*4882a593Smuzhiyun &mtk_cirq_chip,
147*4882a593Smuzhiyun domain->host_data);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun parent_fwspec.fwnode = domain->parent->fwnode;
150*4882a593Smuzhiyun return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
151*4882a593Smuzhiyun &parent_fwspec);
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
/* Hierarchical domain ops; freeing needs no CIRQ-specific teardown. */
static const struct irq_domain_ops cirq_domain_ops = {
	.translate	= mtk_cirq_domain_translate,
	.alloc		= mtk_cirq_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/*
 * Syscore suspend callback: clear stale latched events, then switch CIRQ
 * into edge-only recording mode and enable it so wakeup interrupts are
 * captured while the parent controller is down.  Always returns 0.
 */
static int mtk_cirq_suspend(void)
{
	u32 value, mask;
	unsigned int irq, hwirq_num;
	bool pending, masked;
	int i, pendret, maskret;

	/*
	 * When external interrupts happened, CIRQ will record the status
	 * even CIRQ is not enabled. When execute flush command, CIRQ will
	 * resend the signals according to the status. So if don't clear the
	 * status, CIRQ will resend the wrong signals.
	 *
	 * arch_suspend_disable_irqs() will be called before CIRQ suspend
	 * callback. If clear all the status simply, the external interrupts
	 * which happened between arch_suspend_disable_irqs and CIRQ suspend
	 * callback will be lost. Using following steps to avoid this issue;
	 *
	 * - Iterate over all the CIRQ supported interrupts;
	 * - For each interrupt, inspect its pending and masked status at GIC
	 *   level;
	 * - If pending and unmasked, it happened between
	 *   arch_suspend_disable_irqs and CIRQ suspend callback, don't ACK
	 *   it. Otherwise, ACK it.
	 */
	hwirq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
	for (i = 0; i < hwirq_num; i++) {
		irq = irq_find_mapping(cirq_data->domain, i);
		if (irq) {
			pendret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_PENDING,
							&pending);

			maskret = irq_get_irqchip_state(irq,
							IRQCHIP_STATE_MASKED,
							&masked);

			/* Pending and unmasked: keep it for replay on resume */
			if (pendret == 0 && maskret == 0 &&
			    (pending && !masked))
				continue;
		}

		/* Unmapped, masked, or state unreadable: clear the latch */
		mask = 1 << (i % 32);
		writel_relaxed(mask, cirq_data->base + CIRQ_ACK + (i / 32) * 4);
	}

	/* set edge_only mode, record edge-triggered interrupts */
	/* enable cirq */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	value |= (CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);

	return 0;
}
215*4882a593Smuzhiyun
/*
 * Syscore resume callback: replay everything CIRQ latched during suspend
 * to the parent controller, then turn CIRQ recording back off.
 */
static void mtk_cirq_resume(void)
{
	u32 value;

	/* flush recorded interrupts, will send signals to parent controller */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	writel_relaxed(value | CIRQ_FLUSH, cirq_data->base + CIRQ_CONTROL);

	/* disable cirq */
	/* re-read CIRQ_CONTROL: the flush may have changed the register */
	value = readl_relaxed(cirq_data->base + CIRQ_CONTROL);
	value &= ~(CIRQ_EDGE | CIRQ_EN);
	writel_relaxed(value, cirq_data->base + CIRQ_CONTROL);
}
229*4882a593Smuzhiyun
/* Run with IRQs disabled, after device suspend / before device resume. */
static struct syscore_ops mtk_cirq_syscore_ops = {
	.suspend	= mtk_cirq_suspend,
	.resume		= mtk_cirq_resume,
};
234*4882a593Smuzhiyun
/* Hook the CIRQ suspend/resume callbacks into the syscore machinery. */
static void mtk_cirq_syscore_init(void)
{
	register_syscore_ops(&mtk_cirq_syscore_ops);
}
239*4882a593Smuzhiyun #else
/* Without CONFIG_PM_SLEEP there is nothing to register. */
static inline void mtk_cirq_syscore_init(void) {}
241*4882a593Smuzhiyun #endif
242*4882a593Smuzhiyun
mtk_cirq_of_init(struct device_node * node,struct device_node * parent)243*4882a593Smuzhiyun static int __init mtk_cirq_of_init(struct device_node *node,
244*4882a593Smuzhiyun struct device_node *parent)
245*4882a593Smuzhiyun {
246*4882a593Smuzhiyun struct irq_domain *domain, *domain_parent;
247*4882a593Smuzhiyun unsigned int irq_num;
248*4882a593Smuzhiyun int ret;
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun domain_parent = irq_find_host(parent);
251*4882a593Smuzhiyun if (!domain_parent) {
252*4882a593Smuzhiyun pr_err("mtk_cirq: interrupt-parent not found\n");
253*4882a593Smuzhiyun return -EINVAL;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun cirq_data = kzalloc(sizeof(*cirq_data), GFP_KERNEL);
257*4882a593Smuzhiyun if (!cirq_data)
258*4882a593Smuzhiyun return -ENOMEM;
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun cirq_data->base = of_iomap(node, 0);
261*4882a593Smuzhiyun if (!cirq_data->base) {
262*4882a593Smuzhiyun pr_err("mtk_cirq: unable to map cirq register\n");
263*4882a593Smuzhiyun ret = -ENXIO;
264*4882a593Smuzhiyun goto out_free;
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 0,
268*4882a593Smuzhiyun &cirq_data->ext_irq_start);
269*4882a593Smuzhiyun if (ret)
270*4882a593Smuzhiyun goto out_unmap;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun ret = of_property_read_u32_index(node, "mediatek,ext-irq-range", 1,
273*4882a593Smuzhiyun &cirq_data->ext_irq_end);
274*4882a593Smuzhiyun if (ret)
275*4882a593Smuzhiyun goto out_unmap;
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun irq_num = cirq_data->ext_irq_end - cirq_data->ext_irq_start + 1;
278*4882a593Smuzhiyun domain = irq_domain_add_hierarchy(domain_parent, 0,
279*4882a593Smuzhiyun irq_num, node,
280*4882a593Smuzhiyun &cirq_domain_ops, cirq_data);
281*4882a593Smuzhiyun if (!domain) {
282*4882a593Smuzhiyun ret = -ENOMEM;
283*4882a593Smuzhiyun goto out_unmap;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun cirq_data->domain = domain;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun mtk_cirq_syscore_init();
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun return 0;
290*4882a593Smuzhiyun
291*4882a593Smuzhiyun out_unmap:
292*4882a593Smuzhiyun iounmap(cirq_data->base);
293*4882a593Smuzhiyun out_free:
294*4882a593Smuzhiyun kfree(cirq_data);
295*4882a593Smuzhiyun return ret;
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun IRQCHIP_DECLARE(mtk_cirq, "mediatek,mtk-cirq", mtk_cirq_of_init);
299