1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4*4882a593Smuzhiyun * http://www.samsung.com
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * Combiner irqchip for EXYNOS
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun #include <linux/err.h>
9*4882a593Smuzhiyun #include <linux/export.h>
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <linux/syscore_ops.h>
14*4882a593Smuzhiyun #include <linux/irqdomain.h>
15*4882a593Smuzhiyun #include <linux/irqchip.h>
16*4882a593Smuzhiyun #include <linux/irqchip/chained_irq.h>
17*4882a593Smuzhiyun #include <linux/interrupt.h>
18*4882a593Smuzhiyun #include <linux/of_address.h>
19*4882a593Smuzhiyun #include <linux/of_irq.h>
20*4882a593Smuzhiyun
/* Per-group register offsets within one combiner register window. */
#define COMBINER_ENABLE_SET	0x0	/* write 1-bits to enable inputs */
#define COMBINER_ENABLE_CLEAR	0x4	/* write 1-bits to disable inputs */
#define COMBINER_INT_STATUS	0xC	/* pending status of all inputs */

/* Each combiner group multiplexes 8 interrupt inputs onto one parent irq. */
#define IRQ_IN_COMBINER		8

/* Serializes access to the combiner status register in the cascade path. */
static DEFINE_SPINLOCK(irq_controller_lock);
28*4882a593Smuzhiyun
/*
 * Per-combiner-group state.
 * One instance exists for each of the max_nr combiner groups.
 */
struct combiner_chip_data {
	unsigned int hwirq_offset;	/* first hwirq number of this group's register word */
	unsigned int irq_mask;		/* bits within the register word owned by this group */
	void __iomem *base;		/* register window shared by the 4 groups of one word */
	unsigned int parent_irq;	/* upstream (GIC) interrupt this group cascades from */
#ifdef CONFIG_PM
	u32 pm_save;			/* ENABLE_SET contents saved across suspend */
#endif
};
38*4882a593Smuzhiyun
/* Array of max_nr per-group states, allocated in combiner_init(). */
static struct combiner_chip_data *combiner_data;
/* Linear domain covering max_nr * IRQ_IN_COMBINER hwirqs. */
static struct irq_domain *combiner_irq_domain;
/* Number of combiner groups; overridden by "samsung,combiner-nr" in DT. */
static unsigned int max_nr = 20;
42*4882a593Smuzhiyun
combiner_base(struct irq_data * data)43*4882a593Smuzhiyun static inline void __iomem *combiner_base(struct irq_data *data)
44*4882a593Smuzhiyun {
45*4882a593Smuzhiyun struct combiner_chip_data *combiner_data =
46*4882a593Smuzhiyun irq_data_get_irq_chip_data(data);
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun return combiner_data->base;
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun
combiner_mask_irq(struct irq_data * data)51*4882a593Smuzhiyun static void combiner_mask_irq(struct irq_data *data)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun u32 mask = 1 << (data->hwirq % 32);
54*4882a593Smuzhiyun
55*4882a593Smuzhiyun writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
56*4882a593Smuzhiyun }
57*4882a593Smuzhiyun
combiner_unmask_irq(struct irq_data * data)58*4882a593Smuzhiyun static void combiner_unmask_irq(struct irq_data *data)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun u32 mask = 1 << (data->hwirq % 32);
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
combiner_handle_cascade_irq(struct irq_desc * desc)65*4882a593Smuzhiyun static void combiner_handle_cascade_irq(struct irq_desc *desc)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
68*4882a593Smuzhiyun struct irq_chip *chip = irq_desc_get_chip(desc);
69*4882a593Smuzhiyun unsigned int cascade_irq, combiner_irq;
70*4882a593Smuzhiyun unsigned long status;
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun chained_irq_enter(chip, desc);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun spin_lock(&irq_controller_lock);
75*4882a593Smuzhiyun status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
76*4882a593Smuzhiyun spin_unlock(&irq_controller_lock);
77*4882a593Smuzhiyun status &= chip_data->irq_mask;
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun if (status == 0)
80*4882a593Smuzhiyun goto out;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun combiner_irq = chip_data->hwirq_offset + __ffs(status);
83*4882a593Smuzhiyun cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun if (unlikely(!cascade_irq))
86*4882a593Smuzhiyun handle_bad_irq(desc);
87*4882a593Smuzhiyun else
88*4882a593Smuzhiyun generic_handle_irq(cascade_irq);
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun out:
91*4882a593Smuzhiyun chained_irq_exit(chip, desc);
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun
#ifdef CONFIG_SMP
/*
 * The combiner has no affinity control of its own, so delegate the
 * request to the parent (GIC) interrupt that this group cascades from.
 */
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	unsigned int parent = chip_data->parent_irq;
	struct irq_chip *parent_chip = irq_get_chip(parent);
	struct irq_data *parent_data = irq_get_irq_data(parent);

	if (!parent_chip || !parent_chip->irq_set_affinity)
		return -EINVAL;

	return parent_chip->irq_set_affinity(parent_data, mask_val, force);
}
#endif
108*4882a593Smuzhiyun
/* irq_chip shared by every combiner input; level-type, no set_type hook. */
static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};
117*4882a593Smuzhiyun
/* Install the demux handler on the parent irq feeding this group. */
static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}
124*4882a593Smuzhiyun
combiner_init_one(struct combiner_chip_data * combiner_data,unsigned int combiner_nr,void __iomem * base,unsigned int irq)125*4882a593Smuzhiyun static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
126*4882a593Smuzhiyun unsigned int combiner_nr,
127*4882a593Smuzhiyun void __iomem *base, unsigned int irq)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun combiner_data->base = base;
130*4882a593Smuzhiyun combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
131*4882a593Smuzhiyun combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
132*4882a593Smuzhiyun combiner_data->parent_irq = irq;
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun /* Disable all interrupts */
135*4882a593Smuzhiyun writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
combiner_irq_domain_xlate(struct irq_domain * d,struct device_node * controller,const u32 * intspec,unsigned int intsize,unsigned long * out_hwirq,unsigned int * out_type)138*4882a593Smuzhiyun static int combiner_irq_domain_xlate(struct irq_domain *d,
139*4882a593Smuzhiyun struct device_node *controller,
140*4882a593Smuzhiyun const u32 *intspec, unsigned int intsize,
141*4882a593Smuzhiyun unsigned long *out_hwirq,
142*4882a593Smuzhiyun unsigned int *out_type)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun if (irq_domain_get_of_node(d) != controller)
145*4882a593Smuzhiyun return -EINVAL;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun if (intsize < 2)
148*4882a593Smuzhiyun return -EINVAL;
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun *out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
151*4882a593Smuzhiyun *out_type = 0;
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun return 0;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
/* Map one hwirq: attach the chip, level handler and its group's state. */
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *groups = d->host_data;
	struct combiner_chip_data *group = &groups[hw / IRQ_IN_COMBINER];

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, group);
	irq_set_probe(irq);
	return 0;
}
167*4882a593Smuzhiyun
/* Domain callbacks: DT specifier translation and per-hwirq mapping. */
static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};
172*4882a593Smuzhiyun
combiner_init(void __iomem * combiner_base,struct device_node * np)173*4882a593Smuzhiyun static void __init combiner_init(void __iomem *combiner_base,
174*4882a593Smuzhiyun struct device_node *np)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun int i, irq;
177*4882a593Smuzhiyun unsigned int nr_irq;
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun nr_irq = max_nr * IRQ_IN_COMBINER;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun combiner_data = kcalloc(max_nr, sizeof (*combiner_data), GFP_KERNEL);
182*4882a593Smuzhiyun if (!combiner_data) {
183*4882a593Smuzhiyun pr_warn("%s: could not allocate combiner data\n", __func__);
184*4882a593Smuzhiyun return;
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
187*4882a593Smuzhiyun combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
188*4882a593Smuzhiyun &combiner_irq_domain_ops, combiner_data);
189*4882a593Smuzhiyun if (WARN_ON(!combiner_irq_domain)) {
190*4882a593Smuzhiyun pr_warn("%s: irq domain init failed\n", __func__);
191*4882a593Smuzhiyun return;
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun for (i = 0; i < max_nr; i++) {
195*4882a593Smuzhiyun irq = irq_of_parse_and_map(np, i);
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun combiner_init_one(&combiner_data[i], i,
198*4882a593Smuzhiyun combiner_base + (i >> 2) * 0x10, irq);
199*4882a593Smuzhiyun combiner_cascade_irq(&combiner_data[i], irq);
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun #ifdef CONFIG_PM
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun /**
206*4882a593Smuzhiyun * combiner_suspend - save interrupt combiner state before suspend
207*4882a593Smuzhiyun *
208*4882a593Smuzhiyun * Save the interrupt enable set register for all combiner groups since
209*4882a593Smuzhiyun * the state is lost when the system enters into a sleep state.
210*4882a593Smuzhiyun *
211*4882a593Smuzhiyun */
combiner_suspend(void)212*4882a593Smuzhiyun static int combiner_suspend(void)
213*4882a593Smuzhiyun {
214*4882a593Smuzhiyun int i;
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun for (i = 0; i < max_nr; i++)
217*4882a593Smuzhiyun combiner_data[i].pm_save =
218*4882a593Smuzhiyun readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);
219*4882a593Smuzhiyun
220*4882a593Smuzhiyun return 0;
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun /**
224*4882a593Smuzhiyun * combiner_resume - restore interrupt combiner state after resume
225*4882a593Smuzhiyun *
226*4882a593Smuzhiyun * Restore the interrupt enable set register for all combiner groups since
227*4882a593Smuzhiyun * the state is lost when the system enters into a sleep state on suspend.
228*4882a593Smuzhiyun *
229*4882a593Smuzhiyun */
combiner_resume(void)230*4882a593Smuzhiyun static void combiner_resume(void)
231*4882a593Smuzhiyun {
232*4882a593Smuzhiyun int i;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun for (i = 0; i < max_nr; i++) {
235*4882a593Smuzhiyun writel_relaxed(combiner_data[i].irq_mask,
236*4882a593Smuzhiyun combiner_data[i].base + COMBINER_ENABLE_CLEAR);
237*4882a593Smuzhiyun writel_relaxed(combiner_data[i].pm_save,
238*4882a593Smuzhiyun combiner_data[i].base + COMBINER_ENABLE_SET);
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun }
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun #else
243*4882a593Smuzhiyun #define combiner_suspend NULL
244*4882a593Smuzhiyun #define combiner_resume NULL
245*4882a593Smuzhiyun #endif
246*4882a593Smuzhiyun
/* PM hooks; both resolve to NULL when CONFIG_PM is disabled. */
static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};
251*4882a593Smuzhiyun
combiner_of_init(struct device_node * np,struct device_node * parent)252*4882a593Smuzhiyun static int __init combiner_of_init(struct device_node *np,
253*4882a593Smuzhiyun struct device_node *parent)
254*4882a593Smuzhiyun {
255*4882a593Smuzhiyun void __iomem *combiner_base;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun combiner_base = of_iomap(np, 0);
258*4882a593Smuzhiyun if (!combiner_base) {
259*4882a593Smuzhiyun pr_err("%s: failed to map combiner registers\n", __func__);
260*4882a593Smuzhiyun return -ENXIO;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
264*4882a593Smuzhiyun pr_info("%s: number of combiners not specified, "
265*4882a593Smuzhiyun "setting default as %d.\n",
266*4882a593Smuzhiyun __func__, max_nr);
267*4882a593Smuzhiyun }
268*4882a593Smuzhiyun
269*4882a593Smuzhiyun combiner_init(combiner_base, np);
270*4882a593Smuzhiyun
271*4882a593Smuzhiyun register_syscore_ops(&combiner_syscore_ops);
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun return 0;
274*4882a593Smuzhiyun }
275*4882a593Smuzhiyun IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
276*4882a593Smuzhiyun combiner_of_init);
277