// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

/*
 * Driver for interrupt combiners in the Top-level Control and Status
 * Registers (TCSR) hardware block in Qualcomm Technologies chips.
 * An interrupt combiner in this block combines a set of interrupts by
 * OR'ing the individual interrupt signals into a summary interrupt
 * signal routed to a parent interrupt controller, and provides read-
 * only, 32-bit registers to query the status of individual interrupts.
 * The status bit for IRQ n is bit (n % 32) within register (n / 32)
 * of the given combiner. Thus, each combiner can be described as a set
 * of register offsets and the number of IRQs managed.
 */
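/*
 * Illustrative example (not from the hardware documentation): a combiner
 * with two 32-bit status registers covers up to 64 IRQs, and IRQ 40 is
 * reported as bit 8 (40 % 32) of register 1 (40 / 32).
 */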

#define pr_fmt(fmt) "QCOM80B1:" fmt

#include <linux/acpi.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/platform_device.h>

#define REG_SIZE 32

struct combiner_reg {
	void __iomem *addr;
	unsigned long enabled;
};

struct combiner {
	struct irq_domain *domain;
	int parent_irq;
	u32 nirqs;
	u32 nregs;
	struct combiner_reg regs[];
};

static inline int irq_nr(u32 reg, u32 bit)
{
	return reg * REG_SIZE + bit;
}

/*
 * Handler for the cascaded IRQ.
 */
static void combiner_handle_irq(struct irq_desc *desc)
{
	struct combiner *combiner = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 reg;

	chained_irq_enter(chip, desc);

	for (reg = 0; reg < combiner->nregs; reg++) {
		int virq;
		int hwirq;
		u32 bit;
		u32 status;

		bit = readl_relaxed(combiner->regs[reg].addr);
		status = bit & combiner->regs[reg].enabled;
		if (bit && !status)
			pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
					    smp_processor_id(), bit,
					    combiner->regs[reg].enabled,
					    combiner->regs[reg].addr);

		while (status) {
			bit = __ffs(status);
			status &= ~(1 << bit);
			hwirq = irq_nr(reg, bit);
			virq = irq_find_mapping(combiner->domain, hwirq);
			if (virq > 0)
				generic_handle_irq(virq);
		}
	}

	chained_irq_exit(chip, desc);
}

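/*
 * Note: the combiner's status registers are read-only, so masking and
 * unmasking only update the driver's software 'enabled' bitmap; the
 * cascaded handler above uses that bitmap to filter the hardware status.
 */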
static void combiner_irq_chip_mask_irq(struct irq_data *data)
{
	struct combiner *combiner = irq_data_get_irq_chip_data(data);
	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;

	clear_bit(data->hwirq % REG_SIZE, &reg->enabled);
}

static void combiner_irq_chip_unmask_irq(struct irq_data *data)
{
	struct combiner *combiner = irq_data_get_irq_chip_data(data);
	struct combiner_reg *reg = combiner->regs + data->hwirq / REG_SIZE;

	set_bit(data->hwirq % REG_SIZE, &reg->enabled);
}

static struct irq_chip irq_chip = {
	.irq_mask = combiner_irq_chip_mask_irq,
	.irq_unmask = combiner_irq_chip_unmask_irq,
	.name = "qcom-irq-combiner"
};

static int combiner_irq_map(struct irq_domain *domain, unsigned int irq,
			    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &irq_chip, handle_level_irq);
	irq_set_chip_data(irq, domain->host_data);
	irq_set_noprobe(irq);
	return 0;
}

static void combiner_irq_unmap(struct irq_domain *domain, unsigned int irq)
{
	irq_domain_reset_irq_data(irq_get_irq_data(irq));
}

static int combiner_irq_translate(struct irq_domain *d, struct irq_fwspec *fws,
				  unsigned long *hwirq, unsigned int *type)
{
	struct combiner *combiner = d->host_data;

	if (is_acpi_node(fws->fwnode)) {
		if (WARN_ON((fws->param_count != 2) ||
			    (fws->param[0] >= combiner->nirqs) ||
			    (fws->param[1] & IORESOURCE_IRQ_LOWEDGE) ||
			    (fws->param[1] & IORESOURCE_IRQ_HIGHEDGE)))
			return -EINVAL;

		*hwirq = fws->param[0];
		*type = fws->param[1];
		return 0;
	}

	return -EINVAL;
}

static const struct irq_domain_ops domain_ops = {
	.map = combiner_irq_map,
	.unmap = combiner_irq_unmap,
	.translate = combiner_irq_translate
};

static acpi_status count_registers_cb(struct acpi_resource *ares, void *context)
{
	int *count = context;

	if (ares->type == ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
		++(*count);
	return AE_OK;
}

static int count_registers(struct platform_device *pdev)
{
	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
	acpi_status status;
	int count = 0;

	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
		return -EINVAL;

	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
				     count_registers_cb, &count);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return count;
}

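/* Context passed to get_registers_cb() while walking the ACPI _CRS resources. */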
struct get_registers_context {
	struct device *dev;
	struct combiner *combiner;
	int err;
};

static acpi_status get_registers_cb(struct acpi_resource *ares, void *context)
{
	struct get_registers_context *ctx = context;
	struct acpi_resource_generic_register *reg;
	phys_addr_t paddr;
	void __iomem *vaddr;

	if (ares->type != ACPI_RESOURCE_TYPE_GENERIC_REGISTER)
		return AE_OK;

	reg = &ares->data.generic_reg;
	paddr = reg->address;
	if ((reg->space_id != ACPI_SPACE_MEM) ||
	    (reg->bit_offset != 0) ||
	    (reg->bit_width > REG_SIZE)) {
		dev_err(ctx->dev, "Bad register resource @%pa\n", &paddr);
		ctx->err = -EINVAL;
		return AE_ERROR;
	}

	vaddr = devm_ioremap(ctx->dev, reg->address, REG_SIZE);
	if (!vaddr) {
		dev_err(ctx->dev, "Can't map register @%pa\n", &paddr);
		ctx->err = -ENOMEM;
		return AE_ERROR;
	}

	ctx->combiner->regs[ctx->combiner->nregs].addr = vaddr;
	ctx->combiner->nirqs += reg->bit_width;
	ctx->combiner->nregs++;
	return AE_OK;
}

static int get_registers(struct platform_device *pdev, struct combiner *comb)
{
	acpi_handle ahandle = ACPI_HANDLE(&pdev->dev);
	acpi_status status;
	struct get_registers_context ctx;

	if (!acpi_has_method(ahandle, METHOD_NAME__CRS))
		return -EINVAL;

	ctx.dev = &pdev->dev;
	ctx.combiner = comb;
	ctx.err = 0;

	status = acpi_walk_resources(ahandle, METHOD_NAME__CRS,
				     get_registers_cb, &ctx);
	if (ACPI_FAILURE(status))
		return ctx.err;
	return 0;
}

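/*
 * The device is matched via ACPI only (the "QCOM80B1" ID below); the
 * combiner's status registers are discovered from the _CRS method.
 */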
static int __init combiner_probe(struct platform_device *pdev)
{
	struct combiner *combiner;
	int nregs;
	int err;

	nregs = count_registers(pdev);
	if (nregs <= 0) {
		dev_err(&pdev->dev, "Error reading register resources\n");
		return -EINVAL;
	}

	combiner = devm_kzalloc(&pdev->dev, struct_size(combiner, regs, nregs),
				GFP_KERNEL);
	if (!combiner)
		return -ENOMEM;

	err = get_registers(pdev, combiner);
	if (err < 0)
		return err;

	combiner->parent_irq = platform_get_irq(pdev, 0);
	if (combiner->parent_irq <= 0)
		return -EPROBE_DEFER;

	combiner->domain = irq_domain_create_linear(pdev->dev.fwnode, combiner->nirqs,
						    &domain_ops, combiner);
	if (!combiner->domain)
		/* Errors printed by irq_domain_create_linear */
		return -ENODEV;

	irq_set_chained_handler_and_data(combiner->parent_irq,
					 combiner_handle_irq, combiner);

	dev_info(&pdev->dev, "Initialized with [p=%d,n=%d,r=%p]\n",
		 combiner->parent_irq, combiner->nirqs, combiner->regs[0].addr);
	return 0;
}

static const struct acpi_device_id qcom_irq_combiner_ids[] = {
	{ "QCOM80B1", },
	{ }
};

static struct platform_driver qcom_irq_combiner_probe = {
	.driver = {
		.name = "qcom-irq-combiner",
		.acpi_match_table = ACPI_PTR(qcom_irq_combiner_ids),
	},
	.probe = combiner_probe,
};
builtin_platform_driver(qcom_irq_combiner_probe);