// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Jiaxun Yang <jiaxun.yang@flygoat.com>
 * Loongson HyperTransport Interrupt Vector support
 */

#define pr_fmt(fmt) "htvec: " fmt

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>

/* Registers */
#define HTVEC_EN_OFF		0x20
#define HTVEC_MAX_PARENT_IRQ	8

#define VEC_COUNT_PER_REG	32
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)

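/*
 * struct htvec - per-controller state
 * @num_parents:  number of parent interrupt lines actually wired up
 * @base:         MMIO base of the vector cause/enable registers
 * @htvec_domain: linear IRQ domain covering 32 vectors per parent line
 * @htvec_lock:   protects read-modify-write of the enable registers
 */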
struct htvec {
	int			num_parents;
	void __iomem		*base;
	struct irq_domain	*htvec_domain;
	raw_spinlock_t		htvec_lock;
};

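/*
 * Chained handler for the parent interrupt lines: scan the 32-bit pending
 * register of every parent, map each set bit back to its Linux IRQ through
 * the linear domain and handle it. If nothing was pending, report a
 * spurious interrupt.
 */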
static void htvec_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u32 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct htvec *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < priv->num_parents; i++) {
		pending = readl(priv->base + 4 * i);
		while (pending) {
			int bit = __ffs(pending);

			generic_handle_irq(irq_linear_revmap(priv->htvec_domain,
							     bit + VEC_COUNT_PER_REG * i));
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

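/* Ack a vector by writing its bit back to the corresponding cause register. */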
static void htvec_ack_irq(struct irq_data *d)
{
	struct htvec *priv = irq_data_get_irq_chip_data(d);

	writel(BIT(VEC_REG_BIT(d->hwirq)),
	       priv->base + VEC_REG_IDX(d->hwirq) * 4);
}

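/* Mask a vector: clear its bit in the enable register under htvec_lock. */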
static void htvec_mask_irq(struct irq_data *d)
{
	u32 reg;
	void __iomem *addr;
	struct htvec *priv = irq_data_get_irq_chip_data(d);

	raw_spin_lock(&priv->htvec_lock);
	addr = priv->base + HTVEC_EN_OFF;
	addr += VEC_REG_IDX(d->hwirq) * 4;
	reg = readl(addr);
	reg &= ~BIT(VEC_REG_BIT(d->hwirq));
	writel(reg, addr);
	raw_spin_unlock(&priv->htvec_lock);
}

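/* Unmask a vector: set its bit in the enable register under htvec_lock. */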
static void htvec_unmask_irq(struct irq_data *d)
{
	u32 reg;
	void __iomem *addr;
	struct htvec *priv = irq_data_get_irq_chip_data(d);

	raw_spin_lock(&priv->htvec_lock);
	addr = priv->base + HTVEC_EN_OFF;
	addr += VEC_REG_IDX(d->hwirq) * 4;
	reg = readl(addr);
	reg |= BIT(VEC_REG_BIT(d->hwirq));
	writel(reg, addr);
	raw_spin_unlock(&priv->htvec_lock);
}

static struct irq_chip htvec_irq_chip = {
	.name		= "LOONGSON_HTVEC",
	.irq_mask	= htvec_mask_irq,
	.irq_unmask	= htvec_unmask_irq,
	.irq_ack	= htvec_ack_irq,
};

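/*
 * Allocate vectors in the domain: translate the one-cell firmware spec to a
 * hardware IRQ number and install htvec_irq_chip with edge-type flow
 * handling for each requested vector.
 */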
static int htvec_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned long hwirq;
	unsigned int type, i;
	struct htvec *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &htvec_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

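/* Tear down the vectors allocated above: drop their handler and irq_data. */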
static void htvec_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops htvec_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= htvec_domain_alloc,
	.free		= htvec_domain_free,
};

static void htvec_reset(struct htvec *priv)
{
	u32 idx;

	/* Clear IRQ cause registers, mask all interrupts */
	for (idx = 0; idx < priv->num_parents; idx++) {
		writel_relaxed(0x0, priv->base + HTVEC_EN_OFF + 4 * idx);
		writel_relaxed(0xFFFFFFFF, priv->base + 4 * idx);
	}
}

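/*
 * Probe path: map the controller registers, map up to eight parent interrupt
 * lines, create a linear domain with 32 vectors per parent, reset the
 * hardware and install the chained dispatch handler on every parent line.
 */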
static int htvec_of_init(struct device_node *node,
			 struct device_node *parent)
{
	struct htvec *priv;
	int err, parent_irq[HTVEC_MAX_PARENT_IRQ], i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	raw_spin_lock_init(&priv->htvec_lock);
	priv->base = of_iomap(node, 0);
	if (!priv->base) {
		err = -ENOMEM;
		goto free_priv;
	}

	/* Interrupts may come from any of the 8 parent interrupt lines */
	for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++) {
		parent_irq[i] = irq_of_parse_and_map(node, i);
		if (parent_irq[i] <= 0)
			break;

		priv->num_parents++;
	}

	if (!priv->num_parents) {
		pr_err("Failed to get parent irqs\n");
		err = -ENODEV;
		goto iounmap_base;
	}

	priv->htvec_domain = irq_domain_create_linear(of_node_to_fwnode(node),
					(VEC_COUNT_PER_REG * priv->num_parents),
					&htvec_domain_ops, priv);
	if (!priv->htvec_domain) {
		pr_err("Failed to create IRQ domain\n");
		err = -ENOMEM;
		goto irq_dispose;
	}

	htvec_reset(priv);

	for (i = 0; i < priv->num_parents; i++)
		irq_set_chained_handler_and_data(parent_irq[i],
						 htvec_irq_dispatch, priv);

	return 0;

irq_dispose:
	for (; i > 0; i--)
		irq_dispose_mapping(parent_irq[i - 1]);
iounmap_base:
	iounmap(priv->base);
free_priv:
	kfree(priv);

	return err;
}

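/*
 * A hypothetical devicetree node bound by this driver. The address, parent
 * controller label and parent interrupt numbers below are placeholders for
 * illustration only; the compatible string and the one-cell interrupt
 * specifier match what this driver registers and translates.
 *
 *	htvec: interrupt-controller@1fe00510 {
 *		compatible = "loongson,htvec-1.0";
 *		reg = <0x1fe00510 0x40>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupt-parent = <&liointc>;
 *		interrupts = <16>, <17>, <18>, <19>;
 *	};
 */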
IRQCHIP_DECLARE(htvec, "loongson,htvec-1.0", htvec_of_init);