/*
 * Copyright (C) 2016 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#define pr_fmt(fmt) "GIC-ODMI: " fmt

#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>

#define GICP_ODMIN_SET			0x40
#define   GICP_ODMI_INT_NUM_SHIFT	12
#define GICP_ODMIN_GM_EP_R0		0x110
#define GICP_ODMIN_GM_EP_R1		0x114
#define GICP_ODMIN_GM_EA_R0		0x108
#define GICP_ODMIN_GM_EA_R1		0x118

/*
 * We don't support the group events, so we simply have 8 interrupts
 * per frame.
 */
#define NODMIS_SHIFT		3
#define NODMIS_PER_FRAME	(1 << NODMIS_SHIFT)
#define NODMIS_MASK		(NODMIS_PER_FRAME - 1)

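/*
 * A single linear hwirq space covers all frames: hwirq >> NODMIS_SHIFT
 * selects the frame and hwirq & NODMIS_MASK selects the ODMI within
 * that frame. For example, hwirq 13 lands in frame 1 as ODMI 5.
 */
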
struct odmi_data {
	struct resource res;
	void __iomem *base;
	unsigned int spi_base;
};

static struct odmi_data *odmis;
static unsigned long *odmis_bm;
static unsigned int odmis_count;

/* Protects odmis_bm */
static DEFINE_SPINLOCK(odmis_bm_lock);

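/*
 * The MSI doorbell is the owning frame's GICP_ODMIN_SET register: the
 * composed message carries the physical address of that register and,
 * as payload, the per-frame ODMI number shifted by
 * GICP_ODMI_INT_NUM_SHIFT.
 */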
static void odmi_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct odmi_data *odmi;
	phys_addr_t addr;
	unsigned int odmin;

	if (WARN_ON(d->hwirq >= odmis_count * NODMIS_PER_FRAME))
		return;

	odmi = &odmis[d->hwirq >> NODMIS_SHIFT];
	odmin = d->hwirq & NODMIS_MASK;

	addr = odmi->res.start + GICP_ODMIN_SET;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = odmin << GICP_ODMI_INT_NUM_SHIFT;
}

static struct irq_chip odmi_irq_chip = {
	.name			= "ODMI",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.irq_compose_msi_msg	= odmi_compose_msi_msg,
};

static int odmi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *args)
{
	struct odmi_data *odmi = NULL;
	struct irq_fwspec fwspec;
	struct irq_data *d;
	unsigned int hwirq, odmin;
	int ret;

	spin_lock(&odmis_bm_lock);
	hwirq = find_first_zero_bit(odmis_bm, NODMIS_PER_FRAME * odmis_count);
	if (hwirq >= NODMIS_PER_FRAME * odmis_count) {
		spin_unlock(&odmis_bm_lock);
		return -ENOSPC;
	}

	__set_bit(hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);

	odmi = &odmis[hwirq >> NODMIS_SHIFT];
	odmin = hwirq & NODMIS_MASK;

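	/*
	 * Translate to the parent GIC domain: the GIC devicetree binding
	 * numbers SPIs from 0 (GIC_SPI n is physical interrupt ID n + 32),
	 * so the absolute spi_base taken from the DT is reduced by 32
	 * before adding the per-frame ODMI offset.
	 */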
	fwspec.fwnode = domain->parent->fwnode;
	fwspec.param_count = 3;
	fwspec.param[0] = GIC_SPI;
	fwspec.param[1] = odmi->spi_base - 32 + odmin;
	fwspec.param[2] = IRQ_TYPE_EDGE_RISING;

	ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (ret) {
		pr_err("Cannot allocate parent IRQ\n");
		spin_lock(&odmis_bm_lock);
		/* Undo the allocation: clear the bit that was set above */
		__clear_bit(hwirq, odmis_bm);
		spin_unlock(&odmis_bm_lock);
		return ret;
	}

	/* Configure the interrupt line to be edge */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &odmi_irq_chip, NULL);

	return 0;
}

static void odmi_irq_domain_free(struct irq_domain *domain,
				 unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);

	if (d->hwirq >= odmis_count * NODMIS_PER_FRAME) {
		pr_err("Failed to teardown msi. Invalid hwirq %lu\n", d->hwirq);
		return;
	}

	irq_domain_free_irqs_parent(domain, virq, nr_irqs);

	/* Actually free the MSI */
	spin_lock(&odmis_bm_lock);
	__clear_bit(d->hwirq, odmis_bm);
	spin_unlock(&odmis_bm_lock);
}

static const struct irq_domain_ops odmi_domain_ops = {
	.alloc	= odmi_irq_domain_alloc,
	.free	= odmi_irq_domain_free,
};

static struct irq_chip odmi_msi_irq_chip = {
	.name	= "ODMI",
};

static struct msi_domain_ops odmi_msi_ops = {
};

static struct msi_domain_info odmi_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &odmi_msi_ops,
	.chip	= &odmi_msi_irq_chip,
};

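/*
 * Init: map each ODMI frame listed in the DT, create a linear inner
 * domain of odmis_count * NODMIS_PER_FRAME hwirqs stacked on the GIC,
 * and expose it to platform devices through a platform MSI domain.
 */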
static int __init mvebu_odmi_init(struct device_node *node,
				  struct device_node *parent)
{
	struct irq_domain *inner_domain, *plat_domain;
	int ret, i;

	if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
		return -EINVAL;

	odmis = kcalloc(odmis_count, sizeof(struct odmi_data), GFP_KERNEL);
	if (!odmis)
		return -ENOMEM;

	odmis_bm = kcalloc(BITS_TO_LONGS(odmis_count * NODMIS_PER_FRAME),
			   sizeof(long), GFP_KERNEL);
	if (!odmis_bm) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		ret = of_address_to_resource(node, i, &odmi->res);
		if (ret)
			goto err_unmap;

		odmi->base = of_io_request_and_map(node, i, "odmi");
		if (IS_ERR(odmi->base)) {
			ret = PTR_ERR(odmi->base);
			goto err_unmap;
		}

		if (of_property_read_u32_index(node, "marvell,spi-base",
					       i, &odmi->spi_base)) {
			ret = -EINVAL;
			goto err_unmap;
		}
	}

	inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
						odmis_count * NODMIS_PER_FRAME,
						&odmi_domain_ops, NULL);
	if (!inner_domain) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	inner_domain->parent = irq_find_host(parent);

	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
						     &odmi_msi_domain_info,
						     inner_domain);
	if (!plat_domain) {
		ret = -ENOMEM;
		goto err_remove_inner;
	}

	return 0;

err_remove_inner:
	irq_domain_remove(inner_domain);
err_unmap:
	for (i = 0; i < odmis_count; i++) {
		struct odmi_data *odmi = &odmis[i];

		if (odmi->base && !IS_ERR(odmi->base))
			iounmap(odmis[i].base);
	}
	kfree(odmis_bm);
err_alloc:
	kfree(odmis);
	return ret;
}

IRQCHIP_DECLARE(mvebu_odmi, "marvell,odmi-controller", mvebu_odmi_init);