xref: /OK3568_Linux_fs/kernel/drivers/irqchip/irq-imx-irqsteer.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2017 NXP
4*4882a593Smuzhiyun  * Copyright (C) 2018 Pengutronix, Lucas Stach <kernel@pengutronix.de>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/clk.h>
8*4882a593Smuzhiyun #include <linux/interrupt.h>
9*4882a593Smuzhiyun #include <linux/irq.h>
10*4882a593Smuzhiyun #include <linux/irqchip/chained_irq.h>
11*4882a593Smuzhiyun #include <linux/irqdomain.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/of_irq.h>
14*4882a593Smuzhiyun #include <linux/of_platform.h>
15*4882a593Smuzhiyun #include <linux/spinlock.h>
16*4882a593Smuzhiyun 
/*
 * Register layout: one CHANCTRL word at offset 0x0, followed by banks of
 * "_t" 32-bit words each. CTRL_STRIDE_OFF(_t, _r) is the byte offset of
 * bank "_r" when the controller has "_t" mask/status words per bank.
 * Arguments are parenthesized so expression arguments evaluate correctly.
 */
#define CTRL_STRIDE_OFF(_t, _r)	((_t) * 4 * (_r))
#define CHANCTRL		0x0	/* output-channel steering control */
#define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
#define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
#define CHANSTATUS(n, t)	(CTRL_STRIDE_OFF(t, 2) + 0x4 * (n) + 0x4)
#define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
#define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)

/* at most 8 output interrupts per channel (one per group of 64 inputs) */
#define CHAN_MAX_OUTPUT_INT	0x8
26*4882a593Smuzhiyun 
/* Per-instance state for one irqsteer controller. */
struct irqsteer_data {
	void __iomem		*regs;		/* MMIO base of the irqsteer block */
	struct clk		*ipg_clk;	/* bus clock; enabled around register access */
	int			irq[CHAN_MAX_OUTPUT_INT]; /* parent (output) irq numbers */
	int			irq_count;	/* used entries in irq[], one per 64 inputs */
	raw_spinlock_t		lock;		/* serializes CHANMASK read-modify-write */
	int			reg_num;	/* number of 32-bit mask/status words */
	int			channel;	/* output channel all irqs are steered into */
	struct irq_domain	*domain;	/* linear domain covering the input irqs */
	u32			*saved_reg;	/* CHANMASK shadow for suspend/resume */
};
38*4882a593Smuzhiyun 
imx_irqsteer_get_reg_index(struct irqsteer_data * data,unsigned long irqnum)39*4882a593Smuzhiyun static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
40*4882a593Smuzhiyun 				      unsigned long irqnum)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	return (data->reg_num - irqnum / 32 - 1);
43*4882a593Smuzhiyun }
44*4882a593Smuzhiyun 
imx_irqsteer_irq_unmask(struct irq_data * d)45*4882a593Smuzhiyun static void imx_irqsteer_irq_unmask(struct irq_data *d)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun 	struct irqsteer_data *data = d->chip_data;
48*4882a593Smuzhiyun 	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
49*4882a593Smuzhiyun 	unsigned long flags;
50*4882a593Smuzhiyun 	u32 val;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&data->lock, flags);
53*4882a593Smuzhiyun 	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
54*4882a593Smuzhiyun 	val |= BIT(d->hwirq % 32);
55*4882a593Smuzhiyun 	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
56*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&data->lock, flags);
57*4882a593Smuzhiyun }
58*4882a593Smuzhiyun 
imx_irqsteer_irq_mask(struct irq_data * d)59*4882a593Smuzhiyun static void imx_irqsteer_irq_mask(struct irq_data *d)
60*4882a593Smuzhiyun {
61*4882a593Smuzhiyun 	struct irqsteer_data *data = d->chip_data;
62*4882a593Smuzhiyun 	int idx = imx_irqsteer_get_reg_index(data, d->hwirq);
63*4882a593Smuzhiyun 	unsigned long flags;
64*4882a593Smuzhiyun 	u32 val;
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&data->lock, flags);
67*4882a593Smuzhiyun 	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
68*4882a593Smuzhiyun 	val &= ~BIT(d->hwirq % 32);
69*4882a593Smuzhiyun 	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
70*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&data->lock, flags);
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/* irq_chip for the steered input interrupts; only mask/unmask are needed */
static struct irq_chip imx_irqsteer_irq_chip = {
	.name		= "irqsteer",
	.irq_mask	= imx_irqsteer_irq_mask,
	.irq_unmask	= imx_irqsteer_irq_unmask,
};
78*4882a593Smuzhiyun 
/* Domain .map callback: set up a freshly-allocated virq as a level irq. */
static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
				irq_hw_number_t hwirq)
{
	irq_set_chip_data(irq, h->host_data);
	irq_set_chip_and_handler(irq, &imx_irqsteer_irq_chip, handle_level_irq);
	/* all steered inputs are handled as level interrupts */
	irq_set_status_flags(irq, IRQ_LEVEL);

	return 0;
}
88*4882a593Smuzhiyun 
/* Linear domain ops; DT interrupt specifier is a single cell (onecell). */
static const struct irq_domain_ops imx_irqsteer_domain_ops = {
	.map		= imx_irqsteer_irq_map,
	.xlate		= irq_domain_xlate_onecell,
};
93*4882a593Smuzhiyun 
/*
 * Find which parent (output) irq fired and return the base hwirq of the
 * 64-input group it serves, or -EINVAL if @irq is not one of ours.
 */
static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
{
	int i = 0;

	while (i < data->irq_count) {
		if (data->irq[i] == irq)
			return i * 64;
		i++;
	}

	return -EINVAL;
}
105*4882a593Smuzhiyun 
imx_irqsteer_irq_handler(struct irq_desc * desc)106*4882a593Smuzhiyun static void imx_irqsteer_irq_handler(struct irq_desc *desc)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun 	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
109*4882a593Smuzhiyun 	int hwirq;
110*4882a593Smuzhiyun 	int irq, i;
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 	chained_irq_enter(irq_desc_get_chip(desc), desc);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	irq = irq_desc_get_irq(desc);
115*4882a593Smuzhiyun 	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
116*4882a593Smuzhiyun 	if (hwirq < 0) {
117*4882a593Smuzhiyun 		pr_warn("%s: unable to get hwirq base for irq %d\n",
118*4882a593Smuzhiyun 			__func__, irq);
119*4882a593Smuzhiyun 		return;
120*4882a593Smuzhiyun 	}
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	for (i = 0; i < 2; i++, hwirq += 32) {
123*4882a593Smuzhiyun 		int idx = imx_irqsteer_get_reg_index(data, hwirq);
124*4882a593Smuzhiyun 		unsigned long irqmap;
125*4882a593Smuzhiyun 		int pos, virq;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 		if (hwirq >= data->reg_num * 32)
128*4882a593Smuzhiyun 			break;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 		irqmap = readl_relaxed(data->regs +
131*4882a593Smuzhiyun 				       CHANSTATUS(idx, data->reg_num));
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 		for_each_set_bit(pos, &irqmap, 32) {
134*4882a593Smuzhiyun 			virq = irq_find_mapping(data->domain, pos + hwirq);
135*4882a593Smuzhiyun 			if (virq)
136*4882a593Smuzhiyun 				generic_handle_irq(virq);
137*4882a593Smuzhiyun 		}
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	chained_irq_exit(irq_desc_get_chip(desc), desc);
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
imx_irqsteer_probe(struct platform_device * pdev)143*4882a593Smuzhiyun static int imx_irqsteer_probe(struct platform_device *pdev)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun 	struct device_node *np = pdev->dev.of_node;
146*4882a593Smuzhiyun 	struct irqsteer_data *data;
147*4882a593Smuzhiyun 	u32 irqs_num;
148*4882a593Smuzhiyun 	int i, ret;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
151*4882a593Smuzhiyun 	if (!data)
152*4882a593Smuzhiyun 		return -ENOMEM;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	data->regs = devm_platform_ioremap_resource(pdev, 0);
155*4882a593Smuzhiyun 	if (IS_ERR(data->regs)) {
156*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to initialize reg\n");
157*4882a593Smuzhiyun 		return PTR_ERR(data->regs);
158*4882a593Smuzhiyun 	}
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
161*4882a593Smuzhiyun 	if (IS_ERR(data->ipg_clk))
162*4882a593Smuzhiyun 		return dev_err_probe(&pdev->dev, PTR_ERR(data->ipg_clk),
163*4882a593Smuzhiyun 				     "failed to get ipg clk\n");
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	raw_spin_lock_init(&data->lock);
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
168*4882a593Smuzhiyun 	if (ret)
169*4882a593Smuzhiyun 		return ret;
170*4882a593Smuzhiyun 	ret = of_property_read_u32(np, "fsl,channel", &data->channel);
171*4882a593Smuzhiyun 	if (ret)
172*4882a593Smuzhiyun 		return ret;
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	/*
175*4882a593Smuzhiyun 	 * There is one output irq for each group of 64 inputs.
176*4882a593Smuzhiyun 	 * One register bit map can represent 32 input interrupts.
177*4882a593Smuzhiyun 	 */
178*4882a593Smuzhiyun 	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
179*4882a593Smuzhiyun 	data->reg_num = irqs_num / 32;
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
182*4882a593Smuzhiyun 		data->saved_reg = devm_kzalloc(&pdev->dev,
183*4882a593Smuzhiyun 					sizeof(u32) * data->reg_num,
184*4882a593Smuzhiyun 					GFP_KERNEL);
185*4882a593Smuzhiyun 		if (!data->saved_reg)
186*4882a593Smuzhiyun 			return -ENOMEM;
187*4882a593Smuzhiyun 	}
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	ret = clk_prepare_enable(data->ipg_clk);
190*4882a593Smuzhiyun 	if (ret) {
191*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to enable ipg clk: %d\n", ret);
192*4882a593Smuzhiyun 		return ret;
193*4882a593Smuzhiyun 	}
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	/* steer all IRQs into configured channel */
196*4882a593Smuzhiyun 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
199*4882a593Smuzhiyun 					     &imx_irqsteer_domain_ops, data);
200*4882a593Smuzhiyun 	if (!data->domain) {
201*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to create IRQ domain\n");
202*4882a593Smuzhiyun 		ret = -ENOMEM;
203*4882a593Smuzhiyun 		goto out;
204*4882a593Smuzhiyun 	}
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
207*4882a593Smuzhiyun 		ret = -EINVAL;
208*4882a593Smuzhiyun 		goto out;
209*4882a593Smuzhiyun 	}
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	for (i = 0; i < data->irq_count; i++) {
212*4882a593Smuzhiyun 		data->irq[i] = irq_of_parse_and_map(np, i);
213*4882a593Smuzhiyun 		if (!data->irq[i]) {
214*4882a593Smuzhiyun 			ret = -EINVAL;
215*4882a593Smuzhiyun 			goto out;
216*4882a593Smuzhiyun 		}
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 		irq_set_chained_handler_and_data(data->irq[i],
219*4882a593Smuzhiyun 						 imx_irqsteer_irq_handler,
220*4882a593Smuzhiyun 						 data);
221*4882a593Smuzhiyun 	}
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	platform_set_drvdata(pdev, data);
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	return 0;
226*4882a593Smuzhiyun out:
227*4882a593Smuzhiyun 	clk_disable_unprepare(data->ipg_clk);
228*4882a593Smuzhiyun 	return ret;
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun 
imx_irqsteer_remove(struct platform_device * pdev)231*4882a593Smuzhiyun static int imx_irqsteer_remove(struct platform_device *pdev)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun 	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
234*4882a593Smuzhiyun 	int i;
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	for (i = 0; i < irqsteer_data->irq_count; i++)
237*4882a593Smuzhiyun 		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
238*4882a593Smuzhiyun 						 NULL, NULL);
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	irq_domain_remove(irqsteer_data->domain);
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	clk_disable_unprepare(irqsteer_data->ipg_clk);
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	return 0;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
imx_irqsteer_save_regs(struct irqsteer_data * data)248*4882a593Smuzhiyun static void imx_irqsteer_save_regs(struct irqsteer_data *data)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun 	int i;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	for (i = 0; i < data->reg_num; i++)
253*4882a593Smuzhiyun 		data->saved_reg[i] = readl_relaxed(data->regs +
254*4882a593Smuzhiyun 						CHANMASK(i, data->reg_num));
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun 
imx_irqsteer_restore_regs(struct irqsteer_data * data)257*4882a593Smuzhiyun static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	int i;
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
262*4882a593Smuzhiyun 	for (i = 0; i < data->reg_num; i++)
263*4882a593Smuzhiyun 		writel_relaxed(data->saved_reg[i],
264*4882a593Smuzhiyun 			       data->regs + CHANMASK(i, data->reg_num));
265*4882a593Smuzhiyun }
266*4882a593Smuzhiyun 
imx_irqsteer_suspend(struct device * dev)267*4882a593Smuzhiyun static int imx_irqsteer_suspend(struct device *dev)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun 	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun 	imx_irqsteer_save_regs(irqsteer_data);
272*4882a593Smuzhiyun 	clk_disable_unprepare(irqsteer_data->ipg_clk);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	return 0;
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun 
imx_irqsteer_resume(struct device * dev)277*4882a593Smuzhiyun static int imx_irqsteer_resume(struct device *dev)
278*4882a593Smuzhiyun {
279*4882a593Smuzhiyun 	struct irqsteer_data *irqsteer_data = dev_get_drvdata(dev);
280*4882a593Smuzhiyun 	int ret;
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun 	ret = clk_prepare_enable(irqsteer_data->ipg_clk);
283*4882a593Smuzhiyun 	if (ret) {
284*4882a593Smuzhiyun 		dev_err(dev, "failed to enable ipg clk: %d\n", ret);
285*4882a593Smuzhiyun 		return ret;
286*4882a593Smuzhiyun 	}
287*4882a593Smuzhiyun 	imx_irqsteer_restore_regs(irqsteer_data);
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun 	return 0;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun #endif
292*4882a593Smuzhiyun 
/* Sleep callbacks run in the noirq phase (interrupts already disabled). */
static const struct dev_pm_ops imx_irqsteer_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_irqsteer_suspend, imx_irqsteer_resume)
};
296*4882a593Smuzhiyun 
/* Devicetree match table. */
static const struct of_device_id imx_irqsteer_dt_ids[] = {
	{ .compatible = "fsl,imx-irqsteer", },
	{},	/* sentinel */
};
301*4882a593Smuzhiyun 
static struct platform_driver imx_irqsteer_driver = {
	.driver = {
		.name = "imx-irqsteer",
		.of_match_table = imx_irqsteer_dt_ids,
		.pm = &imx_irqsteer_pm_ops,
	},
	.probe = imx_irqsteer_probe,
	.remove = imx_irqsteer_remove,
};
/* registered as a built-in (non-modular) platform driver */
builtin_platform_driver(imx_irqsteer_driver);
312