// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2018 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 *
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"

#define MTK_EINT_EDGE_SENSITIVE           0
#define MTK_EINT_LEVEL_SENSITIVE          1
#define MTK_EINT_DBNC_SET_DBNC_BITS	  4
#define MTK_EINT_DBNC_RST_BIT		  (0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		  (0x1 << 0)

static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};

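/*
 * Every EINT control register is organized as banks of 32 lines. Translate
 * an EINT number plus a register block offset into the address of the
 * 32-bit word covering that line; numbers at or above ap_num are indexed
 * relative to ap_num.
 */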
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int eint_base = 0;
	void __iomem *reg;

	if (eint_num >= eint->hw->ap_num)
		eint_base = eint->hw->ap_num;

	reg = eint->base + offset + ((eint_num - eint_base) / 32) * 4;

	return reg;
}

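/*
 * Hardware debounce is only possible on lines that have a debounce counter
 * (the first db_cnt EINTs) and that are currently level-sensitive.
 */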
static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	if (eint_num < eint->hw->db_cnt && sens != MTK_EINT_EDGE_SENSITIVE)
		return 1;
	else
		return 0;
}

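/*
 * Emulate dual-edge triggering: program the line's polarity to the opposite
 * of the current GPIO level so the next transition in either direction
 * fires, re-reading the level after each write until it is stable. Returns
 * the level the polarity was programmed against.
 */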
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	u32 mask = BIT(hwirq & 0x1f);
	u32 port = (hwirq >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}

static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	eint->cur_mask[d->hwirq >> 5] &= ~mask;

	writel(mask, reg);
}

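/*
 * Unmasking also re-arms dual-edge lines by re-aligning the polarity with
 * the current pin level, in case the level changed while the line was
 * masked.
 */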
static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	eint->cur_mask[d->hwirq >> 5] |= mask;

	writel(mask, reg);

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);
}

static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint_num % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}

static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}

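/*
 * Program the trigger type: the polarity registers select high/rising vs.
 * low/falling, the sensitivity registers select level vs. edge. Both-edge
 * triggering is emulated in software by marking the line as dual_edge and
 * flipping the polarity to track the pin level.
 */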
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	u32 mask = BIT(d->hwirq & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->dual_edge[d->hwirq] = 1;
	else
		eint->dual_edge[d->hwirq] = 0;

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	if (eint->dual_edge[d->hwirq])
		mtk_eint_flip_edge(eint, d->hwirq);

	return 0;
}

static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	int shift = d->hwirq & 0x1f;
	int reg = d->hwirq >> 5;

	if (on)
		eint->wake_mask[reg] |= BIT(shift);
	else
		eint->wake_mask[reg] &= ~BIT(shift);

	return 0;
}

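/*
 * Apply a saved mask snapshot to the hardware: for each 32-line port, bits
 * that are clear in @buf are masked via mask_set and bits that are set are
 * unmasked via mask_clr. Used to switch between the wake mask and the
 * runtime mask across suspend/resume.
 */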
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, u32 *buf)
{
	int port;
	void __iomem *reg;

	for (port = 0; port < eint->hw->ports; port++) {
		reg = base + (port << 2);
		writel_relaxed(~buf[port], reg + eint->regs->mask_set);
		writel_relaxed(buf[port], reg + eint->regs->mask_clr);
	}
}

static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Cannot find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Cannot set pin to eint mode\n");
		return err;
	}

	return 0;
}

static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}

static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};

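/*
 * Bring the controller to a known state: write all ones to the domain
 * enable registers and mask every line, one 32-line bank at a time.
 */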
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *dom_en = eint->base + eint->regs->dom_en;
	void __iomem *mask_set = eint->base + eint->regs->mask_set;
	unsigned int i;

	for (i = 0; i < eint->hw->ap_num; i += 32) {
		writel(0xffffffff, dom_en);
		writel(0xffffffff, mask_set);
		dom_en += 4;
		mask_set += 4;
	}

	return 0;
}

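/*
 * If hardware debounce is enabled for this line, kick its debounce counter
 * by writing the per-line reset bit; each dbnc_ctrl/dbnc_set word packs
 * four lines at eight bits per line.
 */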
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;

	ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((index % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (index / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((index % 4) * 8);
		writel(rst, eint->base + ctrl_offset);
	}
}

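/*
 * Chained handler for the parent interrupt. Walks the status registers one
 * 32-line bank at a time and dispatches every pending line. Wake-only lines
 * are re-masked here, dual-edge lines get their polarity flipped after
 * handling (raising a soft interrupt if an edge could have been lost while
 * switching), and lines with hardware debounce get their counter reset.
 */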
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int status, eint_num;
	int offset, mask_offset, index, virq;
	void __iomem *reg = mtk_eint_get_offset(eint, 0, eint->regs->stat);
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (eint_num = 0; eint_num < eint->hw->ap_num; eint_num += 32,
	     reg += 4) {
		status = readl(reg);
		while (status) {
			offset = __ffs(status);
			mask_offset = eint_num >> 5;
			index = eint_num + offset;
			virq = irq_find_mapping(eint->domain, index);
			status &= ~BIT(offset);

			/*
			 * If we get an interrupt on a pin that was only
			 * requested as a wake source (with no real interrupt
			 * requested), mask the interrupt (as mtk_eint_resume
			 * would do anyway later in the resume sequence).
			 */
			if (eint->wake_mask[mask_offset] & BIT(offset) &&
			    !(eint->cur_mask[mask_offset] & BIT(offset))) {
				writel_relaxed(BIT(offset), reg -
					eint->regs->stat +
					eint->regs->mask_set);
			}

			dual_edge = eint->dual_edge[index];
			if (dual_edge) {
				/*
				 * Clear the soft-irq in case we raised it
				 * last time.
				 */
				writel(BIT(offset), reg - eint->regs->stat +
				       eint->regs->soft_clr);

				start_level =
				eint->gpio_xlate->get_gpio_state(eint->pctl,
								 index);
			}

			generic_handle_irq(virq);

			if (dual_edge) {
				curr_level = mtk_eint_flip_edge(eint, index);

				/*
				 * If the level changed, we might have lost an
				 * edge interrupt; raise it through the
				 * soft-irq.
				 */
				if (start_level != curr_level)
					writel(BIT(offset), reg -
					       eint->regs->stat +
					       eint->regs->soft_set);
			}

			if (index < eint->hw->db_cnt)
				mtk_eint_debounce_process(eint, index);
		}
	}
	chained_irq_exit(chip, desc);
}

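/*
 * On suspend, apply the wake mask so that only wake-enabled lines stay
 * unmasked; on resume, restore the runtime mask that was in effect before.
 */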
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);

int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_resume);

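/*
 * Configure hardware debounce for one EINT line. The smallest debounce_time
 * entry that covers the requested value is selected; an unmasked line is
 * masked while the new setting is programmed and the counter is reset, then
 * unmasked again.
 */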
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	static const unsigned int debounce_time[] = {500, 1000, 16000, 32000,
						     64000, 128000, 256000};
	struct irq_data *d;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (eint_num % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (eint_num / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (eint_num / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = ARRAY_SIZE(debounce_time);
	for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
		if (debounce <= debounce_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base + set_offset);

	/*
	 * Delay a while (more than 2T) for the hardware debounce counter
	 * reset to take effect.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);

int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);

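/*
 * One-time setup of the EINT block: fall back to the generic register
 * layout when none is supplied, allocate per-port mask bookkeeping and
 * per-line dual-edge flags, create a linear IRQ domain with one mapping per
 * line, reset the hardware, and install the chained handler on the parent
 * interrupt.
 */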
int mtk_eint_do_init(struct mtk_eint *eint)
{
	int i;

	/* If the client doesn't supply a register layout, use the generic one */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->wake_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				       sizeof(*eint->wake_mask), GFP_KERNEL);
	if (!eint->wake_mask)
		return -ENOMEM;

	eint->cur_mask = devm_kcalloc(eint->dev, eint->hw->ports,
				      sizeof(*eint->cur_mask), GFP_KERNEL);
	if (!eint->cur_mask)
		return -ENOMEM;

	eint->dual_edge = devm_kcalloc(eint->dev, eint->hw->ap_num,
				       sizeof(int), GFP_KERNEL);
	if (!eint->dual_edge)
		return -ENOMEM;

	eint->domain = irq_domain_add_linear(eint->dev->of_node,
					     eint->hw->ap_num,
					     &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		return -ENOMEM;

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		int virq = irq_create_mapping(eint->domain, i);

		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek EINT Driver");