xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/ath/wil6210/interrupt.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>

#include "wil6210.h"
#include "trace.h"

/* Theory of operation:
 *
 * There is an ISR pseudo-cause register,
 * dma_rgf->DMA_RGF.PSEUDO_CAUSE.PSEUDO_CAUSE.
 * Its bits represent the OR'ed bits of the 3 real ISR registers:
 * TX, RX, and MISC.
 *
 * Registers may be configured for either "write 1 to clear" or
 * "clear on read" mode.
 *
 * When handling an interrupt, one has to mask/unmask interrupts for the
 * real ISR registers, or the hardware may malfunction.
 */
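
/* Flow implemented below:
 *
 * wil6210_hardirq() reads the pseudo-cause register, masks the pseudo IRQ,
 * and dispatches BIT_DMA_PSEUDO_CAUSE_RX/TX/MISC to the per-cause handlers
 * (wil6210_irq_rx()/wil6210_irq_tx() or their _edma variants, and
 * wil6210_irq_misc()). Each per-cause handler masks its own interrupts,
 * reads-and-clears the cause bits, and either schedules NAPI (leaving the
 * IRQ masked until NAPI processing re-enables it) or unmasks before
 * returning. MISC work that must run in non-atomic context is deferred to
 * wil6210_irq_misc_thread().
 */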

#define WIL6210_IRQ_DISABLE		(0xFFFFFFFFUL)
#define WIL6210_IRQ_DISABLE_NO_HALP	(0xF7FFFFFFUL)
#define WIL6210_IMC_RX		(BIT_DMA_EP_RX_ICR_RX_DONE | \
				 BIT_DMA_EP_RX_ICR_RX_HTRSH)
#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
				    (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX		(BIT_DMA_EP_TX_ICR_TX_DONE | \
				BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
#define WIL6210_IMC_TX_EDMA		BIT_TX_STATUS_IRQ
#define WIL6210_IMC_RX_EDMA		BIT_RX_STATUS_IRQ
#define WIL6210_IMC_MISC_NO_HALP	(ISR_MISC_FW_READY | \
					 ISR_MISC_MBOX_EVT | \
					 ISR_MISC_FW_ERROR)
#define WIL6210_IMC_MISC		(WIL6210_IMC_MISC_NO_HALP | \
					 BIT_DMA_EP_MISC_ICR_HALP)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
					BIT_DMA_PSEUDO_CAUSE_TX | \
					BIT_DMA_PSEUDO_CAUSE_MISC))

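/* Editorial note: the WIL_ICR_ICC* values below are written to each block's
 * ICC register in wil_unmask_irq(). In the clear-on-read configuration,
 * all-ones appears to select COR for every cause bit, while the MISC value
 * 0xF7FFFFFF leaves one bit out (presumably BIT_DMA_EP_MISC_ICR_HALP), so
 * the HALP cause would stay write-1-to-clear and is only cleared explicitly
 * in wil6210_clear_halp().
 */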
#if defined(CONFIG_WIL6210_ISR_COR)
/* configure to Clear-On-Read mode */
#define WIL_ICR_ICC_VALUE	(0xFFFFFFFFUL)
#define WIL_ICR_ICC_MISC_VALUE	(0xF7FFFFFFUL)

static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
}
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
#define WIL_ICR_ICC_VALUE	(0UL)
#define WIL_ICR_ICC_MISC_VALUE	(0UL)

static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
	writel(x, addr);
}
#endif /* defined(CONFIG_WIL6210_ISR_COR) */

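/* Read an ICR register and make sure its cause bits get cleared: in the
 * clear-on-read build the read itself clears them, otherwise the value
 * just read is written back (write-1-to-clear).
 */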
static inline u32 wil_ioread32_and_clear(void __iomem *addr)
{
	u32 x = readl(addr);

	wil_icr_clear(x, addr);

	return x;
}

static void wil6210_mask_irq_tx(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMS),
	      WIL6210_IRQ_DISABLE);
}

static void wil6210_mask_irq_tx_edma(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMS),
	      WIL6210_IRQ_DISABLE);
}

static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMS),
	      WIL6210_IRQ_DISABLE);
}

static void wil6210_mask_irq_rx_edma(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMS),
	      WIL6210_IRQ_DISABLE);
}

static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
	wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
		    mask_halp ? "true" : "false");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
	      mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
}

void wil6210_mask_halp(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "mask_halp\n");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
	      BIT_DMA_EP_MISC_ICR_HALP);
}

static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "mask_irq_pseudo\n");

	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);

	clear_bit(wil_status_irqen, wil->status);
}

void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, IMC),
	      WIL6210_IMC_TX);
}

void wil6210_unmask_irq_tx_edma(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, IMC),
	      WIL6210_IMC_TX_EDMA);
}

void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
	bool unmask_rx_htrsh = atomic_read(&wil->connected_vifs) > 0;

	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
	      unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}

void wil6210_unmask_irq_rx_edma(struct wil6210_priv *wil)
{
	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, IMC),
	      WIL6210_IMC_RX_EDMA);
}

static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
	wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
		    unmask_halp ? "true" : "false");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
	      unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
}

static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "unmask_halp\n");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
	      BIT_DMA_EP_MISC_ICR_HALP);
}

static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "unmask_irq_pseudo\n");

	set_bit(wil_status_irqen, wil->status);

	wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_PSEUDO_MASK);
}

void wil_mask_irq(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "mask_irq\n");

	wil6210_mask_irq_tx(wil);
	wil6210_mask_irq_tx_edma(wil);
	wil6210_mask_irq_rx(wil);
	wil6210_mask_irq_rx_edma(wil);
	wil6210_mask_irq_misc(wil, true);
	wil6210_mask_irq_pseudo(wil);
}

void wil_unmask_irq(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "unmask_irq\n");

	wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
	      WIL_ICR_ICC_VALUE);
	wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
	      WIL_ICR_ICC_VALUE);
	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
	      WIL_ICR_ICC_MISC_VALUE);
	wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
	      WIL_ICR_ICC_VALUE);
	wil_w(wil, RGF_INT_GEN_RX_ICR + offsetof(struct RGF_ICR, ICC),
	      WIL_ICR_ICC_VALUE);

	wil6210_unmask_irq_pseudo(wil);
	if (wil->use_enhanced_dma_hw) {
		wil6210_unmask_irq_tx_edma(wil);
		wil6210_unmask_irq_rx_edma(wil);
	} else {
		wil6210_unmask_irq_tx(wil);
		wil6210_unmask_irq_rx(wil);
	}
	wil6210_unmask_irq_misc(wil, true);
}

void wil_configure_interrupt_moderation_edma(struct wil6210_priv *wil)
{
	u32 moderation;

	wil_s(wil, RGF_INT_GEN_IDLE_TIME_LIMIT, WIL_EDMA_IDLE_TIME_LIMIT_USEC);

	wil_s(wil, RGF_INT_GEN_TIME_UNIT_LIMIT, WIL_EDMA_TIME_UNIT_CLK_CYCLES);

	/* Update RX and TX moderation */
	moderation = wil->rx_max_burst_duration |
		(WIL_EDMA_AGG_WATERMARK << WIL_EDMA_AGG_WATERMARK_POS);
	wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_0, moderation);
	wil_w(wil, RGF_INT_CTRL_INT_GEN_CFG_1, moderation);

	/* Treat special events as regular
	 * (set bit 0 to 0x1 and clear bits 1-8)
	 */
	wil_c(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1FE);
	wil_s(wil, RGF_INT_COUNT_ON_SPECIAL_EVT, 0x1);
}

void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
	struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr;

	wil_dbg_irq(wil, "configure_interrupt_moderation\n");

	/* disable interrupt moderation for monitor
	 * to get better timestamp precision
	 */
	if (wdev->iftype == NL80211_IFTYPE_MONITOR)
		return;

	/* Disable and clear tx counter before (re)configuration */
	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL, BIT_DMA_ITR_TX_CNT_CTL_CLR);
	wil_w(wil, RGF_DMA_ITR_TX_CNT_TRSH, wil->tx_max_burst_duration);
	wil_info(wil, "set ITR_TX_CNT_TRSH = %d usec\n",
		 wil->tx_max_burst_duration);
	/* Configure TX max burst duration timer to use usec units */
	wil_w(wil, RGF_DMA_ITR_TX_CNT_CTL,
	      BIT_DMA_ITR_TX_CNT_CTL_EN | BIT_DMA_ITR_TX_CNT_CTL_EXT_TIC_SEL);

	/* Disable and clear tx idle counter before (re)configuration */
	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_CLR);
	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_TRSH, wil->tx_interframe_timeout);
	wil_info(wil, "set ITR_TX_IDL_CNT_TRSH = %d usec\n",
		 wil->tx_interframe_timeout);
	/* Configure TX idle interval timer to use usec units */
	wil_w(wil, RGF_DMA_ITR_TX_IDL_CNT_CTL, BIT_DMA_ITR_TX_IDL_CNT_CTL_EN |
	      BIT_DMA_ITR_TX_IDL_CNT_CTL_EXT_TIC_SEL);

	/* Disable and clear rx counter before (re)configuration */
	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL, BIT_DMA_ITR_RX_CNT_CTL_CLR);
	wil_w(wil, RGF_DMA_ITR_RX_CNT_TRSH, wil->rx_max_burst_duration);
	wil_info(wil, "set ITR_RX_CNT_TRSH = %d usec\n",
		 wil->rx_max_burst_duration);
	/* Configure RX max burst duration timer to use usec units */
	wil_w(wil, RGF_DMA_ITR_RX_CNT_CTL,
	      BIT_DMA_ITR_RX_CNT_CTL_EN | BIT_DMA_ITR_RX_CNT_CTL_EXT_TIC_SEL);

	/* Disable and clear rx idle counter before (re)configuration */
	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_CLR);
	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_TRSH, wil->rx_interframe_timeout);
	wil_info(wil, "set ITR_RX_IDL_CNT_TRSH = %d usec\n",
		 wil->rx_interframe_timeout);
	/* Configure RX idle interval timer to use usec units */
	wil_w(wil, RGF_DMA_ITR_RX_IDL_CNT_CTL, BIT_DMA_ITR_RX_IDL_CNT_CTL_EN |
	      BIT_DMA_ITR_RX_IDL_CNT_CTL_EXT_TIC_SEL);
}

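/* The four DMA interrupt handlers below follow the same pattern: mask the
 * cause block, read-and-clear ICR, schedule NAPI for Rx/Tx completion work
 * (in which case the IRQ stays masked until NAPI processing re-enables it),
 * warn about any unhandled bits, and unmask only if NAPI was not scheduled.
 */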
static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr;
	bool need_unmask = true;

	wil6210_mask_irq_rx(wil);

	isr = wil_ioread32_and_clear(wil->csr +
				     HOSTADDR(RGF_DMA_EP_RX_ICR) +
				     offsetof(struct RGF_ICR, ICR));

	trace_wil6210_irq_rx(isr);
	wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);

	if (unlikely(!isr)) {
		wil_err_ratelimited(wil, "spurious IRQ: RX\n");
		wil6210_unmask_irq_rx(wil);
		return IRQ_NONE;
	}

	/* RX_DONE and RX_HTRSH interrupts are the same if interrupt
	 * moderation is not used. Interrupt moderation may cause RX
	 * buffer overflow while RX_DONE is delayed. The required
	 * action is always the same - drain the accumulated
	 * packets from the RX ring.
	 */
	if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
			  BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
		wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
			    isr);

		isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
			 BIT_DMA_EP_RX_ICR_RX_HTRSH);
		if (likely(test_bit(wil_status_fwready, wil->status))) {
			if (likely(test_bit(wil_status_napi_en, wil->status))) {
				wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
				need_unmask = false;
				napi_schedule(&wil->napi_rx);
			} else {
				wil_err_ratelimited(
					wil,
					"Got Rx interrupt while stopping interface\n");
			}
		} else {
			wil_err_ratelimited(wil, "Got Rx interrupt while in reset\n");
		}
	}

	if (unlikely(isr))
		wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);

	/* Rx IRQ will be enabled when NAPI processing finished */

	atomic_inc(&wil->isr_count_rx);

	if (unlikely(need_unmask))
		wil6210_unmask_irq_rx(wil);

	return IRQ_HANDLED;
}

static irqreturn_t wil6210_irq_rx_edma(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr;
	bool need_unmask = true;

	wil6210_mask_irq_rx_edma(wil);

	isr = wil_ioread32_and_clear(wil->csr +
				     HOSTADDR(RGF_INT_GEN_RX_ICR) +
				     offsetof(struct RGF_ICR, ICR));

	trace_wil6210_irq_rx(isr);
	wil_dbg_irq(wil, "ISR RX 0x%08x\n", isr);

	if (unlikely(!isr)) {
		wil_err(wil, "spurious IRQ: RX\n");
		wil6210_unmask_irq_rx_edma(wil);
		return IRQ_NONE;
	}

	if (likely(isr & BIT_RX_STATUS_IRQ)) {
		wil_dbg_irq(wil, "RX status ring\n");
		isr &= ~BIT_RX_STATUS_IRQ;
		if (likely(test_bit(wil_status_fwready, wil->status))) {
			if (likely(test_bit(wil_status_napi_en, wil->status))) {
				wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
				need_unmask = false;
				napi_schedule(&wil->napi_rx);
			} else {
				wil_err(wil,
					"Got Rx interrupt while stopping interface\n");
			}
		} else {
			wil_err(wil, "Got Rx interrupt while in reset\n");
		}
	}

	if (unlikely(isr))
		wil_err(wil, "un-handled RX ISR bits 0x%08x\n", isr);

	/* Rx IRQ will be enabled when NAPI processing finished */

	atomic_inc(&wil->isr_count_rx);

	if (unlikely(need_unmask))
		wil6210_unmask_irq_rx_edma(wil);

	return IRQ_HANDLED;
}

static irqreturn_t wil6210_irq_tx_edma(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr;
	bool need_unmask = true;

	wil6210_mask_irq_tx_edma(wil);

	isr = wil_ioread32_and_clear(wil->csr +
				     HOSTADDR(RGF_INT_GEN_TX_ICR) +
				     offsetof(struct RGF_ICR, ICR));

	trace_wil6210_irq_tx(isr);
	wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);

	if (unlikely(!isr)) {
		wil_err(wil, "spurious IRQ: TX\n");
		wil6210_unmask_irq_tx_edma(wil);
		return IRQ_NONE;
	}

	if (likely(isr & BIT_TX_STATUS_IRQ)) {
		wil_dbg_irq(wil, "TX status ring\n");
		isr &= ~BIT_TX_STATUS_IRQ;
		if (likely(test_bit(wil_status_fwready, wil->status))) {
			wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
			need_unmask = false;
			napi_schedule(&wil->napi_tx);
		} else {
			wil_err(wil, "Got Tx status ring IRQ while in reset\n");
		}
	}

	if (unlikely(isr))
		wil_err(wil, "un-handled TX ISR bits 0x%08x\n", isr);

	/* Tx IRQ will be enabled when NAPI processing finished */

	atomic_inc(&wil->isr_count_tx);

	if (unlikely(need_unmask))
		wil6210_unmask_irq_tx_edma(wil);

	return IRQ_HANDLED;
}

static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr;
	bool need_unmask = true;

	wil6210_mask_irq_tx(wil);

	isr = wil_ioread32_and_clear(wil->csr +
				     HOSTADDR(RGF_DMA_EP_TX_ICR) +
				     offsetof(struct RGF_ICR, ICR));

	trace_wil6210_irq_tx(isr);
	wil_dbg_irq(wil, "ISR TX 0x%08x\n", isr);

	if (unlikely(!isr)) {
		wil_err_ratelimited(wil, "spurious IRQ: TX\n");
		wil6210_unmask_irq_tx(wil);
		return IRQ_NONE;
	}

	if (likely(isr & BIT_DMA_EP_TX_ICR_TX_DONE)) {
		wil_dbg_irq(wil, "TX done\n");
		isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
		/* also clear all per-VRING interrupts (bits 0..24) */
		isr &= ~(BIT(25) - 1UL);
		if (likely(test_bit(wil_status_fwready, wil->status))) {
			wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
			need_unmask = false;
			napi_schedule(&wil->napi_tx);
		} else {
			wil_err_ratelimited(wil, "Got Tx interrupt while in reset\n");
		}
	}

	if (unlikely(isr))
		wil_err_ratelimited(wil, "un-handled TX ISR bits 0x%08x\n",
				    isr);

	/* Tx IRQ will be enabled when NAPI processing finished */

	atomic_inc(&wil->isr_count_tx);

	if (unlikely(need_unmask))
		wil6210_unmask_irq_tx(wil);

	return IRQ_HANDLED;
}

static void wil_notify_fw_error(struct wil6210_priv *wil)
{
	struct device *dev = &wil->main_ndev->dev;
	char *envp[3] = {
		[0] = "SOURCE=wil6210",
		[1] = "EVENT=FW_ERROR",
		[2] = NULL,
	};
	wil_err(wil, "Notify about firmware error\n");
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}

static void wil_cache_mbox_regs(struct wil6210_priv *wil)
{
	/* make a shadow copy of registers that should not change at run time */
	wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
			     sizeof(struct wil6210_mbox_ctl));
	wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
	wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
}

static bool wil_validate_mbox_regs(struct wil6210_priv *wil)
{
	size_t min_size = sizeof(struct wil6210_mbox_hdr) +
		sizeof(struct wmi_cmd_hdr);

	if (wil->mbox_ctl.rx.entry_size < min_size) {
		wil_err(wil, "rx mbox entry too small (%d)\n",
			wil->mbox_ctl.rx.entry_size);
		return false;
	}
	if (wil->mbox_ctl.tx.entry_size < min_size) {
		wil_err(wil, "tx mbox entry too small (%d)\n",
			wil->mbox_ctl.tx.entry_size);
		return false;
	}

	return true;
}

static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr;

	wil6210_mask_irq_misc(wil, false);

	isr = wil_ioread32_and_clear(wil->csr +
				     HOSTADDR(RGF_DMA_EP_MISC_ICR) +
				     offsetof(struct RGF_ICR, ICR));

	trace_wil6210_irq_misc(isr);
	wil_dbg_irq(wil, "ISR MISC 0x%08x\n", isr);

	if (!isr) {
		wil_err(wil, "spurious IRQ: MISC\n");
		wil6210_unmask_irq_misc(wil, false);
		return IRQ_NONE;
	}

	if (isr & ISR_MISC_FW_ERROR) {
		u32 fw_assert_code = wil_r(wil, wil->rgf_fw_assert_code_addr);
		u32 ucode_assert_code =
			wil_r(wil, wil->rgf_ucode_assert_code_addr);

		wil_err(wil,
			"Firmware error detected, assert codes FW 0x%08x, UCODE 0x%08x\n",
			fw_assert_code, ucode_assert_code);
		clear_bit(wil_status_fwready, wil->status);
		/* do not clear @isr here - the second part is done in the
		 * threaded handler, where user space gets notified; that
		 * must happen in non-atomic context
		 */
	}

	if (isr & ISR_MISC_FW_READY) {
		wil_dbg_irq(wil, "IRQ: FW ready\n");
		wil_cache_mbox_regs(wil);
		if (wil_validate_mbox_regs(wil))
			set_bit(wil_status_mbox_ready, wil->status);
		/* Actual FW readiness is indicated by the
		 * WMI_FW_READY_EVENTID event
		 */
		isr &= ~ISR_MISC_FW_READY;
	}

	if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
		isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
		if (wil->halp.handle_icr) {
			/* no need to handle HALP ICRs until next vote */
			wil->halp.handle_icr = false;
			wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
			wil6210_mask_irq_misc(wil, true);
			complete(&wil->halp.comp);
		}
	}

	wil->isr_misc = isr;

	if (isr) {
		return IRQ_WAKE_THREAD;
	} else {
		wil6210_unmask_irq_misc(wil, false);
		return IRQ_HANDLED;
	}
}

static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;
	u32 isr = wil->isr_misc;

	trace_wil6210_irq_misc_thread(isr);
	wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);

	if (isr & ISR_MISC_FW_ERROR) {
		wil->recovery_state = fw_recovery_pending;
		wil_fw_core_dump(wil);
		wil_notify_fw_error(wil);
		isr &= ~ISR_MISC_FW_ERROR;
		if (wil->platform_ops.notify) {
			wil_err(wil, "notify platform driver about FW crash");
			wil->platform_ops.notify(wil->platform_handle,
						 WIL_PLATFORM_EVT_FW_CRASH);
		} else {
			wil_fw_error_recovery(wil);
		}
	}
	if (isr & ISR_MISC_MBOX_EVT) {
		wil_dbg_irq(wil, "MBOX event\n");
		wmi_recv_cmd(wil);
		isr &= ~ISR_MISC_MBOX_EVT;
	}

	if (isr)
		wil_dbg_irq(wil, "un-handled MISC ISR bits 0x%08x\n", isr);

	wil->isr_misc = 0;

	wil6210_unmask_irq_misc(wil, false);

	/* in the non-3-MSI case this is done inside wil6210_thread_irq,
	 * because it has to be done after unmasking the pseudo-cause IRQ.
	 */
	if (wil->n_msi == 3 && wil->suspend_resp_rcvd) {
		wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
		wil->suspend_resp_comp = true;
		wake_up_interruptible(&wil->wq);
	}

	return IRQ_HANDLED;
}

/* thread IRQ handler */
static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
{
	struct wil6210_priv *wil = cookie;

	wil_dbg_irq(wil, "Thread IRQ\n");
	/* Discover real IRQ cause */
	if (wil->isr_misc)
		wil6210_irq_misc_thread(irq, cookie);

	wil6210_unmask_irq_pseudo(wil);

	if (wil->suspend_resp_rcvd) {
		wil_dbg_irq(wil, "set suspend_resp_comp to true\n");
		wil->suspend_resp_comp = true;
		wake_up_interruptible(&wil->wq);
	}

	return IRQ_HANDLED;
}

/* DEBUG
 * There is a subtle hardware bug that causes an IRQ to be raised when it
 * should be masked. It is quite rare and hard to debug.
 *
 * Catch the issue if it happens and print everything available.
 */
static int wil6210_debug_irq_mask(struct wil6210_priv *wil, u32 pseudo_cause)
{
	u32 icm_rx, icr_rx, imv_rx;
	u32 icm_tx, icr_tx, imv_tx;
	u32 icm_misc, icr_misc, imv_misc;

	if (!test_bit(wil_status_irqen, wil->status)) {
		if (wil->use_enhanced_dma_hw) {
			icm_rx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_INT_GEN_RX_ICR) +
					offsetof(struct RGF_ICR, ICM));
			icr_rx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_INT_GEN_RX_ICR) +
					offsetof(struct RGF_ICR, ICR));
			imv_rx = wil_r(wil, RGF_INT_GEN_RX_ICR +
				       offsetof(struct RGF_ICR, IMV));
			icm_tx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_INT_GEN_TX_ICR) +
					offsetof(struct RGF_ICR, ICM));
			icr_tx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_INT_GEN_TX_ICR) +
					offsetof(struct RGF_ICR, ICR));
			imv_tx = wil_r(wil, RGF_INT_GEN_TX_ICR +
				       offsetof(struct RGF_ICR, IMV));
		} else {
			icm_rx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_DMA_EP_RX_ICR) +
					offsetof(struct RGF_ICR, ICM));
			icr_rx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_DMA_EP_RX_ICR) +
					offsetof(struct RGF_ICR, ICR));
			imv_rx = wil_r(wil, RGF_DMA_EP_RX_ICR +
				       offsetof(struct RGF_ICR, IMV));
			icm_tx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_DMA_EP_TX_ICR) +
					offsetof(struct RGF_ICR, ICM));
			icr_tx = wil_ioread32_and_clear(wil->csr +
					HOSTADDR(RGF_DMA_EP_TX_ICR) +
					offsetof(struct RGF_ICR, ICR));
			imv_tx = wil_r(wil, RGF_DMA_EP_TX_ICR +
				       offsetof(struct RGF_ICR, IMV));
		}
		icm_misc = wil_ioread32_and_clear(wil->csr +
				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
				offsetof(struct RGF_ICR, ICM));
		icr_misc = wil_ioread32_and_clear(wil->csr +
				HOSTADDR(RGF_DMA_EP_MISC_ICR) +
				offsetof(struct RGF_ICR, ICR));
		imv_misc = wil_r(wil, RGF_DMA_EP_MISC_ICR +
				 offsetof(struct RGF_ICR, IMV));

		/* HALP interrupt can be unmasked when misc interrupts are
		 * masked
		 */
		if (icr_misc & BIT_DMA_EP_MISC_ICR_HALP)
			return 0;

		wil_err(wil, "IRQ when it should be masked: pseudo 0x%08x\n"
			"Rx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
			"Tx   icm:icr:imv 0x%08x 0x%08x 0x%08x\n"
			"Misc icm:icr:imv 0x%08x 0x%08x 0x%08x\n",
			pseudo_cause,
			icm_rx, icr_rx, imv_rx,
			icm_tx, icr_tx, imv_tx,
			icm_misc, icr_misc, imv_misc);

		return -EINVAL;
	}

	return 0;
}

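/* Top-level hard IRQ handler, used when all causes share a single vector
 * (INTx or single MSI); with 3 MSI vectors the per-cause handlers are
 * requested directly, see wil6210_request_3msi() and wil6210_init_irq().
 */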
static irqreturn_t wil6210_hardirq(int irq, void *cookie)
{
	irqreturn_t rc = IRQ_HANDLED;
	struct wil6210_priv *wil = cookie;
	u32 pseudo_cause = wil_r(wil, RGF_DMA_PSEUDO_CAUSE);

	/* pseudo_cause is Clear-On-Read, no need to ACK */
	if (unlikely((pseudo_cause == 0) || ((pseudo_cause & 0xff) == 0xff)))
		return IRQ_NONE;

	/* IRQ mask debug */
	if (unlikely(wil6210_debug_irq_mask(wil, pseudo_cause)))
		return IRQ_NONE;

	trace_wil6210_irq_pseudo(pseudo_cause);
	wil_dbg_irq(wil, "Pseudo IRQ 0x%08x\n", pseudo_cause);

	wil6210_mask_irq_pseudo(wil);

	/* Discover the real IRQ cause.
	 * There are 2 possible phases for every IRQ:
	 * - hard IRQ handler called right here
	 * - threaded handler called later
	 *
	 * The hard IRQ handler reads and clears the ISR.
	 *
	 * If a threaded handler is requested, the hard IRQ handler
	 * returns IRQ_WAKE_THREAD and saves the ISR register value
	 * for the threaded handler's use.
	 *
	 * Voting for the wake thread - at least 1 vote is needed.
	 */
	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_RX) &&
	    (wil->txrx_ops.irq_rx(irq, cookie) == IRQ_WAKE_THREAD))
		rc = IRQ_WAKE_THREAD;

	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_TX) &&
	    (wil->txrx_ops.irq_tx(irq, cookie) == IRQ_WAKE_THREAD))
		rc = IRQ_WAKE_THREAD;

	if ((pseudo_cause & BIT_DMA_PSEUDO_CAUSE_MISC) &&
	    (wil6210_irq_misc(irq, cookie) == IRQ_WAKE_THREAD))
		rc = IRQ_WAKE_THREAD;

	/* if the thread is requested, it will unmask the IRQ */
	if (rc != IRQ_WAKE_THREAD)
		wil6210_unmask_irq_pseudo(wil);

	return rc;
}

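/* With 3 MSI vectors each cause gets its own vector, so the pseudo-cause
 * dispatcher is not needed. Only the MISC vector uses a threaded handler,
 * since FW-error and mailbox processing must run in non-atomic context.
 */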
static int wil6210_request_3msi(struct wil6210_priv *wil, int irq)
{
	int rc;

	/* IRQs are in the following order:
	 * - Tx
	 * - Rx
	 * - Misc
	 */
	rc = request_irq(irq, wil->txrx_ops.irq_tx, IRQF_SHARED,
			 WIL_NAME "_tx", wil);
	if (rc)
		return rc;

	rc = request_irq(irq + 1, wil->txrx_ops.irq_rx, IRQF_SHARED,
			 WIL_NAME "_rx", wil);
	if (rc)
		goto free0;

	rc = request_threaded_irq(irq + 2, wil6210_irq_misc,
				  wil6210_irq_misc_thread,
				  IRQF_SHARED, WIL_NAME "_misc", wil);
	if (rc)
		goto free1;

	return 0;
free1:
	free_irq(irq + 1, wil);
free0:
	free_irq(irq, wil);

	return rc;
}

/* can't use wil_ioread32_and_clear because ICC value is not set yet */
static inline void wil_clear32(void __iomem *addr)
{
	u32 x = readl(addr);

	writel(x, addr);
}

void wil6210_clear_irq(struct wil6210_priv *wil)
{
	wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_RX_ICR) +
		    offsetof(struct RGF_ICR, ICR));
	wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
		    offsetof(struct RGF_ICR, ICR));
	wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
		    offsetof(struct RGF_ICR, ICR));
	wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
		    offsetof(struct RGF_ICR, ICR));
	wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
		    offsetof(struct RGF_ICR, ICR));
	wmb(); /* make sure write completed */
}

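/* HALP: wil6210_set_halp() raises the HALP interrupt from software by
 * writing the HALP bit to the MISC ICS register; wil6210_clear_halp()
 * clears that cause bit and unmasks HALP again.
 */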
void wil6210_set_halp(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "set_halp\n");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
	      BIT_DMA_EP_MISC_ICR_HALP);
}

void wil6210_clear_halp(struct wil6210_priv *wil)
{
	wil_dbg_irq(wil, "clear_halp\n");

	wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
	      BIT_DMA_EP_MISC_ICR_HALP);
	wil6210_unmask_halp(wil);
}

int wil6210_init_irq(struct wil6210_priv *wil, int irq)
{
	int rc;

	wil_dbg_misc(wil, "init_irq: %s, n_msi=%d\n",
		     wil->n_msi ? "MSI" : "INTx", wil->n_msi);

	if (wil->use_enhanced_dma_hw) {
		wil->txrx_ops.irq_tx = wil6210_irq_tx_edma;
		wil->txrx_ops.irq_rx = wil6210_irq_rx_edma;
	} else {
		wil->txrx_ops.irq_tx = wil6210_irq_tx;
		wil->txrx_ops.irq_rx = wil6210_irq_rx;
	}

	if (wil->n_msi == 3)
		rc = wil6210_request_3msi(wil, irq);
	else
		rc = request_threaded_irq(irq, wil6210_hardirq,
					  wil6210_thread_irq,
					  wil->n_msi ? 0 : IRQF_SHARED,
					  WIL_NAME, wil);
	return rc;
}

void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
{
	wil_dbg_misc(wil, "fini_irq:\n");

	wil_mask_irq(wil);
	free_irq(irq, wil);
	if (wil->n_msi == 3) {
		free_irq(irq + 1, wil);
		free_irq(irq + 2, wil);
	}
}