/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

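/*
 * csio_nondata_isr() - Non-data interrupt handler (MSIX).
 * @irq: Interrupt number.
 * @dev_id: The csio_hw structure passed to request_irq().
 *
 * Handles slow-path interrupts and mailbox completions, and schedules
 * the FW event worker when new events are pending.
 */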
44*4882a593Smuzhiyun static irqreturn_t
csio_nondata_isr(int irq,void * dev_id)45*4882a593Smuzhiyun csio_nondata_isr(int irq, void *dev_id)
46*4882a593Smuzhiyun {
47*4882a593Smuzhiyun 	struct csio_hw *hw = (struct csio_hw *) dev_id;
48*4882a593Smuzhiyun 	int rv;
49*4882a593Smuzhiyun 	unsigned long flags;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	if (unlikely(!hw))
52*4882a593Smuzhiyun 		return IRQ_NONE;
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun 	if (unlikely(pci_channel_offline(hw->pdev))) {
55*4882a593Smuzhiyun 		CSIO_INC_STATS(hw, n_pcich_offline);
56*4882a593Smuzhiyun 		return IRQ_NONE;
57*4882a593Smuzhiyun 	}
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	spin_lock_irqsave(&hw->lock, flags);
60*4882a593Smuzhiyun 	csio_hw_slow_intr_handler(hw);
61*4882a593Smuzhiyun 	rv = csio_mb_isr_handler(hw);
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
64*4882a593Smuzhiyun 		hw->flags |= CSIO_HWF_FWEVT_PENDING;
65*4882a593Smuzhiyun 		spin_unlock_irqrestore(&hw->lock, flags);
66*4882a593Smuzhiyun 		schedule_work(&hw->evtq_work);
67*4882a593Smuzhiyun 		return IRQ_HANDLED;
68*4882a593Smuzhiyun 	}
69*4882a593Smuzhiyun 	spin_unlock_irqrestore(&hw->lock, flags);
70*4882a593Smuzhiyun 	return IRQ_HANDLED;
71*4882a593Smuzhiyun }
72*4882a593Smuzhiyun 
/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the handler for FW events. It is shared between
 * the MSIX and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: The csio_hw structure.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request (unused here).
 * @len: Length of the WR (unused here).
 * @flb: Freelist buffer array (unused here).
 * @priv: Private data (unused here).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			   struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue onto which completed ioreqs are queued.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				    subop ? "Close" : "Abort",
				    ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os that the driver thinks
			 * aborts have timed out. If there is a race caused by
			 * FW completing the abort at the exact same time that
			 * the driver has detected the abort timeout, the
			 * following check prevents scsi_done from being called
			 * twice for the same command: once from the
			 * eh_abort_handler, another from
			 * csio_scsi_isr_handler(). This also avoids the need
			 * to check if csio_scsi_cmnd(req) is NULL in the
			 * fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given SCSI IQ by calling
 * csio_wr_process_iq(). Completions are yanked out into a local
 * queue (cbfn_q) and their io_cbfns are called. Once done, the
 * ioreqs are returned to the freelist.
 * This routine is shared between the MSIX and INTx handlers.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: The ingress queue (struct csio_q) for this vector.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The work request (unused here).
 * @len: Length of the WR (unused here).
 * @flb: Freelist buffer array (unused here).
 * @priv: The ingress queue (struct csio_q) to process.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The csio_hw structure.
 *
 * Handles slow-path interrupts, drains the INTx forward interrupt
 * queue, and processes mailbox completions, scheduling the FW event
 * worker when new events are pending.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}

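/*
 * csio_add_msix_desc - Fill in descriptive names for the MSIX vectors.
 * @hw: HW module.
 *
 * Names the non-data and FW event vectors, then one vector per SCSI
 * queue set, keyed by PCI bus/device/function.
 */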
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}

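/*
 * csio_request_irqs - Request IRQs for the allocated vectors.
 * @hw: HW module.
 *
 * In MSI/INTx mode a single shared handler (csio_fcoe_isr) services
 * everything. In MSIX mode, one vector each is requested for non-data
 * and FW events, followed by one vector per SCSI queue set. On failure,
 * all IRQs requested so far are freed.
 */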
int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;
	struct pci_dev *pdev = hw->pdev;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
				hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
				KBUILD_MODNAME, hw);
		if (rv) {
			csio_err(hw, "Failed to allocate interrupt line.\n");
			goto out_free_irqs;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = hw;

	rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(pci_irq_vector(pdev, k), csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       pci_irq_vector(pdev, k), rv);
				goto out_free_irqs;
			}

			entryp[k].dev_id = q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
	return 0;

out_free_irqs:
	for (i = 0; i < k; i++)
		free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
	pci_free_irq_vectors(hw->pdev);
	return -EINVAL;
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

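/*
 * csio_calc_sets - irq_affinity callback to size the affinity sets.
 * @affd: Affinity descriptor; ->priv carries the csio_hw pointer.
 * @nvecs: Number of SCSI vectors available for spreading.
 *
 * Splits the SCSI vectors evenly across ports, one affinity set per
 * port; falls back to a single set if there are fewer vectors than
 * ports.
 */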
static void csio_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
{
	struct csio_hw *hw = affd->priv;
	u8 i;

	if (!nvecs)
		return;

	if (nvecs < hw->num_pports) {
		affd->nr_sets = 1;
		affd->set_size[0] = nvecs;
		return;
	}

	affd->nr_sets = hw->num_pports;
	for (i = 0; i < hw->num_pports; i++)
		affd->set_size[i] = nvecs / hw->num_pports;
}

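/*
 * csio_enable_msix - Allocate MSIX vectors and distribute them.
 * @hw: HW module.
 *
 * Allocates between (num_pports + CSIO_EXTRA_VECS) and
 * (num_sqsets + CSIO_EXTRA_VECS) vectors with affinity spreading,
 * capped by the FW-configured #niqs where applicable. Trims the SCSI
 * queue sets if fewer vectors were granted, then assigns interrupt
 * indices to the non-data, FW event and SCSI queues.
 */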
static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;
	struct irq_affinity desc = {
		.pre_vectors = CSIO_EXTRA_VECS,
		.calc_sets = csio_calc_sets,
		.priv = hw,
	};

	if (hw->num_pports > IRQ_AFFINITY_MAX_SETS)
		return -ENOSPC;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	if (cnt < 0)
		return cnt;

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, k);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
	csio_set_fwevt_intr_idx(hw, k++);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = n;
		}

		k += info->max_cpus;
	}

	return 0;
}

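/*
 * csio_intr_enable - Pick an interrupt mode for the HW.
 * @hw: HW module.
 *
 * Tries MSIX first, then MSI, and finally falls back to INTx,
 * recording the chosen mode in hw->intr_mode.
 */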
void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
			!csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		(hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

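/*
 * csio_intr_disable - Disable interrupts and optionally free IRQs.
 * @hw: HW module.
 * @free: If true, free the requested IRQ lines as well.
 *
 * Disables interrupts at the HW, releases the IRQ lines according to
 * the current interrupt mode, and frees the PCI IRQ vectors.
 */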
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	if (free) {
		int i;

		switch (hw->intr_mode) {
		case CSIO_IM_MSIX:
			for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS; i++) {
				free_irq(pci_irq_vector(hw->pdev, i),
					 hw->msix_entries[i].dev_id);
			}
			break;
		case CSIO_IM_MSI:
		case CSIO_IM_INTX:
			free_irq(pci_irq_vector(hw->pdev, 0), hw);
			break;
		default:
			break;
		}
	}

	pci_free_irq_vectors(hw->pdev);
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}
611