// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2018 - 2020 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "netdev.h"

/**
 * msix_initialize() - Calculate, request and configure MSIx IRQs
 * @dd: valid hfi1 devdata
 *
 */
int msix_initialize(struct hfi1_devdata *dd)
{
        u32 total;
        int ret;
        struct hfi1_msix_entry *entries;

        /*
         * MSIx interrupt count:
         *      one for the general, "slow path" interrupt
         *      one per used SDMA engine
         *      one per kernel receive context
         *      one for each netdev context
         *      ...any new IRQs should be added here.
         */
        total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_netdev_contexts;

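        /* Bound the request by the chip's MSI-X table size (CCE_NUM_MSIX_VECTORS) */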
        if (total >= CCE_NUM_MSIX_VECTORS)
                return -EINVAL;

        ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
        if (ret < 0) {
                dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
                return ret;
        }

        entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
                          GFP_KERNEL);
        if (!entries) {
                pci_free_irq_vectors(dd->pcidev);
                return -ENOMEM;
        }

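        /* Publish the table; individual vectors are handed out by msix_request_irq() */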
        dd->msix_info.msix_entries = entries;
        spin_lock_init(&dd->msix_info.msix_lock);
        bitmap_zero(dd->msix_info.in_use_msix, total);
        dd->msix_info.max_requested = total;
        dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);

        return 0;
}

/**
 * msix_request_irq() - Allocate a free MSIx IRQ
 * @dd: valid devdata
 * @arg: context information for the IRQ
 * @handler: IRQ handler
 * @thread: IRQ thread handler (could be NULL)
 * @type: affinity IRQ type
 * @name: IRQ name
 *
 * Allocates an MSIx vector if available, and then creates the appropriate
 * metadata needed to keep track of the pci IRQ request.
 *
 * Return:
 *   < 0   Error
 *   >= 0  MSIx vector
 *
 */
static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
                            irq_handler_t handler, irq_handler_t thread,
                            enum irq_type type, const char *name)
{
        unsigned long nr;
        int irq;
        int ret;
        struct hfi1_msix_entry *me;

        if (type < IRQ_SDMA || type >= IRQ_OTHER)
                return -EINVAL;

        /* Allocate an MSIx vector */
        spin_lock(&dd->msix_info.msix_lock);
        nr = find_first_zero_bit(dd->msix_info.in_use_msix,
                                 dd->msix_info.max_requested);
        if (nr < dd->msix_info.max_requested)
                __set_bit(nr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);

        if (nr == dd->msix_info.max_requested)
                return -ENOSPC;

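        /* pci_irq_vector() maps the MSI-X table index to its Linux IRQ number */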
        irq = pci_irq_vector(dd->pcidev, nr);
        ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
        if (ret) {
                dd_dev_err(dd,
                           "%s: request for IRQ %d failed, MSIx %lx, err %d\n",
                           name, irq, nr, ret);
                spin_lock(&dd->msix_info.msix_lock);
                __clear_bit(nr, dd->msix_info.in_use_msix);
                spin_unlock(&dd->msix_info.msix_lock);
                return ret;
        }

        /*
         * Assign arg only after the pci_request_irq() call succeeds, so that
         * msix_free_irq() knows this vector has an IRQ to clean up.
         */
        me = &dd->msix_info.msix_entries[nr];
        me->irq = irq;
        me->arg = arg;
        me->type = type;

        /* This is a request, so a failure is not fatal */
        ret = hfi1_get_irq_affinity(dd, me);
        if (ret)
                dd_dev_err(dd, "%s: unable to pin IRQ %d\n", name, ret);

        return nr;
}

static int msix_request_rcd_irq_common(struct hfi1_ctxtdata *rcd,
                                       irq_handler_t handler,
                                       irq_handler_t thread,
                                       const char *name)
{
        int nr = msix_request_irq(rcd->dd, rcd, handler, thread,
                                  rcd->is_vnic ? IRQ_NETDEVCTXT : IRQ_RCVCTXT,
                                  name);
        if (nr < 0)
                return nr;

        /*
         * Set the interrupt register and mask for this
         * context's interrupt.
         */
        rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
        rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
        rcd->msix_intr = nr;
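        /* Route this context's RCVAVAIL interrupt source to the allocated vector */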
        remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);

        return 0;
}

/**
 * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
 * @rcd: valid rcd context
 *
 */
int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
                 rcd->dd->unit, rcd->ctxt);

        return msix_request_rcd_irq_common(rcd, receive_context_interrupt,
                                           receive_context_thread, name);
}

/**
 * msix_netdev_request_rcd_irq() - Helper function for RCVAVAIL IRQs
 * for netdev context
 * @rcd: valid netdev context
 */
int msix_netdev_request_rcd_irq(struct hfi1_ctxtdata *rcd)
{
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d nd kctxt%d",
                 rcd->dd->unit, rcd->ctxt);
        return msix_request_rcd_irq_common(rcd, receive_context_interrupt_napi,
                                           NULL, name);
}

/**
 * msix_request_sdma_irq() - Helper for getting SDMA IRQ resources
 * @sde: valid sdma engine
 *
 */
int msix_request_sdma_irq(struct sdma_engine *sde)
{
        int nr;
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
                 sde->dd->unit, sde->this_idx);
        nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
                              IRQ_SDMA, name);
        if (nr < 0)
                return nr;
        sde->msix_intr = nr;
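        /* Route this engine's interrupt sources to the allocated vector */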
        remap_sdma_interrupts(sde->dd, sde->this_idx, nr);

        return 0;
}

/**
 * msix_request_general_irq() - Helper for getting general IRQ
 * resources
 * @dd: valid device data
 */
int msix_request_general_irq(struct hfi1_devdata *dd)
{
        int nr;
        char name[MAX_NAME_SIZE];

        snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
        nr = msix_request_irq(dd, dd, general_interrupt, NULL, IRQ_GENERAL,
                              name);
        if (nr < 0)
                return nr;

        /* general interrupt must be MSIx vector 0 */
        if (nr) {
                msix_free_irq(dd, (u8)nr);
                dd_dev_err(dd, "Invalid index %d for GENERAL IRQ\n", nr);
                return -EINVAL;
        }

        return 0;
}

/**
 * enable_sdma_srcs() - Helper to enable SDMA IRQ srcs
 * @dd: valid devdata structure
 * @i: index of SDMA engine
 */
static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
{
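        /* Unmask the engine's SDMA, progress, idle and engine-error sources */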
        set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
        set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
                      IS_SDMA_PROGRESS_START + i, true);
        set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
        set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
                      true);
}

/**
 * msix_request_irqs() - Allocate all MSIx IRQs
 * @dd: valid devdata structure
 *
 * Helper function to request the used MSIx IRQs.
 *
 */
int msix_request_irqs(struct hfi1_devdata *dd)
{
        int i;
        int ret = msix_request_general_irq(dd);

        if (ret)
                return ret;

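        /* One vector per used SDMA engine; unmask its sources once requested */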
        for (i = 0; i < dd->num_sdma; i++) {
                struct sdma_engine *sde = &dd->per_sdma[i];

                ret = msix_request_sdma_irq(sde);
                if (ret)
                        return ret;
                enable_sdma_srcs(sde->dd, i);
        }

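        /* One vector per kernel receive context */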
        for (i = 0; i < dd->n_krcv_queues; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);

                if (rcd)
                        ret = msix_request_rcd_irq(rcd);
                hfi1_rcd_put(rcd);
                if (ret)
                        return ret;
        }

        return 0;
}

/**
 * msix_free_irq() - Free the specified MSIx resources and IRQ
 * @dd: valid devdata
 * @msix_intr: MSIx vector to free.
 *
 */
void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
{
        struct hfi1_msix_entry *me;

        if (msix_intr >= dd->msix_info.max_requested)
                return;

        me = &dd->msix_info.msix_entries[msix_intr];

        if (!me->arg) /* => no irq, no affinity */
                return;

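        /* Release the IRQ's CPU affinity, then free the Linux IRQ itself */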
        hfi1_put_irq_affinity(dd, me);
        pci_free_irq(dd->pcidev, msix_intr, me->arg);

        me->arg = NULL;

        spin_lock(&dd->msix_info.msix_lock);
        __clear_bit(msix_intr, dd->msix_info.in_use_msix);
        spin_unlock(&dd->msix_info.msix_lock);
}

/**
 * msix_clean_up_interrupts() - Free all MSIx IRQ resources
 * @dd: valid device data structure
 *
 * Free the MSIx and associated PCI resources, if they have been allocated.
 */
void msix_clean_up_interrupts(struct hfi1_devdata *dd)
{
        int i;
        struct hfi1_msix_entry *me = dd->msix_info.msix_entries;

        /* remove irqs - must happen before disabling/turning off */
        for (i = 0; i < dd->msix_info.max_requested; i++, me++)
                msix_free_irq(dd, i);

        /* clean structures */
        kfree(dd->msix_info.msix_entries);
        dd->msix_info.msix_entries = NULL;
        dd->msix_info.max_requested = 0;

        pci_free_irq_vectors(dd->pcidev);
}

/**
 * msix_netdev_synchronize_irq() - netdev IRQ synchronize
 * @dd: valid devdata
 */
void msix_netdev_synchronize_irq(struct hfi1_devdata *dd)
{
        int i;
        int ctxt_count = hfi1_netdev_ctxt_count(dd);

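        /* Wait for any in-flight handler on each netdev context's vector */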
        for (i = 0; i < ctxt_count; i++) {
                struct hfi1_ctxtdata *rcd = hfi1_netdev_get_ctxt(dd, i);
                struct hfi1_msix_entry *me;

                me = &dd->msix_info.msix_entries[rcd->msix_intr];

                synchronize_irq(me->irq);
        }
}