/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*	copyright notice, this list of conditions and the following
*	disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*	copyright notice, this list of conditions and the following
*	disclaimer in the documentation and/or other materials
*	provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <net/addrconf.h>

#include "i40iw.h"
#include "i40iw_register.h"
#include <net/netevent.h>
#define CLIENT_IW_INTERFACE_VERSION_MAJOR 0
#define CLIENT_IW_INTERFACE_VERSION_MINOR 01
#define CLIENT_IW_INTERFACE_VERSION_BUILD 00

#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 5
#define DRV_VERSION_BUILD 123
#define DRV_VERSION	__stringify(DRV_VERSION_MAJOR) "."		\
	__stringify(DRV_VERSION_MINOR) "." __stringify(DRV_VERSION_BUILD)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug flags: 0=disabled (default), 0x7fffffff=all");

static int resource_profile;
module_param(resource_profile, int, 0644);
MODULE_PARM_DESC(resource_profile,
		 "Resource Profile: 0=no VF RDMA support (default), 1=Weighted VF, 2=Even Distribution");

static int max_rdma_vfs = 32;
module_param(max_rdma_vfs, int, 0644);
MODULE_PARM_DESC(max_rdma_vfs, "Maximum VF count: 0-32 32=default");
static int mpa_version = 2;
module_param(mpa_version, int, 0644);
MODULE_PARM_DESC(mpa_version, "MPA version to be used in MPA Req/Resp 1 or 2");

MODULE_AUTHOR("Intel Corporation, <e1000-rdma@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection X722 iWARP RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");

static struct i40e_client i40iw_client;
static char i40iw_client_name[I40E_CLIENT_STR_LENGTH] = "i40iw";

static LIST_HEAD(i40iw_handlers);
static spinlock_t i40iw_handler_lock;

static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id, u8 *msg, u16 len);

static struct notifier_block i40iw_inetaddr_notifier = {
	.notifier_call = i40iw_inetaddr_event
};

static struct notifier_block i40iw_inetaddr6_notifier = {
	.notifier_call = i40iw_inet6addr_event
};

static struct notifier_block i40iw_net_notifier = {
	.notifier_call = i40iw_net_event
};

static struct notifier_block i40iw_netdevice_notifier = {
	.notifier_call = i40iw_netdevice_event
};

/**
 * i40iw_find_i40e_handler - find a handler given a client info
 * @ldev: pointer to a client info
 */
static struct i40iw_handler *i40iw_find_i40e_handler(struct i40e_info *ldev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == ldev->netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_find_netdev - find a handler given a netdev
 * @netdev: pointer to net_device
 */
struct i40iw_handler *i40iw_find_netdev(struct net_device *netdev)
{
	struct i40iw_handler *hdl;
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_for_each_entry(hdl, &i40iw_handlers, list) {
		if (hdl->ldev.netdev == netdev) {
			spin_unlock_irqrestore(&i40iw_handler_lock, flags);
			return hdl;
		}
	}
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return NULL;
}

/**
 * i40iw_add_handler - add a handler to the list
 * @hdl: handler to be added to the handler list
 */
static void i40iw_add_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_add(&hdl->list, &i40iw_handlers);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
}

/**
 * i40iw_del_handler - delete a handler from the list
 * @hdl: handler to be deleted from the handler list
 */
static int i40iw_del_handler(struct i40iw_handler *hdl)
{
	unsigned long flags;

	spin_lock_irqsave(&i40iw_handler_lock, flags);
	list_del(&hdl->list);
	spin_unlock_irqrestore(&i40iw_handler_lock, flags);
	return 0;
}

/**
 * i40iw_enable_intr - set up device interrupts
 * @dev: hardware control device structure
 * @msix_id: id of the interrupt to be enabled
 */
static void i40iw_enable_intr(struct i40iw_sc_dev *dev, u32 msix_id)
{
	u32 val;

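	/* enable the vector, clear its PBA and select the no-ITR index (3) */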
	val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
	      I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
	      (3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_id - 1), val);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_id - 1), val);
}

/**
 * i40iw_dpc - tasklet for aeq and ceq 0
 * @t: pointer to the tasklet_struct of the iwarp device
 */
static void i40iw_dpc(struct tasklet_struct *t)
{
	struct i40iw_device *iwdev = from_tasklet(iwdev, t, dpc_tasklet);

	if (iwdev->msix_shared)
		i40iw_process_ceq(iwdev, iwdev->ceqlist);
	i40iw_process_aeq(iwdev);
	i40iw_enable_intr(&iwdev->sc_dev, iwdev->iw_msixtbl[0].idx);
}

/**
 * i40iw_ceq_dpc - dpc handler for CEQ
 * @t: pointer to the tasklet_struct of the CEQ
 */
static void i40iw_ceq_dpc(struct tasklet_struct *t)
{
	struct i40iw_ceq *iwceq = from_tasklet(iwceq, t, dpc_tasklet);
	struct i40iw_device *iwdev = iwceq->iwdev;

	i40iw_process_ceq(iwdev, iwceq);
	i40iw_enable_intr(&iwdev->sc_dev, iwceq->msix_idx);
}

/**
 * i40iw_irq_handler - interrupt handler for aeq and ceq0
 * @irq: Interrupt request number
 * @data: iwarp device
 */
static irqreturn_t i40iw_irq_handler(int irq, void *data)
{
	struct i40iw_device *iwdev = (struct i40iw_device *)data;

	tasklet_schedule(&iwdev->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_destroy_cqp - destroy control qp
 * @iwdev: iwarp device
 * @free_hwcqp: true if the hw cqp should be destroyed
 *
 * Issue destroy cqp request and
 * free the resources associated with the cqp
 */
static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp *cqp = &iwdev->cqp;

	if (free_hwcqp)
		dev->cqp_ops->cqp_destroy(dev->cqp);

	i40iw_cleanup_pending_cqp_op(iwdev);

	i40iw_free_dma_mem(dev->hw, &cqp->sq);
	kfree(cqp->scratch_array);
	iwdev->cqp.scratch_array = NULL;

	kfree(cqp->cqp_requests);
	cqp->cqp_requests = NULL;
}

/**
 * i40iw_disable_irq - disable device interrupts
 * @dev: hardware control device structure
 * @msix_vec: msix vector to disable irq
 * @dev_id: parameter to pass to free_irq (used during irq setup)
 *
 * The function is called when destroying aeq/ceq
 */
static void i40iw_disable_irq(struct i40iw_sc_dev *dev,
			      struct i40iw_msix_vector *msix_vec,
			      void *dev_id)
{
	if (dev->is_pf)
		i40iw_wr32(dev->hw, I40E_PFINT_DYN_CTLN(msix_vec->idx - 1), 0);
	else
		i40iw_wr32(dev->hw, I40E_VFINT_DYN_CTLN1(msix_vec->idx - 1), 0);
	irq_set_affinity_hint(msix_vec->irq, NULL);
	free_irq(msix_vec->irq, dev_id);
}

/**
 * i40iw_destroy_aeq - destroy aeq
 * @iwdev: iwarp device
 *
 * Issue a destroy aeq request and
 * free the resources associated with the aeq
 * The function is called during driver unload
 */
static void i40iw_destroy_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status = I40IW_ERR_NOT_READY;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;

	if (!iwdev->msix_shared)
		i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev);
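	/* skip the destroy command when the hardware is being reset */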
	if (iwdev->reset)
		goto exit;

	if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1))
		status = dev->aeq_ops->aeq_destroy_done(&aeq->sc_aeq);
	if (status)
		i40iw_pr_err("destroy aeq failed %d\n", status);

exit:
	i40iw_free_dma_mem(dev->hw, &aeq->mem);
}

/**
 * i40iw_destroy_ceq - destroy ceq
 * @iwdev: iwarp device
 * @iwceq: ceq to be destroyed
 *
 * Issue a destroy ceq request and
 * free the resources associated with the ceq
 */
static void i40iw_destroy_ceq(struct i40iw_device *iwdev,
			      struct i40iw_ceq *iwceq)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	if (iwdev->reset)
		goto exit;

	status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1);
	if (status) {
		i40iw_pr_err("ceq destroy command failed %d\n", status);
		goto exit;
	}

	status = dev->ceq_ops->cceq_destroy_done(&iwceq->sc_ceq);
	if (status)
		i40iw_pr_err("ceq destroy completion failed %d\n", status);
exit:
	i40iw_free_dma_mem(dev->hw, &iwceq->mem);
}

/**
 * i40iw_dele_ceqs - destroy all ceq's
 * @iwdev: iwarp device
 *
 * Go through all of the device ceq's and for each ceq
 * disable the ceq interrupt and destroy the ceq
 */
static void i40iw_dele_ceqs(struct i40iw_device *iwdev)
{
	u32 i = 0;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ceq *iwceq = iwdev->ceqlist;
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;

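	/*
	 * When the MSI-X vector is shared with the AEQ, ceq 0 was
	 * registered with the iwarp device as its dev_id, so free
	 * its irq separately before walking the per-ceq vectors.
	 */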
	if (iwdev->msix_shared) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwdev);
		i40iw_destroy_ceq(iwdev, iwceq);
		iwceq++;
		i++;
	}

	for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) {
		i40iw_disable_irq(dev, msix_vec, (void *)iwceq);
		i40iw_destroy_ceq(iwdev, iwceq);
	}

	iwdev->sc_dev.ceq_valid = false;
}

/**
 * i40iw_destroy_ccq - destroy control cq
 * @iwdev: iwarp device
 *
 * Issue destroy ccq request and
 * free the resources associated with the ccq
 */
static void i40iw_destroy_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_ccq *ccq = &iwdev->ccq;
	enum i40iw_status_code status = 0;

	if (!iwdev->reset)
		status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true);
	if (status)
		i40iw_pr_err("ccq destroy failed %d\n", status);
	i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
}

/* types of hmc objects */
static enum i40iw_hmc_rsrc_type iw_hmc_obj_types[] = {
	I40IW_HMC_IW_QP,
	I40IW_HMC_IW_CQ,
	I40IW_HMC_IW_HTE,
	I40IW_HMC_IW_ARP,
	I40IW_HMC_IW_APBVT_ENTRY,
	I40IW_HMC_IW_MR,
	I40IW_HMC_IW_XF,
	I40IW_HMC_IW_XFFL,
	I40IW_HMC_IW_Q1,
	I40IW_HMC_IW_Q1FL,
	I40IW_HMC_IW_TIMER,
};

/**
 * i40iw_close_hmc_objects_type - delete hmc objects of a given type
 * @dev: hardware control device structure
 * @obj_type: the hmc object type to be deleted
 * @hmc_info: hmc info for the device
 * @is_pf: true if the function is PF otherwise false
 * @reset: true if called before reset
 */
static void i40iw_close_hmc_objects_type(struct i40iw_sc_dev *dev,
					 enum i40iw_hmc_rsrc_type obj_type,
					 struct i40iw_hmc_info *hmc_info,
					 bool is_pf,
					 bool reset)
{
	struct i40iw_hmc_del_obj_info info;

	memset(&info, 0, sizeof(info));
	info.hmc_info = hmc_info;
	info.rsrc_type = obj_type;
	info.count = hmc_info->hmc_obj[obj_type].cnt;
	info.is_pf = is_pf;
	if (dev->hmc_ops->del_hmc_object(dev, &info, reset))
		i40iw_pr_err("del obj of type %d failed\n", obj_type);
}

/**
 * i40iw_del_hmc_objects - remove all device hmc objects
 * @dev: iwarp device
 * @hmc_info: hmc_info to free
 * @is_pf: true if hmc_info belongs to PF, not vf nor allocated
 *	   by PF on behalf of VF
 * @reset: true if called before reset
 */
static void i40iw_del_hmc_objects(struct i40iw_sc_dev *dev,
				  struct i40iw_hmc_info *hmc_info,
				  bool is_pf,
				  bool reset)
{
	unsigned int i;

	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++)
		i40iw_close_hmc_objects_type(dev, iw_hmc_obj_types[i], hmc_info, is_pf, reset);
}

/**
 * i40iw_ceq_handler - interrupt handler for ceq
 * @irq: interrupt request number
 * @data: ceq pointer
 */
static irqreturn_t i40iw_ceq_handler(int irq, void *data)
{
	struct i40iw_ceq *iwceq = (struct i40iw_ceq *)data;

	if (iwceq->irq != irq)
		i40iw_pr_err("expected irq = %d received irq = %d\n", iwceq->irq, irq);
	tasklet_schedule(&iwceq->dpc_tasklet);
	return IRQ_HANDLED;
}

/**
 * i40iw_create_hmc_obj_type - create hmc object of a given type
 * @dev: hardware control device structure
 * @info: information for the hmc object to create
 */
static enum i40iw_status_code i40iw_create_hmc_obj_type(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_create_obj_info *info)
{
	return dev->hmc_ops->create_hmc_object(dev, info);
}

/**
 * i40iw_create_hmc_objs - create all hmc objects for the device
 * @iwdev: iwarp device
 * @is_pf: true if the function is PF otherwise false
 *
 * Create the device hmc objects and allocate hmc pages
 * Return 0 if successful, otherwise clean up and return error
 */
static enum i40iw_status_code i40iw_create_hmc_objs(struct i40iw_device *iwdev,
						    bool is_pf)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_hmc_create_obj_info info;
	enum i40iw_status_code status;
	int i;

	memset(&info, 0, sizeof(info));
	info.hmc_info = dev->hmc_info;
	info.is_pf = is_pf;
	info.entry_type = iwdev->sd_type;
	for (i = 0; i < IW_HMC_OBJ_TYPE_NUM; i++) {
		info.rsrc_type = iw_hmc_obj_types[i];
		info.count = dev->hmc_info->hmc_obj[info.rsrc_type].cnt;
		info.add_sd_cnt = 0;
		status = i40iw_create_hmc_obj_type(dev, &info);
		if (status) {
			i40iw_pr_err("create obj type %d status = %d\n",
				     iw_hmc_obj_types[i], status);
			break;
		}
	}
	if (!status)
		return (dev->cqp_misc_ops->static_hmc_pages_allocated(dev->cqp, 0,
								      dev->hmc_fn_id,
								      true, true));

	while (i) {
		i--;
		/* destroy the hmc objects of a given type */
		i40iw_close_hmc_objects_type(dev,
					     iw_hmc_obj_types[i],
					     dev->hmc_info,
					     is_pf,
					     false);
	}
	return status;
}

/**
 * i40iw_obj_aligned_mem - get aligned memory from device allocated memory
 * @iwdev: iwarp device
 * @memptr: points to the memory addresses
 * @size: size of memory needed
 * @mask: mask for the aligned memory
 *
 * Get aligned memory of the requested size and
 * update the memptr to point to the new aligned memory
 * Return 0 if successful, otherwise return no memory error
 */
enum i40iw_status_code i40iw_obj_aligned_mem(struct i40iw_device *iwdev,
					     struct i40iw_dma_mem *memptr,
					     u32 size,
					     u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

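	/*
	 * mask is (alignment - 1): round the next free virtual address
	 * up to the alignment boundary and advance the physical address
	 * by the same padding.
	 */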
	va = (unsigned long)iwdev->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (mask + 1));
	extra = newva - va;
	memptr->va = (u8 *)va + extra;
	memptr->pa = iwdev->obj_next.pa + extra;
	memptr->size = size;
	if ((memptr->va + size) > (iwdev->obj_mem.va + iwdev->obj_mem.size))
		return I40IW_ERR_NO_MEMORY;

	iwdev->obj_next.va = memptr->va + size;
	iwdev->obj_next.pa = memptr->pa + size;
	return 0;
}

/**
 * i40iw_create_cqp - create control qp
 * @iwdev: iwarp device
 *
 * Return 0, if the cqp and all the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_cqp(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	u32 sqsize = I40IW_CQP_SW_SQSIZE_2048;
	struct i40iw_dma_mem mem;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_cqp_init_info cqp_init_info;
	struct i40iw_cqp *cqp = &iwdev->cqp;
	u16 maj_err, min_err;
	int i;

	cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
	if (!cqp->cqp_requests)
		return I40IW_ERR_NO_MEMORY;
	cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
	if (!cqp->scratch_array) {
		kfree(cqp->cqp_requests);
		return I40IW_ERR_NO_MEMORY;
	}
	dev->cqp = &cqp->sc_cqp;
	dev->cqp->dev = dev;
	memset(&cqp_init_info, 0, sizeof(cqp_init_info));
	status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
					(sizeof(struct i40iw_cqp_sq_wqe) * sqsize),
					I40IW_CQP_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, sizeof(struct i40iw_cqp_ctx),
				       I40IW_HOST_CTX_ALIGNMENT_MASK);
	if (status)
		goto exit;
	dev->cqp->host_ctx_pa = mem.pa;
	dev->cqp->host_ctx = mem.va;
	/* populate the cqp init info */
	cqp_init_info.dev = dev;
	cqp_init_info.sq_size = sqsize;
	cqp_init_info.sq = cqp->sq.va;
	cqp_init_info.sq_pa = cqp->sq.pa;
	cqp_init_info.host_ctx_pa = mem.pa;
	cqp_init_info.host_ctx = mem.va;
	cqp_init_info.hmc_profile = iwdev->resource_profile;
	cqp_init_info.enabled_vf_count = iwdev->max_rdma_vfs;
	cqp_init_info.scratch_array = cqp->scratch_array;
	status = dev->cqp_ops->cqp_init(dev->cqp, &cqp_init_info);
	if (status) {
		i40iw_pr_err("cqp init status %d\n", status);
		goto exit;
	}
	status = dev->cqp_ops->cqp_create(dev->cqp, &maj_err, &min_err);
	if (status) {
		i40iw_pr_err("cqp create status %d maj_err %d min_err %d\n",
			     status, maj_err, min_err);
		goto exit;
	}
	spin_lock_init(&cqp->req_lock);
	INIT_LIST_HEAD(&cqp->cqp_avail_reqs);
	INIT_LIST_HEAD(&cqp->cqp_pending_reqs);
	/* init the waitq of the cqp_requests and add them to the list */
	for (i = 0; i < sqsize; i++) {
		init_waitqueue_head(&cqp->cqp_requests[i].waitq);
		list_add_tail(&cqp->cqp_requests[i].list, &cqp->cqp_avail_reqs);
	}
	return 0;
exit:
	/* clean up the created resources */
	i40iw_destroy_cqp(iwdev, false);
	return status;
}

/**
 * i40iw_create_ccq - create control cq
 * @iwdev: iwarp device
 *
 * Return 0, if the ccq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ccq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_dma_mem mem;
	enum i40iw_status_code status;
	struct i40iw_ccq_init_info info;
	struct i40iw_ccq *ccq = &iwdev->ccq;

	memset(&info, 0, sizeof(info));
	dev->ccq = &ccq->sc_cq;
	dev->ccq->dev = dev;
	info.dev = dev;
	ccq->shadow_area.size = sizeof(struct i40iw_cq_shadow_area);
	ccq->mem_cq.size = sizeof(struct i40iw_cqe) * IW_CCQ_SIZE;
	status = i40iw_allocate_dma_mem(dev->hw, &ccq->mem_cq,
					ccq->mem_cq.size, I40IW_CQ0_ALIGNMENT);
	if (status)
		goto exit;
	status = i40iw_obj_aligned_mem(iwdev, &mem, ccq->shadow_area.size,
				       I40IW_SHADOWAREA_MASK);
	if (status)
		goto exit;
	ccq->sc_cq.back_cq = (void *)ccq;
	/* populate the ccq init info */
	info.cq_base = ccq->mem_cq.va;
	info.cq_pa = ccq->mem_cq.pa;
	info.num_elem = IW_CCQ_SIZE;
	info.shadow_area = mem.va;
	info.shadow_area_pa = mem.pa;
	info.ceqe_mask = false;
	info.ceq_id_valid = true;
	info.shadow_read_threshold = 16;
	status = dev->ccq_ops->ccq_init(dev->ccq, &info);
	if (!status)
		status = dev->ccq_ops->ccq_create(dev->ccq, 0, true, true);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &ccq->mem_cq);
	return status;
}

/**
 * i40iw_configure_ceq_vector - set up the msix interrupt vector for ceq
 * @iwdev: iwarp device
 * @iwceq: ceq associated with the vector
 * @ceq_id: the id number of the iwceq
 * @msix_vec: interrupt vector information
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_ceq_vector(struct i40iw_device *iwdev,
							 struct i40iw_ceq *iwceq,
							 u32 ceq_id,
							 struct i40iw_msix_vector *msix_vec)
{
	enum i40iw_status_code status;

	if (iwdev->msix_shared && !ceq_id) {
		tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
		status = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "AEQCEQ", iwdev);
	} else {
		tasklet_setup(&iwceq->dpc_tasklet, i40iw_ceq_dpc);
		status = request_irq(msix_vec->irq, i40iw_ceq_handler, 0, "CEQ", iwceq);
	}

	cpumask_clear(&msix_vec->mask);
	cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
	irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);

	if (status) {
		i40iw_pr_err("ceq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}
	msix_vec->ceq_id = ceq_id;

	return 0;
}

/**
 * i40iw_create_ceq - create completion event queue
 * @iwdev: iwarp device
 * @iwceq: pointer to the ceq resources to be created
 * @ceq_id: the id number of the iwceq
 *
 * Return 0, if the ceq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_ceq(struct i40iw_device *iwdev,
					       struct i40iw_ceq *iwceq,
					       u32 ceq_id)
{
	enum i40iw_status_code status;
	struct i40iw_ceq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	u64 scratch;

	memset(&info, 0, sizeof(info));
	info.ceq_id = ceq_id;
	iwceq->iwdev = iwdev;
	iwceq->mem.size = sizeof(struct i40iw_ceqe) *
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	status = i40iw_allocate_dma_mem(dev->hw, &iwceq->mem, iwceq->mem.size,
					I40IW_CEQ_ALIGNMENT);
	if (status)
		goto exit;
	info.ceq_id = ceq_id;
	info.ceqe_base = iwceq->mem.va;
	info.ceqe_pa = iwceq->mem.pa;

	info.elem_cnt = iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	iwceq->sc_ceq.ceq_id = ceq_id;
	info.dev = dev;
	scratch = (uintptr_t)&iwdev->cqp.sc_cqp;
	status = dev->ceq_ops->ceq_init(&iwceq->sc_ceq, &info);
	if (!status)
		status = dev->ceq_ops->cceq_create(&iwceq->sc_ceq, scratch);

exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &iwceq->mem);
	return status;
}

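/**
 * i40iw_request_reset - request a reset from the lan driver
 * @iwdev: iwarp device
 */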
void i40iw_request_reset(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;

	ldev->ops->request_reset(ldev, iwdev->client, 1);
}

/**
 * i40iw_setup_ceqs - manage the device ceq's and their interrupt resources
 * @iwdev: iwarp device
 * @ldev: i40e lan device
 *
 * Allocate a list for all device completion event queues
 * Create the ceq's and configure their msix interrupt vectors
 * Return 0, if at least one ceq is successfully set up, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev,
					       struct i40e_info *ldev)
{
	u32 i;
	u32 ceq_id;
	struct i40iw_ceq *iwceq;
	struct i40iw_msix_vector *msix_vec;
	enum i40iw_status_code status = 0;
	u32 num_ceqs;

	if (ldev && ldev->ops && ldev->ops->setup_qvlist) {
		status = ldev->ops->setup_qvlist(ldev, &i40iw_client,
						 iwdev->iw_qvlist);
		if (status)
			goto exit;
	} else {
		status = I40IW_ERR_BAD_PTR;
		goto exit;
	}

	num_ceqs = min(iwdev->msix_count, iwdev->sc_dev.hmc_fpm_misc.max_ceqs);
	iwdev->ceqlist = kcalloc(num_ceqs, sizeof(*iwdev->ceqlist), GFP_KERNEL);
	if (!iwdev->ceqlist) {
		status = I40IW_ERR_NO_MEMORY;
		goto exit;
	}
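	/* msix vector 0 is reserved for the aeq unless it is shared with ceq 0 */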
	i = (iwdev->msix_shared) ? 0 : 1;
	for (ceq_id = 0; i < num_ceqs; i++, ceq_id++) {
		iwceq = &iwdev->ceqlist[ceq_id];
		status = i40iw_create_ceq(iwdev, iwceq, ceq_id);
		if (status) {
			i40iw_pr_err("create ceq status = %d\n", status);
			break;
		}

		msix_vec = &iwdev->iw_msixtbl[i];
		iwceq->irq = msix_vec->irq;
		iwceq->msix_idx = msix_vec->idx;
		status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec);
		if (status) {
			i40iw_destroy_ceq(iwdev, iwceq);
			break;
		}
		i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx);
		iwdev->ceqs_count++;
	}
exit:
	if (status && !iwdev->ceqs_count) {
		kfree(iwdev->ceqlist);
		iwdev->ceqlist = NULL;
		return status;
	} else {
		iwdev->sc_dev.ceq_valid = true;
		return 0;
	}
}

/**
 * i40iw_configure_aeq_vector - set up the msix vector for aeq
 * @iwdev: iwarp device
 *
 * Allocate interrupt resources and enable irq handling
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_configure_aeq_vector(struct i40iw_device *iwdev)
{
	struct i40iw_msix_vector *msix_vec = iwdev->iw_msixtbl;
	u32 ret = 0;

	if (!iwdev->msix_shared) {
		tasklet_setup(&iwdev->dpc_tasklet, i40iw_dpc);
		ret = request_irq(msix_vec->irq, i40iw_irq_handler, 0, "i40iw", iwdev);
	}
	if (ret) {
		i40iw_pr_err("aeq irq config fail\n");
		return I40IW_ERR_CONFIG;
	}

	return 0;
}

/**
 * i40iw_create_aeq - create async event queue
 * @iwdev: iwarp device
 *
 * Return 0, if the aeq and the resources associated with it
 * are successfully created, otherwise return error
 */
static enum i40iw_status_code i40iw_create_aeq(struct i40iw_device *iwdev)
{
	enum i40iw_status_code status;
	struct i40iw_aeq_init_info info;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_aeq *aeq = &iwdev->aeq;
	u64 scratch = 0;
	u32 aeq_size;

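	/* size the aeq for two entries per QP plus one per CQ */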
	aeq_size = 2 * iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_QP].cnt +
		iwdev->sc_dev.hmc_info->hmc_obj[I40IW_HMC_IW_CQ].cnt;
	memset(&info, 0, sizeof(info));
	aeq->mem.size = sizeof(struct i40iw_sc_aeqe) * aeq_size;
	status = i40iw_allocate_dma_mem(dev->hw, &aeq->mem, aeq->mem.size,
					I40IW_AEQ_ALIGNMENT);
	if (status)
		goto exit;

	info.aeqe_base = aeq->mem.va;
	info.aeq_elem_pa = aeq->mem.pa;
	info.elem_cnt = aeq_size;
	info.dev = dev;
	status = dev->aeq_ops->aeq_init(&aeq->sc_aeq, &info);
	if (status)
		goto exit;
	status = dev->aeq_ops->aeq_create(&aeq->sc_aeq, scratch, 1);
	if (!status)
		status = dev->aeq_ops->aeq_create_done(&aeq->sc_aeq);
exit:
	if (status)
		i40iw_free_dma_mem(dev->hw, &aeq->mem);
	return status;
}

/**
 * i40iw_setup_aeq - set up the device aeq
 * @iwdev: iwarp device
 *
 * Create the aeq and configure its msix interrupt vector
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev)
{
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	status = i40iw_create_aeq(iwdev);
	if (status)
		return status;

	status = i40iw_configure_aeq_vector(iwdev);
	if (status) {
		i40iw_destroy_aeq(iwdev);
		return status;
	}

	if (!iwdev->msix_shared)
		i40iw_enable_intr(dev, iwdev->iw_msixtbl[0].idx);
	return 0;
}

/**
 * i40iw_initialize_ilq - create iwarp local queue for cm
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ilq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_ILQ;
	info.cq_id = 1;
	info.qp_id = 0;
	info.count = 1;
	info.pd_id = 1;
	info.sq_size = 8192;
	info.rq_size = 8192;
	info.buf_size = 1024;
	info.tx_buf_cnt = 16384;
	info.receive = i40iw_receive_ilq;
	info.xmit_complete = i40iw_free_sqbuf;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ilq create fail\n");
	return status;
}

/**
 * i40iw_initialize_ieq - create iwarp exception queue
 * @iwdev: iwarp device
 *
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_initialize_ieq(struct i40iw_device *iwdev)
{
	struct i40iw_puda_rsrc_info info;
	enum i40iw_status_code status;

	memset(&info, 0, sizeof(info));
	info.type = I40IW_PUDA_RSRC_TYPE_IEQ;
	info.cq_id = 2;
	info.qp_id = iwdev->vsi.exception_lan_queue;
	info.count = 1;
	info.pd_id = 2;
	info.sq_size = 8192;
	info.rq_size = 8192;
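	/* ieq buffers must hold a full mtu frame plus the vlan header */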
	info.buf_size = iwdev->vsi.mtu + VLAN_ETH_HLEN;
	info.tx_buf_cnt = 4096;
	status = i40iw_puda_create_rsrc(&iwdev->vsi, &info);
	if (status)
		i40iw_pr_err("ieq create fail\n");
	return status;
}

/**
 * i40iw_reinitialize_ieq - destroy and re-create ieq
 * @dev: hardware control device structure
 */
void i40iw_reinitialize_ieq(struct i40iw_sc_dev *dev)
980*4882a593Smuzhiyun {
981*4882a593Smuzhiyun struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
982*4882a593Smuzhiyun
983*4882a593Smuzhiyun i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, false);
984*4882a593Smuzhiyun if (i40iw_initialize_ieq(iwdev)) {
985*4882a593Smuzhiyun iwdev->reset = true;
986*4882a593Smuzhiyun i40iw_request_reset(iwdev);
987*4882a593Smuzhiyun }
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun /**
991*4882a593Smuzhiyun * i40iw_hmc_setup - create hmc objects for the device
992*4882a593Smuzhiyun * @iwdev: iwarp device
993*4882a593Smuzhiyun *
994*4882a593Smuzhiyun * Set up the device private memory space for the number and size of
995*4882a593Smuzhiyun * the hmc objects and create the objects
996*4882a593Smuzhiyun * Return 0 if successful, otherwise return error
997*4882a593Smuzhiyun */
i40iw_hmc_setup(struct i40iw_device * iwdev)998*4882a593Smuzhiyun static enum i40iw_status_code i40iw_hmc_setup(struct i40iw_device *iwdev)
999*4882a593Smuzhiyun {
1000*4882a593Smuzhiyun enum i40iw_status_code status;
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun iwdev->sd_type = I40IW_SD_TYPE_DIRECT;
1003*4882a593Smuzhiyun status = i40iw_config_fpm_values(&iwdev->sc_dev, IW_CFG_FPM_QP_COUNT);
1004*4882a593Smuzhiyun if (status)
1005*4882a593Smuzhiyun goto exit;
1006*4882a593Smuzhiyun status = i40iw_create_hmc_objs(iwdev, true);
1007*4882a593Smuzhiyun if (status)
1008*4882a593Smuzhiyun goto exit;
1009*4882a593Smuzhiyun iwdev->init_state = HMC_OBJS_CREATED;
1010*4882a593Smuzhiyun exit:
1011*4882a593Smuzhiyun return status;
1012*4882a593Smuzhiyun }
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun /**
1015*4882a593Smuzhiyun * i40iw_del_init_mem - deallocate memory resources
1016*4882a593Smuzhiyun * @iwdev: iwarp device
1017*4882a593Smuzhiyun */
i40iw_del_init_mem(struct i40iw_device * iwdev)1018*4882a593Smuzhiyun static void i40iw_del_init_mem(struct i40iw_device *iwdev)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun struct i40iw_sc_dev *dev = &iwdev->sc_dev;
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun i40iw_free_dma_mem(&iwdev->hw, &iwdev->obj_mem);
1023*4882a593Smuzhiyun kfree(dev->hmc_info->sd_table.sd_entry);
1024*4882a593Smuzhiyun dev->hmc_info->sd_table.sd_entry = NULL;
1025*4882a593Smuzhiyun kfree(iwdev->mem_resources);
1026*4882a593Smuzhiyun iwdev->mem_resources = NULL;
1027*4882a593Smuzhiyun kfree(iwdev->ceqlist);
1028*4882a593Smuzhiyun iwdev->ceqlist = NULL;
1029*4882a593Smuzhiyun kfree(iwdev->iw_msixtbl);
1030*4882a593Smuzhiyun iwdev->iw_msixtbl = NULL;
1031*4882a593Smuzhiyun kfree(iwdev->hmc_info_mem);
1032*4882a593Smuzhiyun iwdev->hmc_info_mem = NULL;
1033*4882a593Smuzhiyun }
1034*4882a593Smuzhiyun
1035*4882a593Smuzhiyun /**
1036*4882a593Smuzhiyun * i40iw_del_macip_entry - remove a mac ip address entry from the hw table
1037*4882a593Smuzhiyun * @iwdev: iwarp device
1038*4882a593Smuzhiyun * @idx: the index of the mac ip address to delete
1039*4882a593Smuzhiyun */
i40iw_del_macip_entry(struct i40iw_device * iwdev,u8 idx)1040*4882a593Smuzhiyun static void i40iw_del_macip_entry(struct i40iw_device *iwdev, u8 idx)
1041*4882a593Smuzhiyun {
1042*4882a593Smuzhiyun struct i40iw_cqp *iwcqp = &iwdev->cqp;
1043*4882a593Smuzhiyun struct i40iw_cqp_request *cqp_request;
1044*4882a593Smuzhiyun struct cqp_commands_info *cqp_info;
1045*4882a593Smuzhiyun enum i40iw_status_code status = 0;
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun cqp_request = i40iw_get_cqp_request(iwcqp, true);
1048*4882a593Smuzhiyun if (!cqp_request) {
1049*4882a593Smuzhiyun i40iw_pr_err("cqp_request memory failed\n");
1050*4882a593Smuzhiyun return;
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun cqp_info = &cqp_request->info;
1053*4882a593Smuzhiyun cqp_info->cqp_cmd = OP_DELETE_LOCAL_MAC_IPADDR_ENTRY;
1054*4882a593Smuzhiyun cqp_info->post_sq = 1;
1055*4882a593Smuzhiyun cqp_info->in.u.del_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
1056*4882a593Smuzhiyun cqp_info->in.u.del_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1057*4882a593Smuzhiyun cqp_info->in.u.del_local_mac_ipaddr_entry.entry_idx = idx;
1058*4882a593Smuzhiyun cqp_info->in.u.del_local_mac_ipaddr_entry.ignore_ref_count = 0;
1059*4882a593Smuzhiyun status = i40iw_handle_cqp_op(iwdev, cqp_request);
1060*4882a593Smuzhiyun if (status)
1061*4882a593Smuzhiyun i40iw_pr_err("CQP-OP Del MAC Ip entry fail");
1062*4882a593Smuzhiyun }
1063*4882a593Smuzhiyun
1064*4882a593Smuzhiyun /**
1065*4882a593Smuzhiyun * i40iw_add_mac_ipaddr_entry - add a mac ip address entry to the hw table
1066*4882a593Smuzhiyun * @iwdev: iwarp device
1067*4882a593Smuzhiyun * @mac_addr: pointer to mac address
1068*4882a593Smuzhiyun * @idx: the index of the mac ip address to add
1069*4882a593Smuzhiyun */
i40iw_add_mac_ipaddr_entry(struct i40iw_device * iwdev,u8 * mac_addr,u8 idx)1070*4882a593Smuzhiyun static enum i40iw_status_code i40iw_add_mac_ipaddr_entry(struct i40iw_device *iwdev,
1071*4882a593Smuzhiyun u8 *mac_addr,
1072*4882a593Smuzhiyun u8 idx)
1073*4882a593Smuzhiyun {
1074*4882a593Smuzhiyun struct i40iw_local_mac_ipaddr_entry_info *info;
1075*4882a593Smuzhiyun struct i40iw_cqp *iwcqp = &iwdev->cqp;
1076*4882a593Smuzhiyun struct i40iw_cqp_request *cqp_request;
1077*4882a593Smuzhiyun struct cqp_commands_info *cqp_info;
1078*4882a593Smuzhiyun enum i40iw_status_code status = 0;
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun cqp_request = i40iw_get_cqp_request(iwcqp, true);
1081*4882a593Smuzhiyun if (!cqp_request) {
1082*4882a593Smuzhiyun i40iw_pr_err("cqp_request memory failed\n");
1083*4882a593Smuzhiyun return I40IW_ERR_NO_MEMORY;
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun cqp_info = &cqp_request->info;
1087*4882a593Smuzhiyun
1088*4882a593Smuzhiyun cqp_info->post_sq = 1;
1089*4882a593Smuzhiyun info = &cqp_info->in.u.add_local_mac_ipaddr_entry.info;
1090*4882a593Smuzhiyun ether_addr_copy(info->mac_addr, mac_addr);
1091*4882a593Smuzhiyun info->entry_idx = idx;
1092*4882a593Smuzhiyun cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1093*4882a593Smuzhiyun cqp_info->cqp_cmd = OP_ADD_LOCAL_MAC_IPADDR_ENTRY;
1094*4882a593Smuzhiyun cqp_info->in.u.add_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
1095*4882a593Smuzhiyun cqp_info->in.u.add_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
1096*4882a593Smuzhiyun status = i40iw_handle_cqp_op(iwdev, cqp_request);
1097*4882a593Smuzhiyun if (status)
1098*4882a593Smuzhiyun i40iw_pr_err("CQP-OP Add MAC Ip entry fail");
1099*4882a593Smuzhiyun return status;
1100*4882a593Smuzhiyun }

/**
 * i40iw_alloc_local_mac_ipaddr_entry - allocate a mac ip address entry
 * @iwdev: iwarp device
 * @mac_ip_tbl_idx: the index of the new mac ip address
 *
 * Allocate a mac ip address entry and update the mac_ip_tbl_idx
 * to hold the index of the newly created mac ip address
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_local_mac_ipaddr_entry(struct i40iw_device *iwdev,
								 u16 *mac_ip_tbl_idx)
{
	struct i40iw_cqp *iwcqp = &iwdev->cqp;
	struct i40iw_cqp_request *cqp_request;
	struct cqp_commands_info *cqp_info;
	enum i40iw_status_code status = 0;

	cqp_request = i40iw_get_cqp_request(iwcqp, true);
	if (!cqp_request) {
		i40iw_pr_err("cqp_request memory failed\n");
		return I40IW_ERR_NO_MEMORY;
	}

	/* increment refcount, because we need the cqp request ret value */
	atomic_inc(&cqp_request->refcount);

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = OP_ALLOC_LOCAL_MAC_IPADDR_ENTRY;
	cqp_info->post_sq = 1;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.cqp = &iwcqp->sc_cqp;
	cqp_info->in.u.alloc_local_mac_ipaddr_entry.scratch = (uintptr_t)cqp_request;
	status = i40iw_handle_cqp_op(iwdev, cqp_request);
	if (!status)
		*mac_ip_tbl_idx = cqp_request->compl_info.op_ret_val;
	else
		i40iw_pr_err("CQP-OP Alloc MAC Ip entry fail\n");
	/* decrement refcount and free the cqp request, if no longer used */
	i40iw_put_cqp_request(iwcqp, cqp_request);
	return status;
}

/**
 * i40iw_alloc_set_mac_ipaddr - set up a mac ip address table entry
 * @iwdev: iwarp device
 * @macaddr: pointer to mac address
 *
 * Allocate a mac ip address entry and add it to the hw table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_alloc_set_mac_ipaddr(struct i40iw_device *iwdev,
							 u8 *macaddr)
{
	enum i40iw_status_code status;

	status = i40iw_alloc_local_mac_ipaddr_entry(iwdev, &iwdev->mac_ip_table_idx);
	if (!status) {
		status = i40iw_add_mac_ipaddr_entry(iwdev, macaddr,
						    (u8)iwdev->mac_ip_table_idx);
		if (status)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
	}
	return status;
}

/**
 * i40iw_add_ipv6_addr - add ipv6 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv6_addr(struct i40iw_device *iwdev)
{
	struct net_device *ip_dev;
	struct inet6_dev *idev;
	struct inet6_ifaddr *ifp, *tmp;
	u32 local_ipaddr6[4];

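	/*
	 * Walk every netdev in init_net so that VLAN devices stacked on
	 * iwdev->netdev (matched via rdma_vlan_dev_real_dev()) are picked
	 * up along with the main interface itself.
	 */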
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ip_dev) {
		if ((((rdma_vlan_dev_vlan_id(ip_dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
		     (ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
			idev = __in6_dev_get(ip_dev);
			if (!idev) {
				i40iw_pr_err("ipv6 inet device not found\n");
				break;
			}
			list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
				i40iw_pr_info("IP=%pI6, vlan_id=%d, MAC=%pM\n", &ifp->addr,
					      rdma_vlan_dev_vlan_id(ip_dev), ip_dev->dev_addr);
				i40iw_copy_ip_ntohl(local_ipaddr6,
						    ifp->addr.in6_u.u6_addr32);
				i40iw_manage_arp_cache(iwdev,
						       ip_dev->dev_addr,
						       local_ipaddr6,
						       false,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_ipv4_addr - add ipv4 address to the hw arp table
 * @iwdev: iwarp device
 */
static void i40iw_add_ipv4_addr(struct i40iw_device *iwdev)
{
	struct net_device *dev;
	struct in_device *idev;
	u32 ip_addr;

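	/* Same walk as the ipv6 case: include VLAN devices on top of iwdev->netdev. */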
	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if ((((rdma_vlan_dev_vlan_id(dev) < 0xFFFF) &&
		      (rdma_vlan_dev_real_dev(dev) == iwdev->netdev)) ||
		     (dev == iwdev->netdev)) && (READ_ONCE(dev->flags) & IFF_UP)) {
			const struct in_ifaddr *ifa;

			idev = __in_dev_get_rcu(dev);
			if (!idev)
				continue;
			in_dev_for_each_ifa_rcu(ifa, idev) {
				i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_CM,
					    "IP=%pI4, vlan_id=%d, MAC=%pM\n", &ifa->ifa_address,
					    rdma_vlan_dev_vlan_id(dev), dev->dev_addr);

				ip_addr = ntohl(ifa->ifa_address);
				i40iw_manage_arp_cache(iwdev,
						       dev->dev_addr,
						       &ip_addr,
						       true,
						       I40IW_ARP_ADD);
			}
		}
	}
	rcu_read_unlock();
}

/**
 * i40iw_add_mac_ip - add mac and ip addresses
 * @iwdev: iwarp device
 *
 * Create and add a mac ip address entry to the hw table and
 * ipv4/ipv6 addresses to the arp cache
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_add_mac_ip(struct i40iw_device *iwdev)
{
	struct net_device *netdev = iwdev->netdev;
	enum i40iw_status_code status;

	status = i40iw_alloc_set_mac_ipaddr(iwdev, (u8 *)netdev->dev_addr);
	if (status)
		return status;
	i40iw_add_ipv4_addr(iwdev);
	i40iw_add_ipv6_addr(iwdev);
	return 0;
}

/**
 * i40iw_wait_pe_ready - Check if firmware is ready
 * @hw: provides access to registers
 */
static void i40iw_wait_pe_ready(struct i40iw_hw *hw)
{
	u32 statusfw;
	u32 statuscpu0;
	u32 statuscpu1;
	u32 statuscpu2;
	u32 retrycount = 0;

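	/*
	 * Poll the firmware load status and the three PE CPU status
	 * registers once per second, for up to 14 seconds, until every
	 * CPU reports ready (0x80).
	 */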
	do {
		statusfw = i40iw_rd32(hw, I40E_GLPE_FWLDSTATUS);
		i40iw_pr_info("[%04d] fw load status[x%04X]\n", __LINE__, statusfw);
		statuscpu0 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS0);
		i40iw_pr_info("[%04d] CSR_CQP status[x%04X]\n", __LINE__, statuscpu0);
		statuscpu1 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS1);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS1 status[x%04X]\n",
			      __LINE__, statuscpu1);
		statuscpu2 = i40iw_rd32(hw, I40E_GLPE_CPUSTATUS2);
		i40iw_pr_info("[%04d] I40E_GLPE_CPUSTATUS2 status[x%04X]\n",
			      __LINE__, statuscpu2);
		if ((statuscpu0 == 0x80) && (statuscpu1 == 0x80) && (statuscpu2 == 0x80))
			break;	/* SUCCESS */
		msleep(1000);
		retrycount++;
	} while (retrycount < 14);
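	/*
	 * Final hardware-specific configuration write; the register offset
	 * and value are magic numbers carried over from the original driver
	 * and are not documented here.
	 */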
	i40iw_wr32(hw, 0xb4040, 0x4C104C5);
}

/**
 * i40iw_initialize_dev - initialize device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate memory for the hmc objects and initialize iwdev
 * Return 0 if successful, otherwise clean up the resources
 * and return error
 */
static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	enum i40iw_status_code status;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	struct i40iw_device_init_info info;
	struct i40iw_vsi_init_info vsi_info;
	struct i40iw_dma_mem mem;
	struct i40iw_l2params l2params;
	u32 size;
	struct i40iw_vsi_stats_info stats_info;
	u16 last_qset = I40IW_NO_QSET;
	u16 qset;
	u32 i;

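	/*
	 * The pble resource struct, the hmc info struct and the hmc object
	 * array are carved out of one allocation so they can all be freed
	 * with a single kfree() of hmc_info_mem.
	 */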
	memset(&l2params, 0, sizeof(l2params));
	memset(&info, 0, sizeof(info));
	size = sizeof(struct i40iw_hmc_pble_rsrc) + sizeof(struct i40iw_hmc_info) +
	       (sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX);
	iwdev->hmc_info_mem = kzalloc(size, GFP_KERNEL);
	if (!iwdev->hmc_info_mem)
		return I40IW_ERR_NO_MEMORY;

	iwdev->pble_rsrc = (struct i40iw_hmc_pble_rsrc *)iwdev->hmc_info_mem;
	dev->hmc_info = &iwdev->hw.hmc;
	dev->hmc_info->hmc_obj = (struct i40iw_hmc_obj_info *)(iwdev->pble_rsrc + 1);
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_query_buf_pa = mem.pa;
	info.fpm_query_buf = mem.va;
	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
	if (status)
		goto error;
	info.fpm_commit_buf_pa = mem.pa;
	info.fpm_commit_buf = mem.va;
	info.hmc_fn_id = ldev->fid;
	info.is_pf = !ldev->ftype;
	info.bar0 = ldev->hw_addr;
	info.hw = &iwdev->hw;
	info.debug_mask = debug;
	l2params.mtu =
		(ldev->params.mtu) ? ldev->params.mtu : I40IW_DEFAULT_MTU;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++) {
		qset = ldev->params.qos.prio_qos[i].qs_handle;
		l2params.qs_handle_list[i] = qset;
		if (last_qset == I40IW_NO_QSET)
			last_qset = qset;
		else if ((qset != last_qset) && (qset != I40IW_NO_QSET))
			iwdev->dcb = true;
	}
	i40iw_pr_info("DCB is set/clear = %d\n", iwdev->dcb);
	info.vchnl_send = i40iw_virtchnl_send;
	status = i40iw_device_init(&iwdev->sc_dev, &info);
	if (status)
		goto error;
	memset(&vsi_info, 0, sizeof(vsi_info));
	vsi_info.dev = &iwdev->sc_dev;
	vsi_info.back_vsi = (void *)iwdev;
	vsi_info.params = &l2params;
	vsi_info.exception_lan_queue = 1;
	i40iw_sc_vsi_init(&iwdev->vsi, &vsi_info);

	if (dev->is_pf) {
		memset(&stats_info, 0, sizeof(stats_info));
		stats_info.fcn_id = ldev->fid;
		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
		if (!stats_info.pestat) {
			status = I40IW_ERR_NO_MEMORY;
			goto error;
		}
		stats_info.stats_initialize = true;
		i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
	}
	return status;
error:
	kfree(iwdev->hmc_info_mem);
	iwdev->hmc_info_mem = NULL;
	return status;
}

/**
 * i40iw_register_notifiers - register tcp ip notifiers
 */
static void i40iw_register_notifiers(void)
{
	register_inetaddr_notifier(&i40iw_inetaddr_notifier);
	register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	register_netevent_notifier(&i40iw_net_notifier);
	register_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_unregister_notifiers - unregister tcp ip notifiers
 */
static void i40iw_unregister_notifiers(void)
{
	unregister_netevent_notifier(&i40iw_net_notifier);
	unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
	unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
	unregister_netdevice_notifier(&i40iw_netdevice_notifier);
}

/**
 * i40iw_save_msix_info - copy msix vector information to iwarp device
 * @iwdev: iwarp device
 * @ldev: lan device information
 *
 * Allocate iwdev msix table and copy the ldev msix info to the table
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
						   struct i40e_info *ldev)
{
	struct i40e_qvlist_info *iw_qvlist;
	struct i40e_qv_info *iw_qvinfo;
	u32 ceq_idx;
	u32 i;
	u32 size;

	if (!ldev->msix_count) {
		i40iw_pr_err("No MSI-X vectors\n");
		return I40IW_ERR_CONFIG;
	}

	iwdev->msix_count = ldev->msix_count;

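	/*
	 * One allocation holds the msix vector table followed by the
	 * qvlist info; struct i40e_qvlist_info already carries room for one
	 * qv_info entry, so only msix_count - 1 extra entries are needed.
	 */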
	size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
	size += sizeof(struct i40e_qvlist_info);
	size += sizeof(struct i40e_qv_info) * (iwdev->msix_count - 1);
	iwdev->iw_msixtbl = kzalloc(size, GFP_KERNEL);

	if (!iwdev->iw_msixtbl)
		return I40IW_ERR_NO_MEMORY;
	iwdev->iw_qvlist = (struct i40e_qvlist_info *)(&iwdev->iw_msixtbl[iwdev->msix_count]);
	iw_qvlist = iwdev->iw_qvlist;
	iw_qvinfo = iw_qvlist->qv_info;
	iw_qvlist->num_vectors = iwdev->msix_count;
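	/* With no more vectors than online CPUs, the AEQ shares vector 0 with the first CEQ. */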
	if (iwdev->msix_count <= num_online_cpus())
		iwdev->msix_shared = true;
	for (i = 0, ceq_idx = 0; i < iwdev->msix_count; i++, iw_qvinfo++) {
		iwdev->iw_msixtbl[i].idx = ldev->msix_entries[i].entry;
		iwdev->iw_msixtbl[i].irq = ldev->msix_entries[i].vector;
		iwdev->iw_msixtbl[i].cpu_affinity = ceq_idx;
		if (i == 0) {
			iw_qvinfo->aeq_idx = 0;
			if (iwdev->msix_shared)
				iw_qvinfo->ceq_idx = ceq_idx++;
			else
				iw_qvinfo->ceq_idx = I40E_QUEUE_INVALID_IDX;
		} else {
			iw_qvinfo->aeq_idx = I40E_QUEUE_INVALID_IDX;
			iw_qvinfo->ceq_idx = ceq_idx++;
		}
		iw_qvinfo->itr_idx = 3;
		iw_qvinfo->v_idx = iwdev->iw_msixtbl[i].idx;
	}
	return 0;
}

/**
 * i40iw_deinit_device - clean up the device resources
 * @iwdev: iwarp device
 *
 * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses,
 * destroy the device queues and free the pble and the hmc objects
 */
static void i40iw_deinit_device(struct i40iw_device *iwdev)
{
	struct i40e_info *ldev = iwdev->ldev;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;

	i40iw_pr_info("state = %d\n", iwdev->init_state);
	if (iwdev->param_wq)
		destroy_workqueue(iwdev->param_wq);

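	/*
	 * Tear down in the reverse order of creation: each case undoes one
	 * init stage and deliberately falls through to the stages below it.
	 */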
	switch (iwdev->init_state) {
	case RDMA_DEV_REGISTERED:
		iwdev->iw_status = 0;
		i40iw_port_ibevent(iwdev);
		i40iw_destroy_rdma_device(iwdev->iwibdev);
		fallthrough;
	case IP_ADDR_REGISTERED:
		if (!iwdev->reset)
			i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
		fallthrough;
	case PBLE_CHUNK_MEM:
		i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
		fallthrough;
	case CEQ_CREATED:
		i40iw_dele_ceqs(iwdev);
		fallthrough;
	case AEQ_CREATED:
		i40iw_destroy_aeq(iwdev);
		fallthrough;
	case IEQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset);
		fallthrough;
	case ILQ_CREATED:
		i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset);
		fallthrough;
	case CCQ_CREATED:
		i40iw_destroy_ccq(iwdev);
		fallthrough;
	case HMC_OBJS_CREATED:
		i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset);
		fallthrough;
	case CQP_CREATED:
		i40iw_destroy_cqp(iwdev, true);
		fallthrough;
	case INITIAL_STATE:
		i40iw_cleanup_cm_core(&iwdev->cm_core);
		if (iwdev->vsi.pestat) {
			i40iw_vsi_stats_free(&iwdev->vsi);
			kfree(iwdev->vsi.pestat);
		}
		i40iw_del_init_mem(iwdev);
		break;
	case INVALID_STATE:
	default:
		i40iw_pr_err("bad init_state = %d\n", iwdev->init_state);
		break;
	}

	i40iw_del_handler(i40iw_find_i40e_handler(ldev));
	kfree(iwdev->hdl);
}

/**
 * i40iw_setup_init_state - set up the initial device struct
 * @hdl: handler for iwarp device - one per instance
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Initialize the iwarp device and its hdl information
 * using the ldev and client information
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
						     struct i40e_info *ldev,
						     struct i40e_client *client)
{
	struct i40iw_device *iwdev = &hdl->device;
	struct i40iw_sc_dev *dev = &iwdev->sc_dev;
	enum i40iw_status_code status;

	memcpy(&hdl->ldev, ldev, sizeof(*ldev));

	iwdev->mpa_version = mpa_version;
	iwdev->resource_profile = (resource_profile < I40IW_HMC_PROFILE_EQUAL) ?
				  (u8)resource_profile + I40IW_HMC_PROFILE_DEFAULT :
				  I40IW_HMC_PROFILE_DEFAULT;
	iwdev->max_rdma_vfs =
		(iwdev->resource_profile != I40IW_HMC_PROFILE_DEFAULT) ? max_rdma_vfs : 0;
	iwdev->max_enabled_vfs = iwdev->max_rdma_vfs;
	iwdev->netdev = ldev->netdev;
	hdl->client = client;
	if (!ldev->ftype)
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_DB_ADDR_OFFSET;
	else
		iwdev->db_start = pci_resource_start(ldev->pcidev, 0) + I40IW_VF_DB_ADDR_OFFSET;

	status = i40iw_save_msix_info(iwdev, ldev);
	if (status)
		return status;
	iwdev->hw.pcidev = ldev->pcidev;
	iwdev->hw.hw_addr = ldev->hw_addr;
	status = i40iw_allocate_dma_mem(&iwdev->hw,
					&iwdev->obj_mem, 8192, 4096);
	if (status)
		goto exit;
	iwdev->obj_next = iwdev->obj_mem;

	init_waitqueue_head(&iwdev->vchnl_waitq);
	init_waitqueue_head(&dev->vf_reqs);
	init_waitqueue_head(&iwdev->close_wq);

	status = i40iw_initialize_dev(iwdev, ldev);
exit:
	if (status) {
		kfree(iwdev->iw_msixtbl);
		i40iw_free_dma_mem(dev->hw, &iwdev->obj_mem);
		iwdev->iw_msixtbl = NULL;
	}
	return status;
}

/**
 * i40iw_get_used_rsrc - determine resources used internally
 * @iwdev: iwarp device
 *
 * Called after internal allocations
 */
static void i40iw_get_used_rsrc(struct i40iw_device *iwdev)
{
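	/*
	 * Internal allocations are made contiguously from bit 0, so the
	 * first clear bit in each bitmap equals the number of entries in use.
	 */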
	iwdev->used_pds = find_next_zero_bit(iwdev->allocated_pds, iwdev->max_pd, 0);
	iwdev->used_qps = find_next_zero_bit(iwdev->allocated_qps, iwdev->max_qp, 0);
	iwdev->used_cqs = find_next_zero_bit(iwdev->allocated_cqs, iwdev->max_cq, 0);
	iwdev->used_mrs = find_next_zero_bit(iwdev->allocated_mrs, iwdev->max_mr, 0);
}

/**
 * i40iw_open - client interface operation open for iwarp/uda device
 * @ldev: lan device information
 * @client: iwarp client information, provided during registration
 *
 * Called by the lan driver during the processing of client register
 * Create device resources, set up queues, pble and hmc objects and
 * register the device with the ib verbs interface
 * Return 0 if successful, otherwise return error
 */
static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
{
	struct i40iw_device *iwdev;
	struct i40iw_sc_dev *dev;
	enum i40iw_status_code status;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_netdev(ldev->netdev);
	if (hdl)
		return 0;

	hdl = kzalloc(sizeof(*hdl), GFP_KERNEL);
	if (!hdl)
		return -ENOMEM;
	iwdev = &hdl->device;
	iwdev->hdl = hdl;
	dev = &iwdev->sc_dev;
	if (i40iw_setup_cm_core(iwdev)) {
		kfree(iwdev->hdl);
		return -ENOMEM;
	}

	dev->back_dev = (void *)iwdev;
	iwdev->ldev = &hdl->ldev;
	iwdev->client = client;
	mutex_init(&iwdev->pbl_mutex);
	i40iw_add_handler(hdl);

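	/*
	 * Bring-up sequence: each successful step advances init_state so
	 * that, on failure, i40iw_deinit_device() unwinds exactly the
	 * stages that completed. The do/while (0) exists only so break
	 * can jump forward to the common error path.
	 */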
	do {
		status = i40iw_setup_init_state(hdl, ldev, client);
		if (status)
			break;
		iwdev->init_state = INITIAL_STATE;
		if (dev->is_pf)
			i40iw_wait_pe_ready(dev->hw);
		status = i40iw_create_cqp(iwdev);
		if (status)
			break;
		iwdev->init_state = CQP_CREATED;
		status = i40iw_hmc_setup(iwdev);
		if (status)
			break;
		status = i40iw_create_ccq(iwdev);
		if (status)
			break;
		iwdev->init_state = CCQ_CREATED;
		status = i40iw_initialize_ilq(iwdev);
		if (status)
			break;
		iwdev->init_state = ILQ_CREATED;
		status = i40iw_initialize_ieq(iwdev);
		if (status)
			break;
		iwdev->init_state = IEQ_CREATED;
		status = i40iw_setup_aeq(iwdev);
		if (status)
			break;
		iwdev->init_state = AEQ_CREATED;
		status = i40iw_setup_ceqs(iwdev, ldev);
		if (status)
			break;

		status = i40iw_get_rdma_features(dev);
		if (status)
			dev->feature_info[I40IW_FEATURE_FW_INFO] =
				I40IW_FW_VER_DEFAULT;

		iwdev->init_state = CEQ_CREATED;
		status = i40iw_initialize_hw_resources(iwdev);
		if (status)
			break;
		i40iw_get_used_rsrc(iwdev);
		dev->ccq_ops->ccq_arm(dev->ccq);
		status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc);
		if (status)
			break;
		iwdev->init_state = PBLE_CHUNK_MEM;
		iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
		status = i40iw_add_mac_ip(iwdev);
		if (status)
			break;
		iwdev->init_state = IP_ADDR_REGISTERED;
		if (i40iw_register_rdma_device(iwdev)) {
			i40iw_pr_err("register rdma device fail\n");
			break;
		}

		iwdev->init_state = RDMA_DEV_REGISTERED;
		iwdev->iw_status = 1;
		i40iw_port_ibevent(iwdev);
		iwdev->param_wq = alloc_ordered_workqueue("l2params", WQ_MEM_RECLAIM);
		if (!iwdev->param_wq)
			break;
		i40iw_pr_info("i40iw_open completed\n");
		return 0;
	} while (0);

	i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state);
	i40iw_deinit_device(iwdev);
	return -ERESTART;
}

/**
 * i40iw_l2params_worker - worker for l2 params change
 * @work: work pointer for l2 params
 */
static void i40iw_l2params_worker(struct work_struct *work)
{
	struct l2params_work *dwork =
		container_of(work, struct l2params_work, work);
	struct i40iw_device *iwdev = dwork->iwdev;

	i40iw_change_l2params(&iwdev->vsi, &dwork->l2params);
	atomic_dec(&iwdev->params_busy);
	kfree(dwork);
}

/**
 * i40iw_l2param_change - handle qs handles for qos and mss change
 * @ldev: lan device information
 * @client: client for parameter change
 * @params: new parameters from L2
 */
static void i40iw_l2param_change(struct i40e_info *ldev, struct i40e_client *client,
				 struct i40e_params *params)
{
	struct i40iw_handler *hdl;
	struct i40iw_l2params *l2params;
	struct l2params_work *work;
	struct i40iw_device *iwdev;
	int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;

	if (atomic_read(&iwdev->params_busy))
		return;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	atomic_inc(&iwdev->params_busy);

	work->iwdev = iwdev;
	l2params = &work->l2params;
	for (i = 0; i < I40E_CLIENT_MAX_USER_PRIORITY; i++)
		l2params->qs_handle_list[i] = params->qos.prio_qos[i].qs_handle;

	l2params->mtu = (params->mtu) ? params->mtu : iwdev->vsi.mtu;

	INIT_WORK(&work->work, i40iw_l2params_worker);
	queue_work(iwdev->param_wq, &work->work);
}

/**
 * i40iw_close - client interface operation close for iwarp/uda device
 * @ldev: lan device information
 * @client: client to close
 * @reset: true if called before a lan driver reset
 *
 * Called by the lan driver during the processing of client unregister
 * Destroy and clean up the driver resources
 */
static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool reset)
{
	struct i40iw_device *iwdev;
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	iwdev = &hdl->device;
	iwdev->closing = true;

	if (reset)
		iwdev->reset = true;

	i40iw_cm_teardown_connections(iwdev, NULL, NULL, true);
	destroy_workqueue(iwdev->virtchnl_wq);
	i40iw_deinit_device(iwdev);
}

/**
 * i40iw_vf_reset - process VF reset
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Called when a VF is reset by the PF
 * Destroy and clean up the VF resources
 */
static void i40iw_vf_reset(struct i40e_info *ldev, struct i40e_client *client, u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_hmc_fcn_info hmc_fcn_info;
	struct i40iw_virt_mem vf_dev_mem;
	struct i40iw_vfdev *tmp_vfdev;
	unsigned int i;
	unsigned long flags;
	struct i40iw_device *iwdev;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	dev = &hdl->device.sc_dev;
	iwdev = (struct i40iw_device *)dev->back_dev;

	for (i = 0; i < I40IW_MAX_PE_ENABLED_VF_COUNT; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id != vf_id))
			continue;
		/* free all resources allocated on behalf of vf */
		tmp_vfdev = dev->vf_dev[i];
		spin_lock_irqsave(&iwdev->vsi.pestat->lock, flags);
		dev->vf_dev[i] = NULL;
		spin_unlock_irqrestore(&iwdev->vsi.pestat->lock, flags);
		i40iw_del_hmc_objects(dev, &tmp_vfdev->hmc_info, false, false);
		/* remove vf hmc function */
		memset(&hmc_fcn_info, 0, sizeof(hmc_fcn_info));
		hmc_fcn_info.vf_id = vf_id;
		hmc_fcn_info.iw_vf_idx = tmp_vfdev->iw_vf_idx;
		hmc_fcn_info.free_fcn = true;
		i40iw_cqp_manage_hmc_fcn_cmd(dev, &hmc_fcn_info);
		/* free vf_dev */
		vf_dev_mem.va = tmp_vfdev;
		vf_dev_mem.size = sizeof(struct i40iw_vfdev) +
				  sizeof(struct i40iw_hmc_obj_info) * I40IW_HMC_IW_MAX;
		i40iw_free_virt_mem(dev->hw, &vf_dev_mem);
		break;
	}
}

/**
 * i40iw_vf_enable - enable a number of VFs
 * @ldev: lan device information
 * @client: client interface instance
 * @num_vfs: number of VFs for the PF
 *
 * Called when the number of VFs changes
 */
static void i40iw_vf_enable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 num_vfs)
{
	struct i40iw_handler *hdl;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return;

	if (num_vfs > I40IW_MAX_PE_ENABLED_VF_COUNT)
		hdl->device.max_enabled_vfs = I40IW_MAX_PE_ENABLED_VF_COUNT;
	else
		hdl->device.max_enabled_vfs = num_vfs;
}

/**
 * i40iw_vf_capable - check if VF capable
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id
 *
 * Return 1 if a VF slot is available or if VF is already RDMA enabled
 * Return 0 otherwise
 */
static int i40iw_vf_capable(struct i40e_info *ldev,
			    struct i40e_client *client,
			    u32 vf_id)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	unsigned int i;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return 0;

	dev = &hdl->device.sc_dev;

	for (i = 0; i < hdl->device.max_enabled_vfs; i++) {
		if (!dev->vf_dev[i] || (dev->vf_dev[i]->vf_id == vf_id))
			return 1;
	}

	return 0;
}

/**
 * i40iw_virtchnl_receive - receive a message through the virtual channel
 * @ldev: lan device information
 * @client: client interface instance
 * @vf_id: virtual function id associated with the message
 * @msg: message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel receive operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static int i40iw_virtchnl_receive(struct i40e_info *ldev,
				  struct i40e_client *client,
				  u32 vf_id,
				  u8 *msg,
				  u16 len)
{
	struct i40iw_handler *hdl;
	struct i40iw_sc_dev *dev;
	struct i40iw_device *iwdev;
	int ret_code = I40IW_NOT_SUPPORTED;

	if (!len || !msg)
		return I40IW_ERR_PARAM;

	hdl = i40iw_find_i40e_handler(ldev);
	if (!hdl)
		return I40IW_ERR_PARAM;

	dev = &hdl->device.sc_dev;
	iwdev = dev->back_dev;

	if (dev->vchnl_if.vchnl_recv) {
		ret_code = dev->vchnl_if.vchnl_recv(dev, vf_id, msg, len);
		if (!dev->is_pf) {
			atomic_dec(&iwdev->vchnl_msgs);
			wake_up(&iwdev->vchnl_waitq);
		}
	}
	return ret_code;
}

/**
 * i40iw_vf_clear_to_send - wait to send virtual channel message
 * @dev: iwarp device
 *
 * Wait until the virtual channel is clear
 * before sending the next message
 *
 * Returns false if error
 * Returns true if clear to send
 */
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
{
	struct i40iw_device *iwdev;
	wait_queue_entry_t wait;

	iwdev = dev->back_dev;

	if (!wq_has_sleeper(&dev->vf_reqs) &&
	    (atomic_read(&iwdev->vchnl_msgs) == 0))
		return true; /* virtual channel is clear */

	init_wait(&wait);
	add_wait_queue_exclusive(&dev->vf_reqs, &wait);

	if (!wait_event_timeout(dev->vf_reqs,
				(atomic_read(&iwdev->vchnl_msgs) == 0),
				I40IW_VCHNL_EVENT_TIMEOUT))
		dev->vchnl_up = false;

	remove_wait_queue(&dev->vf_reqs, &wait);

	return dev->vchnl_up;
}

/**
 * i40iw_virtchnl_send - send a message through the virtual channel
 * @dev: iwarp device
 * @vf_id: virtual function id associated with the message
 * @msg: virtual channel message buffer pointer
 * @len: length of the message
 *
 * Invoke virtual channel send operation for the given msg
 * Return 0 if successful, otherwise return error
 */
static enum i40iw_status_code i40iw_virtchnl_send(struct i40iw_sc_dev *dev,
						  u32 vf_id,
						  u8 *msg,
						  u16 len)
{
	struct i40iw_device *iwdev;
	struct i40e_info *ldev;

	if (!dev || !dev->back_dev)
		return I40IW_ERR_BAD_PTR;

	iwdev = dev->back_dev;
	ldev = iwdev->ldev;

	if (ldev && ldev->ops && ldev->ops->virtchnl_send)
		return ldev->ops->virtchnl_send(ldev, &i40iw_client, vf_id, msg, len);
	return I40IW_ERR_BAD_PTR;
}

/* client interface functions */
static const struct i40e_client_ops i40e_ops = {
	.open = i40iw_open,
	.close = i40iw_close,
	.l2_param_change = i40iw_l2param_change,
	.virtchnl_receive = i40iw_virtchnl_receive,
	.vf_reset = i40iw_vf_reset,
	.vf_enable = i40iw_vf_enable,
	.vf_capable = i40iw_vf_capable
};

/**
 * i40iw_init_module - driver initialization function
 *
 * First function to call when the driver is loaded
 * Register the driver as i40e client and port mapper client
 */
static int __init i40iw_init_module(void)
{
	int ret;

	memset(&i40iw_client, 0, sizeof(i40iw_client));
	i40iw_client.version.major = CLIENT_IW_INTERFACE_VERSION_MAJOR;
	i40iw_client.version.minor = CLIENT_IW_INTERFACE_VERSION_MINOR;
	i40iw_client.version.build = CLIENT_IW_INTERFACE_VERSION_BUILD;
	i40iw_client.ops = &i40e_ops;
	memcpy(i40iw_client.name, i40iw_client_name, I40E_CLIENT_STR_LENGTH);
	i40iw_client.type = I40E_CLIENT_IWARP;
	spin_lock_init(&i40iw_handler_lock);
	ret = i40e_register_client(&i40iw_client);
	if (!ret)
		i40iw_register_notifiers();

	return ret;
}

/**
 * i40iw_exit_module - driver exit clean up function
 *
 * The function is called just before the driver is unloaded
 * Unregister the driver as i40e client and port mapper client
 */
static void __exit i40iw_exit_module(void)
{
	i40iw_unregister_notifiers();
	i40e_unregister_client(&i40iw_client);
}

module_init(i40iw_init_module);
module_exit(i40iw_exit_module);