/*
 * Copyright (c) 2013, Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *   Redistribution and use in source and binary forms, with or
 *   without modification, are permitted provided that the following
 *   conditions are met:
 *
 *   - Redistributions of source code must retain the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer.
 *
 *   - Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials
 *     provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author: Upinder Malhi <umalhi@cisco.com>
 * Author: Anant Deepak <anadeepa@cisco.com>
 * Author: Cesare Cantu' <cantuc@cisco.com>
 * Author: Jeff Squyres <jsquyres@cisco.com>
 * Author: Kiran Thirumalai <kithirum@cisco.com>
 * Author: Xuyang Wang <xuywang@cisco.com>
 * Author: Reese Faucette <rfaucett@cisco.com>
 *
 */

#include <linux/module.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include "usnic_abi.h"
#include "usnic_common_util.h"
#include "usnic_ib.h"
#include "usnic_ib_qp_grp.h"
#include "usnic_log.h"
#include "usnic_fwd.h"
#include "usnic_debugfs.h"
#include "usnic_ib_verbs.h"
#include "usnic_transport.h"
#include "usnic_uiom.h"
#include "usnic_ib_sysfs.h"

unsigned int usnic_log_lvl = USNIC_LOG_LVL_ERR;
unsigned int usnic_ib_share_vf = 1;

static const char usnic_version[] =
	DRV_NAME ": Cisco VIC (USNIC) Verbs Driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static DEFINE_MUTEX(usnic_ib_ibdev_list_lock);
static LIST_HEAD(usnic_ib_ibdev_list);

/* Callback dump funcs */
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
	struct usnic_ib_vf *vf = obj;
	return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */

static void usnic_ib_dump_vf(struct usnic_ib_vf *vf, char *buf, int buf_sz)
{
	usnic_vnic_dump(vf->vnic, buf, buf_sz, vf,
			usnic_ib_dump_vf_hdr,
			usnic_ib_qp_grp_dump_hdr, usnic_ib_qp_grp_dump_rows);
}

void usnic_ib_log_vf(struct usnic_ib_vf *vf)
{
	char *buf = kzalloc(1000, GFP_KERNEL);

	if (!buf)
		return;

	usnic_ib_dump_vf(vf, buf, 1000);
	usnic_dbg("%s\n", buf);

	kfree(buf);
}

/* Start of netdev section */
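/*
 * Walk every QP group in every user context on this device and force any
 * QP currently in INIT, RTR, or RTS into the ERR state. The caller must
 * hold us_ibdev->usdev_lock.
 */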
static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev)
{
	struct usnic_ib_ucontext *ctx;
	struct usnic_ib_qp_grp *qp_grp;
	enum ib_qp_state cur_state;
	int status;

	BUG_ON(!mutex_is_locked(&us_ibdev->usdev_lock));

	list_for_each_entry(ctx, &us_ibdev->ctx_list, link) {
		list_for_each_entry(qp_grp, &ctx->qp_grp_list, link) {
			cur_state = qp_grp->state;
			if (cur_state == IB_QPS_INIT ||
			    cur_state == IB_QPS_RTR ||
			    cur_state == IB_QPS_RTS) {
				status = usnic_ib_qp_grp_modify(qp_grp,
								IB_QPS_ERR,
								NULL);
				if (status) {
					usnic_err("Failed to transition qp grp %u from %s to %s\n",
						  qp_grp->grp_id,
						  usnic_ib_qp_grp_state_to_string(cur_state),
						  usnic_ib_qp_grp_state_to_string(IB_QPS_ERR));
				}
			}
		}
	}
}

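/*
 * React to netdevice notifier events on the netdev backing this usNIC
 * device: propagate carrier, MAC, and MTU changes to the forwarding layer,
 * fail active QPs where the change invalidates them, and dispatch the
 * corresponding IB port/GID events.
 */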
static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
					unsigned long event)
{
	struct net_device *netdev;
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));

	mutex_lock(&us_ibdev->usdev_lock);
	netdev = us_ibdev->netdev;
	switch (event) {
	case NETDEV_REBOOT:
		usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_PORT_ERR;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
	case NETDEV_DOWN:
	case NETDEV_CHANGE:
		if (!us_ibdev->ufdev->link_up &&
		    netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_up(us_ibdev->ufdev);
			usnic_info("Link UP on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			ib_event.event = IB_EVENT_PORT_ACTIVE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else if (us_ibdev->ufdev->link_up &&
			   !netif_carrier_ok(netdev)) {
			usnic_fwd_carrier_down(us_ibdev->ufdev);
			usnic_info("Link DOWN on %s\n",
				   dev_name(&us_ibdev->ib_dev.dev));
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_PORT_ERR;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		} else {
			usnic_dbg("Ignoring %s on %s\n",
				  netdev_cmd_to_name(event),
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	case NETDEV_CHANGEADDR:
		if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
			    sizeof(us_ibdev->ufdev->mac))) {
			usnic_dbg("Ignoring addr change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		} else {
			usnic_info(" %s old mac: %pM new mac: %pM\n",
				   dev_name(&us_ibdev->ib_dev.dev),
				   us_ibdev->ufdev->mac,
				   netdev->dev_addr);
			usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
			ib_event.event = IB_EVENT_GID_CHANGE;
			ib_event.device = &us_ibdev->ib_dev;
			ib_event.element.port_num = 1;
			ib_dispatch_event(&ib_event);
		}

		break;
	case NETDEV_CHANGEMTU:
		if (us_ibdev->ufdev->mtu != netdev->mtu) {
			usnic_info("MTU Change on %s old: %u new: %u\n",
				   dev_name(&us_ibdev->ib_dev.dev),
				   us_ibdev->ufdev->mtu, netdev->mtu);
			usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
			usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		} else {
			usnic_dbg("Ignoring MTU change on %s\n",
				  dev_name(&us_ibdev->ib_dev.dev));
		}
		break;
	default:
		usnic_dbg("Ignoring event %s on %s",
			  netdev_cmd_to_name(event),
			  dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);
}

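/*
 * Netdevice notifier entry point: look up the usNIC ib_device bound to the
 * netdev that generated the event and hand the event off to
 * usnic_ib_handle_usdev_event().
 */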
static int usnic_ib_netdevice_event(struct notifier_block *notifier,
				    unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct ib_device *ibdev;

	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_usdev_event(us_ibdev, event);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_netdevice_notifier = {
	.notifier_call = usnic_ib_netdevice_event
};
/* End of netdev section */

/* Start of inet section */
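/*
 * Handle IPv4 address add/remove on the backing netdev: update the
 * forwarding device's IP address, fail active QPs on address removal, and
 * dispatch IB_EVENT_GID_CHANGE so consumers re-query the GID.
 */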
static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
				      unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct ib_event ib_event;

	mutex_lock(&us_ibdev->usdev_lock);

	switch (event) {
	case NETDEV_DOWN:
		usnic_info("%s via ip notifiers",
			   netdev_cmd_to_name(event));
		usnic_fwd_del_ipaddr(us_ibdev->ufdev);
		usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	case NETDEV_UP:
		usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
		usnic_info("%s via ip notifiers: ip %pI4",
			   netdev_cmd_to_name(event),
			   &us_ibdev->ufdev->inaddr);
		ib_event.event = IB_EVENT_GID_CHANGE;
		ib_event.device = &us_ibdev->ib_dev;
		ib_event.element.port_num = 1;
		ib_dispatch_event(&ib_event);
		break;
	default:
		usnic_info("Ignoring event %s on %s",
			   netdev_cmd_to_name(event),
			   dev_name(&us_ibdev->ib_dev.dev));
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return NOTIFY_DONE;
}

static int usnic_ib_inetaddr_event(struct notifier_block *notifier,
				   unsigned long event, void *ptr)
{
	struct usnic_ib_dev *us_ibdev;
	struct in_ifaddr *ifa = ptr;
	struct net_device *netdev = ifa->ifa_dev->dev;
	struct ib_device *ibdev;

	ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_USNIC);
	if (!ibdev)
		return NOTIFY_DONE;

	us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
	usnic_ib_handle_inet_event(us_ibdev, event, ptr);
	ib_device_put(ibdev);
	return NOTIFY_DONE;
}

static struct notifier_block usnic_ib_inetaddr_notifier = {
	.notifier_call = usnic_ib_inetaddr_event
};
/* End of inet section */

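/*
 * Report the immutable attributes of the single usNIC port: the USNIC core
 * capability flag plus the GID table length from ib_query_port().
 */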
static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num,
				struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	immutable->core_cap_flags = RDMA_CORE_PORT_USNIC;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->gid_tbl_len = attr.gid_tbl_len;

	return 0;
}

static void usnic_get_dev_fw_str(struct ib_device *device, char *str)
{
	struct usnic_ib_dev *us_ibdev =
		container_of(device, struct usnic_ib_dev, ib_dev);
	struct ethtool_drvinfo info;

	mutex_lock(&us_ibdev->usdev_lock);
	us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info);
	mutex_unlock(&us_ibdev->usdev_lock);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", info.fw_version);
}

static const struct ib_device_ops usnic_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_USNIC,
	.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION,

	.alloc_pd = usnic_ib_alloc_pd,
	.alloc_ucontext = usnic_ib_alloc_ucontext,
	.create_cq = usnic_ib_create_cq,
	.create_qp = usnic_ib_create_qp,
	.dealloc_pd = usnic_ib_dealloc_pd,
	.dealloc_ucontext = usnic_ib_dealloc_ucontext,
	.dereg_mr = usnic_ib_dereg_mr,
	.destroy_cq = usnic_ib_destroy_cq,
	.destroy_qp = usnic_ib_destroy_qp,
	.get_dev_fw_str = usnic_get_dev_fw_str,
	.get_link_layer = usnic_ib_port_link_layer,
	.get_port_immutable = usnic_port_immutable,
	.mmap = usnic_ib_mmap,
	.modify_qp = usnic_ib_modify_qp,
	.query_device = usnic_ib_query_device,
	.query_gid = usnic_ib_query_gid,
	.query_port = usnic_ib_query_port,
	.query_qp = usnic_ib_query_qp,
	.reg_user_mr = usnic_ib_reg_mr,
	INIT_RDMA_OBJ_SIZE(ib_pd, usnic_ib_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_cq, usnic_ib_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, usnic_ib_ucontext, ibucontext),
};

/* Start of PF discovery section */
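/*
 * Allocate and register an ib_device for the PF that owns the given PCI
 * device: set up the forwarding device, verb ops, sysfs group, and netdev
 * binding, and seed MAC/MTU/link/IP state from the underlying netdev.
 * Returns the new usnic_ib_dev on success; NULL or an ERR_PTR on failure.
 */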
static void *usnic_ib_device_add(struct pci_dev *dev)
{
	struct usnic_ib_dev *us_ibdev;
	union ib_gid gid;
	struct in_device *ind;
	struct net_device *netdev;
	int ret;

	usnic_dbg("\n");
	netdev = pci_get_drvdata(dev);

	us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
	if (!us_ibdev) {
		usnic_err("Device %s context alloc failed\n",
			  netdev_name(pci_get_drvdata(dev)));
		return ERR_PTR(-EFAULT);
	}

	us_ibdev->ufdev = usnic_fwd_dev_alloc(dev);
	if (!us_ibdev->ufdev) {
		usnic_err("Failed to alloc ufdev for %s\n", pci_name(dev));
		goto err_dealloc;
	}

	mutex_init(&us_ibdev->usdev_lock);
	INIT_LIST_HEAD(&us_ibdev->vf_dev_list);
	INIT_LIST_HEAD(&us_ibdev->ctx_list);

	us_ibdev->pdev = dev;
	us_ibdev->netdev = pci_get_drvdata(dev);
	us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
	us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
	us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
	us_ibdev->ib_dev.dev.parent = &dev->dev;

	us_ibdev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);

	ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);

	rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);

	ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
	if (ret)
		goto err_fwd_dealloc;

	dma_set_max_seg_size(&dev->dev, SZ_2G);
	if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", &dev->dev))
		goto err_fwd_dealloc;

	usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
	usnic_fwd_set_mac(us_ibdev->ufdev, us_ibdev->netdev->dev_addr);
	if (netif_carrier_ok(us_ibdev->netdev))
		usnic_fwd_carrier_up(us_ibdev->ufdev);

	rcu_read_lock();
	ind = __in_dev_get_rcu(netdev);
	if (ind) {
		const struct in_ifaddr *ifa;

		ifa = rcu_dereference(ind->ifa_list);
		if (ifa)
			usnic_fwd_add_ipaddr(us_ibdev->ufdev, ifa->ifa_address);
	}
	rcu_read_unlock();

	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
			    us_ibdev->ufdev->inaddr, &gid.raw[0]);
	memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
	       sizeof(gid.global.interface_id));
	kref_init(&us_ibdev->vf_cnt);

	usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
		   dev_name(&us_ibdev->ib_dev.dev),
		   netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
		   us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
	return us_ibdev;

err_fwd_dealloc:
	usnic_fwd_dev_free(us_ibdev->ufdev);
err_dealloc:
	usnic_err("failed -- deallocing device\n");
	ib_dealloc_device(&us_ibdev->ib_dev);
	return NULL;
}

static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
	usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
	usnic_ib_sysfs_unregister_usdev(us_ibdev);
	usnic_fwd_dev_free(us_ibdev->ufdev);
	ib_unregister_device(&us_ibdev->ib_dev);
	ib_dealloc_device(&us_ibdev->ib_dev);
}

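/*
 * kref release callback for a PF's VF count: once the last VF is gone,
 * remove the PF's ib_device from the global list and tear it down.
 */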
static void usnic_ib_undiscover_pf(struct kref *kref)
{
	struct usnic_ib_dev *us_ibdev, *tmp;
	struct pci_dev *dev;
	bool found = false;

	dev = container_of(kref, struct usnic_ib_dev, vf_cnt)->pdev;
	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry_safe(us_ibdev, tmp,
				 &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == dev) {
			list_del(&us_ibdev->ib_dev_link);
			found = true;
			break;
		}
	}

	mutex_unlock(&usnic_ib_ibdev_list_lock);
	if (found)
		usnic_ib_device_remove(us_ibdev);
	else
		WARN(1, "Failed to remove PF %s\n", pci_name(dev));
}

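/*
 * Find the PF ib_device that owns the given VF's vnic, creating and
 * registering it on first use. Takes a reference on the PF's VF count;
 * returns the PF or an ERR_PTR on failure.
 */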
static struct usnic_ib_dev *usnic_ib_discover_pf(struct usnic_vnic *vnic)
{
	struct usnic_ib_dev *us_ibdev;
	struct pci_dev *parent_pci, *vf_pci;
	int err;

	vf_pci = usnic_vnic_get_pdev(vnic);
	parent_pci = pci_physfn(vf_pci);

	BUG_ON(!parent_pci);

	mutex_lock(&usnic_ib_ibdev_list_lock);
	list_for_each_entry(us_ibdev, &usnic_ib_ibdev_list, ib_dev_link) {
		if (us_ibdev->pdev == parent_pci) {
			kref_get(&us_ibdev->vf_cnt);
			goto out;
		}
	}

	us_ibdev = usnic_ib_device_add(parent_pci);
	if (IS_ERR_OR_NULL(us_ibdev)) {
		us_ibdev = us_ibdev ? us_ibdev : ERR_PTR(-EFAULT);
		goto out;
	}

	err = usnic_ib_sysfs_register_usdev(us_ibdev);
	if (err) {
		usnic_ib_device_remove(us_ibdev);
		us_ibdev = ERR_PTR(err);
		goto out;
	}

	list_add(&us_ibdev->ib_dev_link, &usnic_ib_ibdev_list);
out:
	mutex_unlock(&usnic_ib_ibdev_list_lock);
	return us_ibdev;
}
/* End of PF discovery section */

/* Start of PCI section */

static const struct pci_device_id usnic_ib_pci_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_VIC_USPACE_NIC)},
	{0,}
};

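/*
 * Probe a usNIC VF PCI function: enable and map the device, allocate its
 * vnic, attach it to (or create) the owning PF ib_device, and record the
 * per-VF resource counts on the PF.
 */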
static int usnic_ib_pci_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	int err;
	struct usnic_ib_dev *pf;
	struct usnic_ib_vf *vf;
	enum usnic_vnic_res_type res_type;

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		usnic_err("Failed to enable %s with err %d\n",
			  pci_name(pdev), err);
		goto out_clean_vf;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		usnic_err("Failed to request region for %s with err %d\n",
			  pci_name(pdev), err);
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, vf);

	vf->vnic = usnic_vnic_alloc(pdev);
	if (IS_ERR_OR_NULL(vf->vnic)) {
		err = vf->vnic ? PTR_ERR(vf->vnic) : -ENOMEM;
		usnic_err("Failed to alloc vnic for %s with err %d\n",
			  pci_name(pdev), err);
		goto out_release_regions;
	}

	pf = usnic_ib_discover_pf(vf->vnic);
	if (IS_ERR_OR_NULL(pf)) {
		usnic_err("Failed to discover pf of vnic %s with err %ld\n",
			  pci_name(pdev), PTR_ERR(pf));
		err = pf ? PTR_ERR(pf) : -EFAULT;
		goto out_clean_vnic;
	}

	vf->pf = pf;
	spin_lock_init(&vf->lock);
	mutex_lock(&pf->usdev_lock);
	list_add_tail(&vf->link, &pf->vf_dev_list);
	/*
	 * Save max settings (will be same for each VF, easier to re-write
	 * than to say "if (!set) { set_values(); set = 1; }").
	 */
	for (res_type = USNIC_VNIC_RES_TYPE_EOL + 1;
	     res_type < USNIC_VNIC_RES_TYPE_MAX;
	     res_type++) {
		pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic,
							      res_type);
	}

	mutex_unlock(&pf->usdev_lock);

	usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
		   dev_name(&pf->ib_dev.dev));
	usnic_ib_log_vf(vf);
	return 0;

out_clean_vnic:
	usnic_vnic_free(vf->vnic);
out_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out_clean_vf:
	kfree(vf);
	return err;
}

static void usnic_ib_pci_remove(struct pci_dev *pdev)
{
	struct usnic_ib_vf *vf = pci_get_drvdata(pdev);
	struct usnic_ib_dev *pf = vf->pf;

	mutex_lock(&pf->usdev_lock);
	list_del(&vf->link);
	mutex_unlock(&pf->usdev_lock);

	kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf);
	usnic_vnic_free(vf->vnic);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(vf);

	usnic_info("Removed VF %s\n", pci_name(pdev));
}

/* PCI driver entry points */
static struct pci_driver usnic_ib_pci_driver = {
	.name = DRV_NAME,
	.id_table = usnic_ib_pci_ids,
	.probe = usnic_ib_pci_probe,
	.remove = usnic_ib_pci_remove,
};
/* End of PCI section */

/* Start of module section */
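/*
 * Module init: initialize the userspace memory (uiom) layer, register the
 * PCI driver and the netdev/inetaddr notifiers, bring up the transport
 * layer, and create the debugfs entries. Unwinds in reverse order on error.
 */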
static int __init usnic_ib_init(void)
{
	int err;

	printk_once(KERN_INFO "%s", usnic_version);

	err = usnic_uiom_init(DRV_NAME);
	if (err) {
		usnic_err("Unable to initialize umem with err %d\n", err);
		return err;
	}

	err = pci_register_driver(&usnic_ib_pci_driver);
	if (err) {
		usnic_err("Unable to register with PCI\n");
		goto out_umem_fini;
	}

	err = register_netdevice_notifier(&usnic_ib_netdevice_notifier);
	if (err) {
		usnic_err("Failed to register netdev notifier\n");
		goto out_pci_unreg;
	}

	err = register_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	if (err) {
		usnic_err("Failed to register inet addr notifier\n");
		goto out_unreg_netdev_notifier;
	}

	err = usnic_transport_init();
	if (err) {
		usnic_err("Failed to initialize transport\n");
		goto out_unreg_inetaddr_notifier;
	}

	usnic_debugfs_init();

	return 0;

out_unreg_inetaddr_notifier:
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
out_unreg_netdev_notifier:
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
out_pci_unreg:
	pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini:

	return err;
}

static void __exit usnic_ib_destroy(void)
{
	usnic_dbg("\n");
	usnic_debugfs_exit();
	usnic_transport_fini();
	unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
	unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
	pci_unregister_driver(&usnic_ib_pci_driver);
}

MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");
MODULE_AUTHOR("Upinder Malhi <umalhi@cisco.com>");
MODULE_LICENSE("Dual BSD/GPL");
module_param(usnic_log_lvl, uint, S_IRUGO | S_IWUSR);
module_param(usnic_ib_share_vf, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usnic_log_lvl, " Off=0, Err=1, Info=2, Debug=3");
MODULE_PARM_DESC(usnic_ib_share_vf, "Off=0, On=1 VF sharing amongst QPs");
MODULE_DEVICE_TABLE(pci, usnic_ib_pci_ids);

module_init(usnic_ib_init);
module_exit(usnic_ib_destroy);
/* End of module section */