// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/qed/qede_rdma.h>
#include "qede.h"

static struct qedr_driver *qedr_drv;
static LIST_HEAD(qedr_dev_list);
static DEFINE_MUTEX(qedr_dev_list_lock);

bool qede_rdma_supported(struct qede_dev *dev)
{
        return dev->dev_info.common.rdma_supported;
}

static void _qede_rdma_dev_add(struct qede_dev *edev)
{
        if (!qedr_drv)
                return;

        /* Leftovers from previous error recovery */
        edev->rdma_info.exp_recovery = false;
        edev->rdma_info.qedr_dev = qedr_drv->add(edev->cdev, edev->pdev,
                                                 edev->ndev);
}
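
/* Set up the per-device RDMA event machinery: the list of reusable event
 * nodes, the reference count and completion used to synchronize teardown,
 * and the single-threaded workqueue that runs the deferred event handler.
 */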
static int qede_rdma_create_wq(struct qede_dev *edev)
{
        INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
        kref_init(&edev->rdma_info.refcnt);
        init_completion(&edev->rdma_info.event_comp);

        edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
        if (!edev->rdma_info.rdma_wq) {
                DP_NOTICE(edev, "qedr: Could not create workqueue\n");
                return -ENOMEM;
        }

        return 0;
}
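
/* Flush the workqueue, then cancel and free every event node on the list */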
static void qede_rdma_cleanup_event(struct qede_dev *edev)
{
        struct list_head *head = &edev->rdma_info.rdma_event_list;
        struct qede_rdma_event_work *event_node;

        flush_workqueue(edev->rdma_info.rdma_wq);
        while (!list_empty(head)) {
                event_node = list_entry(head->next, struct qede_rdma_event_work,
                                        list);
                cancel_work_sync(&event_node->work);
                list_del(&event_node->list);
                kfree(event_node);
        }
}

static void qede_rdma_complete_event(struct kref *ref)
{
        struct qede_rdma_dev *rdma_dev =
                container_of(ref, struct qede_rdma_dev, refcnt);

        /* no more events will be added after this */
        complete(&rdma_dev->event_comp);
}

static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
        /* Avoid race with add_event flow, make sure it finishes before
         * we start accessing the list and cleaning up the work
         */
        kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
        wait_for_completion(&edev->rdma_info.event_comp);

        qede_rdma_cleanup_event(edev);
        destroy_workqueue(edev->rdma_info.rdma_wq);
        edev->rdma_info.rdma_wq = NULL;
}
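
/* Register a qede device with the qedr driver, if one is loaded. When this
 * is part of an error-recovery sequence nothing is created here, since qedr
 * was never fully stopped.
 */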
int qede_rdma_dev_add(struct qede_dev *edev, bool recovery)
{
        int rc;

        if (!qede_rdma_supported(edev))
                return 0;

        /* Cannot start qedr while recovering since it wasn't fully stopped */
        if (recovery)
                return 0;

        rc = qede_rdma_create_wq(edev);
        if (rc)
                return rc;

        INIT_LIST_HEAD(&edev->rdma_info.entry);
        mutex_lock(&qedr_dev_list_lock);
        list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
        _qede_rdma_dev_add(edev);
        mutex_unlock(&qedr_dev_list_lock);

        return rc;
}

static void _qede_rdma_dev_remove(struct qede_dev *edev)
{
        if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
                qedr_drv->remove(edev->rdma_info.qedr_dev);
}
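
/* Unregister a qede device. A regular removal tears down the workqueue,
 * detaches the qedr instance and drops the device from the global list.
 * During recovery the qedr instance is removed at most once, the device is
 * marked accordingly, and the workqueue and list entry are left intact.
 */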
void qede_rdma_dev_remove(struct qede_dev *edev, bool recovery)
{
        if (!qede_rdma_supported(edev))
                return;

        /* Cannot remove qedr while recovering since it wasn't fully stopped */
        if (!recovery) {
                qede_rdma_destroy_wq(edev);
                mutex_lock(&qedr_dev_list_lock);
                if (!edev->rdma_info.exp_recovery)
                        _qede_rdma_dev_remove(edev);
                edev->rdma_info.qedr_dev = NULL;
                list_del(&edev->rdma_info.entry);
                mutex_unlock(&qedr_dev_list_lock);
        } else {
                if (!edev->rdma_info.exp_recovery) {
                        mutex_lock(&qedr_dev_list_lock);
                        _qede_rdma_dev_remove(edev);
                        mutex_unlock(&qedr_dev_list_lock);
                }
                edev->rdma_info.exp_recovery = true;
        }
}

static void _qede_rdma_dev_open(struct qede_dev *edev)
{
        if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
                qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
}

static void qede_rdma_dev_open(struct qede_dev *edev)
{
        if (!qede_rdma_supported(edev))
                return;

        mutex_lock(&qedr_dev_list_lock);
        _qede_rdma_dev_open(edev);
        mutex_unlock(&qedr_dev_list_lock);
}

static void _qede_rdma_dev_close(struct qede_dev *edev)
{
        if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
                qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
}

static void qede_rdma_dev_close(struct qede_dev *edev)
{
        if (!qede_rdma_supported(edev))
                return;

        mutex_lock(&qedr_dev_list_lock);
        _qede_rdma_dev_close(edev);
        mutex_unlock(&qedr_dev_list_lock);
}

static void qede_rdma_dev_shutdown(struct qede_dev *edev)
{
        if (!qede_rdma_supported(edev))
                return;

        mutex_lock(&qedr_dev_list_lock);
        if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
                qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CLOSE);
        mutex_unlock(&qedr_dev_list_lock);
}
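
/* Called by qedr when it loads: attach the driver to every qede device
 * discovered so far and send QEDE_UP for netdevs that are already up.
 */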
int qede_rdma_register_driver(struct qedr_driver *drv)
{
        struct qede_dev *edev;
        u8 qedr_counter = 0;

        mutex_lock(&qedr_dev_list_lock);
        if (qedr_drv) {
                mutex_unlock(&qedr_dev_list_lock);
                return -EINVAL;
        }
        qedr_drv = drv;

        list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
                struct net_device *ndev;

                qedr_counter++;
                _qede_rdma_dev_add(edev);
                ndev = edev->ndev;
                if (netif_running(ndev) && netif_oper_up(ndev))
                        _qede_rdma_dev_open(edev);
        }
        mutex_unlock(&qedr_dev_list_lock);

        pr_notice("qedr: discovered and registered %d RDMA funcs\n",
                  qedr_counter);

        return 0;
}
EXPORT_SYMBOL(qede_rdma_register_driver);
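
/* Called by qedr when it unloads: detach every device that was not already
 * removed by error recovery, then clear the driver pointer.
 */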
void qede_rdma_unregister_driver(struct qedr_driver *drv)
{
        struct qede_dev *edev;

        mutex_lock(&qedr_dev_list_lock);
        list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
                /* If device has experienced recovery it was already removed */
                if (edev->rdma_info.qedr_dev && !edev->rdma_info.exp_recovery)
                        _qede_rdma_dev_remove(edev);
        }
        qedr_drv = NULL;
        mutex_unlock(&qedr_dev_list_lock);
}
EXPORT_SYMBOL(qede_rdma_unregister_driver);

static void qede_rdma_changeaddr(struct qede_dev *edev)
{
        if (!qede_rdma_supported(edev))
                return;

        if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
                qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
}

static void qede_rdma_change_mtu(struct qede_dev *edev)
{
        if (qede_rdma_supported(edev)) {
                if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
                        qedr_drv->notify(edev->rdma_info.qedr_dev,
                                         QEDE_CHANGE_MTU);
        }
}
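
/* Return an event node whose work is not pending, reusing one from the list
 * when possible; otherwise allocate a new node atomically and append it to
 * the list for later reuse.
 */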
static struct qede_rdma_event_work *
qede_rdma_get_free_event_node(struct qede_dev *edev)
{
        struct qede_rdma_event_work *event_node = NULL;
        struct list_head *list_node = NULL;
        bool found = false;

        list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
                event_node = list_entry(list_node, struct qede_rdma_event_work,
                                        list);
                if (!work_pending(&event_node->work)) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                event_node = kzalloc(sizeof(*event_node), GFP_ATOMIC);
                if (!event_node) {
                        DP_NOTICE(edev,
                                  "qedr: Could not allocate memory for rdma work\n");
                        return NULL;
                }
                list_add_tail(&event_node->list,
                              &edev->rdma_info.rdma_event_list);
        }

        return event_node;
}
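
/* Workqueue handler: dispatch a deferred event to the matching notifier */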
static void qede_rdma_handle_event(struct work_struct *work)
{
        struct qede_rdma_event_work *event_node;
        enum qede_rdma_event event;
        struct qede_dev *edev;

        event_node = container_of(work, struct qede_rdma_event_work, work);
        event = event_node->event;
        edev = event_node->ptr;

        switch (event) {
        case QEDE_UP:
                qede_rdma_dev_open(edev);
                break;
        case QEDE_DOWN:
                qede_rdma_dev_close(edev);
                break;
        case QEDE_CLOSE:
                qede_rdma_dev_shutdown(edev);
                break;
        case QEDE_CHANGE_ADDR:
                qede_rdma_changeaddr(edev);
                break;
        case QEDE_CHANGE_MTU:
                qede_rdma_change_mtu(edev);
                break;
        default:
                DP_NOTICE(edev, "Invalid rdma event %d\n", event);
        }
}
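
/* Queue an event for deferred handling. Taking a reference here keeps
 * qede_rdma_destroy_wq() from tearing down the workqueue and event list
 * while the event node is being picked and scheduled.
 */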
static void qede_rdma_add_event(struct qede_dev *edev,
                                enum qede_rdma_event event)
{
        struct qede_rdma_event_work *event_node;

        /* If a recovery was experienced avoid adding the event */
        if (edev->rdma_info.exp_recovery)
                return;

        if (!edev->rdma_info.qedr_dev || !edev->rdma_info.rdma_wq)
                return;

        /* We don't want the cleanup flow to start while we're allocating and
         * scheduling the work
         */
        if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
                return; /* already being destroyed */

        event_node = qede_rdma_get_free_event_node(edev);
        if (!event_node)
                goto out;

        event_node->event = event;
        event_node->ptr = edev;

        INIT_WORK(&event_node->work, qede_rdma_handle_event);
        queue_work(edev->rdma_info.rdma_wq, &event_node->work);

out:
        kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}
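
/* Notification entry points called from the qede core; each queues the
 * corresponding event for the RDMA driver.
 */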
void qede_rdma_dev_event_open(struct qede_dev *edev)
{
        qede_rdma_add_event(edev, QEDE_UP);
}

void qede_rdma_dev_event_close(struct qede_dev *edev)
{
        qede_rdma_add_event(edev, QEDE_DOWN);
}

void qede_rdma_event_changeaddr(struct qede_dev *edev)
{
        qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
}

void qede_rdma_event_change_mtu(struct qede_dev *edev)
{
        qede_rdma_add_event(edev, QEDE_CHANGE_MTU);
}