// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 *
 */

/*
 * This file contains HFI1 support for netdev RX functionality
 */

#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>

static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
				  struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops;
	struct hfi1_devdata *dd = priv->dd;
	int ret;

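	/* Use the netdev (NAPI) receive handlers for this context */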
	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
	uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	clear_rcvhdrtail(uctxt);

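	/*
	 * Leave the context and its interrupt disabled here; they are
	 * enabled later when the queues are brought up (see enable_queues()).
	 */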
	rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;

	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}

static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
				     struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);
	/* Netdev contexts are always NO_RDMA_RTAIL */
	uctxt->fast_handler = handle_receive_interrupt_napi_fp;
	uctxt->slow_handler = handle_receive_interrupt_napi_sp;
	hfi1_set_seq_cnt(uctxt, 1);
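	/* Mark this as a VNIC/netdev receive context rather than a PSM/verbs one */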
	uctxt->is_vnic = true;

	hfi1_stats.sps_ctxts++;

	dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
	*ctxt = uctxt;

	return 0;
}

static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
					struct hfi1_ctxtdata *uctxt)
{
	flush_wc();

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

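	/* CCE_NUM_MSIX_VECTORS doubles as the "no MSI-X vector assigned" marker */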
	if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}

static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
				  struct hfi1_ctxtdata **ctxt)
{
	int rc;
	struct hfi1_devdata *dd = priv->dd;

	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
		*ctxt = NULL;
	}

	return rc;
}

/**
 * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
 * @dd: device on which to allocate netdev contexts
 * @available_contexts: count of available receive contexts
 * @cpu_mask: mask of possible cpus to include for contexts
 *
 * Return: count of physical cores on a node or the remaining available recv
 * contexts for netdev recv context usage, up to the maximum of
 * HFI1_MAX_NETDEV_CTXTS.
 * A value of 0 can be returned when acceleration is explicitly turned off,
 * when a memory allocation error occurs, or when there are no available
 * contexts.
 *
 */
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask)
{
	cpumask_var_t node_cpu_mask;
	unsigned int available_cpus;

	if (!HFI1_CAP_IS_KSET(AIP))
		return 0;

	/* Always give user contexts priority over netdev contexts */
	if (available_contexts == 0) {
		dd_dev_info(dd, "No receive contexts available for netdevs.\n");
		return 0;
	}

	if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
		dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
		return 0;
	}

	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

	available_cpus = cpumask_weight(node_cpu_mask);

	free_cpumask_var(node_cpu_mask);

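	/*
	 * At most one context per CPU on the node, bounded by the remaining
	 * receive contexts and the driver maximum.
	 */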
	return min3(available_cpus, available_contexts,
		    (u32)HFI1_MAX_NETDEV_CTXTS);
}

static int hfi1_netdev_rxq_init(struct net_device *dev)
{
	int i;
	int rc;
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
	struct hfi1_devdata *dd = priv->dd;

	priv->num_rx_q = dd->num_netdev_contexts;
	priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
				 GFP_KERNEL, dd->node);

	if (!priv->rxq) {
		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;

		hfi1_rcd_get(rxq->rcd);
		rxq->priv = priv;
		rxq->rcd->napi = &rxq->napi;
		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
			    i, rxq->rcd->ctxt);
		/*
		 * Disable BUSY_POLL on this NAPI as this is not supported
		 * right now.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
		netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
		rc = msix_netdev_request_rcd_irq(rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;
	}

	return 0;

bail_context_irq_failure:
	dd_dev_err(dd, "Unable to allot receive context\n");
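	/*
	 * Unwind from the current index down: the failing entry may already
	 * have a context allocated (IRQ request failure), hence the rcd check.
	 */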
	for (; i >= 0; i--) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		if (rxq->rcd) {
			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
			hfi1_rcd_put(rxq->rcd);
			rxq->rcd = NULL;
		}
	}
	kfree(priv->rxq);
	priv->rxq = NULL;

	return rc;
}

static void hfi1_netdev_rxq_deinit(struct net_device *dev)
{
	int i;
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
	struct hfi1_devdata *dd = priv->dd;

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		netif_napi_del(&rxq->napi);
		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
		hfi1_rcd_put(rxq->rcd);
		rxq->rcd = NULL;
	}

	kfree(priv->rxq);
	priv->rxq = NULL;
	priv->num_rx_q = 0;
}

static void enable_queues(struct hfi1_netdev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
		hfi1_rcvctrl(priv->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}

static void disable_queues(struct hfi1_netdev_priv *priv)
{
	int i;

	msix_netdev_synchronize_irq(priv->dd);

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

		/* wait for napi if it was scheduled */
		hfi1_rcvctrl(priv->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}

/**
 * hfi1_netdev_rx_init - Increments netdevs counter. When called the first
 * time, it allocates receive queue data and calls netif_napi_add
 * for each queue.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
	int res;

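	/* Only the first netdev coming up initializes the shared RX queues */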
	if (atomic_fetch_inc(&priv->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	init_dummy_netdev(dd->dummy_netdev);
	res = hfi1_netdev_rxq_init(dd->dummy_netdev);
	mutex_unlock(&hfi1_mutex);
	return res;
}

/**
 * hfi1_netdev_rx_destroy - Decrements netdevs counter; when it reaches 0,
 * napi is deleted and the receive queue memory is freed.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	/* destroy the RX queues only if it is the last netdev going away */
	if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(dd->dummy_netdev);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}

/**
 * hfi1_netdev_alloc - Allocates netdev and private data. It is required
 * because RMT index and MSI-X interrupt can be set only
 * during driver initialization.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_alloc(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;
	const int netdev_size = sizeof(*dd->dummy_netdev) +
		sizeof(struct hfi1_netdev_priv);

	dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
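	/* Allocate the dummy netdev with the private data area appended to it */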
	dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);

	if (!dd->dummy_netdev)
		return -ENOMEM;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	priv->dd = dd;
	xa_init(&priv->dev_tbl);
	atomic_set(&priv->enabled, 0);
	atomic_set(&priv->netdevs, 0);

	return 0;
}

void hfi1_netdev_free(struct hfi1_devdata *dd)
{
	if (dd->dummy_netdev) {
		dd_dev_info(dd, "hfi1 netdev freed\n");
		kfree(dd->dummy_netdev);
		dd->dummy_netdev = NULL;
	}
}

/**
 * hfi1_netdev_enable_queues - This is the napi enable function.
 * It enables the napi objects associated with the queues.
 * Each caller increments an atomic counter; only the first enable call
 * actually enables the queues. The disable function decrements the counter
 * and, when it reaches 0, calls napi_disable for every queue.
 *
 * @dd: hfi1 dev data
 */
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;

	if (!dd->dummy_netdev)
		return;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	if (atomic_fetch_inc(&priv->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	enable_queues(priv);
	mutex_unlock(&hfi1_mutex);
}

void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;

	if (!dd->dummy_netdev)
		return;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	if (atomic_dec_if_positive(&priv->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	disable_queues(priv);
	mutex_unlock(&hfi1_mutex);
}

/**
 * hfi1_netdev_add_data - Registers data with a unique identifier so that
 * it can be requested later; this is needed by the VNIC and IPoIB VLAN
 * implementations.
 * The entry is stored in an xarray, which provides its own locking.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 * @data: data to be associated with index
 */
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
}

/**
 * hfi1_netdev_remove_data - Removes data with the previously given id.
 * Returns a reference to the removed entry.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_erase(&priv->dev_tbl, id);
}

/**
 * hfi1_netdev_get_data - Gets data with given id
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_load(&priv->dev_tbl, id);
}

/**
 * hfi1_netdev_get_first_data - Gets the first entry with a greater or equal id.
 *
 * @dd: hfi1 dev data
 * @start_id: requested integer id up to INT_MAX; updated to the id of the
 *            entry found
 */
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
	unsigned long index = *start_id;
	void *ret;

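	/* xa_find() returns the first present entry at or above index and updates index */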
	ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
	*start_id = (int)index;
	return ret;
}