1*4882a593Smuzhiyun /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright(c) 2020 Intel Corporation.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #ifndef HFI1_NETDEV_H
8*4882a593Smuzhiyun #define HFI1_NETDEV_H
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include "hfi.h"
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include <linux/netdevice.h>
13*4882a593Smuzhiyun #include <linux/xarray.h>
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun /**
16*4882a593Smuzhiyun * struct hfi1_netdev_rxq - Receive Queue for HFI
17*4882a593Smuzhiyun * dummy netdev. Both IPoIB and VNIC netdevices will be working on
18*4882a593Smuzhiyun * top of this device.
19*4882a593Smuzhiyun * @napi: napi object
20*4882a593Smuzhiyun * @priv: ptr to netdev_priv
21*4882a593Smuzhiyun * @rcd: ptr to receive context data
22*4882a593Smuzhiyun */
23*4882a593Smuzhiyun struct hfi1_netdev_rxq {
24*4882a593Smuzhiyun struct napi_struct napi;
25*4882a593Smuzhiyun struct hfi1_netdev_priv *priv;
26*4882a593Smuzhiyun struct hfi1_ctxtdata *rcd;
27*4882a593Smuzhiyun };
28*4882a593Smuzhiyun
/*
 * Number of netdev contexts used. Ensure it is less than or equal to
 * max queues supported by VNIC (HFI1_VNIC_MAX_QUEUE).
 */
#define HFI1_MAX_NETDEV_CTXTS   8

/* Number of NETDEV RSM entries */
#define NUM_NETDEV_MAP_ENTRIES HFI1_MAX_NETDEV_CTXTS
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /**
39*4882a593Smuzhiyun * struct hfi1_netdev_priv: data required to setup and run HFI netdev.
40*4882a593Smuzhiyun * @dd: hfi1_devdata
41*4882a593Smuzhiyun * @rxq: pointer to dummy netdev receive queues.
42*4882a593Smuzhiyun * @num_rx_q: number of receive queues
43*4882a593Smuzhiyun * @rmt_index: first free index in RMT Array
44*4882a593Smuzhiyun * @msix_start: first free MSI-X interrupt vector.
45*4882a593Smuzhiyun * @dev_tbl: netdev table for unique identifier VNIC and IPoIb VLANs.
46*4882a593Smuzhiyun * @enabled: atomic counter of netdevs enabling receive queues.
47*4882a593Smuzhiyun * When 0 NAPI will be disabled.
48*4882a593Smuzhiyun * @netdevs: atomic counter of netdevs using dummy netdev.
49*4882a593Smuzhiyun * When 0 receive queues will be freed.
50*4882a593Smuzhiyun */
51*4882a593Smuzhiyun struct hfi1_netdev_priv {
52*4882a593Smuzhiyun struct hfi1_devdata *dd;
53*4882a593Smuzhiyun struct hfi1_netdev_rxq *rxq;
54*4882a593Smuzhiyun int num_rx_q;
55*4882a593Smuzhiyun int rmt_start;
56*4882a593Smuzhiyun struct xarray dev_tbl;
57*4882a593Smuzhiyun /* count of enabled napi polls */
58*4882a593Smuzhiyun atomic_t enabled;
59*4882a593Smuzhiyun /* count of netdevs on top */
60*4882a593Smuzhiyun atomic_t netdevs;
61*4882a593Smuzhiyun };
62*4882a593Smuzhiyun
63*4882a593Smuzhiyun static inline
hfi1_netdev_priv(struct net_device * dev)64*4882a593Smuzhiyun struct hfi1_netdev_priv *hfi1_netdev_priv(struct net_device *dev)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun return (struct hfi1_netdev_priv *)&dev[1];
67*4882a593Smuzhiyun }
68*4882a593Smuzhiyun
69*4882a593Smuzhiyun static inline
hfi1_netdev_ctxt_count(struct hfi1_devdata * dd)70*4882a593Smuzhiyun int hfi1_netdev_ctxt_count(struct hfi1_devdata *dd)
71*4882a593Smuzhiyun {
72*4882a593Smuzhiyun struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun return priv->num_rx_q;
75*4882a593Smuzhiyun }
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun static inline
hfi1_netdev_get_ctxt(struct hfi1_devdata * dd,int ctxt)78*4882a593Smuzhiyun struct hfi1_ctxtdata *hfi1_netdev_get_ctxt(struct hfi1_devdata *dd, int ctxt)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun return priv->rxq[ctxt].rcd;
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun static inline
hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata * dd)86*4882a593Smuzhiyun int hfi1_netdev_get_free_rmt_idx(struct hfi1_devdata *dd)
87*4882a593Smuzhiyun {
88*4882a593Smuzhiyun struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun return priv->rmt_start;
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun static inline
hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata * dd,int rmt_idx)94*4882a593Smuzhiyun void hfi1_netdev_set_free_rmt_idx(struct hfi1_devdata *dd, int rmt_idx)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun priv->rmt_start = rmt_idx;
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
102*4882a593Smuzhiyun struct cpumask *cpu_mask);
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun void hfi1_netdev_enable_queues(struct hfi1_devdata *dd);
105*4882a593Smuzhiyun void hfi1_netdev_disable_queues(struct hfi1_devdata *dd);
106*4882a593Smuzhiyun int hfi1_netdev_rx_init(struct hfi1_devdata *dd);
107*4882a593Smuzhiyun int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd);
108*4882a593Smuzhiyun int hfi1_netdev_alloc(struct hfi1_devdata *dd);
109*4882a593Smuzhiyun void hfi1_netdev_free(struct hfi1_devdata *dd);
110*4882a593Smuzhiyun int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data);
111*4882a593Smuzhiyun void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id);
112*4882a593Smuzhiyun void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id);
113*4882a593Smuzhiyun void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id);
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun /* chip.c */
116*4882a593Smuzhiyun int hfi1_netdev_rx_napi(struct napi_struct *napi, int budget);
117*4882a593Smuzhiyun
118*4882a593Smuzhiyun #endif /* HFI1_NETDEV_H */
119