/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
25*4882a593Smuzhiyun * * Redistributions in binary form must reproduce the above copy
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * PCIe NTB Network Linux driver
 *
 * Contact Information:
 * Jon Mason <jon.mason@intel.com>
 */
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ntb.h>
#include <linux/ntb_transport.h>

#define NTB_NETDEV_VER	"0.7"

MODULE_DESCRIPTION(KBUILD_MODNAME);
MODULE_VERSION(NTB_NETDEV_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

/* Time in usecs for tx resource reaper */
static unsigned int tx_time = 1;

/* Number of descriptors to free before resuming tx */
static unsigned int tx_start = 10;

/* Number of descriptors still available before stop upper layer tx */
static unsigned int tx_stop = 5;
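
/*
 * tx_stop < tx_start provides hysteresis for tx flow control: the queue is
 * stopped once fewer than tx_stop descriptors are free and is only resumed
 * after at least tx_start descriptors are free again, so it does not bounce
 * between stopped and started on every completion.
 */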

struct ntb_netdev {
	struct pci_dev *pdev;
	struct net_device *ndev;
	struct ntb_transport_qp *qp;
	struct timer_list tx_timer;
};

#define	NTB_TX_TIMEOUT_MS	1000
#define	NTB_RXQ_SIZE		100

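/*
 * Transport link event callback: raise the carrier only when both the event
 * reports the link up and the transport queue itself reports an active link;
 * a down event drops the carrier.
 */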
static void ntb_netdev_event_handler(void *data, int link_is_up)
{
	struct net_device *ndev = data;
	struct ntb_netdev *dev = netdev_priv(ndev);

	netdev_dbg(ndev, "Event %x, Link %x\n", link_is_up,
		   ntb_transport_link_query(dev->qp));

	if (link_is_up) {
		if (ntb_transport_link_query(dev->qp))
			netif_carrier_on(ndev);
	} else {
		netif_carrier_off(ndev);
	}
}

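/*
 * Rx completion callback: hand the completed skb to the network stack and
 * post a freshly allocated buffer back to the transport. On a length error
 * the same skb is re-posted instead of being delivered.
 */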
static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	int rc;

	skb = data;
	if (!skb)
		return;

	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);

	if (len < 0) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_length_errors++;
		goto enqueue_again;
	}

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ndev);
	skb->ip_summed = CHECKSUM_NONE;

	if (netif_rx(skb) == NET_RX_DROP) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	} else {
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
	}

	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
	if (!skb) {
		ndev->stats.rx_errors++;
		ndev->stats.rx_frame_errors++;
		return;
	}

enqueue_again:
	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
	if (rc) {
		dev_kfree_skb(skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_fifo_errors++;
	}
}

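/*
 * Stop the tx queue, then re-check the free descriptor count. The barrier
 * pairs with the one in the tx completion path, so a completion racing with
 * the stop either sees the stopped queue and wakes it, or is observed here
 * and the queue is restarted immediately; otherwise the reaper timer is
 * armed to restart it later.
 */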
static int __ntb_netdev_maybe_stop_tx(struct net_device *netdev,
				      struct ntb_transport_qp *qp, int size)
{
	struct ntb_netdev *dev = netdev_priv(netdev);

	netif_stop_queue(netdev);
	/* Make sure to see the latest value of ntb_transport_tx_free_entry()
	 * since the queue was last started.
	 */
	smp_mb();

	if (likely(ntb_transport_tx_free_entry(qp) < size)) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
		return -EBUSY;
	}

	netif_start_queue(netdev);
	return 0;
}

static int ntb_netdev_maybe_stop_tx(struct net_device *ndev,
				    struct ntb_transport_qp *qp, int size)
{
	if (netif_queue_stopped(ndev) ||
	    (ntb_transport_tx_free_entry(qp) >= size))
		return 0;

	return __ntb_netdev_maybe_stop_tx(ndev, qp, size);
}

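/*
 * Tx completion callback: account the transmitted (or aborted) skb, free it,
 * and wake the tx queue once at least tx_start descriptors are free again.
 */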
static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
				  void *data, int len)
{
	struct net_device *ndev = qp_data;
	struct sk_buff *skb;
	struct ntb_netdev *dev = netdev_priv(ndev);

	skb = data;
	if (!skb || !ndev)
		return;

	if (len > 0) {
		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += skb->len;
	} else {
		ndev->stats.tx_errors++;
		ndev->stats.tx_aborted_errors++;
	}

	dev_kfree_skb(skb);

	if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

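/*
 * Queue an skb on the transport. The queue is throttled before and after the
 * enqueue so the transport ring is not overrun; if the enqueue still fails,
 * the error counters are updated and NETDEV_TX_BUSY is returned.
 */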
static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	int rc;

	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
	if (rc)
		goto err;

	/* check for next submit */
	ntb_netdev_maybe_stop_tx(ndev, dev->qp, tx_stop);

	return NETDEV_TX_OK;

err:
	ndev->stats.tx_dropped++;
	ndev->stats.tx_errors++;
	return NETDEV_TX_BUSY;
}

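/*
 * Tx resource reaper: while too few descriptors are free, keep re-arming the
 * timer; once enough are available, wake the stopped tx queue.
 */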
static void ntb_netdev_tx_timer(struct timer_list *t)
{
	struct ntb_netdev *dev = from_timer(dev, t, tx_timer);
	struct net_device *ndev = dev->ndev;

	if (ntb_transport_tx_free_entry(dev->qp) < tx_stop) {
		mod_timer(&dev->tx_timer, jiffies + usecs_to_jiffies(tx_time));
	} else {
		/* Make sure anybody stopping the queue after this sees the new
		 * value of ntb_transport_tx_free_entry()
		 */
		smp_mb();
		if (netif_queue_stopped(ndev))
			netif_wake_queue(ndev);
	}
}

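/*
 * ndo_open: prefill the transport rx ring with NTB_RXQ_SIZE MTU-sized
 * buffers, set up the tx reaper timer, then bring the transport link up and
 * start the tx queue.
 */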
static int ntb_netdev_open(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int rc, i, len;

	/* Add some empty rx bufs */
	for (i = 0; i < NTB_RXQ_SIZE; i++) {
		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
		if (!skb) {
			rc = -ENOMEM;
			goto err;
		}

		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
					      ndev->mtu + ETH_HLEN);
		if (rc) {
			dev_kfree_skb(skb);
			goto err;
		}
	}

	timer_setup(&dev->tx_timer, ntb_netdev_tx_timer, 0);

	netif_carrier_off(ndev);
	ntb_transport_link_up(dev->qp);
	netif_start_queue(ndev);

	return 0;

err:
	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);
	return rc;
}

static int ntb_netdev_close(struct net_device *ndev)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len;

	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	del_timer_sync(&dev->tx_timer);

	return 0;
}

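/*
 * ndo_change_mtu: the MTU is bounded by the transport's maximum payload size
 * minus the Ethernet header. If the device is running, the link is taken
 * down and, when the MTU grows, the posted rx buffers are drained and
 * re-posted at the new size before the link is brought back up.
 */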
static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct ntb_netdev *dev = netdev_priv(ndev);
	struct sk_buff *skb;
	int len, rc;

	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
		return -EINVAL;

	if (!netif_running(ndev)) {
		ndev->mtu = new_mtu;
		return 0;
	}

	/* Bring down the link and dispose of posted rx entries */
	ntb_transport_link_down(dev->qp);

	if (ndev->mtu < new_mtu) {
		int i;

		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
			dev_kfree_skb(skb);

		for (; i; i--) {
			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
			if (!skb) {
				rc = -ENOMEM;
				goto err;
			}

			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
						      new_mtu + ETH_HLEN);
			if (rc) {
				dev_kfree_skb(skb);
				goto err;
			}
		}
	}

	ndev->mtu = new_mtu;

	ntb_transport_link_up(dev->qp);

	return 0;

err:
	ntb_transport_link_down(dev->qp);

	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
		dev_kfree_skb(skb);

	netdev_err(ndev, "Error changing MTU, device inoperable\n");
	return rc;
}

static const struct net_device_ops ntb_netdev_ops = {
	.ndo_open = ntb_netdev_open,
	.ndo_stop = ntb_netdev_close,
	.ndo_start_xmit = ntb_netdev_start_xmit,
	.ndo_change_mtu = ntb_netdev_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
};

static void ntb_get_drvinfo(struct net_device *ndev,
			    struct ethtool_drvinfo *info)
{
	struct ntb_netdev *dev = netdev_priv(ndev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
}

static int ntb_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Backplane);

	cmd->base.speed = SPEED_UNKNOWN;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_ENABLE;

	return 0;
}

static const struct ethtool_ops ntb_ethtool_ops = {
	.get_drvinfo = ntb_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = ntb_get_link_ksettings,
};

static const struct ntb_queue_handlers ntb_netdev_handlers = {
	.tx_handler = ntb_netdev_tx_handler,
	.rx_handler = ntb_netdev_rx_handler,
	.event_handler = ntb_netdev_event_handler,
};

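/*
 * Probe an NTB transport client device: allocate the Ethernet device with a
 * random MAC address, create the transport queue, and size the default MTU
 * to the transport's maximum payload minus the Ethernet header.
 */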
static int ntb_netdev_probe(struct device *client_dev)
{
	struct ntb_dev *ntb;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct ntb_netdev *dev;
	int rc;

	ntb = dev_ntb(client_dev->parent);
	pdev = ntb->pdev;
	if (!pdev)
		return -ENODEV;

	ndev = alloc_etherdev(sizeof(*dev));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, client_dev);

	dev = netdev_priv(ndev);
	dev->ndev = ndev;
	dev->pdev = pdev;
	ndev->features = NETIF_F_HIGHDMA;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	ndev->hw_features = ndev->features;
	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);

	eth_random_addr(ndev->perm_addr);
	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);

	ndev->netdev_ops = &ntb_netdev_ops;
	ndev->ethtool_ops = &ntb_ethtool_ops;

	ndev->min_mtu = 0;
	ndev->max_mtu = ETH_MAX_MTU;

	dev->qp = ntb_transport_create_queue(ndev, client_dev,
					     &ntb_netdev_handlers);
	if (!dev->qp) {
		rc = -EIO;
		goto err;
	}

	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;

	rc = register_netdev(ndev);
	if (rc)
		goto err1;

	dev_set_drvdata(client_dev, ndev);
	dev_info(&pdev->dev, "%s created\n", ndev->name);
	return 0;

err1:
	ntb_transport_free_queue(dev->qp);
err:
	free_netdev(ndev);
	return rc;
}

static void ntb_netdev_remove(struct device *client_dev)
{
	struct net_device *ndev = dev_get_drvdata(client_dev);
	struct ntb_netdev *dev = netdev_priv(ndev);

	unregister_netdev(ndev);
	ntb_transport_free_queue(dev->qp);
	free_netdev(ndev);
}

static struct ntb_transport_client ntb_netdev_client = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.probe = ntb_netdev_probe,
	.remove = ntb_netdev_remove,
};

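/*
 * Module init: register the transport client device name first, then the
 * client driver itself, unwinding the device registration if the client
 * registration fails.
 */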
static int __init ntb_netdev_init_module(void)
{
	int rc;

	rc = ntb_transport_register_client_dev(KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = ntb_transport_register_client(&ntb_netdev_client);
	if (rc) {
		ntb_transport_unregister_client_dev(KBUILD_MODNAME);
		return rc;
	}

	return 0;
}
module_init(ntb_netdev_init_module);

static void __exit ntb_netdev_exit_module(void)
{
	ntb_transport_unregister_client(&ntb_netdev_client);
	ntb_transport_unregister_client_dev(KBUILD_MODNAME);
}
module_exit(ntb_netdev_exit_module);