/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <rdma/ib_cache.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct rdma_ah_attr *attr)
{
	struct ipoib_ah *ah;
	struct ib_ah *vah;

	ah = kmalloc(sizeof(*ah), GFP_KERNEL);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	ah->dev = dev;
	ah->last_send = 0;
	kref_init(&ah->ref);

	vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
	if (IS_ERR(vah)) {
		kfree(ah);
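		/*
		 * vah holds an ERR_PTR()-encoded error, so the cast below
		 * hands the same error code back to the caller via IS_ERR().
		 */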
		ah = (struct ipoib_ah *)vah;
	} else {
		ah->ah = vah;
		ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
	}

	return ah;
}

void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);

	unsigned long flags;

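	/*
	 * Do not destroy the AH here: the HCA may still be using it for
	 * posted sends.  Queue it on dead_ahs and let the AH reaper free
	 * it once the send queue has advanced past ah->last_send.
	 */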
	spin_lock_irqsave(&priv->lock, flags);
	list_add_tail(&ah->list, &priv->dead_ahs);
	spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
				  u64 mapping[IPOIB_UD_RX_SG])
{
	ib_dma_unmap_single(priv->ca, mapping[0],
			    IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
			    DMA_FROM_DEVICE);
}

static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

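	/*
	 * Encode the ring index in the work request ID and tag it with
	 * IPOIB_OP_RECV so the completion handlers can tell receives
	 * apart from sends.
	 */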
	priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
	priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
	priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

	ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
	if (unlikely(ret)) {
		ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
		dev_kfree_skb_any(priv->rx_ring[id].skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}

static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct sk_buff *skb;
	int buf_size;
	u64 *mapping;

	buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

	skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
	if (unlikely(!skb))
		return NULL;

	/*
	 * The IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
	 * 64-byte aligned.
	 */
	skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

	mapping = priv->rx_ring[id].mapping;
	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
		goto error;

	priv->rx_ring[id].skb = skb;
	return skb;
error:
	dev_kfree_skb_any(skb);
	return NULL;
}

static int ipoib_ib_post_receives(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_alloc_rx_skb(dev, i)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			return -ENOMEM;
		}
		if (ipoib_ib_post_receive(dev, i)) {
			ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
			return -EIO;
		}
	}

	return 0;
}

static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
	struct sk_buff *skb;
	u64 mapping[IPOIB_UD_RX_SG];
	union ib_gid *dgid;
	union ib_gid *sgid;

	ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_recvq_size);
		return;
	}

	skb = priv->rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv,
				   "failed recv event (status=%d, wrid=%d vend_err %#x)\n",
				   wc->status, wr_id, wc->vendor_err);
		ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
		dev_kfree_skb_any(skb);
		priv->rx_ring[wr_id].skb = NULL;
		return;
	}

	memcpy(mapping, priv->rx_ring[wr_id].mapping,
	       IPOIB_UD_RX_SG * sizeof(*mapping));

	/*
	 * If we can't allocate a new RX buffer, dump
	 * this packet and reuse the old buffer.
	 */
	if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
		++dev->stats.rx_dropped;
		goto repost;
	}

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_ud_dma_unmap_rx(priv, mapping);

	skb_put(skb, wc->byte_len);

	/* First byte of dgid signals multicast when 0xff */
	dgid = &((struct ib_grh *)skb->data)->dgid;

	if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
		skb->pkt_type = PACKET_HOST;
	else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
		skb->pkt_type = PACKET_BROADCAST;
	else
		skb->pkt_type = PACKET_MULTICAST;

	sgid = &((struct ib_grh *)skb->data)->sgid;

	/*
	 * Drop packets that this interface sent, i.e. multicast packets
	 * that the HCA has replicated.
	 */
	if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
		int need_repost = 1;

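		/*
		 * If a GRH is present and its source GID differs from ours,
		 * the packet likely came from another interface that shares
		 * our LID rather than from us; deliver it instead of
		 * dropping it.
		 */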
		if ((wc->wc_flags & IB_WC_GRH) &&
		    sgid->global.interface_id != priv->local_gid.global.interface_id)
			need_repost = 0;

		if (need_repost) {
			dev_kfree_skb_any(skb);
			goto repost;
		}
	}

	skb_pull(skb, IB_GRH_BYTES);

	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_add_pseudo_hdr(skb);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		dev->stats.multicast++;

	skb->dev = dev;
	if ((dev->features & NETIF_F_RXCSUM) &&
	    likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	napi_gro_receive(&priv->recv_napi, skb);

repost:
	if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
		ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n",
			   wr_id);
}

int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
					       DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
			return -EIO;

		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		mapping[i + off] = ib_dma_map_page(ca,
						   skb_frag_page(frag),
						   skb_frag_off(frag),
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
			goto partial_error;
	}
	return 0;

partial_error:
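	/*
	 * Unwind the fragment mappings in reverse.  mapping[] is shifted
	 * by one entry when a linear header was mapped first, hence the
	 * i - !off index.
	 */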
	for (; i > 0; --i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag),
				  DMA_TO_DEVICE);
	}

	if (off)
		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

	return -EIO;
}

void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
			struct ipoib_tx_buf *tx_req)
{
	struct sk_buff *skb = tx_req->skb;
	u64 *mapping = tx_req->mapping;
	int i;
	int off;

	if (skb_headlen(skb)) {
		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
				    DMA_TO_DEVICE);
		off = 1;
	} else
		off = 0;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		ib_dma_unmap_page(priv->ca, mapping[i + off],
				  skb_frag_size(frag), DMA_TO_DEVICE);
	}
}

/*
 * As a result of a completion error the QP can be transitioned to the SQE
 * state.  This function checks whether the (send) QP is in the SQE state
 * and, if so, moves it back to RTS so that it is functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
	struct ipoib_qp_state_validate *qp_work =
		container_of(work, struct ipoib_qp_state_validate, work);

	struct ipoib_dev_priv *priv = qp_work->priv;
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
			   __func__, ret);
		goto free_res;
	}
	pr_info("%s: QP: 0x%x is in state: %d\n",
		__func__, priv->qp->qp_num, qp_attr.qp_state);

	/* Currently we only support the SQE->RTS transition. */
	if (qp_attr.qp_state == IB_QPS_SQE) {
		qp_attr.qp_state = IB_QPS_RTS;

		ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
		if (ret) {
			pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
				ret, priv->qp->qp_num);
			goto free_res;
		}
		pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
			__func__, priv->qp->qp_num);
	} else {
		pr_warn("QP (%d) will stay in state: %d\n",
			priv->qp->qp_num, qp_attr.qp_state);
	}

free_res:
	kfree(qp_work);
}

static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	unsigned int wr_id = wc->wr_id;
	struct ipoib_tx_buf *tx_req;

	ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &priv->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv, tx_req);

	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	++priv->tx_tail;
	++priv->global_tx_tail;

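	/*
	 * Wake the net queue once at least half of the send ring has
	 * drained, but only while the interface is administratively up.
	 */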
	if (unlikely(netif_queue_stopped(dev) &&
		     ((priv->global_tx_head - priv->global_tx_tail) <=
		      ipoib_sendq_size >> 1) &&
		     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_qp_state_validate *qp_work;

		ipoib_warn(priv,
			   "failed send event (status=%d, wrid=%d vend_err %#x)\n",
			   wc->status, wr_id, wc->vendor_err);
		qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
		if (!qp_work)
			return;

		INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
		qp_work->priv = priv;
		queue_work(priv->wq, &qp_work->work);
	}
}

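/*
 * Drain up to MAX_SEND_CQE send completions in one pass.  Returns nonzero
 * when the CQ may still hold more entries, so callers loop until this
 * returns zero.
 */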
static int poll_tx(struct ipoib_dev_priv *priv)
{
	int n, i;
	struct ib_wc *wc;

	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
	for (i = 0; i < n; ++i) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
		else
			ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
	}
	return n == MAX_SEND_CQE;
}

int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv =
		container_of(napi, struct ipoib_dev_priv, recv_napi);
	struct net_device *dev = priv->dev;
	int done;
	int t;
	int n, i;

	done = 0;

poll_more:
	while (done < budget) {
		int max = (budget - done);

		t = min(IPOIB_NUM_WC, max);
		n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

		for (i = 0; i < n; i++) {
			struct ib_wc *wc = priv->ibwc + i;

			if (wc->wr_id & IPOIB_OP_RECV) {
				++done;
				if (wc->wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, wc);
				else
					ipoib_ib_handle_rx_wc(dev, wc);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}

		if (n != t)
			break;
	}

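	/*
	 * Budget not exhausted: re-arm the CQ.  If completions arrived
	 * between the last poll and the re-arm (reported via
	 * IB_CQ_REPORT_MISSED_EVENTS), reschedule NAPI rather than risk
	 * missing them.
	 */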
	if (done < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->recv_cq,
					      IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}

	return done;
}

int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
	struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
						   send_napi);
	struct net_device *dev = priv->dev;
	int n, i;
	struct ib_wc *wc;

poll_more:
	n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);

	for (i = 0; i < n; i++) {
		wc = priv->send_wc + i;
		if (wc->wr_id & IPOIB_OP_CM)
			ipoib_cm_handle_tx_wc(dev, wc);
		else
			ipoib_ib_handle_tx_wc(dev, wc);
	}

	if (n < budget) {
		napi_complete(napi);
		if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
					      IB_CQ_REPORT_MISSED_EVENTS)) &&
		    napi_reschedule(napi))
			goto poll_more;
	}
	return n < 0 ? 0 : n;
}

void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->recv_napi);
}

void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
	struct ipoib_dev_priv *priv = ctx_ptr;

	napi_schedule(&priv->send_napi);
}

static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 dqpn,
			    struct ipoib_tx_buf *tx_req,
			    void *head, int hlen)
{
	struct sk_buff *skb = tx_req->skb;

	ipoib_build_sge(priv, tx_req);

	priv->tx_wr.wr.wr_id = wr_id;
	priv->tx_wr.remote_qpn = dqpn;
	priv->tx_wr.ah = address;

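	/*
	 * A non-NULL head means the caller split off the TCP/IP headers
	 * for LSO; hand them to the HCA separately along with the MSS so
	 * it can segment the payload.
	 */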
	if (head) {
		priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
		priv->tx_wr.header = head;
		priv->tx_wr.hlen = hlen;
		priv->tx_wr.wr.opcode = IB_WR_LSO;
	} else
		priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}

int ipoib_send(struct net_device *dev, struct sk_buff *skb,
	       struct ib_ah *address, u32 dqpn)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ipoib_tx_buf *tx_req;
	int hlen, rc;
	void *phead;
	unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);

	if (skb_is_gso(skb)) {
		hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
		phead = skb->data;
		if (unlikely(!skb_pull(skb, hlen))) {
			ipoib_warn(priv, "linear data too small\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	} else {
		if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
			ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
				   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
			return -1;
		}
		phead = NULL;
		hlen = 0;
	}
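
	/*
	 * One scatter/gather entry is consumed by the linear header when
	 * present, so only the remainder is available for page fragments;
	 * linearize the skb if it carries more fragments than that.
	 */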
	if (skb_shinfo(skb)->nr_frags > usable_sge) {
		if (skb_linearize(skb) < 0) {
			ipoib_warn(priv, "skb could not be linearized\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
		/* Can skb_linearize() return success without reducing nr_frags? */
		if (skb_shinfo(skb)->nr_frags > usable_sge) {
			ipoib_warn(priv, "too many frags after skb linearize\n");
			++dev->stats.tx_dropped;
			++dev->stats.tx_errors;
			dev_kfree_skb_any(skb);
			return -1;
		}
	}

	ipoib_dbg_data(priv,
		       "sending packet, length=%d address=%p dqpn=0x%06x\n",
		       skb->len, address, dqpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
	else
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
	/*
	 * tx_head is advanced only after a successful post, but check ring
	 * occupancy here so the queue is stopped before the ring can
	 * overflow.
	 */
	if ((priv->global_tx_head - priv->global_tx_tail) ==
	    ipoib_sendq_size - 1) {
		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
		netif_stop_queue(dev);
	}

	skb_orphan(skb);
	skb_dst_drop(skb);

	if (netif_queue_stopped(dev))
		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
				     IB_CQ_REPORT_MISSED_EVENTS) < 0)
			ipoib_warn(priv, "request notify on send CQ failed\n");

	rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
		       address, dqpn, tx_req, phead, hlen);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ipoib_dma_unmap_tx(priv, tx_req);
		dev_kfree_skb_any(skb);
		if (netif_queue_stopped(dev))
			netif_wake_queue(dev);
		rc = 0;
	} else {
		netif_trans_update(dev);

		rc = priv->tx_head;
		++priv->tx_head;
		++priv->global_tx_head;
	}
	return rc;
}

static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
{
	struct ipoib_ah *ah, *tah;
	unsigned long flags;

	netif_tx_lock_bh(priv->dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
		if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
			list_del(&ah->list);
			rdma_destroy_ah(ah->ah, 0);
			kfree(ah);
		}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(priv->dev);
}

void ipoib_reap_ah(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, ah_reap_task.work);

	ipoib_reap_dead_ahs(priv);

	if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
		queue_delayed_work(priv->wq, &priv->ah_reap_task,
				   round_jiffies_relative(HZ));
}

static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
{
	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(priv->wq, &priv->ah_reap_task,
			   round_jiffies_relative(HZ));
}

static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
{
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	/*
	 * After ipoib_stop_ah_reaper() we always go through
	 * ipoib_reap_dead_ahs(), which ensures the work is really stopped
	 * and does a final flush of the dead_ahs list.
	 */
}

static int recvs_pending(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int pending = 0;
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (priv->rx_ring[i].skb)
			++pending;

	return pending;
}

static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
					struct ib_qp *qp,
					enum ib_qp_state new_state)
{
	struct ib_qp_attr qp_attr;
	struct ib_qp_init_attr query_init_attr;
	int ret;

	ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
	if (ret) {
		ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
		return;
	}
	/* Print according to the new state and the previous state. */
	if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
		ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
	else
		ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
			   new_state, qp_attr.qp_state);
}

static void ipoib_napi_enable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_enable(&priv->recv_napi);
	napi_enable(&priv->send_napi);
}

static void ipoib_napi_disable(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	napi_disable(&priv->recv_napi);
	napi_disable(&priv->send_napi);
}

int ipoib_ib_dev_stop_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr qp_attr;
	unsigned long begin;
	struct ipoib_tx_buf *tx_req;
	int i;

	if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_disable(dev);

	ipoib_cm_dev_stop(dev);

	/*
	 * Move our QP to the error state and then reinitialize it once all
	 * work requests have completed or have been flushed.
	 */
	qp_attr.qp_state = IB_QPS_ERR;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv,
				   "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail,
				   recvs_pending(dev));

			/*
			 * Assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
				tx_req = &priv->tx_ring[priv->tx_tail &
							(ipoib_sendq_size - 1)];
				ipoib_dma_unmap_tx(priv, tx_req);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
				++priv->global_tx_tail;
			}

			for (i = 0; i < ipoib_recvq_size; ++i) {
				struct ipoib_rx_buf *rx_req;

				rx_req = &priv->rx_ring[i];
				if (!rx_req->skb)
					continue;
				ipoib_ud_dma_unmap_rx(priv,
						      priv->rx_ring[i].mapping);
				dev_kfree_skb_any(rx_req->skb);
				rx_req->skb = NULL;
			}

			goto timeout;
		}

		ipoib_drain_cq(dev);

		usleep_range(1000, 2000);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

	return 0;
}

int ipoib_ib_dev_open_default(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ipoib_init_qp(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		goto out;
	}

	ret = ipoib_cm_dev_open(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
		goto out;
	}

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
		ipoib_napi_enable(dev);

	return 0;
out:
	return -1;
}

int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
			   (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
		return -1;
	}

	ipoib_start_ah_reaper(priv);
	if (priv->rn_ops->ndo_open(dev)) {
		pr_warn("%s: Failed to open dev\n", dev->name);
		goto dev_stop;
	}

	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);

	return 0;

dev_stop:
	ipoib_stop_ah_reaper(priv);
	return -1;
}

void ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	priv->rn_ops->ndo_stop(dev);

	clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
	ipoib_stop_ah_reaper(priv);
}

void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct rdma_netdev *rn = netdev_priv(dev);

	if (!(priv->pkey & 0x7fff) ||
	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
			 &priv->pkey_index)) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	} else {
		if (rn->set_id)
			rn->set_id(dev, priv->pkey_index);
		set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	}
}

void ipoib_ib_dev_up(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_pkey_dev_check_presence(dev);

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		ipoib_dbg(priv, "PKEY is not assigned.\n");
		return;
	}

	set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

	ipoib_mcast_start_thread(dev);
}

void ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	ipoib_flush_paths(dev);
}

void ipoib_drain_cq(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int i, n;

	/*
	 * We call completion handling routines that expect to be
	 * called from the BH-disabled NAPI poll context, so disable
	 * BHs here too.
	 */
	local_bh_disable();

	do {
		n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i) {
			/*
			 * Convert any successful completions to flush
			 * errors to avoid passing packets up the
			 * stack after bringing the device down.
			 */
			if (priv->ibwc[i].status == IB_WC_SUCCESS)
				priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

			if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
				if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
					ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
				else
					ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
			} else {
				pr_warn("%s: Got unexpected wqe id\n", __func__);
			}
		}
	} while (n == IPOIB_NUM_WC);

	while (poll_tx(priv))
		; /* nothing */

	local_bh_enable();
}

/*
 * Takes whatever value is in P_Key index 0, updates priv->pkey, and
 * returns 0 if the P_Key value changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
	int result;
	u16 prev_pkey;

	prev_pkey = priv->pkey;
	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
	if (result) {
		ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
			   priv->port, result);
		return result;
	}

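	/* Force the full-membership bit so we join as a full member. */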
	priv->pkey |= 0x8000;

	if (prev_pkey != priv->pkey) {
		ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
			  prev_pkey, priv->pkey);
		/*
		 * Update the pkey in the broadcast address, while making
		 * sure to set the full membership bit, so that we join the
		 * right broadcast group.
		 */
		priv->dev->broadcast[8] = priv->pkey >> 8;
		priv->dev->broadcast[9] = priv->pkey & 0xff;
		return 0;
	}

	return 1;
}

/*
 * Returns 0 if the P_Key value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
	u16 old_index = priv->pkey_index;

	priv->pkey_index = 0;
	ipoib_pkey_dev_check_presence(priv->dev);

	if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
	    (old_index == priv->pkey_index))
		return 1;
	return 0;
}

/*
 * Returns true if the device address of the ipoib interface has changed
 * and the new address is valid (i.e. present in the GID table); returns
 * false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
	union ib_gid search_gid;
	union ib_gid gid0;
	union ib_gid *netdev_gid;
	int err;
	u16 index;
	u8 port;
	bool ret = false;

	netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
	if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
		return false;

	netif_addr_lock_bh(priv->dev);

	/*
	 * The subnet prefix may have changed; update it now so we won't
	 * have to do it later.
	 */
	priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
	netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
	search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

	search_gid.global.interface_id = priv->local_gid.global.interface_id;

	netif_addr_unlock_bh(priv->dev);

	err = ib_find_gid(priv->ca, &search_gid, &port, &index);

	netif_addr_lock_bh(priv->dev);

	if (search_gid.global.interface_id !=
	    priv->local_gid.global.interface_id)
		/*
		 * There was a change while we were looking up the GID; bail
		 * here and let the next work sort this out.
		 */
		goto out;

	/*
	 * The next section of code needs some background:
	 * Per the IB spec the port GUID can't change while the HCA is
	 * powered on.  The port GUID is the basis for the GID at index 0,
	 * which in turn is the basis for the default device address of an
	 * ipoib interface.
	 *
	 * So it seems the flow should be:
	 * if user_changed_dev_addr && gid in gid tbl
	 *	set bit dev_addr_set
	 *	return true
	 * else
	 *	return false
	 *
	 * The issue is that there are devices that don't follow the spec
	 * and change the port GUID when the HCA is power cycled, so in
	 * order not to break userspace applications we need to check
	 * whether the user wanted to control the device address, and we
	 * assume that if they set the device address back to be based on
	 * GID index 0, they no longer wish to control it.
	 *
	 * If the user doesn't control the device address,
	 * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed, it means
	 * the port GUID has changed and the GID at index 0 has changed, so
	 * we need to change priv->local_gid and priv->dev->dev_addr to
	 * reflect the new GID.
	 */
	if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		if (!err && port == priv->port) {
			set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
			if (index == 0)
				clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
					  &priv->flags);
			else
				set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
			ret = true;
		} else {
			ret = false;
		}
	} else {
		if (!err && port == priv->port) {
			ret = true;
		} else {
			if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
				memcpy(&priv->local_gid, &gid0,
				       sizeof(priv->local_gid));
				memcpy(priv->dev->dev_addr + 4, &gid0,
				       sizeof(priv->local_gid));
				ret = true;
			}
		}
	}

out:
	netif_addr_unlock_bh(priv->dev);

	return ret;
}

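/*
 * Flush levels: LIGHT revalidates the device address and multicast state,
 * NORMAL additionally brings the IB side of the device down, and HEAVY
 * restarts the QP as well (e.g. after a P_Key change).
 */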
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
				 enum ipoib_flush_level level,
				 int nesting)
{
	struct ipoib_dev_priv *cpriv;
	struct net_device *dev = priv->dev;
	int result;

	down_read_nested(&priv->vlan_rwsem, nesting);

	/*
	 * Flush any child interfaces too -- they might be up even if
	 * the parent is down.
	 */
	list_for_each_entry(cpriv, &priv->child_intfs, list)
		__ipoib_ib_dev_flush(cpriv, level, nesting + 1);

	up_read(&priv->vlan_rwsem);

	if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
	    level != IPOIB_FLUSH_HEAVY) {
		/* Make sure the dev_addr is set even if not flushing */
		if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
		return;
	}

	if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		/* Interface is down; update the P_Key and leave. */
		if (level == IPOIB_FLUSH_HEAVY) {
			if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
				update_parent_pkey(priv);
			else
				update_child_pkey(priv);
		} else if (level == IPOIB_FLUSH_LIGHT)
			ipoib_dev_addr_changed_valid(priv);
		ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
		return;
	}

	if (level == IPOIB_FLUSH_HEAVY) {
		/*
		 * Child devices chase their origin P_Key value, while
		 * non-child (parent) devices always take what is present
		 * in P_Key index 0.
		 */
		if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
			result = update_child_pkey(priv);
			if (result) {
				/* Restart the QP only if the P_Key index changed. */
				ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
				return;
			}

		} else {
			result = update_parent_pkey(priv);
			/* Restart the QP only if the P_Key value changed. */
			if (result) {
				ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
				return;
			}
		}
	}

	if (level == IPOIB_FLUSH_LIGHT) {
		int oper_up;

		ipoib_mark_paths_invalid(dev);
		/*
		 * Mark IPoIB operation as down to prevent races between the
		 * flush flow, which leaves the MCG, and on-the-fly joins
		 * that can happen during that time.  The mcast restart task
		 * should deal with join requests we missed.
		 */
		oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_mcast_dev_flush(dev);
		if (oper_up)
			set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
		ipoib_reap_dead_ahs(priv);
	}

	if (level >= IPOIB_FLUSH_NORMAL)
		ipoib_ib_dev_down(dev);

	if (level == IPOIB_FLUSH_HEAVY) {
		if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
			ipoib_ib_dev_stop(dev);

		if (ipoib_ib_dev_open(dev))
			return;

		if (netif_queue_stopped(dev))
			netif_start_queue(dev);
	}

	/*
	 * The device could have been brought down between the start and
	 * when we get here; don't bring it back up if it's not configured
	 * up.
	 */
	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
		if (level >= IPOIB_FLUSH_NORMAL)
			ipoib_ib_dev_up(dev);
		if (ipoib_dev_addr_changed_valid(priv))
			ipoib_mcast_restart_task(&priv->restart_task);
	}
}

void ipoib_ib_dev_flush_light(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_light);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_normal);

	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, flush_heavy);

	rtnl_lock();
	__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
	rtnl_unlock();
}

void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");
	/*
	 * We must make sure there are no more (path) completions
	 * that may wish to touch priv fields that are no longer valid.
	 */
	ipoib_flush_paths(dev);

	ipoib_mcast_stop_thread(dev);
	ipoib_mcast_dev_flush(dev);

	/*
	 * None of our AH references are freed until after
	 * ipoib_mcast_dev_flush(), ipoib_flush_paths(), and the neighbor
	 * garbage collection have stopped and been reaped.  That should
	 * all be done now, so make a final AH flush.
	 */
	ipoib_reap_dead_ahs(priv);

	clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	priv->rn_ops->ndo_uninit(dev);

	if (priv->pd) {
		ib_dealloc_pd(priv->pd);
		priv->pd = NULL;
	}
}