/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

enum {
	MLX4_IB_VENDOR_CLASS1 = 0x9,
	MLX4_IB_VENDOR_CLASS2 = 0xa
};

#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a) (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)
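
/*
 * Tunnel completions encode bookkeeping in the 64-bit work-request id:
 * bits 0-31 carry the ring-buffer index, bits 32-33 the tunnel QP
 * (0 = SMI, 1 = GSI), and bit 34 flags a receive completion.  As a
 * sketch of the intended decoding (inferred from the macros above and
 * from the wr_id built in mlx4_ib_send_to_slave()):
 *
 *	u64 wrid = MLX4_TUN_WRID_RECV | MLX4_TUN_SET_WRID_QPN(1) | 5;
 *	// MLX4_TUN_IS_RECV(wrid) == 1, MLX4_TUN_WRID_QPN(wrid) == 1
 */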

/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8	   /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
	struct ib_grh grh;
	u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
	u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
	struct ib_grh grh;
	struct mlx4_ib_tunnel_header hdr;
	struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
	struct mlx4_rcv_tunnel_hdr hdr;
	struct ib_grh grh;
	struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap);

__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI	((u64) (((u64)IB_OPENIB_OUI) << 40))
	return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

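/*
 * Generate a TID for a demux-originated MAD.  The top byte is forced to
 * 0xff; on the response path, mlx4_ib_demux_mad() reads the slave id
 * out of that byte (255 denotes the dom0/master), so this marks the MAD
 * as owned by the master rather than by any slave.
 */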
__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
	return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
		cpu_to_be64(0xff00000000000000LL);
}

int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
		 int port, const struct ib_wc *in_wc,
		 const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
	void *inbox;
	int err;
	u32 in_modifier = port;
	u8 op_modifier = 0;

	inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(inmailbox))
		return PTR_ERR(inmailbox);
	inbox = inmailbox->buf;

	outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(outmailbox)) {
		mlx4_free_cmd_mailbox(dev->dev, inmailbox);
		return PTR_ERR(outmailbox);
	}

	memcpy(inbox, in_mad, 256);

	/*
	 * Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
		op_modifier |= 0x1;
	if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
		op_modifier |= 0x2;
	if (mlx4_is_mfunc(dev->dev) &&
	    (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
		op_modifier |= 0x8;

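	/*
	 * When a work completion is supplied, the second 256 bytes of
	 * the input mailbox carry extended receive information (local
	 * and remote QPNs, SL, GRH presence, pkey index and the GRH
	 * itself).  The anonymous struct below is assumed to mirror the
	 * firmware's expected layout; the field meanings are inferred
	 * from how they are filled in.
	 */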
	if (in_wc) {
		struct {
			__be32		my_qpn;
			u32		reserved1;
			__be32		rqpn;
			u8		sl;
			u8		g_path;
			u16		reserved2[2];
			__be16		pkey;
			u32		reserved3[11];
			u8		grh[40];
		} *ext_info;

		memset(inbox + 256, 0, 256);
		ext_info = inbox + 256;

		ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
		ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
		ext_info->sl     = in_wc->sl << 4;
		ext_info->g_path = in_wc->dlid_path_bits |
			(in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
		ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

		if (in_grh)
			memcpy(ext_info->grh, in_grh, 40);

		op_modifier |= 0x4;

		in_modifier |= ib_lid_cpu16(in_wc->slid) << 16;
	}

	err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
			   mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
			   (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

	if (!err)
		memcpy(response_mad, outmailbox->buf, 256);

	mlx4_free_cmd_mailbox(dev->dev, inmailbox);
	mlx4_free_cmd_mailbox(dev->dev, outmailbox);

	return err;
}

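/*
 * Cache an address handle aimed at the subnet manager of this port.
 * forward_trap() relays traps through this cached AH, so it is
 * refreshed whenever a snooped PortInfo SET reports a new SM LID or SL.
 */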
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
	struct ib_ah *new_ah;
	struct rdma_ah_attr ah_attr;
	unsigned long flags;

	if (!dev->send_agent[port_num - 1][0])
		return;

	memset(&ah_attr, 0, sizeof ah_attr);
	ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
	rdma_ah_set_dlid(&ah_attr, lid);
	rdma_ah_set_sl(&ah_attr, sl);
	rdma_ah_set_port_num(&ah_attr, port_num);

	new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
				&ah_attr, 0);
	if (IS_ERR(new_ah))
		return;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (dev->sm_ah[port_num - 1])
		rdma_destroy_ah(dev->sm_ah[port_num - 1], 0);
	dev->sm_ah[port_num - 1] = new_ah;
	spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
		      u16 prev_lid)
{
	struct ib_port_info *pinfo;
	u16 lid;
	__be16 *base;
	u32 bn, pkey_change_bitmap;
	int i;

	struct mlx4_ib_dev *dev = to_mdev(ibdev);

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_SET)
		switch (mad->mad_hdr.attr_id) {
		case IB_SMP_ATTR_PORT_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
			lid = be16_to_cpu(pinfo->lid);

			update_sm_ah(dev, port_num,
				     be16_to_cpu(pinfo->sm_lid),
				     pinfo->neighbormtu_mastersmsl & 0xf);

			if (pinfo->clientrereg_resv_subnetto & 0x80)
				handle_client_rereg_event(dev, port_num);

			if (prev_lid != lid)
				handle_lid_change_event(dev, port_num);
			break;

		case IB_SMP_ATTR_PKEY_TABLE:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			if (!mlx4_is_mfunc(dev->dev)) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				break;
			}

			/* at this point, we are running in the master.
			 * Slaves do not receive SMPs.
			 */
			bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
			base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
			pkey_change_bitmap = 0;
			for (i = 0; i < 32; i++) {
				pr_debug("PKEY[%d] = x%x\n",
					 i + bn*32, be16_to_cpu(base[i]));
				if (be16_to_cpu(base[i]) !=
				    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
					pkey_change_bitmap |= (1 << i);
					dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
						be16_to_cpu(base[i]);
				}
			}
			pr_debug("PKEY Change event: port=%d, "
				 "block=0x%x, change_bitmap=0x%x\n",
				 port_num, bn, pkey_change_bitmap);

			if (pkey_change_bitmap) {
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_PKEY_CHANGE);
				if (!dev->sriov.is_going_down)
					__propagate_pkey_ev(dev, port_num, bn,
							    pkey_change_bitmap);
			}
			break;

		case IB_SMP_ATTR_GUID_INFO:
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
				return;
			/* paravirtualized master's guid is guid 0 -- does not change */
			if (!mlx4_is_master(dev->dev))
				mlx4_ib_dispatch_event(dev, port_num,
						       IB_EVENT_GID_CHANGE);
			/* if master, notify relevant slaves */
			if (mlx4_is_master(dev->dev) &&
			    !dev->sriov.is_going_down) {
				bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
				mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
								    (u8 *)(&((struct ib_smp *)mad)->data));
				mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
								     (u8 *)(&((struct ib_smp *)mad)->data));
			}
			break;

		case IB_SMP_ATTR_SL_TO_VL_TABLE:
			/* cache sl to vl mapping changes for use in
			 * filling QP1 LRH VL field when sending packets
			 */
			if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
			    dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
				return;
			if (!mlx4_is_slave(dev->dev)) {
				union sl2vl_tbl_to_u64 sl2vl64;
				int jj;

				for (jj = 0; jj < 8; jj++) {
					sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
					pr_debug("port %u, sl2vl[%d] = %02x\n",
						 port_num, jj, sl2vl64.sl8[jj]);
				}
				atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
			}
			break;

		default:
			break;
		}
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
				int block, u32 change_bitmap)
{
	int i, ix, slave, err;
	int have_event = 0;

	for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
		if (slave == mlx4_master_func_num(dev->dev))
			continue;
		if (!mlx4_is_slave_active(dev->dev, slave))
			continue;

		have_event = 0;
		for (i = 0; i < 32; i++) {
			if (!(change_bitmap & (1 << i)))
				continue;
			for (ix = 0;
			     ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
				if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
				    [ix] == i + 32 * block) {
					err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
					pr_debug("propagate_pkey_ev: slave %d,"
						 " port %d, ix %d (%d)\n",
						 slave, port_num, ix, err);
					have_event = 1;
					break;
				}
			}
			if (have_event)
				break;
		}
	}
}

static void node_desc_override(struct ib_device *dev,
			       struct ib_mad *mad)
{
	unsigned long flags;

	if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
	    mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
		spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
		memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
		       IB_DEVICE_NODE_DESC_MAX);
		spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
	}
}

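/*
 * Relay a trap that arrived from the wire (SLID 0) to the subnet
 * manager via the cached SM address handle.  QP0 is used for LID-routed
 * SM-class traps and QP1 for every other class, matching the qpn
 * selection on the first line below.
 */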
static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
	int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
	struct ib_mad_send_buf *send_buf;
	struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
	int ret;
	unsigned long flags;

	if (agent) {
		send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
					      IB_MGMT_MAD_DATA, GFP_ATOMIC,
					      IB_MGMT_BASE_VERSION);
		if (IS_ERR(send_buf))
			return;
		/*
		 * We rely here on the fact that MLX QPs don't use the
		 * address handle after the send is posted (this is
		 * wrong following the IB spec strictly, but we know
		 * it's OK for our devices).
		 */
		spin_lock_irqsave(&dev->sm_lock, flags);
		memcpy(send_buf->mad, mad, sizeof *mad);
		if ((send_buf->ah = dev->sm_ah[port_num - 1]))
			ret = ib_post_send_mad(send_buf, NULL);
		else
			ret = -EINVAL;
		spin_unlock_irqrestore(&dev->sm_lock, flags);

		if (ret)
			ib_free_send_mad(send_buf);
	}
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
				    struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int i;

	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
			return i;
	}
	return -1;
}

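/*
 * Map a physical pkey value to the given slave's virtual pkey index.
 * A full-membership match (bit 15 set) returns immediately; otherwise
 * the first limited-membership match is used.  The last entry of the
 * physical table acts as the "unassigned" sentinel for virtual slots
 * that have no pkey mapped.
 */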
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
				   u8 port, u16 pkey, u16 *ix)
{
	int i, ret;
	u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
	u16 slot_pkey;

	if (slave == mlx4_master_func_num(dev->dev))
		return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

	unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

	for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
		if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
			continue;

		pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

		ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
		if (ret)
			continue;
		if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
			if (slot_pkey & 0x8000) {
				*ix = (u16) pkey_ix;
				return 0;
			} else {
				/* take first partial pkey index found */
				if (partial_ix == 0xFF)
					partial_ix = pkey_ix;
			}
		}
	}

	if (partial_ix < 0xFF) {
		*ix = (u16) partial_ix;
		return 0;
	}

	return -EINVAL;
}

static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
				union ib_gid *dgid)
{
	int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
	enum rdma_network_type net_type;

	if (version == 4)
		net_type = RDMA_NETWORK_IPV4;
	else if (version == 6)
		net_type = RDMA_NETWORK_IPV6;
	else
		return -EINVAL;

	return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
					 sgid, dgid);
}

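/*
 * Each function owns a block of eight proxy special QPs starting at
 * base_proxy_sqpn + 8 * slave; the first two entries of a block serve
 * as that slave's proxy QP0 pair.  The tunnel QPN arithmetic in
 * mlx4_ib_send_to_slave() relies on this same layout.
 */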
static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
{
	int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;

	return (qpn >= proxy_start && qpn <= proxy_start + 1);
}

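/*
 * Tunnel a wire MAD to the slave that owns it: wrap the GRH and MAD in
 * a mlx4_rcv_tunnel_mad together with the metadata the slave's
 * paravirtualized stack needs (pkey index, source QP, SL or vlan/MAC),
 * then post it on the demux tunnel QP toward the slave's proxy QP.
 */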
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
			  enum ib_qp_type dest_qpt, struct ib_wc *wc,
			  struct ib_grh *grh, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	const struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *tun_ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_rcv_tunnel_mad *tun_mad;
	struct rdma_ah_attr attr;
	struct ib_ah *ah;
	struct ib_qp *src_qp = NULL;
	unsigned tun_tx_ix = 0;
	int dqpn;
	int ret = 0;
	u16 tun_pkey_ix;
	u16 cached_pkey;
	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

	if (dest_qpt > IB_QPT_GSI) {
		pr_debug("dest_qpt (%d) > IB_QPT_GSI\n", dest_qpt);
		return -EINVAL;
	}

	tun_ctx = dev->sriov.demux[port-1].tun[slave];

	/* check if proxy qp created */
	if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (!dest_qpt)
		tun_qp = &tun_ctx->qp[0];
	else
		tun_qp = &tun_ctx->qp[1];

	/* compute P_Key index to put in tunnel header for slave */
	if (dest_qpt) {
		u16 pkey_ix;

		ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
		if (ret) {
			pr_debug("unable to get %s cached pkey for index %d, ret %d\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 wc->pkey_index, ret);
			return -EINVAL;
		}

		ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
		if (ret) {
			pr_debug("unable to get %s pkey ix for pkey 0x%x, ret %d\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 cached_pkey, ret);
			return -EINVAL;
		}
		tun_pkey_ix = pkey_ix;
	} else
		tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

	dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

	/* get tunnel tx data buf for slave */
	src_qp = tun_qp->qp;

	/* create ah. Just need an empty one with the port num for the post send.
	 * The driver will set the force loopback bit in post_send */
	memset(&attr, 0, sizeof attr);
	attr.type = rdma_ah_find_type(&dev->ib_dev, port);

	rdma_ah_set_port_num(&attr, port);
	if (is_eth) {
		union ib_gid sgid;
		union ib_gid dgid;

		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
			return -EINVAL;
		rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
	}
	ah = rdma_create_ah(tun_ctx->pd, &attr, 0);
	if (IS_ERR(ah))
		return -ENOMEM;

	/* allocate the tunnel tx buffer only after all the checks that
	 * can fail have passed
	 */
	spin_lock(&tun_qp->tx_lock);
	if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
	    (MLX4_NUM_TUNNEL_BUFS - 1))
		ret = -EAGAIN;
	else
		tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
	spin_unlock(&tun_qp->tx_lock);
	if (ret)
		goto end;

	tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
	if (tun_qp->tx_ring[tun_tx_ix].ah)
		rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
	tun_qp->tx_ring[tun_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   tun_qp->tx_ring[tun_tx_ix].buf.map,
				   sizeof (struct mlx4_rcv_tunnel_mad),
				   DMA_TO_DEVICE);

	/* copy over to tunnel buffer */
	if (grh)
		memcpy(&tun_mad->grh, grh, sizeof *grh);
	memcpy(&tun_mad->mad, mad, sizeof *mad);

	/* adjust tunnel data */
	tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
	tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
	tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

	if (is_eth) {
		u16 vlan = 0;

		if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
						NULL)) {
			/* VST mode */
			if (vlan != wc->vlan_id)
				/* Packet vlan is not the VST-assigned vlan.
				 * Drop the packet.
				 */
				goto out;
			else
				/* Remove the vlan tag before forwarding
				 * the packet to the VF.
				 */
				vlan = 0xffff;
		} else {
			vlan = wc->vlan_id;
		}

		tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
		memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
		memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
	} else {
		tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
		tun_mad->hdr.slid_mac_47_32 = ib_lid_be16(wc->slid);
	}

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      tun_qp->tx_ring[tun_tx_ix].buf.map,
				      sizeof (struct mlx4_rcv_tunnel_mad),
				      DMA_TO_DEVICE);

	list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_rcv_tunnel_mad);
	list.lkey = tun_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.remote_qkey = IB_QP_SET_QKEY;
	wr.remote_qpn = dqpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;
 out:
	spin_lock(&tun_qp->tx_lock);
	tun_qp->tx_ix_tail++;
	spin_unlock(&tun_qp->tx_lock);
	tun_qp->tx_ring[tun_tx_ix].ah = NULL;
 end:
	rdma_destroy_ah(ah, 0);
	return ret;
}

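/*
 * Decide which slave an incoming wire MAD belongs to and forward it
 * there.  RoCE ports demux purely by destination GID and accept only CM
 * MADs; IB ports first try the slave id embedded in a response TID,
 * then the GRH, applying per-class permission checks before tunneling.
 */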
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
			     struct ib_wc *wc, struct ib_grh *grh,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int err, other_port;
	int slave = -1;
	u8 *slave_id;
	int is_eth = 0;

	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
		is_eth = 0;
	else
		is_eth = 1;

	if (is_eth) {
		union ib_gid dgid;
		union ib_gid sgid;

		if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
			return -EINVAL;
		if (!(wc->wc_flags & IB_WC_GRH)) {
			mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
			return -EINVAL;
		}
		if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
			mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
			return -EINVAL;
		}
		err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
		if (err && mlx4_is_mf_bonded(dev->dev)) {
			other_port = (port == 1) ? 2 : 1;
			err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
			if (!err) {
				port = other_port;
				pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
					 slave, grh->dgid.raw, port, other_port);
			}
		}
		if (err) {
			mlx4_ib_warn(ibdev, "failed matching grh\n");
			return -ENOENT;
		}
		if (slave >= dev->dev->caps.sqp_demux) {
			mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
				     slave, dev->dev->caps.sqp_demux);
			return -ENOENT;
		}

		if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
			return 0;

		err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
		if (err)
			pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
				 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
				 slave, err);
		return 0;
	}

	/* Initially assume that this mad is for us */
	slave = mlx4_master_func_num(dev->dev);

	/* See if the slave id is encoded in a response mad */
	if (mad->mad_hdr.method & 0x80) {
		slave_id = (u8 *) &mad->mad_hdr.tid;
		slave = *slave_id;
		if (slave != 255) /* 255 indicates the dom0 */
			*slave_id = 0; /* remap tid */
	}

	/* If a grh is present, we demux according to it */
	if (wc->wc_flags & IB_WC_GRH) {
		if (grh->dgid.global.interface_id ==
			cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
		    grh->dgid.global.subnet_prefix == cpu_to_be64(
			atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
			slave = 0;
		} else {
			slave = mlx4_ib_find_real_gid(ibdev, port,
						      grh->dgid.global.interface_id);
			if (slave < 0) {
				mlx4_ib_warn(ibdev, "failed matching grh\n");
				return -ENOENT;
			}
		}
	}
	/* Class-specific handling */
	switch (mad->mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		/* 255 indicates the dom0 */
		if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
			if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
				return -EPERM;
			/* for a VF, drop unsolicited MADs */
			if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
				mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
					     slave, mad->mad_hdr.mgmt_class,
					     mad->mad_hdr.method);
				return -EINVAL;
			}
		}
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
					     (struct ib_sa_mad *) mad))
			return 0;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
			return 0;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
			return 0;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			pr_debug("dropping unsupported ingress mad from class:%d "
				 "for slave:%d\n", mad->mad_hdr.mgmt_class, slave);
			return 0;
		}
	}
	/* Make sure a slave id of 255 (dom0 marker) did not slip through
	 * unhandled.
	 */
	if (slave >= dev->dev->caps.sqp_demux) {
		mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
			     slave, dev->dev->caps.sqp_demux);
		return -ENOENT;
	}

	err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
	if (err)
		pr_debug("failed sending %s to slave %d via tunnel qp (%d)\n",
			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
			 slave, err);
	return 0;
}

static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			  const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			  const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
	u16 slid, prev_lid = 0;
	int err;
	struct ib_port_attr pattr;

	slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
		forward_trap(to_mdev(ibdev), port_num, in_mad);
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
	}

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/*
		 * Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
		   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2 ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else
		return IB_MAD_RESULT_SUCCESS;

	if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	     in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
	    in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
	    in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
	    !ib_query_port(ibdev, port_num, &pattr))
		prev_lid = ib_lid_cpu16(pattr.lid);

	err = mlx4_MAD_IFC(to_mdev(ibdev),
			   (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
			   (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
			   MLX4_MAD_IFC_NET_VIEW,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	if (!out_mad->mad_hdr.status) {
		smp_snoop(ibdev, port_num, in_mad, prev_lid);
		/* slaves get node desc from FW */
		if (!mlx4_is_slave(to_mdev(ibdev)->dev))
			node_desc_override(ibdev, out_mad);
	}

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

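/*
 * Translate an HCA flow-counter snapshot into the PMA counter layout.
 * The >> 2 converts byte counts into the 32-bit-word units the IB PMA
 * spec uses for PortXmitData/PortRcvData; the 32-bit PortCounters
 * variant additionally saturates at U32_MAX via ASSIGN_32BIT_COUNTER.
 */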
edit_counter(struct mlx4_counter * cnt,void * counters,__be16 attr_id)891*4882a593Smuzhiyun static void edit_counter(struct mlx4_counter *cnt, void *counters,
892*4882a593Smuzhiyun __be16 attr_id)
893*4882a593Smuzhiyun {
894*4882a593Smuzhiyun switch (attr_id) {
895*4882a593Smuzhiyun case IB_PMA_PORT_COUNTERS:
896*4882a593Smuzhiyun {
897*4882a593Smuzhiyun struct ib_pma_portcounters *pma_cnt =
898*4882a593Smuzhiyun (struct ib_pma_portcounters *)counters;
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
901*4882a593Smuzhiyun (be64_to_cpu(cnt->tx_bytes) >> 2));
902*4882a593Smuzhiyun ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
903*4882a593Smuzhiyun (be64_to_cpu(cnt->rx_bytes) >> 2));
904*4882a593Smuzhiyun ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
905*4882a593Smuzhiyun be64_to_cpu(cnt->tx_frames));
906*4882a593Smuzhiyun ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
907*4882a593Smuzhiyun be64_to_cpu(cnt->rx_frames));
908*4882a593Smuzhiyun break;
909*4882a593Smuzhiyun }
910*4882a593Smuzhiyun case IB_PMA_PORT_COUNTERS_EXT:
911*4882a593Smuzhiyun {
912*4882a593Smuzhiyun struct ib_pma_portcounters_ext *pma_cnt_ext =
913*4882a593Smuzhiyun (struct ib_pma_portcounters_ext *)counters;
914*4882a593Smuzhiyun
915*4882a593Smuzhiyun pma_cnt_ext->port_xmit_data =
916*4882a593Smuzhiyun cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
917*4882a593Smuzhiyun pma_cnt_ext->port_rcv_data =
918*4882a593Smuzhiyun cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
919*4882a593Smuzhiyun pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
920*4882a593Smuzhiyun pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
921*4882a593Smuzhiyun break;
922*4882a593Smuzhiyun }
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun
iboe_process_mad_port_info(void * out_mad)926*4882a593Smuzhiyun static int iboe_process_mad_port_info(void *out_mad)
927*4882a593Smuzhiyun {
928*4882a593Smuzhiyun struct ib_class_port_info cpi = {};
929*4882a593Smuzhiyun
930*4882a593Smuzhiyun cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
931*4882a593Smuzhiyun memcpy(out_mad, &cpi, sizeof(cpi));
932*4882a593Smuzhiyun return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
933*4882a593Smuzhiyun }
934*4882a593Smuzhiyun
iboe_process_mad(struct ib_device * ibdev,int mad_flags,u8 port_num,const struct ib_wc * in_wc,const struct ib_grh * in_grh,const struct ib_mad * in_mad,struct ib_mad * out_mad)935*4882a593Smuzhiyun static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
936*4882a593Smuzhiyun const struct ib_wc *in_wc, const struct ib_grh *in_grh,
937*4882a593Smuzhiyun const struct ib_mad *in_mad, struct ib_mad *out_mad)
938*4882a593Smuzhiyun {
939*4882a593Smuzhiyun struct mlx4_counter counter_stats;
940*4882a593Smuzhiyun struct mlx4_ib_dev *dev = to_mdev(ibdev);
941*4882a593Smuzhiyun struct counter_index *tmp_counter;
942*4882a593Smuzhiyun int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;
943*4882a593Smuzhiyun
944*4882a593Smuzhiyun if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
945*4882a593Smuzhiyun return -EINVAL;
946*4882a593Smuzhiyun
947*4882a593Smuzhiyun if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
948*4882a593Smuzhiyun return iboe_process_mad_port_info((void *)(out_mad->data + 40));
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun memset(&counter_stats, 0, sizeof(counter_stats));
951*4882a593Smuzhiyun mutex_lock(&dev->counters_table[port_num - 1].mutex);
952*4882a593Smuzhiyun list_for_each_entry(tmp_counter,
953*4882a593Smuzhiyun &dev->counters_table[port_num - 1].counters_list,
954*4882a593Smuzhiyun list) {
955*4882a593Smuzhiyun err = mlx4_get_counter_stats(dev->dev,
956*4882a593Smuzhiyun tmp_counter->index,
957*4882a593Smuzhiyun &counter_stats, 0);
958*4882a593Smuzhiyun if (err) {
959*4882a593Smuzhiyun err = IB_MAD_RESULT_FAILURE;
960*4882a593Smuzhiyun stats_avail = 0;
961*4882a593Smuzhiyun break;
962*4882a593Smuzhiyun }
963*4882a593Smuzhiyun stats_avail = 1;
964*4882a593Smuzhiyun }
965*4882a593Smuzhiyun mutex_unlock(&dev->counters_table[port_num - 1].mutex);
966*4882a593Smuzhiyun if (stats_avail) {
967*4882a593Smuzhiyun switch (counter_stats.counter_mode & 0xf) {
968*4882a593Smuzhiyun case 0:
969*4882a593Smuzhiyun edit_counter(&counter_stats,
970*4882a593Smuzhiyun (void *)(out_mad->data + 40),
971*4882a593Smuzhiyun in_mad->mad_hdr.attr_id);
972*4882a593Smuzhiyun err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
973*4882a593Smuzhiyun break;
974*4882a593Smuzhiyun default:
975*4882a593Smuzhiyun err = IB_MAD_RESULT_FAILURE;
976*4882a593Smuzhiyun }
977*4882a593Smuzhiyun }
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun return err;
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun
mlx4_ib_process_mad(struct ib_device * ibdev,int mad_flags,u8 port_num,const struct ib_wc * in_wc,const struct ib_grh * in_grh,const struct ib_mad * in,struct ib_mad * out,size_t * out_mad_size,u16 * out_mad_pkey_index)982*4882a593Smuzhiyun int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
983*4882a593Smuzhiyun const struct ib_wc *in_wc, const struct ib_grh *in_grh,
984*4882a593Smuzhiyun const struct ib_mad *in, struct ib_mad *out,
985*4882a593Smuzhiyun size_t *out_mad_size, u16 *out_mad_pkey_index)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun struct mlx4_ib_dev *dev = to_mdev(ibdev);
988*4882a593Smuzhiyun enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun /* iboe_process_mad() which uses the HCA flow-counters to implement IB PMA
991*4882a593Smuzhiyun * queries, should be called only by VFs and for that specific purpose
992*4882a593Smuzhiyun */
993*4882a593Smuzhiyun if (link == IB_LINK_LAYER_INFINIBAND) {
994*4882a593Smuzhiyun if (mlx4_is_slave(dev->dev) &&
995*4882a593Smuzhiyun (in->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
996*4882a593Smuzhiyun (in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
997*4882a593Smuzhiyun in->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
998*4882a593Smuzhiyun in->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
999*4882a593Smuzhiyun return iboe_process_mad(ibdev, mad_flags, port_num,
1000*4882a593Smuzhiyun in_wc, in_grh, in, out);
1001*4882a593Smuzhiyun
1002*4882a593Smuzhiyun return ib_process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
1003*4882a593Smuzhiyun in, out);
1004*4882a593Smuzhiyun }
1005*4882a593Smuzhiyun
1006*4882a593Smuzhiyun if (link == IB_LINK_LAYER_ETHERNET)
1007*4882a593Smuzhiyun return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
1008*4882a593Smuzhiyun in_grh, in, out);
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun return -EINVAL;
1011*4882a593Smuzhiyun }
1012*4882a593Smuzhiyun
send_handler(struct ib_mad_agent * agent,struct ib_mad_send_wc * mad_send_wc)1013*4882a593Smuzhiyun static void send_handler(struct ib_mad_agent *agent,
1014*4882a593Smuzhiyun struct ib_mad_send_wc *mad_send_wc)
1015*4882a593Smuzhiyun {
1016*4882a593Smuzhiyun if (mad_send_wc->send_buf->context[0])
1017*4882a593Smuzhiyun rdma_destroy_ah(mad_send_wc->send_buf->context[0], 0);
1018*4882a593Smuzhiyun ib_free_send_mad(mad_send_wc->send_buf);
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun
mlx4_ib_mad_init(struct mlx4_ib_dev * dev)1021*4882a593Smuzhiyun int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
1022*4882a593Smuzhiyun {
1023*4882a593Smuzhiyun struct ib_mad_agent *agent;
1024*4882a593Smuzhiyun int p, q;
1025*4882a593Smuzhiyun int ret;
1026*4882a593Smuzhiyun enum rdma_link_layer ll;
1027*4882a593Smuzhiyun
1028*4882a593Smuzhiyun for (p = 0; p < dev->num_ports; ++p) {
1029*4882a593Smuzhiyun ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
1030*4882a593Smuzhiyun for (q = 0; q <= 1; ++q) {
1031*4882a593Smuzhiyun if (ll == IB_LINK_LAYER_INFINIBAND) {
1032*4882a593Smuzhiyun agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
1033*4882a593Smuzhiyun q ? IB_QPT_GSI : IB_QPT_SMI,
1034*4882a593Smuzhiyun NULL, 0, send_handler,
1035*4882a593Smuzhiyun NULL, NULL, 0);
1036*4882a593Smuzhiyun if (IS_ERR(agent)) {
1037*4882a593Smuzhiyun ret = PTR_ERR(agent);
1038*4882a593Smuzhiyun goto err;
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun dev->send_agent[p][q] = agent;
1041*4882a593Smuzhiyun } else
1042*4882a593Smuzhiyun dev->send_agent[p][q] = NULL;
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun }
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun return 0;
1047*4882a593Smuzhiyun
1048*4882a593Smuzhiyun err:
1049*4882a593Smuzhiyun for (p = 0; p < dev->num_ports; ++p)
1050*4882a593Smuzhiyun for (q = 0; q <= 1; ++q)
1051*4882a593Smuzhiyun if (dev->send_agent[p][q])
1052*4882a593Smuzhiyun ib_unregister_mad_agent(dev->send_agent[p][q]);
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun return ret;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
mlx4_ib_mad_cleanup(struct mlx4_ib_dev * dev)1057*4882a593Smuzhiyun void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun struct ib_mad_agent *agent;
1060*4882a593Smuzhiyun int p, q;
1061*4882a593Smuzhiyun
1062*4882a593Smuzhiyun for (p = 0; p < dev->num_ports; ++p) {
1063*4882a593Smuzhiyun for (q = 0; q <= 1; ++q) {
1064*4882a593Smuzhiyun agent = dev->send_agent[p][q];
1065*4882a593Smuzhiyun if (agent) {
1066*4882a593Smuzhiyun dev->send_agent[p][q] = NULL;
1067*4882a593Smuzhiyun ib_unregister_mad_agent(agent);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun }
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun if (dev->sm_ah[p])
1072*4882a593Smuzhiyun rdma_destroy_ah(dev->sm_ah[p], 0);
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun }
1075*4882a593Smuzhiyun
handle_lid_change_event(struct mlx4_ib_dev * dev,u8 port_num)1076*4882a593Smuzhiyun static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
1077*4882a593Smuzhiyun {
1078*4882a593Smuzhiyun mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);
1079*4882a593Smuzhiyun
1080*4882a593Smuzhiyun if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1081*4882a593Smuzhiyun mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
1082*4882a593Smuzhiyun MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
1083*4882a593Smuzhiyun }
1084*4882a593Smuzhiyun
static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
	/* re-configure the alias-guid records and MCGs */
	if (mlx4_is_master(dev->dev)) {
		mlx4_ib_invalidate_all_guid_record(dev, port_num);

		if (!dev->sriov.is_going_down) {
			mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
			mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
						    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
		}
	}

	/* Update the sl-to-vl table from inside client rereg only if
	 * in secure-host mode (snooping is not possible) and the
	 * sl-to-vl change event is not generated by FW.
	 */
	if (!mlx4_is_slave(dev->dev) &&
	    dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
	    !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
		if (mlx4_is_master(dev->dev))
			/* already in work queue from mlx4_ib_event queueing
			 * mlx4_handle_port_mgmt_change_event, which calls
			 * this procedure; therefore, call sl2vl_update directly.
			 */
			mlx4_ib_sl2vl_update(dev, port_num);
		else
			mlx4_sched_ib_sl2vl_update_work(dev, port_num);
	}
	mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
			      struct mlx4_eqe *eqe)
{
	__propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
			    GET_MASK_FROM_EQE(eqe));
}

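/*
 * A GUID-info EQE covers four consecutive GUID table blocks (8 GUIDs
 * each); each byte of change_bitmap flags one of those sub-blocks. For
 * every flagged sub-block, re-read GUIDInfo from the wire (net view)
 * and refresh both the local cache and the affected slaves.
 */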
static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
				      u32 guid_tbl_blk_num, u32 change_bitmap)
{
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	u16 i;

	if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
		return;

	in_mad = kmalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	guid_tbl_blk_num *= 4;

	for (i = 0; i < 4; i++) {
		if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
			continue;
		memset(in_mad, 0, sizeof *in_mad);
		memset(out_mad, 0, sizeof *out_mad);

		in_mad->base_version = 1;
		in_mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
		in_mad->class_version = 1;
		in_mad->method = IB_MGMT_METHOD_GET;
		in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
		in_mad->attr_mod = cpu_to_be32(guid_tbl_blk_num + i);

		if (mlx4_MAD_IFC(dev,
				 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
				 port_num, NULL, NULL, in_mad, out_mad)) {
			mlx4_ib_warn(&dev->ib_dev, "GUID INFO MAD_IFC failed\n");
			goto out;
		}

		mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
						    port_num,
						    (u8 *)(&out_mad->data));
		mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
						     port_num,
						     (u8 *)(&out_mad->data));
	}

out:
	kfree(in_mad);
	kfree(out_mad);
}

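/*
 * Deferred (workqueue) handler for PORT_MGMT_CHANGE EQEs. Dispatches on
 * eqe->subtype: PortInfo changes (master SM LID/SL, LID, GID prefix,
 * client rereg), P_Key table changes, GUID-info changes and SL-to-VL
 * map updates.
 */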
void handle_port_mgmt_change_event(struct work_struct *work)
{
	struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
	struct mlx4_ib_dev *dev = ew->ib_dev;
	struct mlx4_eqe *eqe = &(ew->ib_eqe);
	u8 port = eqe->event.port_mgmt_change.port;
	u32 changed_attr;
	u32 tbl_block;
	u32 change_bitmap;

	switch (eqe->subtype) {
	case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
		changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);

		/* Update the SM ah - this must be done before handling
		 * the other changed attributes so that MADs can be sent to the SM.
		 */
		if (changed_attr & MSTR_SM_CHANGE_MASK) {
			u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
			u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;

			update_sm_ah(dev, port, lid, sl);
		}

		/* Check if it is a lid change event */
		if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
			handle_lid_change_event(dev, port);

		/* Generate GUID changed event */
		if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
			if (mlx4_is_master(dev->dev)) {
				union ib_gid gid;
				int err = 0;

				if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
					err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
				else
					gid.global.subnet_prefix =
						eqe->event.port_mgmt_change.params.port_info.gid_prefix;
				if (err) {
					pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
						port, err);
				} else {
					pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
						 port,
						 (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
						 be64_to_cpu(gid.global.subnet_prefix));
					atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
						     be64_to_cpu(gid.global.subnet_prefix));
				}
			}
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
			/* if master, notify all slaves */
			if (mlx4_is_master(dev->dev))
				mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
							    MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
		}

		if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
			handle_client_rereg_event(dev, port);
		break;

	case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
		mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
		if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
			propagate_pkey_ev(dev, port, eqe);
		break;
	case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
		/* paravirtualized master's guid is guid 0 -- does not change */
		if (!mlx4_is_master(dev->dev))
			mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
		/* if master, notify relevant slaves */
		else if (!dev->sriov.is_going_down) {
			tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
			change_bitmap = GET_MASK_FROM_EQE(eqe);
			handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
		}
		break;

	case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
		/* cache sl-to-vl mapping changes for use in
		 * filling the QP1 LRH VL field when sending packets
		 */
		if (!mlx4_is_slave(dev->dev)) {
			union sl2vl_tbl_to_u64 sl2vl64;
			int jj;

			for (jj = 0; jj < 8; jj++) {
				sl2vl64.sl8[jj] =
					eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
				pr_debug("port %u, sl2vl[%d] = %02x\n",
					 port, jj, sl2vl64.sl8[jj]);
			}
			atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
		}
		break;
	default:
		pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
			eqe->subtype);
	}

	kfree(ew);
}

void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
			    enum ib_event_type type)
{
	struct ib_event event;

	event.device = &dev->ib_dev;
	event.element.port_num = port_num;
	event.event = type;

	ib_dispatch_event(&event);
}

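/*
 * CQ completion callbacks for the paravirt QPs. They only queue the
 * polling work while the context is active and the device is not being
 * torn down; going_down_lock makes this check atomic with shutdown.
 */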
static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

static void mlx4_ib_wire_comp_handler(struct ib_cq *cq, void *arg)
{
	unsigned long flags;
	struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
		queue_work(ctx->wi_wq, &ctx->work);
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
}

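/*
 * Receive WRIDs encode the ring index in the low bits, the proxy QP
 * type at MLX4_TUN_QPN_SHIFT, and the recv flag at
 * MLX4_TUN_SEND_WRID_SHIFT. As an illustrative example (not taken from
 * the source): reposting buffer 5 on the GSI proxy (proxy_qpt == 1)
 * yields wr_id = 5 | (1ULL << 32) | (1ULL << 34) = 0x500000005.
 */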
static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
				  struct mlx4_ib_demux_pv_qp *tun_qp,
				  int index)
{
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	const struct ib_recv_wr *bad_recv_wr;
	int size;

	size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
		sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);

	sg_list.addr = tun_qp->ring[index].map;
	sg_list.length = size;
	sg_list.lkey = ctx->pd->local_dma_lkey;

	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;
	recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
		MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
	ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
				      size, DMA_FROM_DEVICE);
	return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
}

static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
					int slave, struct ib_sa_mad *sa_mad)
{
	int ret = 0;

	/* dispatch to different sa handlers */
	switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
	case IB_SA_ATTR_MC_MEMBER_REC:
		ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
		break;
	default:
		break;
	}
	return ret;
}

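/*
 * Send a MAD from a slave out the real wire QP0/QP1. TX flow control is
 * a head/tail pair over a power-of-two ring: the send path bumps
 * tx_ix_head (masked into a slot index), the completion path bumps
 * tx_ix_tail, and a full ring (head - tail >= size - 1) returns
 * -EAGAIN so the caller can retry.
 */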
int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
			 enum ib_qp_type dest_qpt, u16 pkey_index,
			 u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
			 u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
{
	struct ib_sge list;
	struct ib_ud_wr wr;
	const struct ib_send_wr *bad_wr;
	struct mlx4_ib_demux_pv_ctx *sqp_ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct mlx4_mad_snd_buf *sqp_mad;
	struct ib_ah *ah;
	struct ib_qp *send_qp = NULL;
	unsigned wire_tx_ix = 0;
	u16 wire_pkey_ix;
	int src_qpnum;
	int ret;

	sqp_ctx = dev->sriov.sqps[port - 1];

	/* check if proxy qp was created */
	if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
		return -EAGAIN;

	if (dest_qpt == IB_QPT_SMI) {
		src_qpnum = 0;
		sqp = &sqp_ctx->qp[0];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
	} else {
		src_qpnum = 1;
		sqp = &sqp_ctx->qp[1];
		wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
	}

	send_qp = sqp->qp;

	ah = rdma_zalloc_drv_obj(sqp_ctx->pd->device, ib_ah);
	if (!ah)
		return -ENOMEM;

	ah->device = sqp_ctx->pd->device;
	ah->pd = sqp_ctx->pd;

	/* create ah */
	ret = mlx4_ib_create_ah_slave(ah, attr,
				      rdma_ah_retrieve_grh(attr)->sgid_index,
				      s_mac, vlan_id);
	if (ret)
		goto out;

	spin_lock(&sqp->tx_lock);
	if (sqp->tx_ix_head - sqp->tx_ix_tail >=
	    (MLX4_NUM_WIRE_BUFS - 1))
		ret = -EAGAIN;
	else
		wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_WIRE_BUFS - 1);
	spin_unlock(&sqp->tx_lock);
	if (ret)
		goto out;

	sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
	kfree(sqp->tx_ring[wire_tx_ix].ah);
	sqp->tx_ring[wire_tx_ix].ah = ah;
	ib_dma_sync_single_for_cpu(&dev->ib_dev,
				   sqp->tx_ring[wire_tx_ix].buf.map,
				   sizeof (struct mlx4_mad_snd_buf),
				   DMA_TO_DEVICE);

	memcpy(&sqp_mad->payload, mad, sizeof *mad);

	ib_dma_sync_single_for_device(&dev->ib_dev,
				      sqp->tx_ring[wire_tx_ix].buf.map,
				      sizeof (struct mlx4_mad_snd_buf),
				      DMA_TO_DEVICE);

	list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
	list.length = sizeof (struct mlx4_mad_snd_buf);
	list.lkey = sqp_ctx->pd->local_dma_lkey;

	wr.ah = ah;
	wr.port_num = port;
	wr.pkey_index = wire_pkey_ix;
	wr.remote_qkey = qkey;
	wr.remote_qpn = remote_qpn;
	wr.wr.next = NULL;
	wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
	wr.wr.sg_list = &list;
	wr.wr.num_sge = 1;
	wr.wr.opcode = IB_WR_SEND;
	wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
	if (!ret)
		return 0;

	/* post failed: reclaim the ring slot and free the AH */
	spin_lock(&sqp->tx_lock);
	sqp->tx_ix_tail++;
	spin_unlock(&sqp->tx_lock);
	sqp->tx_ring[wire_tx_ix].ah = NULL;
out:
	kfree(ah);
	return ret;
}

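/*
 * GID-index paravirtualization: on IB each slave owns a single GID at
 * table index == slave number; on RoCE each slave owns a block of GIDs
 * starting at a per-slave base, so the slave's own index is offset by
 * that base.
 */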
static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		return slave;
	return mlx4_get_base_gid_ix(dev->dev, slave, port);
}

static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
				    struct rdma_ah_attr *ah_attr)
{
	struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);

	if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
		grh->sgid_index = slave;
	else
		grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
}

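/*
 * Multiplex a MAD received on a tunnel QP onto the wire on behalf of a
 * slave: validate that the source proxy QP really belongs to that
 * slave, stamp the slave number into the MSB of the transaction ID so
 * responses can be demultiplexed back later, filter by management
 * class, then rebuild a real address handle from the tunneled mlx4_av
 * and send via mlx4_ib_send_to_wire().
 */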
static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
{
	struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
	struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
	int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
	struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
	struct mlx4_ib_ah ah;
	struct rdma_ah_attr ah_attr;
	u8 *slave_id;
	int slave;
	int port;
	u16 vlan_id;
	u8 qos;
	u8 *dmac;
	int sts;

	/* Get slave that sent this packet */
	if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
	    wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
	    (wc->src_qp & 0x1) != ctx->port - 1 ||
	    wc->src_qp & 0x4) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
		return;
	}
	/* each slave owns a contiguous block of 8 proxy QPs */
	slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
	if (slave != ctx->slave) {
		mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: belongs to another slave\n",
			     wc->src_qp);
		return;
	}

	/* Map transaction ID */
	ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
				   sizeof (struct mlx4_tunnel_mad),
				   DMA_FROM_DEVICE);
	switch (tunnel->mad.mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
	case IB_MGMT_METHOD_GET:
	case IB_MGMT_METHOD_REPORT:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_DELETE:
	case IB_SA_METHOD_GET_MULTI:
	case IB_SA_METHOD_GET_TRACE_TBL:
		slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
		if (*slave_id) {
			mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d class:%d slave:%d\n",
				     *slave_id, tunnel->mad.mad_hdr.mgmt_class,
				     slave);
			return;
		} else
			*slave_id = slave;
		break;
	default:
		/* nothing */;
	}

	/* Class-specific handling */
	switch (tunnel->mad.mad_hdr.mgmt_class) {
	case IB_MGMT_CLASS_SUBN_LID_ROUTED:
	case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
		if (slave != mlx4_master_func_num(dev->dev) &&
		    !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
			return;
		break;
	case IB_MGMT_CLASS_SUBN_ADM:
		if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_sa_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_CM:
		if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
						 (struct ib_mad *) &tunnel->mad))
			return;
		break;
	case IB_MGMT_CLASS_DEVICE_MGMT:
		if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
		    tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
			return;
		break;
	default:
		/* Drop unsupported classes for slaves in tunnel mode */
		if (slave != mlx4_master_func_num(dev->dev)) {
			mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d for slave:%d\n",
				     tunnel->mad.mad_hdr.mgmt_class, slave);
			return;
		}
	}

	/* We are using standard ib_core services to send the mad, so generate a
	 * standard address handle by decoding the tunnelled mlx4_ah fields */
	memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
	ah.ibah.device = ctx->ib_dev;

	port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
	port = mlx4_slave_convert_port(dev->dev, slave, port);
	if (port < 0)
		return;
	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
	ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);

	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
	if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
		fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
	dmac = rdma_ah_retrieve_dmac(&ah_attr);
	if (dmac)
		memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
	vlan_id = be16_to_cpu(tunnel->hdr.vlan);
	/* if the slave has a default vlan, use it */
	if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
					&vlan_id, &qos))
		rdma_ah_set_sl(&ah_attr, qos);

	sts = mlx4_ib_send_to_wire(dev, slave, ctx->port,
				   is_proxy_qp0(dev, wc->src_qp, slave) ?
				   IB_QPT_SMI : IB_QPT_GSI,
				   be16_to_cpu(tunnel->hdr.pkey_index),
				   be32_to_cpu(tunnel->hdr.remote_qpn),
				   be32_to_cpu(tunnel->hdr.qkey),
				   &ah_attr, wc->smac, vlan_id, &tunnel->mad);
	if (sts)
		pr_debug("failed sending %s to wire on behalf of slave %d (%d)\n",
			 is_proxy_qp0(dev, wc->src_qp, slave) ? "SMI" : "GSI",
			 slave, sts);
}

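/*
 * Allocate and DMA-map the RX and TX rings for one paravirt QP. Buffer
 * sizes depend on direction: tunnel QPs carry mlx4_tunnel_mad /
 * mlx4_rcv_tunnel_mad (MAD plus tunnel header), while wire QPs carry
 * plain mlx4_mad_rcv_buf / mlx4_mad_snd_buf.
 */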
static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				 enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;
	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	tun_qp->ring = kcalloc(nmbr_bufs,
			       sizeof(struct mlx4_ib_buf),
			       GFP_KERNEL);
	if (!tun_qp->ring)
		return -ENOMEM;

	tun_qp->tx_ring = kcalloc(nmbr_bufs,
				  sizeof (struct mlx4_ib_tun_tx_buf),
				  GFP_KERNEL);
	if (!tun_qp->tx_ring) {
		kfree(tun_qp->ring);
		tun_qp->ring = NULL;
		return -ENOMEM;
	}

	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
		if (!tun_qp->ring[i].addr)
			goto err;
		tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
							tun_qp->ring[i].addr,
							rx_buf_size,
							DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
			kfree(tun_qp->ring[i].addr);
			goto err;
		}
	}

	for (i = 0; i < nmbr_bufs; i++) {
		tun_qp->tx_ring[i].buf.addr =
			kmalloc(tx_buf_size, GFP_KERNEL);
		if (!tun_qp->tx_ring[i].buf.addr)
			goto tx_err;
		tun_qp->tx_ring[i].buf.map =
			ib_dma_map_single(ctx->ib_dev,
					  tun_qp->tx_ring[i].buf.addr,
					  tx_buf_size,
					  DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ctx->ib_dev,
					 tun_qp->tx_ring[i].buf.map)) {
			kfree(tun_qp->tx_ring[i].buf.addr);
			goto tx_err;
		}
		tun_qp->tx_ring[i].ah = NULL;
	}
	spin_lock_init(&tun_qp->tx_lock);
	tun_qp->tx_ix_head = 0;
	tun_qp->tx_ix_tail = 0;
	tun_qp->proxy_qpt = qp_type;

	return 0;

tx_err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	i = nmbr_bufs;
err:
	while (i > 0) {
		--i;
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
}

static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
				    enum ib_qp_type qp_type, int is_tun)
{
	int i;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	int rx_buf_size, tx_buf_size;
	const int nmbr_bufs = is_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return;

	tun_qp = &ctx->qp[qp_type];
	if (is_tun) {
		rx_buf_size = sizeof (struct mlx4_tunnel_mad);
		tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
	} else {
		rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
		tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
		if (tun_qp->tx_ring[i].ah)
			rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
	}
	kfree(tun_qp->tx_ring);
	kfree(tun_qp->ring);
}

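/*
 * Work handler for tunnel QP completions: re-arm the CQ, then drain it.
 * Successful receives are multiplexed out to the wire and the buffer is
 * reposted; send completions (successful or failed) release the AH and
 * advance tx_ix_tail to free the ring slot.
 */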
static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct ib_wc wc;
	int ret;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_RECV:
				mlx4_ib_multiplex_mad(ctx, &wc);
				ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
							     wc.wr_id &
							     (MLX4_NUM_TUNNEL_BUFS - 1));
				if (ret)
					pr_err("Failed reposting tunnel buf:%lld\n",
					       wc.wr_id);
				break;
			case IB_WC_SEND:
				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
						(MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0);
				tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
					= NULL;
				spin_lock(&tun_qp->tx_lock);
				tun_qp->tx_ix_tail++;
				spin_unlock(&tun_qp->tx_lock);
			}
		}
	}
}

static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct mlx4_ib_demux_pv_ctx *sqp = qp_context;

	/* It's worse than that! He's dead, Jim! */
	pr_err("Fatal error (%d) on a MAD QP on port %d\n",
	       event->event, sqp->port);
}

static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
			 enum ib_qp_type qp_type, int create_tun)
{
	int i, ret;
	struct mlx4_ib_demux_pv_qp *tun_qp;
	struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
	struct ib_qp_attr attr;
	int qp_attr_mask_INIT;
	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (qp_type > IB_QPT_GSI)
		return -EINVAL;

	tun_qp = &ctx->qp[qp_type];

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.init_attr.send_cq = ctx->cq;
	qp_init_attr.init_attr.recv_cq = ctx->cq;
	qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.init_attr.cap.max_send_wr = nmbr_bufs;
	qp_init_attr.init_attr.cap.max_recv_wr = nmbr_bufs;
	qp_init_attr.init_attr.cap.max_send_sge = 1;
	qp_init_attr.init_attr.cap.max_recv_sge = 1;
	if (create_tun) {
		qp_init_attr.init_attr.qp_type = IB_QPT_UD;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
		qp_init_attr.port = ctx->port;
		qp_init_attr.slave = ctx->slave;
		qp_init_attr.proxy_qp_type = qp_type;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
			   IB_QP_QKEY | IB_QP_PORT;
	} else {
		qp_init_attr.init_attr.qp_type = qp_type;
		qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
		qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
	}
	qp_init_attr.init_attr.port_num = ctx->port;
	qp_init_attr.init_attr.qp_context = ctx;
	qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
	tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
	if (IS_ERR(tun_qp->qp)) {
		ret = PTR_ERR(tun_qp->qp);
		tun_qp->qp = NULL;
		pr_err("Couldn't create %s QP (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		return ret;
	}

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_INIT;
	ret = 0;
	if (create_tun)
		ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
					      ctx->port, IB_DEFAULT_PKEY_FULL,
					      &attr.pkey_index);
	if (ret || !create_tun)
		attr.pkey_index =
			to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
	attr.qkey = IB_QP1_QKEY;
	attr.port_num = ctx->port;
	ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
	if (ret) {
		pr_err("Couldn't change %s qp state to INIT (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTR (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn = 0;
	ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
	if (ret) {
		pr_err("Couldn't change %s qp state to RTS (%d)\n",
		       create_tun ? "tunnel" : "special", ret);
		goto err_qp;
	}

	for (i = 0; i < nmbr_bufs; i++) {
		ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
		if (ret) {
			pr_err("mlx4_ib_post_pv_qp_buf error (err = %d, i = %d)\n",
			       ret, i);
			goto err_qp;
		}
	}
	return 0;

err_qp:
	ib_destroy_qp(tun_qp->qp);
	tun_qp->qp = NULL;
	return ret;
}

/*
 * IB MAD completion callback for real SQPs
 */
static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
{
	struct mlx4_ib_demux_pv_ctx *ctx;
	struct mlx4_ib_demux_pv_qp *sqp;
	struct ib_wc wc;
	struct ib_grh *grh;
	struct ib_mad *mad;

	ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
	ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);

	while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
		sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
		if (wc.status == IB_WC_SUCCESS) {
			switch (wc.opcode) {
			case IB_WC_SEND:
				kfree(sqp->tx_ring[wc.wr_id &
				      (MLX4_NUM_WIRE_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
				break;
			case IB_WC_RECV:
				mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_WIRE_BUFS - 1)].addr))->payload);
				grh = &(((struct mlx4_mad_rcv_buf *)
						(sqp->ring[wc.wr_id &
						(MLX4_NUM_WIRE_BUFS - 1)].addr))->grh);
				mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
				if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
							   (MLX4_NUM_WIRE_BUFS - 1)))
					pr_err("Failed reposting SQP buf:%lld\n",
					       wc.wr_id);
				break;
			default:
				break;
			}
		} else {
			pr_debug("mlx4_ib: completion error on SQP: %d. status = %d, wrid = 0x%llx\n",
				 ctx->slave, wc.status, wc.wr_id);
			if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
				kfree(sqp->tx_ring[wc.wr_id &
				      (MLX4_NUM_WIRE_BUFS - 1)].ah);
				sqp->tx_ring[wc.wr_id & (MLX4_NUM_WIRE_BUFS - 1)].ah
					= NULL;
				spin_lock(&sqp->tx_lock);
				sqp->tx_ix_tail++;
				spin_unlock(&sqp->tx_lock);
			}
		}
	}
}

static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
			   struct mlx4_ib_demux_pv_ctx **ret_ctx)
{
	struct mlx4_ib_demux_pv_ctx *ctx;

	*ret_ctx = NULL;
	ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ib_dev = &dev->ib_dev;
	ctx->port = port;
	ctx->slave = slave;
	*ret_ctx = ctx;
	return 0;
}

static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
{
	if (dev->sriov.demux[port - 1].tun[slave]) {
		kfree(dev->sriov.demux[port - 1].tun[slave]);
		dev->sriov.demux[port - 1].tun[slave] = NULL;
	}
}

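/*
 * Bring up the paravirt machinery for one slave/port pair: allocate RX
 * and TX buffers, then CQ, PD and QP(s), in that order; QP0 resources
 * exist only when the link layer is IB. The error unwind below releases
 * everything in exactly the reverse order.
 */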
static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
			       int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
{
	int ret, cq_size;
	struct ib_cq_init_attr cq_attr = {};
	const int nmbr_bufs = create_tun ? MLX4_NUM_TUNNEL_BUFS : MLX4_NUM_WIRE_BUFS;

	if (ctx->state != DEMUX_PV_STATE_DOWN)
		return -EEXIST;

	ctx->state = DEMUX_PV_STATE_STARTING;
	/* have QP0 only if link layer is IB */
	if (rdma_port_get_link_layer(ibdev, ctx->port) ==
	    IB_LINK_LAYER_INFINIBAND)
		ctx->has_smi = 1;

	if (ctx->has_smi) {
		ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
			goto err_out;
		}
	}

	ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
		goto err_out_qp0;
	}

	cq_size = 2 * nmbr_bufs;
	if (ctx->has_smi)
		cq_size *= 2;

	cq_attr.cqe = cq_size;
	ctx->cq = ib_create_cq(ctx->ib_dev,
			       create_tun ? mlx4_ib_tunnel_comp_handler : mlx4_ib_wire_comp_handler,
			       NULL, ctx, &cq_attr);
	if (IS_ERR(ctx->cq)) {
		ret = PTR_ERR(ctx->cq);
		pr_err("Couldn't create tunnel CQ (%d)\n", ret);
		goto err_buf;
	}

	ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
	if (IS_ERR(ctx->pd)) {
		ret = PTR_ERR(ctx->pd);
		pr_err("Couldn't create tunnel PD (%d)\n", ret);
		goto err_cq;
	}

	if (ctx->has_smi) {
		ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
		if (ret) {
			pr_err("Couldn't create %s QP0 (%d)\n",
			       create_tun ? "tunnel for" : "", ret);
			goto err_pd;
		}
	}

	ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
	if (ret) {
		pr_err("Couldn't create %s QP1 (%d)\n",
		       create_tun ? "tunnel for" : "", ret);
		goto err_qp0;
	}

	if (create_tun)
		INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
	else
		INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);

	ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
	ctx->wi_wq = to_mdev(ibdev)->sriov.demux[port - 1].wi_wq;

	ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		pr_err("Couldn't arm tunnel cq (%d)\n", ret);
		goto err_wq;
	}
	ctx->state = DEMUX_PV_STATE_ACTIVE;
	return 0;

err_wq:
	ctx->wq = NULL;
	ib_destroy_qp(ctx->qp[1].qp);
	ctx->qp[1].qp = NULL;

err_qp0:
	if (ctx->has_smi)
		ib_destroy_qp(ctx->qp[0].qp);
	ctx->qp[0].qp = NULL;

err_pd:
	ib_dealloc_pd(ctx->pd);
	ctx->pd = NULL;

err_cq:
	ib_destroy_cq(ctx->cq);
	ctx->cq = NULL;

err_buf:
	mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);

err_out_qp0:
	if (ctx->has_smi)
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
err_out:
	ctx->state = DEMUX_PV_STATE_DOWN;
	return ret;
}

static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
				 struct mlx4_ib_demux_pv_ctx *ctx, int flush)
{
	if (!ctx)
		return;
	if (ctx->state > DEMUX_PV_STATE_DOWN) {
		ctx->state = DEMUX_PV_STATE_DOWNING;
		if (flush)
			flush_workqueue(ctx->wq);
		if (ctx->has_smi) {
			ib_destroy_qp(ctx->qp[0].qp);
			ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
		}
		ib_destroy_qp(ctx->qp[1].qp);
		ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
		ib_dealloc_pd(ctx->pd);
		ctx->pd = NULL;
		ib_destroy_cq(ctx->cq);
		ctx->cq = NULL;
		ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

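/*
 * Bring a slave's tunnel QPs (and, for the master, the real SQPs) up or
 * down on one port. do_init != 0 creates the resources; do_init == 0
 * cleans the slave's multicast state and tears them down.
 */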
static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
				  int port, int do_init)
{
	int ret = 0;

	if (!do_init) {
		clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
		/* for master, destroy real sqp resources */
		if (slave == mlx4_master_func_num(dev->dev))
			destroy_pv_resources(dev, slave, port,
					     dev->sriov.sqps[port - 1], 1);
		/* destroy the tunnel qp resources */
		destroy_pv_resources(dev, slave, port,
				     dev->sriov.demux[port - 1].tun[slave], 1);
		return 0;
	}

	/* create the tunnel qp resources */
	ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
				  dev->sriov.demux[port - 1].tun[slave]);

	/* for master, create the real sqp resources */
	if (!ret && slave == mlx4_master_func_num(dev->dev))
		ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
					  dev->sriov.sqps[port - 1]);
	return ret;
}

void mlx4_ib_tunnels_update_work(struct work_struct *work)
{
	struct mlx4_ib_demux_work *dmxw;

	dmxw = container_of(work, struct mlx4_ib_demux_work, work);
	mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
			       dmxw->do_init);
	kfree(dmxw);
}

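/*
 * Per-port demux context: one paravirt context slot per possible slave,
 * plus three ordered (single-threaded) workqueues -- "mlx4_ibt%d" for
 * tunnel QP completions, "mlx4_ibwi%d" for wire QP completions and
 * "mlx4_ibud%d" for slave up/down events.
 */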
mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev * dev,struct mlx4_ib_demux_ctx * ctx,int port)2154*4882a593Smuzhiyun static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2155*4882a593Smuzhiyun struct mlx4_ib_demux_ctx *ctx,
2156*4882a593Smuzhiyun int port)
2157*4882a593Smuzhiyun {
2158*4882a593Smuzhiyun char name[12];
2159*4882a593Smuzhiyun int ret = 0;
2160*4882a593Smuzhiyun int i;
2161*4882a593Smuzhiyun
2162*4882a593Smuzhiyun ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2163*4882a593Smuzhiyun sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2164*4882a593Smuzhiyun if (!ctx->tun)
2165*4882a593Smuzhiyun return -ENOMEM;
2166*4882a593Smuzhiyun
2167*4882a593Smuzhiyun ctx->dev = dev;
2168*4882a593Smuzhiyun ctx->port = port;
2169*4882a593Smuzhiyun ctx->ib_dev = &dev->ib_dev;
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun for (i = 0;
2172*4882a593Smuzhiyun i < min(dev->dev->caps.sqp_demux,
2173*4882a593Smuzhiyun (u16)(dev->dev->persist->num_vfs + 1));
2174*4882a593Smuzhiyun i++) {
2175*4882a593Smuzhiyun struct mlx4_active_ports actv_ports =
2176*4882a593Smuzhiyun mlx4_get_active_ports(dev->dev, i);
2177*4882a593Smuzhiyun
2178*4882a593Smuzhiyun if (!test_bit(port - 1, actv_ports.ports))
2179*4882a593Smuzhiyun continue;
2180*4882a593Smuzhiyun
2181*4882a593Smuzhiyun ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2182*4882a593Smuzhiyun if (ret) {
2183*4882a593Smuzhiyun ret = -ENOMEM;
2184*4882a593Smuzhiyun goto err_mcg;
2185*4882a593Smuzhiyun }
2186*4882a593Smuzhiyun }
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun ret = mlx4_ib_mcg_port_init(ctx);
2189*4882a593Smuzhiyun if (ret) {
2190*4882a593Smuzhiyun pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2191*4882a593Smuzhiyun goto err_mcg;
2192*4882a593Smuzhiyun }
2193*4882a593Smuzhiyun
2194*4882a593Smuzhiyun snprintf(name, sizeof(name), "mlx4_ibt%d", port);
2195*4882a593Smuzhiyun ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2196*4882a593Smuzhiyun if (!ctx->wq) {
2197*4882a593Smuzhiyun pr_err("Failed to create tunnelling WQ for port %d\n", port);
2198*4882a593Smuzhiyun ret = -ENOMEM;
2199*4882a593Smuzhiyun goto err_wq;
2200*4882a593Smuzhiyun }
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun snprintf(name, sizeof(name), "mlx4_ibwi%d", port);
2203*4882a593Smuzhiyun ctx->wi_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2204*4882a593Smuzhiyun if (!ctx->wi_wq) {
2205*4882a593Smuzhiyun pr_err("Failed to create wire WQ for port %d\n", port);
2206*4882a593Smuzhiyun ret = -ENOMEM;
2207*4882a593Smuzhiyun goto err_wiwq;
2208*4882a593Smuzhiyun }
2209*4882a593Smuzhiyun
2210*4882a593Smuzhiyun snprintf(name, sizeof(name), "mlx4_ibud%d", port);
2211*4882a593Smuzhiyun ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2212*4882a593Smuzhiyun if (!ctx->ud_wq) {
2213*4882a593Smuzhiyun pr_err("Failed to create up/down WQ for port %d\n", port);
2214*4882a593Smuzhiyun ret = -ENOMEM;
2215*4882a593Smuzhiyun goto err_udwq;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun return 0;
2219*4882a593Smuzhiyun
err_udwq:
	destroy_workqueue(ctx->wi_wq);
	ctx->wi_wq = NULL;

err_wiwq:
	destroy_workqueue(ctx->wq);
	ctx->wq = NULL;

err_wq:
	mlx4_ib_mcg_port_cleanup(ctx, 1);
err_mcg:
	for (i = 0; i < dev->dev->caps.sqp_demux; i++)
		free_pv_object(dev, i, port);
	kfree(ctx->tun);
	ctx->tun = NULL;
	return ret;
}

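/*
 * Tear down the special QP (SMI/GSI) context for one port: drain its
 * workqueue, destroy the proxy QPs and their buffers, then release the
 * PD and CQ.
 */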
static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
{
	if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
		sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
		flush_workqueue(sqp_ctx->wq);
		if (sqp_ctx->has_smi) {
			ib_destroy_qp(sqp_ctx->qp[0].qp);
			sqp_ctx->qp[0].qp = NULL;
			mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
		}
		ib_destroy_qp(sqp_ctx->qp[1].qp);
		sqp_ctx->qp[1].qp = NULL;
		mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
		ib_dealloc_pd(sqp_ctx->pd);
		sqp_ctx->pd = NULL;
		ib_destroy_cq(sqp_ctx->cq);
		sqp_ctx->cq = NULL;
		sqp_ctx->state = DEMUX_PV_STATE_DOWN;
	}
}

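/*
 * Tear down a per-port demux context: mark every PV context as going
 * down, drain the tunnel and wire workqueues, then destroy the
 * per-function PV resources and the workqueues themselves.
 */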
static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
{
	int i;

	if (ctx) {
		struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);

		mlx4_ib_mcg_port_cleanup(ctx, 1);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			if (!ctx->tun[i])
				continue;
			if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
				ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
		}
		flush_workqueue(ctx->wq);
		flush_workqueue(ctx->wi_wq);
		for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
			destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
			free_pv_object(dev, i, ctx->port);
		}
		kfree(ctx->tun);
		destroy_workqueue(ctx->ud_wq);
		destroy_workqueue(ctx->wi_wq);
		destroy_workqueue(ctx->wq);
	}
}

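/*
 * Bring the master's tunnel QPs up or down on every port, depending on
 * do_init.
 */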
static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
{
	int i;

	if (!mlx4_is_master(dev->dev))
		return;
	/* initialize or tear down tunnel QPs for the master */
	for (i = 0; i < dev->dev->caps.num_ports; i++)
		mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev),
				       i + 1, do_init);
}

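/*
 * Initialize SR-IOV paravirtualization support. Slaves only set up the
 * CM paravirtualization state and run in QP1 tunnel mode; the master
 * additionally assigns node GUIDs, starts the alias GUID service,
 * registers sysfs entries, and builds a demux context per port.
 */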
int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
{
	int i = 0;
	int err;

	if (!mlx4_is_mfunc(dev->dev))
		return 0;

	dev->sriov.is_going_down = 0;
	spin_lock_init(&dev->sriov.going_down_lock);
	mlx4_ib_cm_paravirt_init(dev);

	mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");

	if (mlx4_is_slave(dev->dev)) {
		mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
		return 0;
	}

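	/*
	 * The master function keeps the device's own node GUID; every
	 * other function gets a driver-generated one.
	 */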
	for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
		if (i == mlx4_master_func_num(dev->dev))
			mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
		else
			mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
	}

	err = mlx4_ib_init_alias_guid_service(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
		goto paravirt_err;
	}

	err = mlx4_ib_device_register_sysfs(dev);
	if (err) {
		mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
		goto sysfs_err;
	}

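	/*
	 * Seed each port's demux context with the port GID (GUID cache
	 * and subnet prefix), then allocate its PV objects and the demux
	 * context itself.
	 */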
	mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
		     dev->dev->caps.sqp_demux);
	for (i = 0; i < dev->num_ports; i++) {
		union ib_gid gid;

		err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
		if (err)
			goto demux_err;
		dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
		atomic64_set(&dev->sriov.demux[i].subnet_prefix,
			     be64_to_cpu(gid.global.subnet_prefix));
		err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
				      &dev->sriov.sqps[i]);
		if (err)
			goto demux_err;
		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
		if (err)
			goto free_pv;
	}
	mlx4_ib_master_tunnels(dev, 1);
	return 0;

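/*
 * On failure, release the current iteration's PV object, free what all
 * previous iterations allocated, then undo sysfs registration, the
 * alias GUID service, and the CM paravirtualization state.
 */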
free_pv:
	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
demux_err:
	while (--i >= 0) {
		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
	}
	mlx4_ib_device_unregister_sysfs(dev);

sysfs_err:
	mlx4_ib_destroy_alias_guid_service(dev);

paravirt_err:
	mlx4_ib_cm_paravirt_clean(dev, -1);

	return err;
}

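/*
 * Shut down SR-IOV support: mark the device as going down under the
 * lock so no new paravirt work is queued, then (on the master) drain
 * the up/down workqueues and free the special QP and demux contexts
 * for every port.
 */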
void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
{
	int i;
	unsigned long flags;

	if (!mlx4_is_mfunc(dev->dev))
		return;

	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
	dev->sriov.is_going_down = 1;
	spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
	if (mlx4_is_master(dev->dev)) {
		for (i = 0; i < dev->num_ports; i++) {
			flush_workqueue(dev->sriov.demux[i].ud_wq);
			mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
			kfree(dev->sriov.sqps[i]);
			dev->sriov.sqps[i] = NULL;
			mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
		}

		mlx4_ib_cm_paravirt_clean(dev, -1);
		mlx4_ib_destroy_alias_guid_service(dev);
		mlx4_ib_device_unregister_sysfs(dev);
	}
}