/*
 * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2009 HNR Consulting. All rights reserved.
 * Copyright (c) 2014,2018 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/xarray.h>
#include <rdma/ib_cache.h>

#include "mad_priv.h"
#include "core_priv.h"
#include "mad_rmpp.h"
#include "smi.h"
#include "opa_smi.h"
#include "agent.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ib_mad.h>

57*4882a593Smuzhiyun #ifdef CONFIG_TRACEPOINTS
create_mad_addr_info(struct ib_mad_send_wr_private * mad_send_wr,struct ib_mad_qp_info * qp_info,struct trace_event_raw_ib_mad_send_template * entry)58*4882a593Smuzhiyun static void create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr,
59*4882a593Smuzhiyun struct ib_mad_qp_info *qp_info,
60*4882a593Smuzhiyun struct trace_event_raw_ib_mad_send_template *entry)
61*4882a593Smuzhiyun {
62*4882a593Smuzhiyun u16 pkey;
63*4882a593Smuzhiyun struct ib_device *dev = qp_info->port_priv->device;
64*4882a593Smuzhiyun u8 pnum = qp_info->port_priv->port_num;
65*4882a593Smuzhiyun struct ib_ud_wr *wr = &mad_send_wr->send_wr;
66*4882a593Smuzhiyun struct rdma_ah_attr attr = {};
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun rdma_query_ah(wr->ah, &attr);
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* These are common */
71*4882a593Smuzhiyun entry->sl = attr.sl;
72*4882a593Smuzhiyun ib_query_pkey(dev, pnum, wr->pkey_index, &pkey);
73*4882a593Smuzhiyun entry->pkey = pkey;
74*4882a593Smuzhiyun entry->rqpn = wr->remote_qpn;
75*4882a593Smuzhiyun entry->rqkey = wr->remote_qkey;
76*4882a593Smuzhiyun entry->dlid = rdma_ah_get_dlid(&attr);
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun #endif

static int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
static int mad_recvq_size = IB_MAD_QP_RECV_SIZE;

module_param_named(send_queue_size, mad_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");

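/*
 * Registered agents are indexed by hi_tid in an XArray so that received
 * responses can be routed back to the agent that sent the request.  The
 * ALLOC1 variant marks index 0 as busy, so a hi_tid of 0 is never
 * handed out even though the allocation limit below starts at 0.
 */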
static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
static u32 ib_mad_client_next;
static struct list_head ib_mad_port_list;

/* Port list lock */
static DEFINE_SPINLOCK(ib_mad_port_list_lock);

/* Forward declarations */
static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req);
static void remove_mad_reg_req(struct ib_mad_agent_private *priv);
static struct ib_mad_agent_private *find_mad_agent(
					struct ib_mad_port_private *port_priv,
					const struct ib_mad_hdr *mad);
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad);
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv);
static void timeout_sends(struct work_struct *work);
static void local_completions(struct work_struct *work);
static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class);
static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv);
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc);
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);

/*
 * Returns the ib_mad_port_private structure for a device/port, or NULL
 * if none exists.  Assumes ib_mad_port_list_lock is held.
 */
static inline struct ib_mad_port_private *
__ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;

	list_for_each_entry(entry, &ib_mad_port_list, port_list) {
		if (entry->device == device && entry->port_num == port_num)
			return entry;
	}
	return NULL;
}

/*
 * Wrapper function that returns the ib_mad_port_private structure for a
 * device/port, or NULL if none exists; takes the port list lock itself.
 */
static inline struct ib_mad_port_private *
ib_get_mad_port(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *entry;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	entry = __ib_get_mad_port(device, port_num);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	return entry;
}

static inline u8 convert_mgmt_class(u8 mgmt_class)
{
	/* Alias IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE to 0 */
	return mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE ?
		0 : mgmt_class;
}

static int get_spl_qp_index(enum ib_qp_type qp_type)
{
	switch (qp_type) {
	case IB_QPT_SMI:
		return 0;
	case IB_QPT_GSI:
		return 1;
	default:
		return -1;
	}
}

static int vendor_class_index(u8 mgmt_class)
{
	return mgmt_class - IB_MGMT_CLASS_VENDOR_RANGE2_START;
}

static int is_vendor_class(u8 mgmt_class)
{
	if ((mgmt_class < IB_MGMT_CLASS_VENDOR_RANGE2_START) ||
	    (mgmt_class > IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return 0;
	return 1;
}

static int is_vendor_oui(char *oui)
{
	if (oui[0] || oui[1] || oui[2])
		return 1;
	return 0;
}

static int is_vendor_method_in_use(
		struct ib_mad_mgmt_vendor_class *vendor_class,
		struct ib_mad_reg_req *mad_reg_req)
{
	struct ib_mad_mgmt_method_table *method;
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++) {
		if (!memcmp(vendor_class->oui[i], mad_reg_req->oui, 3)) {
			method = vendor_class->method_table[i];
			if (method) {
				if (method_in_use(&method, mad_reg_req))
					return 1;
				else
					break;
			}
		}
	}
	return 0;
}

int ib_response_mad(const struct ib_mad_hdr *hdr)
{
	return ((hdr->method & IB_MGMT_METHOD_RESP) ||
		(hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) ||
		((hdr->mgmt_class == IB_MGMT_CLASS_BM) &&
		 (hdr->attr_mod & IB_BM_ATTR_MOD_RESP)));
}
EXPORT_SYMBOL(ib_response_mad);

/*
 * ib_register_mad_agent - Register to send/receive MADs
 *
 * Context: Process context.
 */
struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
					   u8 port_num,
					   enum ib_qp_type qp_type,
					   struct ib_mad_reg_req *mad_reg_req,
					   u8 rmpp_version,
					   ib_mad_send_handler send_handler,
					   ib_mad_recv_handler recv_handler,
					   void *context,
					   u32 registration_flags)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent *ret = ERR_PTR(-EINVAL);
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_reg_req *reg_req = NULL;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	struct ib_mad_mgmt_method_table *method;
	int ret2, qpn;
	u8 mgmt_class, vclass;

	if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
	    (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
		return ERR_PTR(-EPROTONOSUPPORT);

	/* Validate parameters */
	qpn = get_spl_qp_index(qp_type);
	if (qpn == -1) {
		dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
				    __func__, qp_type);
		goto error1;
	}

	if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
		dev_dbg_ratelimited(&device->dev,
				    "%s: invalid RMPP Version %u\n",
				    __func__, rmpp_version);
		goto error1;
	}

	/* Validate MAD registration request if supplied */
	if (mad_reg_req) {
		if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: invalid Class Version %u\n",
					    __func__,
					    mad_reg_req->mgmt_class_version);
			goto error1;
		}
		if (!recv_handler) {
			dev_dbg_ratelimited(&device->dev,
					    "%s: no recv_handler\n", __func__);
			goto error1;
		}
		if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
			/*
			 * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only
			 * one in this range currently allowed
			 */
			if (mad_reg_req->mgmt_class !=
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid Mgmt Class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else if (mad_reg_req->mgmt_class == 0) {
			/*
			 * Class 0 is reserved in IBA and is used for
			 * aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
			 */
			dev_dbg_ratelimited(&device->dev,
					    "%s: Invalid Mgmt Class 0\n",
					    __func__);
			goto error1;
		} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
			/*
			 * If class is in "new" vendor range,
			 * ensure supplied OUI is not zero
			 */
			if (!is_vendor_oui(mad_reg_req->oui)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: No OUI specified for class 0x%x\n",
					__func__,
					mad_reg_req->mgmt_class);
				goto error1;
			}
		}
		/* Make sure class supplied is consistent with RMPP */
		if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
			if (rmpp_version) {
				dev_dbg_ratelimited(&device->dev,
					"%s: RMPP version for non-RMPP class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}

		/* Make sure class supplied is consistent with QP type */
		if (qp_type == IB_QPT_SMI) {
			if ((mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
			    (mad_reg_req->mgmt_class !=
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid SM QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		} else {
			if ((mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
			    (mad_reg_req->mgmt_class ==
					IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
				dev_dbg_ratelimited(&device->dev,
					"%s: Invalid GS QP type: class 0x%x\n",
					__func__, mad_reg_req->mgmt_class);
				goto error1;
			}
		}
	} else {
		/* No registration request supplied */
		if (!send_handler)
			goto error1;
		if (registration_flags & IB_MAD_USER_RMPP)
			goto error1;
	}

	/* Validate device and port */
	port_priv = ib_get_mad_port(device, port_num);
	if (!port_priv) {
		dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
				    __func__, port_num);
		ret = ERR_PTR(-ENODEV);
		goto error1;
	}

	/* Verify the QP requested is supported. For example, Ethernet devices
	 * will not have QP0.
	 */
	if (!port_priv->qp_info[qpn].qp) {
		dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
				    __func__, qpn);
		ret = ERR_PTR(-EPROTONOSUPPORT);
		goto error1;
	}

	/* Allocate structures */
	mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
	if (!mad_agent_priv) {
		ret = ERR_PTR(-ENOMEM);
		goto error1;
	}

	if (mad_reg_req) {
		reg_req = kmemdup(mad_reg_req, sizeof *reg_req, GFP_KERNEL);
		if (!reg_req) {
			ret = ERR_PTR(-ENOMEM);
			goto error3;
		}
	}

	/* Now, fill in the various structures */
	mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
	mad_agent_priv->reg_req = reg_req;
	mad_agent_priv->agent.rmpp_version = rmpp_version;
	mad_agent_priv->agent.device = device;
	mad_agent_priv->agent.recv_handler = recv_handler;
	mad_agent_priv->agent.send_handler = send_handler;
	mad_agent_priv->agent.context = context;
	mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;
	mad_agent_priv->agent.port_num = port_num;
	mad_agent_priv->agent.flags = registration_flags;
	spin_lock_init(&mad_agent_priv->lock);
	INIT_LIST_HEAD(&mad_agent_priv->send_list);
	INIT_LIST_HEAD(&mad_agent_priv->wait_list);
	INIT_LIST_HEAD(&mad_agent_priv->done_list);
	INIT_LIST_HEAD(&mad_agent_priv->rmpp_list);
	INIT_DELAYED_WORK(&mad_agent_priv->timed_work, timeout_sends);
	INIT_LIST_HEAD(&mad_agent_priv->local_list);
	INIT_WORK(&mad_agent_priv->local_work, local_completions);
	refcount_set(&mad_agent_priv->refcount, 1);
	init_completion(&mad_agent_priv->comp);

	ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type);
	if (ret2) {
		ret = ERR_PTR(ret2);
		goto error4;
	}

	/*
	 * The mlx4 driver uses the top byte to distinguish which virtual
	 * function generated the MAD, so we must avoid using it.
	 */
	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
			&ib_mad_client_next, GFP_KERNEL);
	if (ret2 < 0) {
		ret = ERR_PTR(ret2);
		goto error5;
	}

	/*
	 * Make sure the MAD registration (if supplied) does not overlap
	 * any existing registrations.
	 */
	spin_lock_irq(&port_priv->reg_lock);
	if (mad_reg_req) {
		mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class);
		if (!is_vendor_class(mgmt_class)) {
			class = port_priv->version[mad_reg_req->
						   mgmt_class_version].class;
			if (class) {
				method = class->method_table[mgmt_class];
				if (method) {
					if (method_in_use(&method,
							   mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
						  mgmt_class);
		} else {
			/* "New" vendor class range */
			vendor = port_priv->version[mad_reg_req->
						    mgmt_class_version].vendor;
			if (vendor) {
				vclass = vendor_class_index(mgmt_class);
				vendor_class = vendor->vendor_class[vclass];
				if (vendor_class) {
					if (is_vendor_method_in_use(
							vendor_class,
							mad_reg_req))
						goto error6;
				}
			}
			ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
		}
		if (ret2) {
			ret = ERR_PTR(ret2);
			goto error6;
		}
	}
	spin_unlock_irq(&port_priv->reg_lock);

	trace_ib_mad_create_agent(mad_agent_priv);
	return &mad_agent_priv->agent;
error6:
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
	kfree(reg_req);
error3:
	kfree(mad_agent_priv);
error1:
	return ret;
}
EXPORT_SYMBOL(ib_register_mad_agent);

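/*
 * Dropping the last reference completes 'comp', which
 * unregister_mad_agent() waits on before tearing the agent down.
 */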
static inline void deref_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	if (refcount_dec_and_test(&mad_agent_priv->refcount))
		complete(&mad_agent_priv->comp);
}

static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_port_private *port_priv;

	/* Note that we could still be handling received MADs */
	trace_ib_mad_unregister_agent(mad_agent_priv);

	/*
	 * Canceling all sends results in dropping received response
	 * MADs, preventing us from queuing additional work
	 */
	cancel_mads(mad_agent_priv);
	port_priv = mad_agent_priv->qp_info->port_priv;
	cancel_delayed_work(&mad_agent_priv->timed_work);

	spin_lock_irq(&port_priv->reg_lock);
	remove_mad_reg_req(mad_agent_priv);
	spin_unlock_irq(&port_priv->reg_lock);
	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);

	flush_workqueue(port_priv->wq);

	deref_mad_agent(mad_agent_priv);
	wait_for_completion(&mad_agent_priv->comp);
	ib_cancel_rmpp_recvs(mad_agent_priv);

	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);

	kfree(mad_agent_priv->reg_req);
	kfree_rcu(mad_agent_priv, rcu);
}

/*
 * ib_unregister_mad_agent - Unregisters a client from using MAD services
 *
 * Context: Process context.
 */
void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
{
	struct ib_mad_agent_private *mad_agent_priv;

	mad_agent_priv = container_of(mad_agent,
				      struct ib_mad_agent_private,
				      agent);
	unregister_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_unregister_mad_agent);

static void dequeue_mad(struct ib_mad_list_head *mad_list)
{
	struct ib_mad_queue *mad_queue;
	unsigned long flags;

	mad_queue = mad_list->mad_queue;
	spin_lock_irqsave(&mad_queue->lock, flags);
	list_del(&mad_list->list);
	mad_queue->count--;
	spin_unlock_irqrestore(&mad_queue->lock, flags);
}

static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
			 u16 pkey_index, u8 port_num, struct ib_wc *wc)
{
	memset(wc, 0, sizeof *wc);
	wc->wr_cqe = cqe;
	wc->status = IB_WC_SUCCESS;
	wc->opcode = IB_WC_RECV;
	wc->pkey_index = pkey_index;
	wc->byte_len = sizeof(struct ib_mad) + sizeof(struct ib_grh);
	wc->src_qp = IB_QP0;
	wc->qp = qp;
	wc->slid = slid;
	wc->sl = 0;
	wc->dlid_path_bits = 0;
	wc->port_num = port_num;
}

static size_t mad_priv_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_mad_private) + mp->mad_size;
}

static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	size_t size = sizeof(struct ib_mad_private) + mad_size;
	struct ib_mad_private *ret = kzalloc(size, flags);

	if (ret)
		ret->mad_size = mad_size;

	return ret;
}

static size_t port_mad_size(const struct ib_mad_port_private *port_priv)
{
	return rdma_max_mad_size(port_priv->device, port_priv->port_num);
}

static size_t mad_priv_dma_size(const struct ib_mad_private *mp)
{
	return sizeof(struct ib_grh) + mp->mad_size;
}

/*
 * Return 0 if SMP is to be sent
 * Return 1 if SMP was consumed locally (whether or not solicited)
 * Return < 0 if error
 */
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
				  struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret = 0;
	struct ib_smp *smp = mad_send_wr->send_buf.mad;
	struct opa_smp *opa_smp = (struct opa_smp *)smp;
	unsigned long flags;
	struct ib_mad_local_private *local;
	struct ib_mad_private *mad_priv;
	struct ib_mad_port_private *port_priv;
	struct ib_mad_agent_private *recv_mad_agent = NULL;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num;
	struct ib_wc mad_wc;
	struct ib_ud_wr *send_wr = &mad_send_wr->send_wr;
	size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);
	u16 out_mad_pkey_index = 0;
	u16 drslid;
	bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
				    mad_agent_priv->qp_info->port_priv->port_num);

	if (rdma_cap_ib_switch(device) &&
	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		port_num = send_wr->port_num;
	else
		port_num = mad_agent_priv->agent.port_num;

	/*
	 * Directed route handling starts if the initial LID routed part of
	 * a request or the ending LID routed part of a response is empty.
	 * If we are at the start of the LID routed part, don't update the
	 * hop_ptr or hop_cnt.  See section 14.2.2, Vol 1 IB spec.
	 */
	if (opa && smp->class_version == OPA_SM_CLASS_VERSION) {
		u32 opa_drslid;

		trace_ib_mad_handle_out_opa_smi(opa_smp);

		if ((opa_get_smp_direction(opa_smp)
		     ? opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) ==
		     OPA_LID_PERMISSIVE &&
		     opa_smi_handle_dr_smp_send(opa_smp,
						rdma_cap_ib_switch(device),
						port_num) == IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid directed route\n");
			goto out;
		}
		opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid);
		if (opa_drslid != be32_to_cpu(OPA_LID_PERMISSIVE) &&
		    opa_drslid & 0xffff0000) {
			ret = -EINVAL;
			dev_err(&device->dev, "OPA Invalid dr_slid 0x%x\n",
				opa_drslid);
			goto out;
		}
		drslid = (u16)(opa_drslid & 0x0000ffff);

		/* Check to post send on QP or process locally */
		if (opa_smi_check_local_smp(opa_smp, device) == IB_SMI_DISCARD &&
		    opa_smi_check_local_returning_smp(opa_smp, device) == IB_SMI_DISCARD)
			goto out;
	} else {
		trace_ib_mad_handle_out_ib_smi(smp);

		if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
		     IB_LID_PERMISSIVE &&
		     smi_handle_dr_smp_send(smp, rdma_cap_ib_switch(device), port_num) ==
		     IB_SMI_DISCARD) {
			ret = -EINVAL;
			dev_err(&device->dev, "Invalid directed route\n");
			goto out;
		}
		drslid = be16_to_cpu(smp->dr_slid);

		/* Check to post send on QP or process locally */
		if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD &&
		    smi_check_local_returning_smp(smp, device) == IB_SMI_DISCARD)
			goto out;
	}

	local = kmalloc(sizeof *local, GFP_ATOMIC);
	if (!local) {
		ret = -ENOMEM;
		goto out;
	}
	local->mad_priv = NULL;
	local->recv_mad_agent = NULL;
	mad_priv = alloc_mad_private(mad_size, GFP_ATOMIC);
	if (!mad_priv) {
		ret = -ENOMEM;
		kfree(local);
		goto out;
	}

	build_smp_wc(mad_agent_priv->agent.qp,
		     send_wr->wr.wr_cqe, drslid,
		     send_wr->pkey_index,
		     send_wr->port_num, &mad_wc);

	if (opa && smp->base_version == OPA_MGMT_BASE_VERSION) {
		mad_wc.byte_len = mad_send_wr->send_buf.hdr_len
					+ mad_send_wr->send_buf.data_len
					+ sizeof(struct ib_grh);
	}

	/* No GRH for DR SMP */
	ret = device->ops.process_mad(device, 0, port_num, &mad_wc, NULL,
				      (const struct ib_mad *)smp,
				      (struct ib_mad *)mad_priv->mad, &mad_size,
				      &out_mad_pkey_index);
	switch (ret) {
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
		if (ib_response_mad((const struct ib_mad_hdr *)mad_priv->mad) &&
		    mad_agent_priv->agent.recv_handler) {
			local->mad_priv = mad_priv;
			local->recv_mad_agent = mad_agent_priv;
			/*
			 * Reference MAD agent until receive
			 * side of local completion handled
			 */
			refcount_inc(&mad_agent_priv->refcount);
		} else
			kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
		kfree(mad_priv);
		break;
	case IB_MAD_RESULT_SUCCESS:
		/* Treat like an incoming receive MAD */
		port_priv = ib_get_mad_port(mad_agent_priv->agent.device,
					    mad_agent_priv->agent.port_num);
		if (port_priv) {
			memcpy(mad_priv->mad, smp, mad_priv->mad_size);
			recv_mad_agent = find_mad_agent(port_priv,
				(const struct ib_mad_hdr *)mad_priv->mad);
		}
		if (!port_priv || !recv_mad_agent) {
			/*
			 * No receiving agent so drop packet and
			 * generate send completion.
			 */
			kfree(mad_priv);
			break;
		}
		local->mad_priv = mad_priv;
		local->recv_mad_agent = recv_mad_agent;
		break;
	default:
		kfree(mad_priv);
		kfree(local);
		ret = -EINVAL;
		goto out;
	}

	local->mad_send_wr = mad_send_wr;
	if (opa) {
		local->mad_send_wr->send_wr.pkey_index = out_mad_pkey_index;
		local->return_wc_byte_len = mad_size;
	}
	/* Reference MAD agent until send side of local completion handled */
	refcount_inc(&mad_agent_priv->refcount);
	/* Queue local completion to local list */
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	queue_work(mad_agent_priv->qp_info->port_priv->wq,
		   &mad_agent_priv->local_work);
	ret = 1;
out:
	return ret;
}

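/*
 * Pad the payload so the total RMPP data is a whole number of seg_size
 * chunks, e.g. seg_size = 192 and data_len = 200 gives pad = 184
 * (200 + 184 = 2 * 192).  A zero data_len counts as one full (empty)
 * segment.
 */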
static int get_pad_size(int hdr_len, int data_len, size_t mad_size)
{
	int seg_size, pad;

	seg_size = mad_size - hdr_len;
	if (data_len && seg_size) {
		pad = seg_size - data_len % seg_size;
		return pad == seg_size ? 0 : pad;
	} else
		return seg_size;
}

static void free_send_rmpp_list(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_rmpp_segment *s, *t;

	list_for_each_entry_safe(s, t, &mad_send_wr->rmpp_list, list) {
		list_del(&s->list);
		kfree(s);
	}
}

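/*
 * For an RMPP send, the payload is carried as a list of seg_size
 * segments hanging off the work request rather than one flat buffer;
 * only the MAD header lives in the send buffer itself.
 */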
static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr,
				size_t mad_size, gfp_t gfp_mask)
{
	struct ib_mad_send_buf *send_buf = &send_wr->send_buf;
	struct ib_rmpp_mad *rmpp_mad = send_buf->mad;
	struct ib_rmpp_segment *seg = NULL;
	int left, seg_size, pad;

	send_buf->seg_size = mad_size - send_buf->hdr_len;
	send_buf->seg_rmpp_size = mad_size - IB_MGMT_RMPP_HDR;
	seg_size = send_buf->seg_size;
	pad = send_wr->pad;

	/* Allocate data segments. */
	for (left = send_buf->data_len + pad; left > 0; left -= seg_size) {
		seg = kmalloc(sizeof (*seg) + seg_size, gfp_mask);
		if (!seg) {
			free_send_rmpp_list(send_wr);
			return -ENOMEM;
		}
		seg->num = ++send_buf->seg_count;
		list_add_tail(&seg->list, &send_wr->rmpp_list);
	}

	/* Zero any padding */
	if (pad)
		memset(seg->data + seg_size - pad, 0, pad);

	rmpp_mad->rmpp_hdr.rmpp_version = send_wr->mad_agent_priv->
					  agent.rmpp_version;
	rmpp_mad->rmpp_hdr.rmpp_type = IB_MGMT_RMPP_TYPE_DATA;
	ib_set_rmpp_flags(&rmpp_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	send_wr->cur_seg = container_of(send_wr->rmpp_list.next,
					struct ib_rmpp_segment, list);
	send_wr->last_ack_seg = send_wr->cur_seg;
	return 0;
}

int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent)
{
	return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP);
}
EXPORT_SYMBOL(ib_mad_kernel_rmpp_agent);

struct ib_mad_send_buf *ib_create_send_mad(struct ib_mad_agent *mad_agent,
					   u32 remote_qpn, u16 pkey_index,
					   int rmpp_active,
					   int hdr_len, int data_len,
					   gfp_t gfp_mask,
					   u8 base_version)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	int pad, message_size, ret, size;
	void *buf;
	size_t mad_size;
	bool opa;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);

	opa = rdma_cap_opa_mad(mad_agent->device, mad_agent->port_num);

	if (opa && base_version == OPA_MGMT_BASE_VERSION)
		mad_size = sizeof(struct opa_mad);
	else
		mad_size = sizeof(struct ib_mad);

	pad = get_pad_size(hdr_len, data_len, mad_size);
	message_size = hdr_len + data_len + pad;

	if (ib_mad_kernel_rmpp_agent(mad_agent)) {
		if (!rmpp_active && message_size > mad_size)
			return ERR_PTR(-EINVAL);
	} else
		if (rmpp_active || message_size > mad_size)
			return ERR_PTR(-EINVAL);

	size = rmpp_active ? hdr_len : mad_size;
	buf = kzalloc(sizeof *mad_send_wr + size, gfp_mask);
	if (!buf)
		return ERR_PTR(-ENOMEM);

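	/*
	 * One allocation serves both purposes: the first 'size' bytes hold
	 * the MAD itself (just the header when RMPP is active), and the
	 * ib_mad_send_wr_private bookkeeping struct follows it.
	 */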
	mad_send_wr = buf + size;
	INIT_LIST_HEAD(&mad_send_wr->rmpp_list);
	mad_send_wr->send_buf.mad = buf;
	mad_send_wr->send_buf.hdr_len = hdr_len;
	mad_send_wr->send_buf.data_len = data_len;
	mad_send_wr->pad = pad;

	mad_send_wr->mad_agent_priv = mad_agent_priv;
	mad_send_wr->sg_list[0].length = hdr_len;
	mad_send_wr->sg_list[0].lkey = mad_agent->qp->pd->local_dma_lkey;

	/* OPA MADs don't have to be the full 2048 bytes */
	if (opa && base_version == OPA_MGMT_BASE_VERSION &&
	    data_len < mad_size - hdr_len)
		mad_send_wr->sg_list[1].length = data_len;
	else
		mad_send_wr->sg_list[1].length = mad_size - hdr_len;

	mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;

	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;

	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
	mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
	mad_send_wr->send_wr.wr.num_sge = 2;
	mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
	mad_send_wr->send_wr.wr.send_flags = IB_SEND_SIGNALED;
	mad_send_wr->send_wr.remote_qpn = remote_qpn;
	mad_send_wr->send_wr.remote_qkey = IB_QP_SET_QKEY;
	mad_send_wr->send_wr.pkey_index = pkey_index;

	if (rmpp_active) {
		ret = alloc_send_rmpp_list(mad_send_wr, mad_size, gfp_mask);
		if (ret) {
			kfree(buf);
			return ERR_PTR(ret);
		}
	}

	mad_send_wr->send_buf.mad_agent = mad_agent;
	refcount_inc(&mad_agent_priv->refcount);
	return &mad_send_wr->send_buf;
}
EXPORT_SYMBOL(ib_create_send_mad);

int ib_get_mad_data_offset(u8 mgmt_class)
{
	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
		return IB_MGMT_SA_HDR;
	else if ((mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
		 (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
		 (mgmt_class == IB_MGMT_CLASS_BIS))
		return IB_MGMT_DEVICE_HDR;
	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
		return IB_MGMT_VENDOR_HDR;
	else
		return IB_MGMT_MAD_HDR;
}
EXPORT_SYMBOL(ib_get_mad_data_offset);

int ib_is_mad_class_rmpp(u8 mgmt_class)
{
	if ((mgmt_class == IB_MGMT_CLASS_SUBN_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_MGMT) ||
	    (mgmt_class == IB_MGMT_CLASS_DEVICE_ADM) ||
	    (mgmt_class == IB_MGMT_CLASS_BIS) ||
	    ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
	     (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)))
		return 1;
	return 0;
}
EXPORT_SYMBOL(ib_is_mad_class_rmpp);

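/*
 * cur_seg caches the segment most recently handed out; lookups walk
 * linearly forward or backward from there, which is cheap for the
 * common in-order access pattern.
 */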
void *ib_get_rmpp_segment(struct ib_mad_send_buf *send_buf, int seg_num)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct list_head *list;

	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);
	list = &mad_send_wr->cur_seg->list;

	if (mad_send_wr->cur_seg->num < seg_num) {
		list_for_each_entry(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	} else if (mad_send_wr->cur_seg->num > seg_num) {
		list_for_each_entry_reverse(mad_send_wr->cur_seg, list, list)
			if (mad_send_wr->cur_seg->num == seg_num)
				break;
	}
	return mad_send_wr->cur_seg->data;
}
EXPORT_SYMBOL(ib_get_rmpp_segment);

static inline void *ib_get_payload(struct ib_mad_send_wr_private *mad_send_wr)
{
	if (mad_send_wr->send_buf.seg_count)
		return ib_get_rmpp_segment(&mad_send_wr->send_buf,
					   mad_send_wr->seg_num);
	else
		return mad_send_wr->send_buf.mad +
		       mad_send_wr->send_buf.hdr_len;
}

void ib_free_send_mad(struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;

	mad_agent_priv = container_of(send_buf->mad_agent,
				      struct ib_mad_agent_private, agent);
	mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private,
				   send_buf);

	free_send_rmpp_list(mad_send_wr);
	kfree(send_buf->mad);
	deref_mad_agent(mad_agent_priv);
}
EXPORT_SYMBOL(ib_free_send_mad);

int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_qp_info *qp_info;
	struct list_head *list;
	struct ib_mad_agent *mad_agent;
	struct ib_sge *sge;
	unsigned long flags;
	int ret;

	/* Set WR ID to find mad_send_wr upon completion */
	qp_info = mad_send_wr->mad_agent_priv->qp_info;
	mad_send_wr->mad_list.mad_queue = &qp_info->send_queue;
	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;

	mad_agent = mad_send_wr->send_buf.mad_agent;
	sge = mad_send_wr->sg_list;
	sge[0].addr = ib_dma_map_single(mad_agent->device,
					mad_send_wr->send_buf.mad,
					sge[0].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[0].addr)))
		return -ENOMEM;

	mad_send_wr->header_mapping = sge[0].addr;

	sge[1].addr = ib_dma_map_single(mad_agent->device,
					ib_get_payload(mad_send_wr),
					sge[1].length,
					DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(mad_agent->device, sge[1].addr))) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		return -ENOMEM;
	}
	mad_send_wr->payload_mapping = sge[1].addr;

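	/*
	 * Flow control: if the send queue is already at max_active, the WR
	 * is parked on the overflow list instead of being posted; it gets
	 * posted later, from the send completion path, once a slot frees up.
	 */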
	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
		trace_ib_mad_ib_send_mad(mad_send_wr, qp_info);
		ret = ib_post_send(mad_agent->qp, &mad_send_wr->send_wr.wr,
				   NULL);
		list = &qp_info->send_queue.list;
	} else {
		ret = 0;
		list = &qp_info->overflow_list;
	}

	if (!ret) {
		qp_info->send_queue.count++;
		list_add_tail(&mad_send_wr->mad_list.list, list);
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
	if (ret) {
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->header_mapping,
				    sge[0].length, DMA_TO_DEVICE);
		ib_dma_unmap_single(mad_agent->device,
				    mad_send_wr->payload_mapping,
				    sge[1].length, DMA_TO_DEVICE);
	}
	return ret;
}
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun /*
1067*4882a593Smuzhiyun * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
1068*4882a593Smuzhiyun * with the registered client
1069*4882a593Smuzhiyun */
ib_post_send_mad(struct ib_mad_send_buf * send_buf,struct ib_mad_send_buf ** bad_send_buf)1070*4882a593Smuzhiyun int ib_post_send_mad(struct ib_mad_send_buf *send_buf,
1071*4882a593Smuzhiyun struct ib_mad_send_buf **bad_send_buf)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun struct ib_mad_agent_private *mad_agent_priv;
1074*4882a593Smuzhiyun struct ib_mad_send_buf *next_send_buf;
1075*4882a593Smuzhiyun struct ib_mad_send_wr_private *mad_send_wr;
1076*4882a593Smuzhiyun unsigned long flags;
1077*4882a593Smuzhiyun int ret = -EINVAL;
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun /* Walk list of send WRs and post each on send list */
1080*4882a593Smuzhiyun for (; send_buf; send_buf = next_send_buf) {
1081*4882a593Smuzhiyun mad_send_wr = container_of(send_buf,
1082*4882a593Smuzhiyun struct ib_mad_send_wr_private,
1083*4882a593Smuzhiyun send_buf);
1084*4882a593Smuzhiyun mad_agent_priv = mad_send_wr->mad_agent_priv;
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun ret = ib_mad_enforce_security(mad_agent_priv,
1087*4882a593Smuzhiyun mad_send_wr->send_wr.pkey_index);
1088*4882a593Smuzhiyun if (ret)
1089*4882a593Smuzhiyun goto error;
1090*4882a593Smuzhiyun
1091*4882a593Smuzhiyun if (!send_buf->mad_agent->send_handler ||
1092*4882a593Smuzhiyun (send_buf->timeout_ms &&
1093*4882a593Smuzhiyun !send_buf->mad_agent->recv_handler)) {
1094*4882a593Smuzhiyun ret = -EINVAL;
1095*4882a593Smuzhiyun goto error;
1096*4882a593Smuzhiyun }
1097*4882a593Smuzhiyun
1098*4882a593Smuzhiyun if (!ib_is_mad_class_rmpp(((struct ib_mad_hdr *) send_buf->mad)->mgmt_class)) {
1099*4882a593Smuzhiyun if (mad_agent_priv->agent.rmpp_version) {
1100*4882a593Smuzhiyun ret = -EINVAL;
1101*4882a593Smuzhiyun goto error;
1102*4882a593Smuzhiyun }
1103*4882a593Smuzhiyun }
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun /*
1106*4882a593Smuzhiyun * Save pointer to next work request to post in case the
1107*4882a593Smuzhiyun * current one completes, and the user modifies the work
1108*4882a593Smuzhiyun * request associated with the completion
1109*4882a593Smuzhiyun */
1110*4882a593Smuzhiyun next_send_buf = send_buf->next;
1111*4882a593Smuzhiyun mad_send_wr->send_wr.ah = send_buf->ah;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun if (((struct ib_mad_hdr *) send_buf->mad)->mgmt_class ==
1114*4882a593Smuzhiyun IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
1115*4882a593Smuzhiyun ret = handle_outgoing_dr_smp(mad_agent_priv,
1116*4882a593Smuzhiyun mad_send_wr);
1117*4882a593Smuzhiyun if (ret < 0) /* error */
1118*4882a593Smuzhiyun goto error;
1119*4882a593Smuzhiyun else if (ret == 1) /* locally consumed */
1120*4882a593Smuzhiyun continue;
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun
1123*4882a593Smuzhiyun mad_send_wr->tid = ((struct ib_mad_hdr *) send_buf->mad)->tid;
1124*4882a593Smuzhiyun /* Timeout will be updated after send completes */
1125*4882a593Smuzhiyun mad_send_wr->timeout = msecs_to_jiffies(send_buf->timeout_ms);
1126*4882a593Smuzhiyun mad_send_wr->max_retries = send_buf->retries;
1127*4882a593Smuzhiyun mad_send_wr->retries_left = send_buf->retries;
1128*4882a593Smuzhiyun send_buf->retries = 0;
1129*4882a593Smuzhiyun /* Reference for work request to QP + response */
1130*4882a593Smuzhiyun mad_send_wr->refcount = 1 + (mad_send_wr->timeout > 0);
1131*4882a593Smuzhiyun mad_send_wr->status = IB_WC_SUCCESS;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun /* Reference MAD agent until send completes */
1134*4882a593Smuzhiyun refcount_inc(&mad_agent_priv->refcount);
1135*4882a593Smuzhiyun spin_lock_irqsave(&mad_agent_priv->lock, flags);
1136*4882a593Smuzhiyun list_add_tail(&mad_send_wr->agent_list,
1137*4882a593Smuzhiyun &mad_agent_priv->send_list);
1138*4882a593Smuzhiyun spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
1141*4882a593Smuzhiyun ret = ib_send_rmpp_mad(mad_send_wr);
1142*4882a593Smuzhiyun if (ret >= 0 && ret != IB_RMPP_RESULT_CONSUMED)
1143*4882a593Smuzhiyun ret = ib_send_mad(mad_send_wr);
1144*4882a593Smuzhiyun } else
1145*4882a593Smuzhiyun ret = ib_send_mad(mad_send_wr);
1146*4882a593Smuzhiyun if (ret < 0) {
1147*4882a593Smuzhiyun /* Fail send request */
1148*4882a593Smuzhiyun spin_lock_irqsave(&mad_agent_priv->lock, flags);
1149*4882a593Smuzhiyun list_del(&mad_send_wr->agent_list);
1150*4882a593Smuzhiyun spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
1151*4882a593Smuzhiyun deref_mad_agent(mad_agent_priv);
1152*4882a593Smuzhiyun goto error;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun }
1155*4882a593Smuzhiyun return 0;
1156*4882a593Smuzhiyun error:
1157*4882a593Smuzhiyun if (bad_send_buf)
1158*4882a593Smuzhiyun *bad_send_buf = send_buf;
1159*4882a593Smuzhiyun return ret;
1160*4882a593Smuzhiyun }
1161*4882a593Smuzhiyun EXPORT_SYMBOL(ib_post_send_mad);
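
/*
 * Usage sketch (illustrative, not taken from this file): a client pairs
 * ib_create_send_mad() with ib_post_send_mad(), and reclaims the buffer
 * itself only when the post fails, e.g.:
 *
 *	msg = ib_create_send_mad(agent, ...);	(arguments elided)
 *	if (!IS_ERR(msg) && ib_post_send_mad(msg, NULL))
 *		ib_free_send_mad(msg);
 *
 * On failure, *bad_send_buf (when non-NULL) points at the first buffer in
 * the chain that was not posted; buffers posted before it complete through
 * the agent's send_handler as usual.
 */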

/*
 * ib_free_recv_mad - Returns data buffers used to receive
 *  a MAD to the access layer
 */
void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_recv_buf *mad_recv_buf, *temp_recv_buf;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *priv;
	struct list_head free_list;

	INIT_LIST_HEAD(&free_list);
	list_splice_init(&mad_recv_wc->rmpp_list, &free_list);

	list_for_each_entry_safe(mad_recv_buf, temp_recv_buf,
				 &free_list, list) {
		mad_recv_wc = container_of(mad_recv_buf, struct ib_mad_recv_wc,
					   recv_buf);
		mad_priv_hdr = container_of(mad_recv_wc,
					    struct ib_mad_private_header,
					    recv_wc);
		priv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);
		kfree(priv);
	}
}
EXPORT_SYMBOL(ib_free_recv_mad);

static int method_in_use(struct ib_mad_mgmt_method_table **method,
			 struct ib_mad_reg_req *mad_reg_req)
{
	int i;

	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
		if ((*method)->agent[i]) {
			pr_err("Method %d already in use\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int allocate_method_table(struct ib_mad_mgmt_method_table **method)
{
	/* Allocate management method table */
	*method = kzalloc(sizeof **method, GFP_ATOMIC);
	return (*method) ? 0 : (-ENOMEM);
}

/*
 * Check to see if there are any methods still in use
 */
static int check_method_table(struct ib_mad_mgmt_method_table *method)
{
	int i;

	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i])
			return 1;
	return 0;
}

/*
 * Check to see if there are any method tables for this class still in use
 */
static int check_class_table(struct ib_mad_mgmt_class_table *class)
{
	int i;

	for (i = 0; i < MAX_MGMT_CLASS; i++)
		if (class->method_table[i])
			return 1;
	return 0;
}

static int check_vendor_class(struct ib_mad_mgmt_vendor_class *vendor_class)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		if (vendor_class->method_table[i])
			return 1;
	return 0;
}

static int find_vendor_oui(struct ib_mad_mgmt_vendor_class *vendor_class,
			   const char *oui)
{
	int i;

	for (i = 0; i < MAX_MGMT_OUI; i++)
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp(vendor_class->oui[i], oui, 3))
			return i;

	return -1;
}

static int check_vendor_table(struct ib_mad_mgmt_vendor_class_table *vendor)
{
	int i;

	for (i = 0; i < MAX_MGMT_VENDOR_RANGE2; i++)
		if (vendor->vendor_class[i])
			return 1;

	return 0;
}

static void remove_methods_mad_agent(struct ib_mad_mgmt_method_table *method,
				     struct ib_mad_agent_private *agent)
{
	int i;

	/* Remove any methods for this mad agent */
	for (i = 0; i < IB_MGMT_MAX_METHODS; i++)
		if (method->agent[i] == agent)
			method->agent[i] = NULL;
}

static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			      struct ib_mad_agent_private *agent_priv,
			      u8 mgmt_class)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table **class;
	struct ib_mad_mgmt_method_table **method;
	int i, ret;

	port_priv = agent_priv->qp_info->port_priv;
	class = &port_priv->version[mad_reg_req->mgmt_class_version].class;
	if (!*class) {
		/* Allocate management class table for "new" class version */
		*class = kzalloc(sizeof **class, GFP_ATOMIC);
		if (!*class) {
			ret = -ENOMEM;
			goto error1;
		}

		/* Allocate method table for this management class */
		method = &(*class)->method_table[mgmt_class];
		ret = allocate_method_table(method);
		if (ret)
			goto error2;
	} else {
		method = &(*class)->method_table[mgmt_class];
		if (!*method) {
			/* Allocate method table for this management class */
			ret = allocate_method_table(method);
			if (ret)
				goto error1;
		}
	}

	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error3;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error3:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
	goto error1;
error2:
	kfree(*class);
	*class = NULL;
error1:
	return ret;
}
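
/*
 * Registration layout (summary of the structures used above): each port
 * keeps a per-class-version array, port_priv->version[]. For standard
 * classes, version[v].class->method_table[mgmt_class]->agent[method]
 * resolves to the owning agent; vendor classes with OUIs go through the
 * parallel version[v].vendor tables handled by add_oui_reg_req() below.
 */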

static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
			   struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_vendor_class_table **vendor_table;
	struct ib_mad_mgmt_vendor_class_table *vendor = NULL;
	struct ib_mad_mgmt_vendor_class *vendor_class = NULL;
	struct ib_mad_mgmt_method_table **method;
	int i, ret = -ENOMEM;
	u8 vclass;

	/* "New" vendor (with OUI) class */
	vclass = vendor_class_index(mad_reg_req->mgmt_class);
	port_priv = agent_priv->qp_info->port_priv;
	vendor_table = &port_priv->version[
				mad_reg_req->mgmt_class_version].vendor;
	if (!*vendor_table) {
		/* Allocate mgmt vendor class table for "new" class version */
		vendor = kzalloc(sizeof *vendor, GFP_ATOMIC);
		if (!vendor)
			goto error1;

		*vendor_table = vendor;
	}
	if (!(*vendor_table)->vendor_class[vclass]) {
		/* Allocate table for this management vendor class */
		vendor_class = kzalloc(sizeof *vendor_class, GFP_ATOMIC);
		if (!vendor_class)
			goto error2;

		(*vendor_table)->vendor_class[vclass] = vendor_class;
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is there a matching OUI for this vendor class? */
		if (!memcmp((*vendor_table)->vendor_class[vclass]->oui[i],
			    mad_reg_req->oui, 3)) {
			method = &(*vendor_table)->vendor_class[
						vclass]->method_table[i];
			if (!*method)
				goto error3;
			goto check_in_use;
		}
	}
	for (i = 0; i < MAX_MGMT_OUI; i++) {
		/* Is this OUI slot available? */
		if (!is_vendor_oui((*vendor_table)->vendor_class[
				vclass]->oui[i])) {
			method = &(*vendor_table)->vendor_class[
				vclass]->method_table[i];
			/* Allocate method table for this OUI */
			if (!*method) {
				ret = allocate_method_table(method);
				if (ret)
					goto error3;
			}
			memcpy((*vendor_table)->vendor_class[vclass]->oui[i],
			       mad_reg_req->oui, 3);
			goto check_in_use;
		}
	}
	dev_err(&agent_priv->agent.device->dev, "All OUI slots in use\n");
	goto error3;

check_in_use:
	/* Now, make sure methods are not already in use */
	if (method_in_use(method, mad_reg_req))
		goto error4;

	/* Finally, add in methods being registered */
	for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
		(*method)->agent[i] = agent_priv;

	return 0;

error4:
	/* Remove any methods for this mad agent */
	remove_methods_mad_agent(*method, agent_priv);
	/* Now, check to see if there are any methods in use */
	if (!check_method_table(*method)) {
		/* If not, release management method table */
		kfree(*method);
		*method = NULL;
	}
	ret = -EINVAL;
error3:
	if (vendor_class) {
		(*vendor_table)->vendor_class[vclass] = NULL;
		kfree(vendor_class);
	}
error2:
	if (vendor) {
		*vendor_table = NULL;
		kfree(vendor);
	}
error1:
	return ret;
}
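
/*
 * Note on the two OUI scans above: the first pass reuses a slot whose OUI
 * already matches this registration; only if none matches does the second
 * pass claim the first free slot (is_vendor_oui() treats an all-zero OUI
 * as free). Once all MAX_MGMT_OUI slots hold other OUIs, the registration
 * fails.
 */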

static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv)
{
	struct ib_mad_port_private *port_priv;
	struct ib_mad_mgmt_class_table *class;
	struct ib_mad_mgmt_method_table *method;
	struct ib_mad_mgmt_vendor_class_table *vendor;
	struct ib_mad_mgmt_vendor_class *vendor_class;
	int index;
	u8 mgmt_class;

	/*
	 * Was a MAD registration request supplied
	 * with the original registration?
	 */
	if (!agent_priv->reg_req)
		goto out;

	port_priv = agent_priv->qp_info->port_priv;
	mgmt_class = convert_mgmt_class(agent_priv->reg_req->mgmt_class);
	class = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].class;
	if (!class)
		goto vendor_check;

	method = class->method_table[mgmt_class];
	if (method) {
		/* Remove any methods for this mad agent */
		remove_methods_mad_agent(method, agent_priv);
		/* Now, check to see if there are any methods still in use */
		if (!check_method_table(method)) {
			/* If not, release management method table */
			kfree(method);
			class->method_table[mgmt_class] = NULL;
			/* Any management classes left? */
			if (!check_class_table(class)) {
				/* If not, release management class table */
				kfree(class);
				port_priv->version[
					agent_priv->reg_req->
					mgmt_class_version].class = NULL;
			}
		}
	}

vendor_check:
	if (!is_vendor_class(mgmt_class))
		goto out;

	/* normalize mgmt_class to vendor range 2 */
	mgmt_class = vendor_class_index(agent_priv->reg_req->mgmt_class);
	vendor = port_priv->version[
			agent_priv->reg_req->mgmt_class_version].vendor;

	if (!vendor)
		goto out;

	vendor_class = vendor->vendor_class[mgmt_class];
	if (vendor_class) {
		index = find_vendor_oui(vendor_class, agent_priv->reg_req->oui);
		if (index < 0)
			goto out;
		method = vendor_class->method_table[index];
		if (method) {
			/* Remove any methods for this mad agent */
			remove_methods_mad_agent(method, agent_priv);
			/*
			 * Now, check to see if there are
			 * any methods still in use
			 */
			if (!check_method_table(method)) {
				/* If not, release management method table */
				kfree(method);
				vendor_class->method_table[index] = NULL;
				memset(vendor_class->oui[index], 0, 3);
				/* Any OUIs left? */
				if (!check_vendor_class(vendor_class)) {
					/* If not, release vendor class table */
					kfree(vendor_class);
					vendor->vendor_class[mgmt_class] = NULL;
					/* Any other vendor classes left? */
					if (!check_vendor_table(vendor)) {
						kfree(vendor);
						port_priv->version[
							agent_priv->reg_req->
							mgmt_class_version].
							vendor = NULL;
					}
				}
			}
		}
	}

out:
	return;
}
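
/*
 * Deregistration mirrors registration: clear this agent's method entries,
 * then free each containing table (method, class or vendor class, vendor)
 * as soon as it becomes empty, so a port with no agents holds no
 * registration state at all.
 */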

static struct ib_mad_agent_private *
find_mad_agent(struct ib_mad_port_private *port_priv,
	       const struct ib_mad_hdr *mad_hdr)
{
	struct ib_mad_agent_private *mad_agent = NULL;
	unsigned long flags;

	if (ib_response_mad(mad_hdr)) {
		u32 hi_tid;

		/*
		 * Routing is based on the high 32 bits of the transaction
		 * ID of the MAD.
		 */
		hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
		rcu_read_lock();
		mad_agent = xa_load(&ib_mad_clients, hi_tid);
		if (mad_agent && !refcount_inc_not_zero(&mad_agent->refcount))
			mad_agent = NULL;
		rcu_read_unlock();
	} else {
		struct ib_mad_mgmt_class_table *class;
		struct ib_mad_mgmt_method_table *method;
		struct ib_mad_mgmt_vendor_class_table *vendor;
		struct ib_mad_mgmt_vendor_class *vendor_class;
		const struct ib_vendor_mad *vendor_mad;
		int index;

		spin_lock_irqsave(&port_priv->reg_lock, flags);
		/*
		 * Routing is based on version, class, and method.
		 * For "newer" vendor MADs, it is also based on the OUI.
		 */
		if (mad_hdr->class_version >= MAX_MGMT_VERSION)
			goto out;
		if (!is_vendor_class(mad_hdr->mgmt_class)) {
			class = port_priv->version[
					mad_hdr->class_version].class;
			if (!class)
				goto out;
			if (convert_mgmt_class(mad_hdr->mgmt_class) >=
			    ARRAY_SIZE(class->method_table))
				goto out;
			method = class->method_table[convert_mgmt_class(
							mad_hdr->mgmt_class)];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		} else {
			vendor = port_priv->version[
					mad_hdr->class_version].vendor;
			if (!vendor)
				goto out;
			vendor_class = vendor->vendor_class[vendor_class_index(
						mad_hdr->mgmt_class)];
			if (!vendor_class)
				goto out;
			/* Find matching OUI */
			vendor_mad = (const struct ib_vendor_mad *)mad_hdr;
			index = find_vendor_oui(vendor_class, vendor_mad->oui);
			if (index == -1)
				goto out;
			method = vendor_class->method_table[index];
			if (method)
				mad_agent = method->agent[mad_hdr->method &
							  ~IB_MGMT_METHOD_RESP];
		}
		if (mad_agent)
			refcount_inc(&mad_agent->refcount);
out:
		spin_unlock_irqrestore(&port_priv->reg_lock, flags);
	}

	if (mad_agent && !mad_agent->agent.recv_handler) {
		dev_notice(&port_priv->device->dev,
			   "No receive handler for client %p on port %d\n",
			   &mad_agent->agent, port_priv->port_num);
		deref_mad_agent(mad_agent);
		mad_agent = NULL;
	}

	return mad_agent;
}
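
/*
 * Routing recap: a response MAD is matched to its sender through the high
 * 32 bits of the TID, which carry the agent ID used as the index into the
 * ib_mad_clients xarray; request MADs instead walk the per-port
 * version/class/method (and OUI) tables built by the add_*_reg_req()
 * helpers.
 */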

static int validate_mad(const struct ib_mad_hdr *mad_hdr,
			const struct ib_mad_qp_info *qp_info,
			bool opa)
{
	int valid = 0;
	u32 qp_num = qp_info->qp->qp_num;

	/* Make sure MAD base version is understood */
	if (mad_hdr->base_version != IB_MGMT_BASE_VERSION &&
	    (!opa || mad_hdr->base_version != OPA_MGMT_BASE_VERSION)) {
		pr_err("MAD received with unsupported base version %d %s\n",
		       mad_hdr->base_version, opa ? "(opa)" : "");
		goto out;
	}

	/* Filter SMI packets sent to other than QP0 */
	if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
	    (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
		if (qp_num == 0)
			valid = 1;
	} else {
		/* CM attributes other than ClassPortInfo only use Send method */
		if ((mad_hdr->mgmt_class == IB_MGMT_CLASS_CM) &&
		    (mad_hdr->attr_id != IB_MGMT_CLASSPORTINFO_ATTR_ID) &&
		    (mad_hdr->method != IB_MGMT_METHOD_SEND))
			goto out;
		/* Filter GSI packets sent to QP0 */
		if (qp_num != 0)
			valid = 1;
	}

out:
	return valid;
}

static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv,
			    const struct ib_mad_hdr *mad_hdr)
{
	struct ib_rmpp_mad *rmpp_mad;

	rmpp_mad = (struct ib_rmpp_mad *)mad_hdr;
	return !mad_agent_priv->agent.rmpp_version ||
		!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent) ||
		!(ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
				    IB_MGMT_RMPP_FLAG_ACTIVE) ||
		(rmpp_mad->rmpp_hdr.rmpp_type == IB_MGMT_RMPP_TYPE_DATA);
}

static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr,
				     const struct ib_mad_recv_wc *rwc)
{
	return ((struct ib_mad_hdr *)(wr->send_buf.mad))->mgmt_class ==
		rwc->recv_buf.mad->mad_hdr.mgmt_class;
}

static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv,
				   const struct ib_mad_send_wr_private *wr,
				   const struct ib_mad_recv_wc *rwc)
{
	struct rdma_ah_attr attr;
	u8 send_resp, rcv_resp;
	union ib_gid sgid;
	struct ib_device *device = mad_agent_priv->agent.device;
	u8 port_num = mad_agent_priv->agent.port_num;
	u8 lmc;
	bool has_grh;

	send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad);
	rcv_resp = ib_response_mad(&rwc->recv_buf.mad->mad_hdr);

	if (send_resp == rcv_resp)
		/* both requests, or both responses: GIDs are different */
		return 0;

	if (rdma_query_ah(wr->send_buf.ah, &attr))
		/* Assume not equal, to avoid false positives. */
		return 0;

	has_grh = !!(rdma_ah_get_ah_flags(&attr) & IB_AH_GRH);
	if (has_grh != !!(rwc->wc->wc_flags & IB_WC_GRH))
		/* one has a GID, the other does not: assume different */
		return 0;

	if (!send_resp && rcv_resp) {
		/* This is a request/response pair. */
		if (!has_grh) {
			if (ib_get_cached_lmc(device, port_num, &lmc))
				return 0;
			return (!lmc || !((rdma_ah_get_path_bits(&attr) ^
					   rwc->wc->dlid_path_bits) &
					  ((1 << lmc) - 1)));
		} else {
			const struct ib_global_route *grh =
					rdma_ah_read_grh(&attr);

			if (rdma_query_gid(device, port_num,
					   grh->sgid_index, &sgid))
				return 0;
			return !memcmp(sgid.raw, rwc->recv_buf.grh->dgid.raw,
				       16);
		}
	}

	if (!has_grh)
		return rdma_ah_get_dlid(&attr) == rwc->wc->slid;
	else
		return !memcmp(rdma_ah_read_grh(&attr)->dgid.raw,
			       rwc->recv_buf.grh->sgid.raw,
			       16);
}
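
/*
 * LMC example for the path-bits check above: with lmc == 2 a port answers
 * to four LIDs, so only the low two path bits matter. If the AH was built
 * with path bits 0x5 and the receive completion reports dlid_path_bits
 * 0x1, then (0x5 ^ 0x1) & 0x3 == 0 and the two LIDs are treated as the
 * same port.
 */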

static inline int is_direct(u8 class)
{
	return (class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE);
}

struct ib_mad_send_wr_private*
ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv,
		 const struct ib_mad_recv_wc *wc)
{
	struct ib_mad_send_wr_private *wr;
	const struct ib_mad_hdr *mad_hdr;

	mad_hdr = &wc->recv_buf.mad->mad_hdr;

	list_for_each_entry(wr, &mad_agent_priv->wait_list, agent_list) {
		if ((wr->tid == mad_hdr->tid) &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}

	/*
	 * It's possible to receive the response before we've
	 * been notified that the send has completed
	 */
	list_for_each_entry(wr, &mad_agent_priv->send_list, agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv, wr->send_buf.mad) &&
		    wr->tid == mad_hdr->tid &&
		    wr->timeout &&
		    rcv_has_same_class(wr, wc) &&
		    /*
		     * Don't check GID for direct routed MADs.
		     * These might have permissive LIDs.
		     */
		    (is_direct(mad_hdr->mgmt_class) ||
		     rcv_has_same_gid(mad_agent_priv, wr, wc)))
			/* Verify request has not been canceled */
			return (wr->status == IB_WC_SUCCESS) ? wr : NULL;
	}
	return NULL;
}

void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr)
{
	mad_send_wr->timeout = 0;
	if (mad_send_wr->refcount == 1)
		list_move_tail(&mad_send_wr->agent_list,
			       &mad_send_wr->mad_agent_priv->done_list);
}

static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
				 struct ib_mad_recv_wc *mad_recv_wc)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
	ret = ib_mad_enforce_security(mad_agent_priv,
				      mad_recv_wc->wc->pkey_index);
	if (ret) {
		ib_free_recv_mad(mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
		return;
	}

	list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
						      mad_recv_wc);
		if (!mad_recv_wc) {
			deref_mad_agent(mad_agent_priv);
			return;
		}
	}

	/* Complete corresponding request */
	if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) {
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
		if (!mad_send_wr) {
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
			if (!ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)
			    && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
			    && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr)
				& IB_MGMT_RMPP_FLAG_ACTIVE)) {
				/*
				 * User-space RMPP is in effect
				 * and this is an active RMPP MAD.
				 */
				mad_agent_priv->agent.recv_handler(
						&mad_agent_priv->agent, NULL,
						mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
			} else {
				/*
				 * Not user-space RMPP: revert to normal
				 * behavior and drop the MAD.
				 */
				ib_free_recv_mad(mad_recv_wc);
				deref_mad_agent(mad_agent_priv);
				return;
			}
		} else {
			ib_mark_mad_done(mad_send_wr);
			spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

			/* Defined behavior is to complete response before request */
			mad_agent_priv->agent.recv_handler(
					&mad_agent_priv->agent,
					&mad_send_wr->send_buf,
					mad_recv_wc);
			deref_mad_agent(mad_agent_priv);

			mad_send_wc.status = IB_WC_SUCCESS;
			mad_send_wc.vendor_err = 0;
			mad_send_wc.send_buf = &mad_send_wr->send_buf;
			ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
		}
	} else {
		mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, NULL,
						   mad_recv_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv,
				     const struct ib_mad_qp_info *qp_info,
				     const struct ib_wc *wc,
				     int port_num,
				     struct ib_mad_private *recv,
				     struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct ib_smp *smp = (struct ib_smp *)recv->mad;

	trace_ib_mad_handle_ib_smi(smp);

	if (smi_handle_dr_smp_recv(smp,
				   rdma_cap_ib_switch(port_priv->device),
				   port_num,
				   port_priv->device->phys_port_cnt) ==
				   IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (smi_handle_dr_smp_send(smp,
					   rdma_cap_ib_switch(port_priv->device),
					   port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (smi_check_local_smp(smp, port_priv->device) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;
	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    response->mad_size,
				    false);

		return IB_SMI_DISCARD;
	}
	return IB_SMI_HANDLE;
}
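
/*
 * Outcome summary for the checks above: IB_SMI_LOCAL goes straight to
 * local MAD dispatch; IB_SMI_SEND first runs the outgoing hop checks and
 * may still be discarded; on a switch, any other forward decision is
 * relayed toward smi_get_fwd_port() and reported as IB_SMI_DISCARD so the
 * caller stops processing the packet locally.
 */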

static bool generate_unmatched_resp(const struct ib_mad_private *recv,
				    struct ib_mad_private *response,
				    size_t *resp_len, bool opa)
{
	const struct ib_mad_hdr *recv_hdr = (const struct ib_mad_hdr *)recv->mad;
	struct ib_mad_hdr *resp_hdr = (struct ib_mad_hdr *)response->mad;

	if (recv_hdr->method == IB_MGMT_METHOD_GET ||
	    recv_hdr->method == IB_MGMT_METHOD_SET) {
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.mad = (struct ib_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;
		resp_hdr->method = IB_MGMT_METHOD_GET_RESP;
		resp_hdr->status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
		if (recv_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
			resp_hdr->status |= IB_SMP_DIRECTION;

		if (opa && recv_hdr->base_version == OPA_MGMT_BASE_VERSION) {
			if (recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_LID_ROUTED ||
			    recv_hdr->mgmt_class ==
			    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
				*resp_len = opa_get_smp_header_size(
							(struct opa_smp *)recv->mad);
			else
				*resp_len = sizeof(struct ib_mad_hdr);
		}

		return true;
	} else {
		return false;
	}
}
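
/*
 * Example of the behavior above: an unmatched Get(PortInfo) is bounced
 * back as a GetResp() carrying status "unsupported method/attribute", and
 * for a directed-route SMP the direction bit is also set so the reply
 * retraces the recorded return path.
 */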

static enum smi_action
handle_opa_smi(struct ib_mad_port_private *port_priv,
	       struct ib_mad_qp_info *qp_info,
	       struct ib_wc *wc,
	       int port_num,
	       struct ib_mad_private *recv,
	       struct ib_mad_private *response)
{
	enum smi_forward_action retsmi;
	struct opa_smp *smp = (struct opa_smp *)recv->mad;

	trace_ib_mad_handle_opa_smi(smp);

	if (opa_smi_handle_dr_smp_recv(smp,
				       rdma_cap_ib_switch(port_priv->device),
				       port_num,
				       port_priv->device->phys_port_cnt) ==
				       IB_SMI_DISCARD)
		return IB_SMI_DISCARD;

	retsmi = opa_smi_check_forward_dr_smp(smp);
	if (retsmi == IB_SMI_LOCAL)
		return IB_SMI_HANDLE;

	if (retsmi == IB_SMI_SEND) { /* don't forward */
		if (opa_smi_handle_dr_smp_send(smp,
					       rdma_cap_ib_switch(port_priv->device),
					       port_num) == IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

		if (opa_smi_check_local_smp(smp, port_priv->device) ==
		    IB_SMI_DISCARD)
			return IB_SMI_DISCARD;

	} else if (rdma_cap_ib_switch(port_priv->device)) {
		/* forward case for switches */
		memcpy(response, recv, mad_priv_size(response));
		response->header.recv_wc.wc = &response->header.wc;
		response->header.recv_wc.recv_buf.opa_mad =
				(struct opa_mad *)response->mad;
		response->header.recv_wc.recv_buf.grh = &response->grh;

		agent_send_response((const struct ib_mad_hdr *)response->mad,
				    &response->grh, wc,
				    port_priv->device,
				    opa_smi_get_fwd_port(smp),
				    qp_info->qp->qp_num,
				    recv->header.wc.byte_len,
				    true);

		return IB_SMI_DISCARD;
	}

	return IB_SMI_HANDLE;
}

static enum smi_action
handle_smi(struct ib_mad_port_private *port_priv,
	   struct ib_mad_qp_info *qp_info,
	   struct ib_wc *wc,
	   int port_num,
	   struct ib_mad_private *recv,
	   struct ib_mad_private *response,
	   bool opa)
{
	struct ib_mad_hdr *mad_hdr = (struct ib_mad_hdr *)recv->mad;

	if (opa && mad_hdr->base_version == OPA_MGMT_BASE_VERSION &&
	    mad_hdr->class_version == OPA_SM_CLASS_VERSION)
		return handle_opa_smi(port_priv, qp_info, wc, port_num, recv,
				      response);

	return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
}

static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv, *response = NULL;
	struct ib_mad_agent_private *mad_agent;
	int port_num;
	int ret = IB_MAD_RESULT_SUCCESS;
	size_t mad_size;
	u16 resp_mad_pkey_index = 0;
	bool opa;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		/*
		 * Receive errors indicate that the QP has entered the error
		 * state - error handling/shutdown code will clean up
		 */
		return;
	}

	qp_info = mad_list->mad_queue->qp_info;
	dequeue_mad(mad_list);

	opa = rdma_cap_opa_mad(qp_info->port_priv->device,
			       qp_info->port_priv->port_num);

	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
				    mad_list);
	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
	ib_dma_unmap_single(port_priv->device,
			    recv->header.mapping,
			    mad_priv_dma_size(recv),
			    DMA_FROM_DEVICE);

	/* Setup MAD receive work completion from "normal" work completion */
	recv->header.wc = *wc;
	recv->header.recv_wc.wc = &recv->header.wc;

	if (opa && ((struct ib_mad_hdr *)(recv->mad))->base_version == OPA_MGMT_BASE_VERSION) {
		recv->header.recv_wc.mad_len = wc->byte_len - sizeof(struct ib_grh);
		recv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
	} else {
		recv->header.recv_wc.mad_len = sizeof(struct ib_mad);
		recv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
	}

	recv->header.recv_wc.recv_buf.mad = (struct ib_mad *)recv->mad;
	recv->header.recv_wc.recv_buf.grh = &recv->grh;

	/* Validate MAD */
	if (!validate_mad((const struct ib_mad_hdr *)recv->mad, qp_info, opa))
		goto out;

	trace_ib_mad_recv_done_handler(qp_info, wc,
				       (struct ib_mad_hdr *)recv->mad);

	mad_size = recv->mad_size;
	response = alloc_mad_private(mad_size, GFP_KERNEL);
	if (!response)
		goto out;

	if (rdma_cap_ib_switch(port_priv->device))
		port_num = wc->port_num;
	else
		port_num = port_priv->port_num;

	if (((struct ib_mad_hdr *)recv->mad)->mgmt_class ==
	    IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (handle_smi(port_priv, qp_info, wc, port_num, recv,
			       response, opa)
		    == IB_SMI_DISCARD)
			goto out;
	}

	/* Give the driver "right of first refusal" on the incoming MAD */
	if (port_priv->device->ops.process_mad) {
		ret = port_priv->device->ops.process_mad(
			port_priv->device, 0, port_priv->port_num, wc,
			&recv->grh, (const struct ib_mad *)recv->mad,
			(struct ib_mad *)response->mad, &mad_size,
			&resp_mad_pkey_index);

		if (opa)
			wc->pkey_index = resp_mad_pkey_index;

		if (ret & IB_MAD_RESULT_SUCCESS) {
			if (ret & IB_MAD_RESULT_CONSUMED)
				goto out;
			if (ret & IB_MAD_RESULT_REPLY) {
				agent_send_response((const struct ib_mad_hdr *)response->mad,
						    &recv->grh, wc,
						    port_priv->device,
						    port_num,
						    qp_info->qp->qp_num,
						    mad_size, opa);
				goto out;
			}
		}
	}

	mad_agent = find_mad_agent(port_priv, (const struct ib_mad_hdr *)recv->mad);
	if (mad_agent) {
		trace_ib_mad_recv_done_agent(mad_agent);
		ib_mad_complete_recv(mad_agent, &recv->header.recv_wc);
		/*
		 * recv is freed up in error cases in ib_mad_complete_recv
		 * or via recv_handler in ib_mad_complete_recv()
		 */
		recv = NULL;
	} else if ((ret & IB_MAD_RESULT_SUCCESS) &&
		   generate_unmatched_resp(recv, response, &mad_size, opa)) {
		agent_send_response((const struct ib_mad_hdr *)response->mad, &recv->grh, wc,
				    port_priv->device, port_num,
				    qp_info->qp->qp_num, mad_size, opa);
	}

out:
	/* Post another receive request for this QP */
	if (response) {
		ib_mad_post_receive_mads(qp_info, response);
		kfree(recv);
	} else
		ib_mad_post_receive_mads(qp_info, recv);
}
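
/*
 * Receive pipeline order, as implemented above: validate the MAD, run the
 * SMP relay logic for directed-route packets, offer the MAD to the device
 * driver via ops.process_mad(), then dispatch to a registered agent; a
 * still-unclaimed Get/Set is answered with a generated error response.
 * Either the response buffer or the recycled receive buffer is reposted
 * so the QP never runs dry.
 */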

static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
		cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
					 &mad_agent_priv->timed_work, delay);
		}
	}
}

static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *temp_mad_send_wr;
	struct list_head *list_item;
	unsigned long delay;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	list_del(&mad_send_wr->agent_list);

	delay = mad_send_wr->timeout;
	mad_send_wr->timeout += jiffies;

	if (delay) {
		list_for_each_prev(list_item, &mad_agent_priv->wait_list) {
			temp_mad_send_wr = list_entry(list_item,
						struct ib_mad_send_wr_private,
						agent_list);
			if (time_after(mad_send_wr->timeout,
				       temp_mad_send_wr->timeout))
				break;
		}
	} else {
		list_item = &mad_agent_priv->wait_list;
	}
	list_add(&mad_send_wr->agent_list, list_item);

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list)
		mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				 &mad_agent_priv->timed_work, delay);
}
2215*4882a593Smuzhiyun
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
			  unsigned long timeout_ms)
{
	mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	wait_for_response(mad_send_wr);
}

/*
 * Process a send work completion
 */
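/*
 * Reference-count notes (as implemented below): a send that expects a
 * response holds two references -- one dropped by this send
 * completion and one dropped when the response arrives or the request
 * times out.  When the send completion leaves exactly one reference
 * with a timeout armed, the WR is parked on the wait list via
 * wait_for_response(); once the count reaches zero the client's
 * send_handler is invoked.
 */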
void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
			     struct ib_mad_send_wc *mad_send_wc)
{
	struct ib_mad_agent_private *mad_agent_priv;
	unsigned long flags;
	int ret;

	mad_agent_priv = mad_send_wr->mad_agent_priv;
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
		ret = ib_process_rmpp_send_wc(mad_send_wr, mad_send_wc);
		if (ret == IB_RMPP_RESULT_CONSUMED)
			goto done;
	} else
		ret = IB_RMPP_RESULT_UNHANDLED;

	if (mad_send_wc->status != IB_WC_SUCCESS &&
	    mad_send_wr->status == IB_WC_SUCCESS) {
		mad_send_wr->status = mad_send_wc->status;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	if (--mad_send_wr->refcount > 0) {
		if (mad_send_wr->refcount == 1 && mad_send_wr->timeout &&
		    mad_send_wr->status == IB_WC_SUCCESS) {
			wait_for_response(mad_send_wr);
		}
		goto done;
	}

	/* Remove send from MAD agent and notify client of completion */
	list_del(&mad_send_wr->agent_list);
	adjust_timeout(mad_agent_priv);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	if (mad_send_wr->status != IB_WC_SUCCESS)
		mad_send_wc->status = mad_send_wr->status;
	if (ret == IB_RMPP_RESULT_INTERNAL)
		ib_rmpp_send_handler(mad_send_wc);
	else
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   mad_send_wc);

	/* Release reference on agent taken when sending */
	deref_mad_agent(mad_agent_priv);
	return;
done:
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

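/*
 * Send completion handler.  Besides completing the WR, this moves one
 * entry (if any) from the QP's overflow list onto the hardware send
 * queue: sends posted while the queue was full are parked on
 * qp_info->overflow_list and reposted here as slots free up.
 */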
static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_mad_port_private *port_priv = cq->cq_context;
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_send_wr_private *mad_send_wr, *queued_send_wr;
	struct ib_mad_qp_info *qp_info;
	struct ib_mad_queue *send_queue;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags;
	int ret;

	if (list_empty_careful(&port_priv->port_list))
		return;

	if (wc->status != IB_WC_SUCCESS) {
		if (!ib_mad_send_error(port_priv, wc))
			return;
	}

	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	send_queue = mad_list->mad_queue;
	qp_info = send_queue->qp_info;

	trace_ib_mad_send_done_agent(mad_send_wr->mad_agent_priv);
	trace_ib_mad_send_done_handler(mad_send_wr, wc);

retry:
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->header_mapping,
			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
			    mad_send_wr->payload_mapping,
			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
	queued_send_wr = NULL;
	spin_lock_irqsave(&send_queue->lock, flags);
	list_del(&mad_list->list);

	/* Move queued send to the send queue */
	if (send_queue->count-- > send_queue->max_active) {
		mad_list = container_of(qp_info->overflow_list.next,
					struct ib_mad_list_head, list);
		queued_send_wr = container_of(mad_list,
					      struct ib_mad_send_wr_private,
					      mad_list);
		list_move_tail(&mad_list->list, &send_queue->list);
	}
	spin_unlock_irqrestore(&send_queue->lock, flags);

	mad_send_wc.send_buf = &mad_send_wr->send_buf;
	mad_send_wc.status = wc->status;
	mad_send_wc.vendor_err = wc->vendor_err;
	ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);

	if (queued_send_wr) {
		trace_ib_mad_send_done_resend(queued_send_wr, qp_info);
		ret = ib_post_send(qp_info->qp, &queued_send_wr->send_wr.wr,
				   NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"ib_post_send failed: %d\n", ret);
			mad_send_wr = queued_send_wr;
			wc->status = IB_WC_LOC_QP_OP_ERR;
			goto retry;
		}
	}
}

static void mark_sends_for_retry(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_list_head *mad_list;
	unsigned long flags;

	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
	list_for_each_entry(mad_list, &qp_info->send_queue.list, list) {
		mad_send_wr = container_of(mad_list,
					   struct ib_mad_send_wr_private,
					   mad_list);
		mad_send_wr->retry = 1;
	}
	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
}

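/*
 * Handle a send completion error.  Returns true if the offending WR
 * should be completed with the error status, false if it was
 * successfully reposted and the completion should be swallowed by the
 * caller.
 */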
static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
			      struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
	struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;
	struct ib_mad_send_wr_private *mad_send_wr;
	int ret;

	/*
	 * Send errors will transition the QP to SQE - move
	 * QP to RTS and repost flushed work requests
	 */
	mad_send_wr = container_of(mad_list, struct ib_mad_send_wr_private,
				   mad_list);
	if (wc->status == IB_WC_WR_FLUSH_ERR) {
		if (mad_send_wr->retry) {
			/* Repost send */
			mad_send_wr->retry = 0;
			trace_ib_mad_error_handler(mad_send_wr, qp_info);
			ret = ib_post_send(qp_info->qp, &mad_send_wr->send_wr.wr,
					   NULL);
			if (!ret)
				return false;
		}
	} else {
		struct ib_qp_attr *attr;

		/* Transition QP to RTS and fail offending send */
		attr = kmalloc(sizeof *attr, GFP_KERNEL);
		if (attr) {
			attr->qp_state = IB_QPS_RTS;
			attr->cur_qp_state = IB_QPS_SQE;
			ret = ib_modify_qp(qp_info->qp, attr,
					   IB_QP_STATE | IB_QP_CUR_STATE);
			kfree(attr);
			if (ret)
				dev_err(&port_priv->device->dev,
					"%s - ib_modify_qp to RTS: %d\n",
					__func__, ret);
			else
				mark_sends_for_retry(qp_info);
		}
	}

	return true;
}

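/*
 * Flush every outstanding send for an agent (typically when the agent
 * unregisters).  Sends still on the send queue are only marked
 * IB_WC_WR_FLUSH_ERR here and complete through the normal send-done
 * path; waiters are spliced off the wait list and completed
 * immediately below.
 */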
static void cancel_mads(struct ib_mad_agent_private *mad_agent_priv)
{
	unsigned long flags;
	struct ib_mad_send_wr_private *mad_send_wr, *temp_mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	struct list_head cancel_list;

	INIT_LIST_HEAD(&cancel_list);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &mad_agent_priv->send_list, agent_list) {
		if (mad_send_wr->status == IB_WC_SUCCESS) {
			mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
			mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
		}
	}

	/* Empty wait list to prevent receives from finding a request */
	list_splice_init(&mad_agent_priv->wait_list, &cancel_list);
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

	/* Report all cancelled requests */
	mad_send_wc.status = IB_WC_WR_FLUSH_ERR;
	mad_send_wc.vendor_err = 0;

	list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr,
				 &cancel_list, agent_list) {
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		list_del(&mad_send_wr->agent_list);
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);
		deref_mad_agent(mad_agent_priv);
	}
}

static struct ib_mad_send_wr_private*
find_send_wr(struct ib_mad_agent_private *mad_agent_priv,
	     struct ib_mad_send_buf *send_buf)
{
	struct ib_mad_send_wr_private *mad_send_wr;

	list_for_each_entry(mad_send_wr, &mad_agent_priv->wait_list,
			    agent_list) {
		if (&mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}

	list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list,
			    agent_list) {
		if (is_rmpp_data_mad(mad_agent_priv,
				     mad_send_wr->send_buf.mad) &&
		    &mad_send_wr->send_buf == send_buf)
			return mad_send_wr;
	}
	return NULL;
}

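/*
 * Change the timeout of an outstanding send, or cancel it entirely
 * when timeout_ms is zero.  A send that is still "active" (no timeout
 * armed, or still awaiting its send completion) just has its timeout
 * value updated; one already parked on the wait list is re-queued
 * with the new deadline via ib_reset_mad_timeout().
 */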
int ib_modify_mad(struct ib_mad_agent *mad_agent,
		  struct ib_mad_send_buf *send_buf, u32 timeout_ms)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	unsigned long flags;
	int active;

	mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private,
				      agent);
	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	mad_send_wr = find_send_wr(mad_agent_priv, send_buf);
	if (!mad_send_wr || mad_send_wr->status != IB_WC_SUCCESS) {
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		return -EINVAL;
	}

	active = (!mad_send_wr->timeout || mad_send_wr->refcount > 1);
	if (!timeout_ms) {
		mad_send_wr->status = IB_WC_WR_FLUSH_ERR;
		mad_send_wr->refcount -= (mad_send_wr->timeout > 0);
	}

	mad_send_wr->send_buf.timeout_ms = timeout_ms;
	if (active)
		mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
	else
		ib_reset_mad_timeout(mad_send_wr, timeout_ms);

	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
	return 0;
}
EXPORT_SYMBOL(ib_modify_mad);

void ib_cancel_mad(struct ib_mad_agent *mad_agent,
		   struct ib_mad_send_buf *send_buf)
{
	ib_modify_mad(mad_agent, send_buf, 0);
}
EXPORT_SYMBOL(ib_cancel_mad);

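/*
 * Work handler for sends that are processed locally (addressed back
 * to the same port).  The "receive" side is synthesized here with a
 * fabricated work completion (build_smp_wc()) and handed straight to
 * the receiving agent, after which the originating send is completed
 * with IB_WC_SUCCESS.
 */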
static void local_completions(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_local_private *local;
	struct ib_mad_agent_private *recv_mad_agent;
	unsigned long flags;
	int free_mad;
	struct ib_wc wc;
	struct ib_mad_send_wc mad_send_wc;
	bool opa;

	mad_agent_priv =
		container_of(work, struct ib_mad_agent_private, local_work);

	opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,
			       mad_agent_priv->qp_info->port_priv->port_num);

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->local_list)) {
		local = list_entry(mad_agent_priv->local_list.next,
				   struct ib_mad_local_private,
				   completion_list);
		list_del(&local->completion_list);
		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
		free_mad = 0;
		if (local->mad_priv) {
			u8 base_version;

			recv_mad_agent = local->recv_mad_agent;
			if (!recv_mad_agent) {
				dev_err(&mad_agent_priv->agent.device->dev,
					"No receive MAD agent for local completion\n");
				free_mad = 1;
				goto local_send_completion;
			}

			/*
			 * Defined behavior is to complete response
			 * before request
			 */
			build_smp_wc(recv_mad_agent->agent.qp,
				     local->mad_send_wr->send_wr.wr.wr_cqe,
				     be16_to_cpu(IB_LID_PERMISSIVE),
				     local->mad_send_wr->send_wr.pkey_index,
				     recv_mad_agent->agent.port_num, &wc);

			local->mad_priv->header.recv_wc.wc = &wc;

			base_version = ((struct ib_mad_hdr *)(local->mad_priv->mad))->base_version;
			if (opa && base_version == OPA_MGMT_BASE_VERSION) {
				local->mad_priv->header.recv_wc.mad_len = local->return_wc_byte_len;
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct opa_mad);
			} else {
				local->mad_priv->header.recv_wc.mad_len = sizeof(struct ib_mad);
				local->mad_priv->header.recv_wc.mad_seg_size = sizeof(struct ib_mad);
			}

			INIT_LIST_HEAD(&local->mad_priv->header.recv_wc.rmpp_list);
			list_add(&local->mad_priv->header.recv_wc.recv_buf.list,
				 &local->mad_priv->header.recv_wc.rmpp_list);
			local->mad_priv->header.recv_wc.recv_buf.grh = NULL;
			local->mad_priv->header.recv_wc.recv_buf.mad =
				(struct ib_mad *)local->mad_priv->mad;
			recv_mad_agent->agent.recv_handler(
				&recv_mad_agent->agent,
				&local->mad_send_wr->send_buf,
				&local->mad_priv->header.recv_wc);
			spin_lock_irqsave(&recv_mad_agent->lock, flags);
			deref_mad_agent(recv_mad_agent);
			spin_unlock_irqrestore(&recv_mad_agent->lock, flags);
		}

local_send_completion:
		/* Complete send */
		mad_send_wc.status = IB_WC_SUCCESS;
		mad_send_wc.vendor_err = 0;
		mad_send_wc.send_buf = &local->mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		spin_lock_irqsave(&mad_agent_priv->lock, flags);
		deref_mad_agent(mad_agent_priv);
		if (free_mad)
			kfree(local->mad_priv);
		kfree(local);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

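/*
 * Try to (re)transmit a timed-out send.  Returns 0 when a retry was
 * issued (or consumed by the RMPP layer) and the WR went back on the
 * send list; -ETIMEDOUT when the retry budget is exhausted, in which
 * case the caller reports the timeout to the client.
 */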
static int retry_send(struct ib_mad_send_wr_private *mad_send_wr)
{
	int ret;

	if (!mad_send_wr->retries_left)
		return -ETIMEDOUT;

	mad_send_wr->retries_left--;
	mad_send_wr->send_buf.retries++;

	mad_send_wr->timeout = msecs_to_jiffies(mad_send_wr->send_buf.timeout_ms);

	if (ib_mad_kernel_rmpp_agent(&mad_send_wr->mad_agent_priv->agent)) {
		ret = ib_retry_rmpp(mad_send_wr);
		switch (ret) {
		case IB_RMPP_RESULT_UNHANDLED:
			ret = ib_send_mad(mad_send_wr);
			break;
		case IB_RMPP_RESULT_CONSUMED:
			ret = 0;
			break;
		default:
			ret = -ECOMM;
			break;
		}
	} else
		ret = ib_send_mad(mad_send_wr);

	if (!ret) {
		mad_send_wr->refcount++;
		list_add_tail(&mad_send_wr->agent_list,
			      &mad_send_wr->mad_agent_priv->send_list);
	}
	return ret;
}

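/*
 * Delayed-work handler that expires entries on the wait list.  The
 * list is deadline-ordered, so processing stops at the first entry
 * that has not yet expired and the work is re-armed for it.  Expired
 * requests are retried via retry_send(); once retries are exhausted
 * the client sees IB_WC_RESP_TIMEOUT_ERR.
 */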
static void timeout_sends(struct work_struct *work)
{
	struct ib_mad_agent_private *mad_agent_priv;
	struct ib_mad_send_wr_private *mad_send_wr;
	struct ib_mad_send_wc mad_send_wc;
	unsigned long flags, delay;

	mad_agent_priv = container_of(work, struct ib_mad_agent_private,
				      timed_work.work);
	mad_send_wc.vendor_err = 0;

	spin_lock_irqsave(&mad_agent_priv->lock, flags);
	while (!list_empty(&mad_agent_priv->wait_list)) {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
					 agent_list);

		if (time_after(mad_send_wr->timeout, jiffies)) {
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
			queue_delayed_work(mad_agent_priv->qp_info->
					   port_priv->wq,
					   &mad_agent_priv->timed_work, delay);
			break;
		}

		list_del(&mad_send_wr->agent_list);
		if (mad_send_wr->status == IB_WC_SUCCESS &&
		    !retry_send(mad_send_wr))
			continue;

		spin_unlock_irqrestore(&mad_agent_priv->lock, flags);

		if (mad_send_wr->status == IB_WC_SUCCESS)
			mad_send_wc.status = IB_WC_RESP_TIMEOUT_ERR;
		else
			mad_send_wc.status = mad_send_wr->status;
		mad_send_wc.send_buf = &mad_send_wr->send_buf;
		mad_agent_priv->agent.send_handler(&mad_agent_priv->agent,
						   &mad_send_wc);

		deref_mad_agent(mad_agent_priv);
		spin_lock_irqsave(&mad_agent_priv->lock, flags);
	}
	spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
}

/*
 * Allocate receive MADs and post receive WRs for them
 */
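/*
 * If @mad is non-NULL it is a previously allocated buffer being
 * returned by the receive path; it is reposted first, before any new
 * buffers are allocated.  Posting continues until the receive queue
 * reaches its max_active depth or an error occurs.
 */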
static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
				    struct ib_mad_private *mad)
{
	unsigned long flags;
	int post, ret;
	struct ib_mad_private *mad_priv;
	struct ib_sge sg_list;
	struct ib_recv_wr recv_wr;
	struct ib_mad_queue *recv_queue = &qp_info->recv_queue;

	/* Initialize common scatter list fields */
	sg_list.lkey = qp_info->port_priv->pd->local_dma_lkey;

	/* Initialize common receive WR fields */
	recv_wr.next = NULL;
	recv_wr.sg_list = &sg_list;
	recv_wr.num_sge = 1;

	do {
		/* Allocate and map receive buffer */
		if (mad) {
			mad_priv = mad;
			mad = NULL;
		} else {
			mad_priv = alloc_mad_private(port_mad_size(qp_info->port_priv),
						     GFP_ATOMIC);
			if (!mad_priv) {
				ret = -ENOMEM;
				break;
			}
		}
		sg_list.length = mad_priv_dma_size(mad_priv);
		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
						 &mad_priv->grh,
						 mad_priv_dma_size(mad_priv),
						 DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
						  sg_list.addr))) {
			kfree(mad_priv);
			ret = -ENOMEM;
			break;
		}
		mad_priv->header.mapping = sg_list.addr;
		mad_priv->header.mad_list.mad_queue = recv_queue;
		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;

		/* Post receive WR */
		spin_lock_irqsave(&recv_queue->lock, flags);
		post = (++recv_queue->count < recv_queue->max_active);
		list_add_tail(&mad_priv->header.mad_list.list, &recv_queue->list);
		spin_unlock_irqrestore(&recv_queue->lock, flags);
		ret = ib_post_recv(qp_info->qp, &recv_wr, NULL);
		if (ret) {
			spin_lock_irqsave(&recv_queue->lock, flags);
			list_del(&mad_priv->header.mad_list.list);
			recv_queue->count--;
			spin_unlock_irqrestore(&recv_queue->lock, flags);
			ib_dma_unmap_single(qp_info->port_priv->device,
					    mad_priv->header.mapping,
					    mad_priv_dma_size(mad_priv),
					    DMA_FROM_DEVICE);
			kfree(mad_priv);
			dev_err(&qp_info->port_priv->device->dev,
				"ib_post_recv failed: %d\n", ret);
			break;
		}
	} while (post);

	return ret;
}

/*
 * Return all the posted receive MADs
 */
static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
{
	struct ib_mad_private_header *mad_priv_hdr;
	struct ib_mad_private *recv;
	struct ib_mad_list_head *mad_list;

	if (!qp_info->qp)
		return;

	while (!list_empty(&qp_info->recv_queue.list)) {
		mad_list = list_entry(qp_info->recv_queue.list.next,
				      struct ib_mad_list_head, list);
		mad_priv_hdr = container_of(mad_list,
					    struct ib_mad_private_header,
					    mad_list);
		recv = container_of(mad_priv_hdr, struct ib_mad_private,
				    header);

		/* Remove from posted receive MAD list */
		list_del(&mad_list->list);

		ib_dma_unmap_single(qp_info->port_priv->device,
				    recv->header.mapping,
				    mad_priv_dma_size(recv),
				    DMA_FROM_DEVICE);
		kfree(recv);
	}

	qp_info->recv_queue.count = 0;
}

/*
 * Start the port
 */
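/*
 * Each MAD QP is walked through the standard RESET->INIT->RTR->RTS
 * sequence.  Special QPs need only a minimal attribute set: a P_Key
 * index and Q_Key for INIT (QP1 uses IB_QP1_QKEY, QP0 a Q_Key of 0)
 * and a starting send PSN for RTS.  Receive buffers are posted once
 * all QPs are in RTS.
 */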
static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
{
	int ret, i;
	struct ib_qp_attr *attr;
	struct ib_qp *qp;
	u16 pkey_index;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(port_priv->device, port_priv->port_num,
			   IB_DEFAULT_PKEY_FULL, &pkey_index);
	if (ret)
		pkey_index = 0;

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		qp = port_priv->qp_info[i].qp;
		if (!qp)
			continue;

		/*
		 * PKey index for QP1 is irrelevant but
		 * one is needed for the Reset to Init transition
		 */
		attr->qp_state = IB_QPS_INIT;
		attr->pkey_index = pkey_index;
		attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE |
				   IB_QP_PKEY_INDEX | IB_QP_QKEY);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to INIT: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTR;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTR: %d\n",
				i, ret);
			goto out;
		}

		attr->qp_state = IB_QPS_RTS;
		attr->sq_psn = IB_MAD_SEND_Q_PSN;
		ret = ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_SQ_PSN);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't change QP%d state to RTS: %d\n",
				i, ret);
			goto out;
		}
	}

	ret = ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP);
	if (ret) {
		dev_err(&port_priv->device->dev,
			"Failed to request completion notification: %d\n",
			ret);
		goto out;
	}

	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
		if (!port_priv->qp_info[i].qp)
			continue;

		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
		if (ret) {
			dev_err(&port_priv->device->dev,
				"Couldn't post receive WRs\n");
			goto out;
		}
	}
out:
	kfree(attr);
	return ret;
}

static void qp_event_handler(struct ib_event *event, void *qp_context)
{
	struct ib_mad_qp_info *qp_info = qp_context;

	/* It's worse than that! He's dead, Jim! */
	dev_err(&qp_info->port_priv->device->dev,
		"Fatal error (%d) on MAD QP (%d)\n",
		event->event, qp_info->qp->qp_num);
}

static void init_mad_queue(struct ib_mad_qp_info *qp_info,
			   struct ib_mad_queue *mad_queue)
{
	mad_queue->qp_info = qp_info;
	mad_queue->count = 0;
	spin_lock_init(&mad_queue->lock);
	INIT_LIST_HEAD(&mad_queue->list);
}

static void init_mad_qp(struct ib_mad_port_private *port_priv,
			struct ib_mad_qp_info *qp_info)
{
	qp_info->port_priv = port_priv;
	init_mad_queue(qp_info, &qp_info->send_queue);
	init_mad_queue(qp_info, &qp_info->recv_queue);
	INIT_LIST_HEAD(&qp_info->overflow_list);
}

static int create_mad_qp(struct ib_mad_qp_info *qp_info,
			 enum ib_qp_type qp_type)
{
	struct ib_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof qp_init_attr);
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.cap.max_send_wr = mad_sendq_size;
	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
	qp_init_attr.port_num = qp_info->port_priv->port_num;
	qp_init_attr.qp_context = qp_info;
	qp_init_attr.event_handler = qp_event_handler;
	qp_info->qp = ib_create_qp(qp_info->port_priv->pd, &qp_init_attr);
	if (IS_ERR(qp_info->qp)) {
		dev_err(&qp_info->port_priv->device->dev,
			"Couldn't create ib_mad QP%d\n",
			get_spl_qp_index(qp_type));
		return PTR_ERR(qp_info->qp);
	}
	/* Use minimum queue sizes unless the CQ is resized */
	qp_info->send_queue.max_active = mad_sendq_size;
	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;
}

static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
	if (!qp_info->qp)
		return;

	ib_destroy_qp(qp_info->qp);
}

/*
 * Open the port
 * Create the PD, CQ, QPs (QP0 only where SMI is supported), and the
 * port workqueue
 */
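/*
 * The CQ is shared by all MAD QPs on the port, so it is sized for the
 * full send+receive depth of a QP, doubled when QP0 (SMI) is present
 * alongside QP1.
 */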
static int ib_mad_port_open(struct ib_device *device,
			    int port_num)
{
	int ret, cq_size;
	struct ib_mad_port_private *port_priv;
	unsigned long flags;
	char name[sizeof "ib_mad123"];
	int has_smi;

	if (WARN_ON(rdma_max_mad_size(device, port_num) < IB_MGMT_MAD_SIZE))
		return -EFAULT;

	if (WARN_ON(rdma_cap_opa_mad(device, port_num) &&
		    rdma_max_mad_size(device, port_num) < OPA_MGMT_MAD_SIZE))
		return -EFAULT;

	/* Create new device info */
	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
	if (!port_priv)
		return -ENOMEM;

	port_priv->device = device;
	port_priv->port_num = port_num;
	spin_lock_init(&port_priv->reg_lock);
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

	cq_size = mad_sendq_size + mad_recvq_size;
	has_smi = rdma_cap_ib_smi(device, port_num);
	if (has_smi)
		cq_size *= 2;

	port_priv->pd = ib_alloc_pd(device, 0);
	if (IS_ERR(port_priv->pd)) {
		dev_err(&device->dev, "Couldn't create ib_mad PD\n");
		ret = PTR_ERR(port_priv->pd);
		goto error3;
	}

	port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
				    IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(port_priv->cq)) {
		dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
		ret = PTR_ERR(port_priv->cq);
		goto error4;
	}

	if (has_smi) {
		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
		if (ret)
			goto error6;
	}
	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
	if (ret)
		goto error7;

	snprintf(name, sizeof name, "ib_mad%d", port_num);
	port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
	if (!port_priv->wq) {
		ret = -ENOMEM;
		goto error8;
	}

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_add_tail(&port_priv->port_list, &ib_mad_port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	ret = ib_mad_port_start(port_priv);
	if (ret) {
		dev_err(&device->dev, "Couldn't start port\n");
		goto error9;
	}

	return 0;

error9:
	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
error8:
	destroy_mad_qp(&port_priv->qp_info[1]);
error7:
	destroy_mad_qp(&port_priv->qp_info[0]);
error6:
	ib_free_cq(port_priv->cq);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
error4:
	ib_dealloc_pd(port_priv->pd);
error3:
	kfree(port_priv);

	return ret;
}

/*
 * Close the port
 * If there are no classes using the port, free the port
 * resources (workqueue, QPs, CQ, PD) and remove the port's info
 * structure
 */
static int ib_mad_port_close(struct ib_device *device, int port_num)
{
	struct ib_mad_port_private *port_priv;
	unsigned long flags;

	spin_lock_irqsave(&ib_mad_port_list_lock, flags);
	port_priv = __ib_get_mad_port(device, port_num);
	if (port_priv == NULL) {
		spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);
		dev_err(&device->dev, "Port %d not found\n", port_num);
		return -ENODEV;
	}
	list_del_init(&port_priv->port_list);
	spin_unlock_irqrestore(&ib_mad_port_list_lock, flags);

	destroy_workqueue(port_priv->wq);
	destroy_mad_qp(&port_priv->qp_info[1]);
	destroy_mad_qp(&port_priv->qp_info[0]);
	ib_free_cq(port_priv->cq);
	ib_dealloc_pd(port_priv->pd);
	cleanup_recv_queue(&port_priv->qp_info[1]);
	cleanup_recv_queue(&port_priv->qp_info[0]);
	/* XXX: Handle deallocation of MAD registration tables */

	kfree(port_priv);

	return 0;
}

static int ib_mad_init_device(struct ib_device *device)
{
	int start, i;
	unsigned int count = 0;
	int ret;

	start = rdma_start_port(device);

	for (i = start; i <= rdma_end_port(device); i++) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		ret = ib_mad_port_open(device, i);
		if (ret) {
			dev_err(&device->dev, "Couldn't open port %d\n", i);
			goto error;
		}
		ret = ib_agent_port_open(device, i);
		if (ret) {
			dev_err(&device->dev,
				"Couldn't open port %d for agents\n", i);
			goto error_agent;
		}
		count++;
	}
	if (!count)
		return -EOPNOTSUPP;

	return 0;

error_agent:
	if (ib_mad_port_close(device, i))
		dev_err(&device->dev, "Couldn't close port %d\n", i);

error:
	while (--i >= start) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
	return ret;
}

static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{
	unsigned int i;

	rdma_for_each_port (device, i) {
		if (!rdma_cap_ib_mad(device, i))
			continue;

		if (ib_agent_port_close(device, i))
			dev_err(&device->dev,
				"Couldn't close port %d for agents\n", i);
		if (ib_mad_port_close(device, i))
			dev_err(&device->dev, "Couldn't close port %d\n", i);
	}
}

static struct ib_client mad_client = {
	.name   = "mad",
	.add    = ib_mad_init_device,
	.remove = ib_mad_remove_device
};

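/*
 * Module init: clamp the module-parameter queue depths to
 * [IB_MAD_QP_MIN_SIZE, IB_MAD_QP_MAX_SIZE] and register as an IB
 * client, so ib_mad_init_device() runs for every MAD-capable port as
 * devices appear.
 */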
int ib_mad_init(void)
{
	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);

	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	INIT_LIST_HEAD(&ib_mad_port_list);

	if (ib_register_client(&mad_client)) {
		pr_err("Couldn't register ib_mad client\n");
		return -EINVAL;
	}

	return 0;
}

void ib_mad_cleanup(void)
{
	ib_unregister_client(&mad_client);
}