/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nsproxy.h>

#include <linux/nospec.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_netlink.h>
#include "core_priv.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int max_backlog = 1024;

static struct ctl_table_header *ucma_ctl_table_hdr;
static struct ctl_table ucma_ctl_table[] = {
	{
		.procname	= "max_backlog",
		.data		= &max_backlog,
		.maxlen		= sizeof max_backlog,
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

struct ucma_file {
	struct mutex		mut;
	struct file		*filp;
	struct list_head	ctx_list;
	struct list_head	event_list;
	wait_queue_head_t	poll_wait;
};

struct ucma_context {
	u32			id;
	struct completion	comp;
	refcount_t		ref;
	int			events_reported;
	atomic_t		backlog;

	struct ucma_file	*file;
	struct rdma_cm_id	*cm_id;
	struct mutex		mutex;
	u64			uid;

	struct list_head	list;
	struct list_head	mc_list;
	struct work_struct	close_work;
};

struct ucma_multicast {
	struct ucma_context	*ctx;
	u32			id;
	int			events_reported;

	u64			uid;
	u8			join_state;
	struct list_head	list;
	struct sockaddr_storage	addr;
};

struct ucma_event {
	struct ucma_context	*ctx;
	struct ucma_context	*conn_req_ctx;
	struct ucma_multicast	*mc;
	struct list_head	list;
	struct rdma_ucm_event_resp resp;
};

static DEFINE_XARRAY_ALLOC(ctx_table);
static DEFINE_XARRAY_ALLOC(multicast_table);

static const struct file_operations ucma_fops;
static int ucma_destroy_private_ctx(struct ucma_context *ctx);

static inline struct ucma_context *_ucma_find_context(int id,
						      struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = xa_load(&ctx_table, id);
	if (!ctx)
		ctx = ERR_PTR(-ENOENT);
	else if (ctx->file != file)
		ctx = ERR_PTR(-EINVAL);
	return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
	struct ucma_context *ctx;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(id, file);
	if (!IS_ERR(ctx))
		if (!refcount_inc_not_zero(&ctx->ref))
			ctx = ERR_PTR(-ENXIO);
	xa_unlock(&ctx_table);
	return ctx;
}

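/*
 * Dropping the last reference completes ctx->comp, which ucma_close_id()
 * waits on before it is allowed to destroy the underlying cm_id.
 */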
static void ucma_put_ctx(struct ucma_context *ctx)
{
	if (refcount_dec_and_test(&ctx->ref))
		complete(&ctx->comp);
}

/*
 * Same as ucma_get_ctx but requires that ->cm_id->device is valid, e.g. that
 * the CM_ID is bound.
 */
static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
{
	struct ucma_context *ctx = ucma_get_ctx(file, id);

	if (IS_ERR(ctx))
		return ctx;
	if (!ctx->cm_id->device) {
		ucma_put_ctx(ctx);
		return ERR_PTR(-EINVAL);
	}
	return ctx;
}

static void ucma_close_id(struct work_struct *work)
{
	struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

	/*
	 * Once all inflight tasks are finished, we close all underlying
	 * resources. The context is still alive until its explicit destruction
	 * by its creator. This puts back the xarray's reference.
	 */
	ucma_put_ctx(ctx);
	wait_for_completion(&ctx->comp);
	/* No new events will be generated after destroying the id. */
	rdma_destroy_id(ctx->cm_id);

	/* Reading the cm_id without holding a positive ref is not allowed */
	ctx->cm_id = NULL;
}

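/*
 * Allocate a context and reserve an ID for it. The entry is stored in the
 * xarray as NULL, so the ID is reserved but not visible to lookups until
 * ucma_finish_ctx() publishes the context.
 */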
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
	struct ucma_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	INIT_WORK(&ctx->close_work, ucma_close_id);
	init_completion(&ctx->comp);
	INIT_LIST_HEAD(&ctx->mc_list);
	/* So list_del() will work if we don't do ucma_finish_ctx() */
	INIT_LIST_HEAD(&ctx->list);
	ctx->file = file;
	mutex_init(&ctx->mutex);

	if (xa_alloc(&ctx_table, &ctx->id, NULL, xa_limit_32b, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}
	return ctx;
}

static void ucma_set_ctx_cm_id(struct ucma_context *ctx,
			       struct rdma_cm_id *cm_id)
{
	refcount_set(&ctx->ref, 1);
	ctx->cm_id = cm_id;
}

static void ucma_finish_ctx(struct ucma_context *ctx)
{
	lockdep_assert_held(&ctx->file->mut);
	list_add_tail(&ctx->list, &ctx->file->ctx_list);
	xa_store(&ctx_table, ctx->id, ctx, GFP_KERNEL);
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
				 struct rdma_conn_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct ib_device *device,
			       struct rdma_ucm_ud_param *dst,
			       struct rdma_ud_param *src)
{
	if (src->private_data_len)
		memcpy(dst->private_data, src->private_data,
		       src->private_data_len);
	dst->private_data_len = src->private_data_len;
	ib_copy_ah_attr_to_user(device, &dst->ah_attr, &src->ah_attr);
	dst->qp_num = src->qp_num;
	dst->qkey = src->qkey;
}

static struct ucma_event *ucma_create_uevent(struct ucma_context *ctx,
					     struct rdma_cm_event *event)
{
	struct ucma_event *uevent;

	uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
	if (!uevent)
		return NULL;

	uevent->ctx = ctx;
	switch (event->event) {
	case RDMA_CM_EVENT_MULTICAST_JOIN:
	case RDMA_CM_EVENT_MULTICAST_ERROR:
		uevent->mc = (struct ucma_multicast *)
			     event->param.ud.private_data;
		uevent->resp.uid = uevent->mc->uid;
		uevent->resp.id = uevent->mc->id;
		break;
	default:
		uevent->resp.uid = ctx->uid;
		uevent->resp.id = ctx->id;
		break;
	}
	uevent->resp.event = event->event;
	uevent->resp.status = event->status;
	if (ctx->cm_id->qp_type == IB_QPT_UD)
		ucma_copy_ud_event(ctx->cm_id->device, &uevent->resp.param.ud,
				   &event->param.ud);
	else
		ucma_copy_conn_event(&uevent->resp.param.conn,
				     &event->param.conn);

	uevent->resp.ece.vendor_id = event->ece.vendor_id;
	uevent->resp.ece.attr_mod = event->ece.attr_mod;
	return uevent;
}

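/*
 * Handle an incoming connection request on a listening ID: consume one
 * backlog slot, wrap the new cm_id in a not-yet-public ucma_context, and
 * queue a CONNECT_REQUEST event for userspace. The backlog slot is returned
 * when the event is read in ucma_get_event(), or here on failure.
 */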
static int ucma_connect_event_handler(struct rdma_cm_id *cm_id,
				      struct rdma_cm_event *event)
{
	struct ucma_context *listen_ctx = cm_id->context;
	struct ucma_context *ctx;
	struct ucma_event *uevent;

	if (!atomic_add_unless(&listen_ctx->backlog, -1, 0))
		return -ENOMEM;
	ctx = ucma_alloc_ctx(listen_ctx->file);
	if (!ctx)
		goto err_backlog;
	ucma_set_ctx_cm_id(ctx, cm_id);

	uevent = ucma_create_uevent(listen_ctx, event);
	if (!uevent)
		goto err_alloc;
	uevent->conn_req_ctx = ctx;
	uevent->resp.id = ctx->id;

	ctx->cm_id->context = ctx;

	mutex_lock(&ctx->file->mut);
	ucma_finish_ctx(ctx);
	list_add_tail(&uevent->list, &ctx->file->event_list);
	mutex_unlock(&ctx->file->mut);
	wake_up_interruptible(&ctx->file->poll_wait);
	return 0;

err_alloc:
	ucma_destroy_private_ctx(ctx);
err_backlog:
	atomic_inc(&listen_ctx->backlog);
	/* Returning error causes the new ID to be destroyed */
	return -ENOMEM;
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
			      struct rdma_cm_event *event)
{
	struct ucma_event *uevent;
	struct ucma_context *ctx = cm_id->context;

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
		return ucma_connect_event_handler(cm_id, event);

	/*
	 * We ignore events for new connections until userspace has set their
	 * context. This can only happen if an error occurs on a new connection
	 * before the user accepts it. This is okay, since the accept will just
	 * fail later. However, we do need to release the underlying HW
	 * resources in case of a device removal event.
	 */
	if (ctx->uid) {
		uevent = ucma_create_uevent(ctx, event);
		if (!uevent)
			return 0;

		mutex_lock(&ctx->file->mut);
		list_add_tail(&uevent->list, &ctx->file->event_list);
		mutex_unlock(&ctx->file->mut);
		wake_up_interruptible(&ctx->file->poll_wait);
	}

	if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
		xa_lock(&ctx_table);
		if (xa_load(&ctx_table, ctx->id) == ctx)
			queue_work(system_unbound_wq, &ctx->close_work);
		xa_unlock(&ctx_table);
	}
	return 0;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_get_event cmd;
	struct ucma_event *uevent;

	/*
	 * Old 32 bit user space does not send the 4 byte padding in the
	 * reserved field. We don't care, allow it to keep working.
	 */
	if (out_len < sizeof(uevent->resp) - sizeof(uevent->resp.reserved) -
			      sizeof(uevent->resp.ece))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}

	uevent = list_first_entry(&file->event_list, struct ucma_event, list);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &uevent->resp,
			 min_t(size_t, out_len, sizeof(uevent->resp)))) {
		mutex_unlock(&file->mut);
		return -EFAULT;
	}

	list_del(&uevent->list);
	uevent->ctx->events_reported++;
	if (uevent->mc)
		uevent->mc->events_reported++;
	if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
		atomic_inc(&uevent->ctx->backlog);
	mutex_unlock(&file->mut);

	kfree(uevent);
	return 0;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
	switch (cmd->ps) {
	case RDMA_PS_TCP:
		*qp_type = IB_QPT_RC;
		return 0;
	case RDMA_PS_UDP:
	case RDMA_PS_IPOIB:
		*qp_type = IB_QPT_UD;
		return 0;
	case RDMA_PS_IB:
		*qp_type = cmd->qp_type;
		return 0;
	default:
		return -EINVAL;
	}
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
			      int in_len, int out_len)
{
	struct rdma_ucm_create_id cmd;
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct rdma_cm_id *cm_id;
	enum ib_qp_type qp_type;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ret = ucma_get_qp_type(&cmd, &qp_type);
	if (ret)
		return ret;

	ctx = ucma_alloc_ctx(file);
	if (!ctx)
		return -ENOMEM;

	ctx->uid = cmd.uid;
	cm_id = rdma_create_user_id(ucma_event_handler, ctx, cmd.ps, qp_type);
	if (IS_ERR(cm_id)) {
		ret = PTR_ERR(cm_id);
		goto err1;
	}
	ucma_set_ctx_cm_id(ctx, cm_id);

	resp.id = ctx->id;
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp))) {
		ucma_destroy_private_ctx(ctx);
		return -EFAULT;
	}

	mutex_lock(&file->mut);
	ucma_finish_ctx(ctx);
	mutex_unlock(&file->mut);
	return 0;

err1:
	ucma_destroy_private_ctx(ctx);
	return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
	struct ucma_multicast *mc, *tmp;

	xa_lock(&multicast_table);
	list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
		list_del(&mc->list);
		/*
		 * At this point mc->ctx->ref is 0 so the mc cannot leave the
		 * lock on the reader and this is enough serialization
		 */
		__xa_erase(&multicast_table, mc->id);
		kfree(mc);
	}
	xa_unlock(&multicast_table);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
	struct ucma_event *uevent, *tmp;

	rdma_lock_handler(mc->ctx->cm_id);
	mutex_lock(&mc->ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
		if (uevent->mc != mc)
			continue;

		list_del(&uevent->list);
		kfree(uevent);
	}
	mutex_unlock(&mc->ctx->file->mut);
	rdma_unlock_handler(mc->ctx->cm_id);
}

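/*
 * Remove all events queued on the file for this ctx. An undelivered
 * CONNECT_REQUEST event owns a child context; each such child is atomically
 * hidden behind XA_ZERO_ENTRY so only this thread may destroy it, and the
 * destruction itself happens outside the file mutex.
 */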
static int ucma_cleanup_ctx_events(struct ucma_context *ctx)
{
	int events_reported;
	struct ucma_event *uevent, *tmp;
	LIST_HEAD(list);

	/* Cleanup events not yet reported to the user. */
	mutex_lock(&ctx->file->mut);
	list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
		if (uevent->ctx != ctx)
			continue;

		if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST &&
		    xa_cmpxchg(&ctx_table, uevent->conn_req_ctx->id,
			       uevent->conn_req_ctx, XA_ZERO_ENTRY,
			       GFP_KERNEL) == uevent->conn_req_ctx) {
			list_move_tail(&uevent->list, &list);
			continue;
		}
		list_del(&uevent->list);
		kfree(uevent);
	}
	list_del(&ctx->list);
	events_reported = ctx->events_reported;
	mutex_unlock(&ctx->file->mut);

	/*
	 * If this was a listening ID then any connections spawned from it that
	 * have not been delivered to userspace are cleaned up too. Must be done
	 * outside any locks.
	 */
	list_for_each_entry_safe(uevent, tmp, &list, list) {
		ucma_destroy_private_ctx(uevent->conn_req_ctx);
		kfree(uevent);
	}
	return events_reported;
}

/*
 * When this is called the xarray must have a XA_ZERO_ENTRY in the ctx->id
 * (i.e. the ctx is not public to the user). This is either because:
 * - ucma_finish_ctx() hasn't been called
 * - xa_cmpxchg() succeeded in removing the entry (only one thread can succeed)
 */
static int ucma_destroy_private_ctx(struct ucma_context *ctx)
{
	int events_reported;

	/*
	 * Destroy the underlying cm_id. New work queuing is prevented now by
	 * the removal from the xarray. Once the work is canceled ref will
	 * either be 0 because the work ran to completion and consumed the ref
	 * from the xarray, or it will be positive because we still have the
	 * ref from the xarray. This can also be 0 in cases where cm_id was
	 * never set.
	 */
	cancel_work_sync(&ctx->close_work);
	if (refcount_read(&ctx->ref))
		ucma_close_id(&ctx->close_work);

	events_reported = ucma_cleanup_ctx_events(ctx);
	ucma_cleanup_multicast(ctx);

	WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, XA_ZERO_ENTRY, NULL,
			   GFP_KERNEL) != NULL);
	mutex_destroy(&ctx->mutex);
	kfree(ctx);
	return events_reported;
}

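/*
 * Userspace command to destroy an ID. The live entry is atomically swapped
 * for XA_ZERO_ENTRY so concurrent destroys race safely: only the thread that
 * wins the exchange goes on to call ucma_destroy_private_ctx().
 */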
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_context *ctx;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	xa_lock(&ctx_table);
	ctx = _ucma_find_context(cmd.id, file);
	if (!IS_ERR(ctx)) {
		if (__xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
				 GFP_KERNEL) != ctx)
			ctx = ERR_PTR(-ENOENT);
	}
	xa_unlock(&ctx_table);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.events_reported = ucma_destroy_private_ctx(ctx);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_ucm_bind_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_in6(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
			 int in_len, int out_len)
{
	struct rdma_ucm_bind cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved || !cmd.addr_size ||
	    cmd.addr_size != rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_resolve_ip cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
	    !rdma_addr_size_in6(&cmd.dst_addr))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_resolve_addr cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.reserved ||
	    (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) ||
	    !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr)))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
				(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
				  const char __user *inbuf,
				  int in_len, int out_len)
{
	struct rdma_ucm_resolve_route cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		dev_addr = &route->addr.dev_addr;
		rdma_addr_get_dgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].dgid);
		rdma_addr_get_sgid(dev_addr,
				   (union ib_gid *) &resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		fallthrough;
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
				 struct rdma_route *route)
{
	resp->num_paths = route->num_paths;
	switch (route->num_paths) {
	case 0:
		rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
			    (union ib_gid *)&resp->ib_route[0].dgid);
		rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
			    (union ib_gid *)&resp->ib_route[0].sgid);
		resp->ib_route[0].pkey = cpu_to_be16(0xffff);
		break;
	case 2:
		ib_copy_path_rec_to_user(&resp->ib_route[1],
					 &route->path_rec[1]);
		fallthrough;
	case 1:
		ib_copy_path_rec_to_user(&resp->ib_route[0],
					 &route->path_rec[0]);
		break;
	default:
		break;
	}
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
			       struct rdma_route *route)
{
	struct rdma_dev_addr *dev_addr;

	dev_addr = &route->addr.dev_addr;
	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
				const char __user *inbuf,
				int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct rdma_ucm_query_route_resp resp;
	struct ucma_context *ctx;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_route_resp, ibdev_index))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	memset(&resp, 0, sizeof resp);
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
				     sizeof(struct sockaddr_in) :
				     sizeof(struct sockaddr_in6));
	if (!ctx->cm_id->device)
		goto out;

	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
	resp.ibdev_index = ctx->cm_id->device->index;
	resp.port_num = ctx->cm_id->port_num;

	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
	mutex_unlock(&ctx->mutex);
	if (copy_to_user(u64_to_user_ptr(cmd.response), &resp,
			 min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
				   struct rdma_ucm_query_addr_resp *resp)
{
	if (!cm_id->device)
		return;

	resp->node_guid = (__force __u64) cm_id->device->node_guid;
	resp->ibdev_index = cm_id->device->index;
	resp->port_num = cm_id->port_num;
	resp->pkey = (__force __u16) cpu_to_be16(
		     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
	resp.src_size = rdma_addr_size(addr);
	memcpy(&resp.src_addr, addr, resp.src_size);

	addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
	resp.dst_size = rdma_addr_size(addr);
	memcpy(&resp.dst_addr, addr, resp.dst_size);

	ucma_query_device_addr(ctx->cm_id, &resp);

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
			       void __user *response, int out_len)
{
	struct rdma_ucm_query_path_resp *resp;
	int i, ret = 0;

	if (out_len < sizeof(*resp))
		return -ENOSPC;

	resp = kzalloc(out_len, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	resp->num_paths = ctx->cm_id->route.num_paths;
	for (i = 0, out_len -= sizeof(*resp);
	     i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
	     i++, out_len -= sizeof(struct ib_path_rec_data)) {
		struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i];

		resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
					   IB_PATH_BIDIRECTIONAL;
		if (rec->rec_type == SA_PATH_REC_TYPE_OPA) {
			struct sa_path_rec ib;

			sa_convert_path_opa_to_ib(&ib, rec);
			ib_sa_pack_path(&ib, &resp->path_data[i].path_rec);

		} else {
			ib_sa_pack_path(rec, &resp->path_data[i].path_rec);
		}
	}

	if (copy_to_user(response, resp, struct_size(resp, path_data, i)))
		ret = -EFAULT;

	kfree(resp);
	return ret;
}

static ssize_t ucma_query_gid(struct ucma_context *ctx,
			      void __user *response, int out_len)
{
	struct rdma_ucm_query_addr_resp resp;
	struct sockaddr_ib *addr;
	int ret = 0;

	if (out_len < offsetof(struct rdma_ucm_query_addr_resp, ibdev_index))
		return -ENOSPC;

	memset(&resp, 0, sizeof resp);

	ucma_query_device_addr(ctx->cm_id, &resp);

	addr = (struct sockaddr_ib *) &resp.src_addr;
	resp.src_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, (union ib_gid *)&addr->sib_addr,
			       NULL);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.src_addr);
	}

	addr = (struct sockaddr_ib *) &resp.dst_addr;
	resp.dst_size = sizeof(*addr);
	if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
		memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
	} else {
		addr->sib_family = AF_IB;
		addr->sib_pkey = (__force __be16) resp.pkey;
		rdma_read_gids(ctx->cm_id, NULL,
			       (union ib_gid *)&addr->sib_addr);
		addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
						    &ctx->cm_id->route.addr.dst_addr);
	}

	if (copy_to_user(response, &resp, min_t(size_t, out_len, sizeof(resp))))
		ret = -EFAULT;

	return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
			  const char __user *inbuf,
			  int in_len, int out_len)
{
	struct rdma_ucm_query cmd;
	struct ucma_context *ctx;
	void __user *response;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	response = u64_to_user_ptr(cmd.response);
	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	switch (cmd.option) {
	case RDMA_USER_CM_QUERY_ADDR:
		ret = ucma_query_addr(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_PATH:
		ret = ucma_query_path(ctx, response, out_len);
		break;
	case RDMA_USER_CM_QUERY_GID:
		ret = ucma_query_gid(ctx, response, out_len);
		break;
	default:
		ret = -ENOSYS;
		break;
	}
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
				 struct rdma_conn_param *dst,
				 struct rdma_ucm_conn_param *src)
{
	dst->private_data = src->private_data;
	dst->private_data_len = src->private_data_len;
	dst->responder_resources = src->responder_resources;
	dst->initiator_depth = src->initiator_depth;
	dst->flow_control = src->flow_control;
	dst->retry_count = src->retry_count;
	dst->rnr_retry_count = src->rnr_retry_count;
	dst->srq = src->srq;
	dst->qp_num = src->qp_num & 0xFFFFFF;
	dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

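/*
 * ucma_connect() and ucma_accept() tolerate commands shorter than
 * sizeof(cmd): userspace that predates the ECE fields does not send them, so
 * only the part up to offsetofend(cmd, reserved) is mandatory.
 */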
static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
			    int in_len, int out_len)
{
	struct rdma_conn_param conn_param;
	struct rdma_ucm_ece ece = {};
	struct rdma_ucm_connect cmd;
	struct ucma_context *ctx;
	size_t in_size;
	int ret;

	if (in_len < offsetofend(typeof(cmd), reserved))
		return -EINVAL;
	in_size = min_t(size_t, in_len, sizeof(cmd));
	if (copy_from_user(&cmd, inbuf, in_size))
		return -EFAULT;

	if (!cmd.conn_param.valid)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
	if (offsetofend(typeof(cmd), ece) <= in_size) {
		ece.vendor_id = cmd.ece.vendor_id;
		ece.attr_mod = cmd.ece.attr_mod;
	}

	mutex_lock(&ctx->mutex);
	ret = rdma_connect_ece(ctx->cm_id, &conn_param, &ece);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_listen cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (cmd.backlog <= 0 || cmd.backlog > max_backlog)
		cmd.backlog = max_backlog;
	atomic_set(&ctx->backlog, cmd.backlog);

	mutex_lock(&ctx->mutex);
	ret = rdma_listen(ctx->cm_id, cmd.backlog);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

ucma_accept(struct ucma_file * file,const char __user * inbuf,int in_len,int out_len)1111*4882a593Smuzhiyun static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
1112*4882a593Smuzhiyun int in_len, int out_len)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun struct rdma_ucm_accept cmd;
1115*4882a593Smuzhiyun struct rdma_conn_param conn_param;
1116*4882a593Smuzhiyun struct rdma_ucm_ece ece = {};
1117*4882a593Smuzhiyun struct ucma_context *ctx;
1118*4882a593Smuzhiyun size_t in_size;
1119*4882a593Smuzhiyun int ret;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun if (in_len < offsetofend(typeof(cmd), reserved))
1122*4882a593Smuzhiyun return -EINVAL;
1123*4882a593Smuzhiyun in_size = min_t(size_t, in_len, sizeof(cmd));
1124*4882a593Smuzhiyun if (copy_from_user(&cmd, inbuf, in_size))
1125*4882a593Smuzhiyun return -EFAULT;
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun ctx = ucma_get_ctx_dev(file, cmd.id);
1128*4882a593Smuzhiyun if (IS_ERR(ctx))
1129*4882a593Smuzhiyun return PTR_ERR(ctx);
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun if (offsetofend(typeof(cmd), ece) <= in_size) {
1132*4882a593Smuzhiyun ece.vendor_id = cmd.ece.vendor_id;
1133*4882a593Smuzhiyun ece.attr_mod = cmd.ece.attr_mod;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun if (cmd.conn_param.valid) {
1137*4882a593Smuzhiyun ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
1138*4882a593Smuzhiyun mutex_lock(&ctx->mutex);
1139*4882a593Smuzhiyun rdma_lock_handler(ctx->cm_id);
1140*4882a593Smuzhiyun ret = rdma_accept_ece(ctx->cm_id, &conn_param, &ece);
1141*4882a593Smuzhiyun if (!ret) {
1142*4882a593Smuzhiyun /* The uid must be set atomically with the handler */
1143*4882a593Smuzhiyun ctx->uid = cmd.uid;
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun rdma_unlock_handler(ctx->cm_id);
1146*4882a593Smuzhiyun mutex_unlock(&ctx->mutex);
1147*4882a593Smuzhiyun } else {
1148*4882a593Smuzhiyun mutex_lock(&ctx->mutex);
1149*4882a593Smuzhiyun rdma_lock_handler(ctx->cm_id);
1150*4882a593Smuzhiyun ret = rdma_accept_ece(ctx->cm_id, NULL, &ece);
1151*4882a593Smuzhiyun rdma_unlock_handler(ctx->cm_id);
1152*4882a593Smuzhiyun mutex_unlock(&ctx->mutex);
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun ucma_put_ctx(ctx);
1155*4882a593Smuzhiyun return ret;
1156*4882a593Smuzhiyun }
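
/*
 * Editor's note: the offsetofend()/min_t() dance above is a generic
 * pattern for ABI-stable, extensible command structs. A minimal sketch of
 * the same idea with hypothetical names (old_end and new_field are
 * placeholders, not part of this file):
 *
 *	struct cmd { u32 id; u32 old_end; u64 new_field; };
 *	struct cmd c = {};
 *	size_t n = min_t(size_t, in_len, sizeof(c));
 *
 *	if (in_len < offsetofend(struct cmd, old_end))
 *		return -EINVAL;		// too short even for the v1 layout
 *	if (copy_from_user(&c, ubuf, n))
 *		return -EFAULT;
 *	if (offsetofend(struct cmd, new_field) <= n)
 *		use(c.new_field);	// caller is new enough to have it
 */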

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_reject cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!cmd.reason)
		cmd.reason = IB_CM_REJ_CONSUMER_DEFINED;

	switch (cmd.reason) {
	case IB_CM_REJ_CONSUMER_DEFINED:
	case IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED:
		break;
	default:
		return -EINVAL;
	}

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len,
			  cmd.reason);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_disconnect cmd;
	struct ucma_context *ctx;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	ret = rdma_disconnect(ctx->cm_id);
	mutex_unlock(&ctx->mutex);
	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
				 const char __user *inbuf,
				 int in_len, int out_len)
{
	struct rdma_ucm_init_qp_attr cmd;
	struct ib_uverbs_qp_attr resp;
	struct ucma_context *ctx;
	struct ib_qp_attr qp_attr;
	int ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (cmd.qp_state > IB_QPS_ERR)
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	resp.qp_attr_mask = 0;
	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = cmd.qp_state;
	mutex_lock(&ctx->mutex);
	ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto out;

	ib_copy_qp_attr_to_user(ctx->cm_id->device, &resp, &qp_attr);
	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

out:
	ucma_put_ctx(ctx);
	return ret;
}
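
/*
 * Editor's note: a hypothetical sketch of how userspace consumes
 * INIT_QP_ATTR. The kernel fills in the attributes and mask needed to move
 * the QP toward the requested state; the caller then applies them through
 * the verbs interface. ibv_modify_qp() is standard libibverbs, but the
 * glue below is illustrative only:
 *
 *	struct ib_uverbs_qp_attr resp;	// returned via cmd.response
 *	struct ibv_qp_attr attr;
 *
 *	// issue RDMA_USER_CM_CMD_INIT_QP_ATTR with qp_state = IBV_QPS_RTR,
 *	// then unpack resp into attr...
 *	ibv_modify_qp(qp, &attr, resp.qp_attr_mask);
 */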

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret = 0;

	switch (optname) {
	case RDMA_OPTION_ID_TOS:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
		break;
	case RDMA_OPTION_ID_REUSEADDR:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_AFONLY:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
		break;
	case RDMA_OPTION_ID_ACK_TIMEOUT:
		if (optlen != sizeof(u8)) {
			ret = -EINVAL;
			break;
		}
		ret = rdma_set_ack_timeout(ctx->cm_id, *((u8 *)optval));
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}
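
/*
 * Editor's note: a hypothetical userspace sketch of setting
 * RDMA_OPTION_ID_TOS, matching the sizeof(u8) check above. Field names
 * follow the uapi struct rdma_ucm_set_option in <rdma/rdma_user_cm.h>;
 * "id" and "tos" are placeholders:
 *
 *	u8 tos = 0x10;
 *	struct rdma_ucm_set_option cmd = {
 *		.optval  = (uintptr_t)&tos,
 *		.id      = id,
 *		.level   = RDMA_OPTION_ID,
 *		.optname = RDMA_OPTION_ID_TOS,
 *		.optlen  = sizeof(tos),
 *	};
 *
 *	// prefix with a struct rdma_ucm_cmd_hdr carrying
 *	// RDMA_USER_CM_CMD_SET_OPTION and write() as in ucma_write() below.
 */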

static int ucma_set_ib_path(struct ucma_context *ctx,
			    struct ib_path_rec_data *path_data, size_t optlen)
{
	struct sa_path_rec sa_path;
	struct rdma_cm_event event;
	int ret;

	if (optlen % sizeof(*path_data))
		return -EINVAL;

	/* Use the first primary, bidirectional GMP path record. */
	for (; optlen; optlen -= sizeof(*path_data), path_data++) {
		if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
					 IB_PATH_BIDIRECTIONAL))
			break;
	}

	if (!optlen)
		return -EINVAL;

	if (!ctx->cm_id->device)
		return -EINVAL;

	memset(&sa_path, 0, sizeof(sa_path));

	sa_path.rec_type = SA_PATH_REC_TYPE_IB;
	ib_sa_unpack_path(path_data->path_rec, &sa_path);

	if (rdma_cap_opa_ah(ctx->cm_id->device, ctx->cm_id->port_num)) {
		struct sa_path_rec opa;

		sa_convert_path_ib_to_opa(&opa, &sa_path);
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &opa);
		mutex_unlock(&ctx->mutex);
	} else {
		mutex_lock(&ctx->mutex);
		ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
		mutex_unlock(&ctx->mutex);
	}
	if (ret)
		return ret;

	/* Report the route as resolved, as route resolution itself would. */
	memset(&event, 0, sizeof(event));
	event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
	return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
			      void *optval, size_t optlen)
{
	int ret;

	switch (optname) {
	case RDMA_OPTION_IB_PATH:
		ret = ucma_set_ib_path(ctx, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
				 int optname, void *optval, size_t optlen)
{
	int ret;

	switch (level) {
	case RDMA_OPTION_ID:
		mutex_lock(&ctx->mutex);
		ret = ucma_set_option_id(ctx, optname, optval, optlen);
		mutex_unlock(&ctx->mutex);
		break;
	case RDMA_OPTION_IB:
		ret = ucma_set_option_ib(ctx, optname, optval, optlen);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_set_option cmd;
	struct ucma_context *ctx;
	void *optval;
	int ret;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Bound the user-controlled length before memdup_user() allocates it. */
	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
		return -EINVAL;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	optval = memdup_user(u64_to_user_ptr(cmd.optval),
			     cmd.optlen);
	if (IS_ERR(optval)) {
		ret = PTR_ERR(optval);
		goto out;
	}

	ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
				    cmd.optlen);
	kfree(optval);

out:
	ucma_put_ctx(ctx);
	return ret;
}
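
/*
 * Editor's note: memdup_user() above is the idiomatic replacement for an
 * open-coded kmalloc() + copy_from_user() pair. A minimal sketch of what
 * it stands in for (illustrative, not the kernel's exact implementation):
 *
 *	void *p = kmalloc(len, GFP_KERNEL);
 *
 *	if (!p)
 *		return ERR_PTR(-ENOMEM);
 *	if (copy_from_user(p, uptr, len)) {
 *		kfree(p);
 *		return ERR_PTR(-EFAULT);
 *	}
 *	return p;
 */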

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
			   int in_len, int out_len)
{
	struct rdma_ucm_notify cmd;
	struct ucma_context *ctx;
	int ret = -EINVAL;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	ctx = ucma_get_ctx(file, cmd.id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mutex_lock(&ctx->mutex);
	if (ctx->cm_id->device)
		ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
	mutex_unlock(&ctx->mutex);

	ucma_put_ctx(ctx);
	return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
				 struct rdma_ucm_join_mcast *cmd, int out_len)
{
	struct rdma_ucm_create_id_resp resp;
	struct ucma_context *ctx;
	struct ucma_multicast *mc;
	struct sockaddr *addr;
	int ret;
	u8 join_state;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	addr = (struct sockaddr *) &cmd->addr;
	if (cmd->addr_size != rdma_addr_size(addr))
		return -EINVAL;

	if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
		join_state = BIT(FULLMEMBER_JOIN);
	else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
		join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
	else
		return -EINVAL;

	ctx = ucma_get_ctx_dev(file, cmd->id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	mc = kzalloc(sizeof(*mc), GFP_KERNEL);
	if (!mc) {
		ret = -ENOMEM;
		goto err_put_ctx;
	}

	mc->ctx = ctx;
	mc->join_state = join_state;
	mc->uid = cmd->uid;
	memcpy(&mc->addr, addr, cmd->addr_size);

	/* Reserve an id with a NULL entry; the mc is published below. */
	xa_lock(&multicast_table);
	if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
		       GFP_KERNEL)) {
		ret = -ENOMEM;
		goto err_free_mc;
	}

	list_add_tail(&mc->list, &ctx->mc_list);
	xa_unlock(&multicast_table);

	mutex_lock(&ctx->mutex);
	ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
				  join_state, mc);
	mutex_unlock(&ctx->mutex);
	if (ret)
		goto err_xa_erase;

	resp.id = mc->id;
	if (copy_to_user(u64_to_user_ptr(cmd->response),
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_leave_multicast;
	}

	/* Publish the mc only after userspace has received its id. */
	xa_store(&multicast_table, mc->id, mc, 0);

	ucma_put_ctx(ctx);
	return 0;

err_leave_multicast:
	mutex_lock(&ctx->mutex);
	rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_unlock(&ctx->mutex);
	ucma_cleanup_mc_events(mc);
err_xa_erase:
	xa_lock(&multicast_table);
	list_del(&mc->list);
	__xa_erase(&multicast_table, mc->id);
err_free_mc:
	xa_unlock(&multicast_table);
	kfree(mc);
err_put_ctx:
	ucma_put_ctx(ctx);
	return ret;
}
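
/*
 * Editor's note: the join path above reserves an xarray id with a NULL
 * entry first and only xa_store()s the real pointer after the id has been
 * copied out to userspace, so a concurrent LEAVE_MCAST can never look up a
 * half-constructed mc. Generic shape of the pattern, with placeholder
 * names (table, obj):
 *
 *	xa_lock(&table);
 *	err = __xa_alloc(&table, &id, NULL, xa_limit_32b, GFP_KERNEL);
 *	xa_unlock(&table);
 *	// ...long setup, report id to userspace...
 *	xa_store(&table, id, obj, 0);	// publish; lookups now succeed
 */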

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
				      const char __user *inbuf,
				      int in_len, int out_len)
{
	struct rdma_ucm_join_ip_mcast cmd;
	struct rdma_ucm_join_mcast join_cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	join_cmd.response = cmd.response;
	join_cmd.uid = cmd.uid;
	join_cmd.id = cmd.id;
	join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr);
	if (!join_cmd.addr_size)
		return -EINVAL;

	join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
	memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

	return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len)
{
	struct rdma_ucm_join_mcast cmd;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	if (!rdma_addr_size_kss(&cmd.addr))
		return -EINVAL;

	return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
				    const char __user *inbuf,
				    int in_len, int out_len)
{
	struct rdma_ucm_destroy_id cmd;
	struct rdma_ucm_destroy_id_resp resp;
	struct ucma_multicast *mc;
	int ret = 0;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/*
	 * Validate ownership and pin the owning context while still under
	 * xa_lock, so the mc cannot be freed between lookup and use.
	 */
	xa_lock(&multicast_table);
	mc = xa_load(&multicast_table, cmd.id);
	if (!mc)
		mc = ERR_PTR(-ENOENT);
	else if (READ_ONCE(mc->ctx->file) != file)
		mc = ERR_PTR(-EINVAL);
	else if (!refcount_inc_not_zero(&mc->ctx->ref))
		mc = ERR_PTR(-ENXIO);

	if (IS_ERR(mc)) {
		xa_unlock(&multicast_table);
		ret = PTR_ERR(mc);
		goto out;
	}

	list_del(&mc->list);
	__xa_erase(&multicast_table, mc->id);
	xa_unlock(&multicast_table);

	mutex_lock(&mc->ctx->mutex);
	rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
	mutex_unlock(&mc->ctx->mutex);

	ucma_cleanup_mc_events(mc);

	ucma_put_ctx(mc->ctx);
	resp.events_reported = mc->events_reported;
	kfree(mc);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;
out:
	return ret;
}
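
/*
 * Editor's note: the lookup above is a common xarray idiom: validate and
 * take a reference while holding xa_lock, so the object cannot be freed
 * between the xa_load() and the refcount_inc_not_zero(). A minimal sketch
 * with hypothetical names (obj_table and struct obj are placeholders):
 *
 *	xa_lock(&obj_table);
 *	obj = xa_load(&obj_table, id);
 *	if (!obj || !refcount_inc_not_zero(&obj->ref)) {
 *		xa_unlock(&obj_table);
 *		return -ENOENT;
 *	}
 *	__xa_erase(&obj_table, id);	// now exclusively owned
 *	xa_unlock(&obj_table);
 */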

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
			       const char __user *inbuf,
			       int in_len, int out_len)
{
	struct rdma_ucm_migrate_id cmd;
	struct rdma_ucm_migrate_resp resp;
	struct ucma_event *uevent, *tmp;
	struct ucma_context *ctx;
	LIST_HEAD(event_list);
	struct fd f;
	struct ucma_file *cur_file;
	int ret = 0;

	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
		return -EFAULT;

	/* Get current fd to protect against it being closed */
	f = fdget(cmd.fd);
	if (!f.file)
		return -ENOENT;
	if (f.file->f_op != &ucma_fops) {
		ret = -EINVAL;
		goto file_put;
	}
	cur_file = f.file->private_data;

	/* Validate current fd and prevent destruction of id. */
	ctx = ucma_get_ctx(cur_file, cmd.id);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto file_put;
	}

	rdma_lock_handler(ctx->cm_id);
	/*
	 * ctx->file can only be changed under the handler & xa_lock. xa_load()
	 * must be checked again to ensure the ctx hasn't begun destruction
	 * since the ucma_get_ctx().
	 */
	xa_lock(&ctx_table);
	if (_ucma_find_context(cmd.id, cur_file) != ctx) {
		xa_unlock(&ctx_table);
		ret = -ENOENT;
		goto err_unlock;
	}
	ctx->file = new_file;
	xa_unlock(&ctx_table);

	mutex_lock(&cur_file->mut);
	list_del(&ctx->list);
	/*
	 * At this point lock_handler() prevents addition of new uevents for
	 * this ctx.
	 */
	list_for_each_entry_safe(uevent, tmp, &cur_file->event_list, list)
		if (uevent->ctx == ctx)
			list_move_tail(&uevent->list, &event_list);
	resp.events_reported = ctx->events_reported;
	mutex_unlock(&cur_file->mut);

	mutex_lock(&new_file->mut);
	list_add_tail(&ctx->list, &new_file->ctx_list);
	list_splice_tail(&event_list, &new_file->event_list);
	mutex_unlock(&new_file->mut);

	if (copy_to_user(u64_to_user_ptr(cmd.response),
			 &resp, sizeof(resp)))
		ret = -EFAULT;

err_unlock:
	rdma_unlock_handler(ctx->cm_id);
	ucma_put_ctx(ctx);
file_put:
	fdput(f);
	return ret;
}
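
/*
 * Editor's note: the fdget()/fdput() pair above pins the target struct
 * file so it cannot be released mid-operation, and the f_op comparison
 * proves the fd really is a ucma file before its private_data is trusted.
 * The same shape appears wherever one fd names another (my_fops is a
 * placeholder):
 *
 *	struct fd f = fdget(user_fd);
 *
 *	if (!f.file)
 *		return -ENOENT;
 *	if (f.file->f_op != &my_fops) {
 *		fdput(f);
 *		return -EINVAL;
 *	}
 *	// ...use f.file->private_data...
 *	fdput(f);
 */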

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
				   const char __user *inbuf,
				   int in_len, int out_len) = {
	[RDMA_USER_CM_CMD_CREATE_ID] 	 = ucma_create_id,
	[RDMA_USER_CM_CMD_DESTROY_ID]	 = ucma_destroy_id,
	[RDMA_USER_CM_CMD_BIND_IP]	 = ucma_bind_ip,
	[RDMA_USER_CM_CMD_RESOLVE_IP]	 = ucma_resolve_ip,
	[RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
	[RDMA_USER_CM_CMD_QUERY_ROUTE]	 = ucma_query_route,
	[RDMA_USER_CM_CMD_CONNECT]	 = ucma_connect,
	[RDMA_USER_CM_CMD_LISTEN]	 = ucma_listen,
	[RDMA_USER_CM_CMD_ACCEPT]	 = ucma_accept,
	[RDMA_USER_CM_CMD_REJECT]	 = ucma_reject,
	[RDMA_USER_CM_CMD_DISCONNECT]	 = ucma_disconnect,
	[RDMA_USER_CM_CMD_INIT_QP_ATTR]	 = ucma_init_qp_attr,
	[RDMA_USER_CM_CMD_GET_EVENT]	 = ucma_get_event,
	[RDMA_USER_CM_CMD_GET_OPTION]	 = NULL,
	[RDMA_USER_CM_CMD_SET_OPTION]	 = ucma_set_option,
	[RDMA_USER_CM_CMD_NOTIFY]	 = ucma_notify,
	[RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
	[RDMA_USER_CM_CMD_LEAVE_MCAST]	 = ucma_leave_multicast,
	[RDMA_USER_CM_CMD_MIGRATE_ID]	 = ucma_migrate_id,
	[RDMA_USER_CM_CMD_QUERY]	 = ucma_query,
	[RDMA_USER_CM_CMD_BIND]		 = ucma_bind,
	[RDMA_USER_CM_CMD_RESOLVE_ADDR]	 = ucma_resolve_addr,
	[RDMA_USER_CM_CMD_JOIN_MCAST]	 = ucma_join_multicast
};
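
/*
 * Editor's note: commands dispatch through a plain function-pointer array
 * indexed by the RDMA_USER_CM_CMD_* opcode; the NULL entry for GET_OPTION
 * makes ucma_write() fail that opcode with -ENOSYS. Because the index
 * comes from userspace, ucma_write() below pairs the bounds check with
 * array_index_nospec() so a mispredicted branch cannot speculatively read
 * past the table (Spectre v1). Generic shape of that guard, with
 * placeholder names (op, handlers):
 *
 *	if (op >= ARRAY_SIZE(handlers))
 *		return -EINVAL;
 *	op = array_index_nospec(op, ARRAY_SIZE(handlers));
 *	handler = handlers[op];		// safe even under speculation
 */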

static ssize_t ucma_write(struct file *filp, const char __user *buf,
			  size_t len, loff_t *pos)
{
	struct ucma_file *file = filp->private_data;
	struct rdma_ucm_cmd_hdr hdr;
	ssize_t ret;

	if (!ib_safe_file_access(filp)) {
		pr_err_once("ucma_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
			    task_tgid_vnr(current), current->comm);
		return -EACCES;
	}

	if (len < sizeof(hdr))
		return -EINVAL;

	if (copy_from_user(&hdr, buf, sizeof(hdr)))
		return -EFAULT;

	if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
		return -EINVAL;
	hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table));

	if (hdr.in + sizeof(hdr) > len)
		return -EINVAL;

	if (!ucma_cmd_table[hdr.cmd])
		return -ENOSYS;

	ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
	/* Handlers return 0 on success; report the full write length. */
	if (!ret)
		ret = len;

	return ret;
}

static __poll_t ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct ucma_file *file = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file->poll_wait, wait);

	if (!list_empty(&file->event_list))
		mask = EPOLLIN | EPOLLRDNORM;

	return mask;
}
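
/*
 * Editor's note: poll reports readability only; writes never block. A
 * hypothetical userspace event loop on the ucma fd ("fd" is a
 * placeholder):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN) {
 *			// issue RDMA_USER_CM_CMD_GET_EVENT via write()
 *		}
 *	}
 */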

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
	struct ucma_file *file;

	file = kmalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return -ENOMEM;

	INIT_LIST_HEAD(&file->event_list);
	INIT_LIST_HEAD(&file->ctx_list);
	init_waitqueue_head(&file->poll_wait);
	mutex_init(&file->mut);

	filp->private_data = file;
	file->filp = filp;

	return stream_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
	struct ucma_file *file = filp->private_data;

	/*
	 * All paths that touch ctx_list or event_list starting from write()
	 * are prevented by this being a FD release function. The
	 * list_add_tail() in ucma_connect_event_handler() can run
	 * concurrently, however it only adds to the list *after* a listening
	 * ID. By only reading the first of the list, and relying on
	 * ucma_destroy_private_ctx() to block ucma_connect_event_handler(),
	 * no additional locking is needed.
	 */
	while (!list_empty(&file->ctx_list)) {
		struct ucma_context *ctx = list_first_entry(
			&file->ctx_list, struct ucma_context, list);

		WARN_ON(xa_cmpxchg(&ctx_table, ctx->id, ctx, XA_ZERO_ENTRY,
				   GFP_KERNEL) != ctx);
		ucma_destroy_private_ctx(ctx);
	}
	kfree(file);
	return 0;
}

static const struct file_operations ucma_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ucma_open,
	.release = ucma_close,
	.write	 = ucma_write,
	.poll	 = ucma_poll,
	.llseek	 = no_llseek,
};

static struct miscdevice ucma_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "rdma_cm",
	.nodename	= "infiniband/rdma_cm",
	.mode		= 0666,
	.fops		= &ucma_fops,
};

static int ucma_get_global_nl_info(struct ib_client_nl_info *res)
{
	res->abi = RDMA_USER_CM_ABI_VERSION;
	res->cdev = ucma_misc.this_device;
	return 0;
}

static struct ib_client rdma_cma_client = {
	.name = "rdma_cm",
	.get_global_nl_info = ucma_get_global_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("rdma_cm");

static ssize_t show_abi_version(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
	int ret;

	ret = misc_register(&ucma_misc);
	if (ret)
		return ret;

	ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
	if (ret) {
		pr_err("rdma_ucm: couldn't create abi_version attr\n");
		goto err1;
	}

	ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
	if (!ucma_ctl_table_hdr) {
		pr_err("rdma_ucm: couldn't register sysctl paths\n");
		ret = -ENOMEM;
		goto err2;
	}

	ret = ib_register_client(&rdma_cma_client);
	if (ret)
		goto err3;

	return 0;
err3:
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
err2:
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
err1:
	misc_deregister(&ucma_misc);
	return ret;
}

static void __exit ucma_cleanup(void)
{
	ib_unregister_client(&rdma_cma_client);
	unregister_net_sysctl_table(ucma_ctl_table_hdr);
	device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
	misc_deregister(&ucma_misc);
}

module_init(ucma_init);
module_exit(ucma_cleanup);