xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rxe/rxe_srq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
4*4882a593Smuzhiyun  * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <linux/vmalloc.h>
8*4882a593Smuzhiyun #include "rxe.h"
9*4882a593Smuzhiyun #include "rxe_loc.h"
10*4882a593Smuzhiyun #include "rxe_queue.h"
11*4882a593Smuzhiyun 
rxe_srq_chk_attr(struct rxe_dev * rxe,struct rxe_srq * srq,struct ib_srq_attr * attr,enum ib_srq_attr_mask mask)12*4882a593Smuzhiyun int rxe_srq_chk_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
13*4882a593Smuzhiyun 		     struct ib_srq_attr *attr, enum ib_srq_attr_mask mask)
14*4882a593Smuzhiyun {
15*4882a593Smuzhiyun 	if (srq && srq->error) {
16*4882a593Smuzhiyun 		pr_warn("srq in error state\n");
17*4882a593Smuzhiyun 		goto err1;
18*4882a593Smuzhiyun 	}
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun 	if (mask & IB_SRQ_MAX_WR) {
21*4882a593Smuzhiyun 		if (attr->max_wr > rxe->attr.max_srq_wr) {
22*4882a593Smuzhiyun 			pr_warn("max_wr(%d) > max_srq_wr(%d)\n",
23*4882a593Smuzhiyun 				attr->max_wr, rxe->attr.max_srq_wr);
24*4882a593Smuzhiyun 			goto err1;
25*4882a593Smuzhiyun 		}
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun 		if (attr->max_wr <= 0) {
28*4882a593Smuzhiyun 			pr_warn("max_wr(%d) <= 0\n", attr->max_wr);
29*4882a593Smuzhiyun 			goto err1;
30*4882a593Smuzhiyun 		}
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 		if (srq && srq->limit && (attr->max_wr < srq->limit)) {
33*4882a593Smuzhiyun 			pr_warn("max_wr (%d) < srq->limit (%d)\n",
34*4882a593Smuzhiyun 				attr->max_wr, srq->limit);
35*4882a593Smuzhiyun 			goto err1;
36*4882a593Smuzhiyun 		}
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun 		if (attr->max_wr < RXE_MIN_SRQ_WR)
39*4882a593Smuzhiyun 			attr->max_wr = RXE_MIN_SRQ_WR;
40*4882a593Smuzhiyun 	}
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	if (mask & IB_SRQ_LIMIT) {
43*4882a593Smuzhiyun 		if (attr->srq_limit > rxe->attr.max_srq_wr) {
44*4882a593Smuzhiyun 			pr_warn("srq_limit(%d) > max_srq_wr(%d)\n",
45*4882a593Smuzhiyun 				attr->srq_limit, rxe->attr.max_srq_wr);
46*4882a593Smuzhiyun 			goto err1;
47*4882a593Smuzhiyun 		}
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 		if (srq && (attr->srq_limit > srq->rq.queue->buf->index_mask)) {
50*4882a593Smuzhiyun 			pr_warn("srq_limit (%d) > cur limit(%d)\n",
51*4882a593Smuzhiyun 				attr->srq_limit,
52*4882a593Smuzhiyun 				 srq->rq.queue->buf->index_mask);
53*4882a593Smuzhiyun 			goto err1;
54*4882a593Smuzhiyun 		}
55*4882a593Smuzhiyun 	}
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	if (mask == IB_SRQ_INIT_MASK) {
58*4882a593Smuzhiyun 		if (attr->max_sge > rxe->attr.max_srq_sge) {
59*4882a593Smuzhiyun 			pr_warn("max_sge(%d) > max_srq_sge(%d)\n",
60*4882a593Smuzhiyun 				attr->max_sge, rxe->attr.max_srq_sge);
61*4882a593Smuzhiyun 			goto err1;
62*4882a593Smuzhiyun 		}
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun 		if (attr->max_sge < RXE_MIN_SRQ_SGE)
65*4882a593Smuzhiyun 			attr->max_sge = RXE_MIN_SRQ_SGE;
66*4882a593Smuzhiyun 	}
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun 	return 0;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun err1:
71*4882a593Smuzhiyun 	return -EINVAL;
72*4882a593Smuzhiyun }
73*4882a593Smuzhiyun 
/*
 * Initialize a newly created SRQ: copy attributes, allocate the receive
 * queue, and (for user-space consumers) publish the mmap info and SRQ
 * number through @uresp.
 *
 * @rxe:   parent device
 * @srq:   SRQ object to initialize (attributes assumed already validated
 *         by rxe_srq_chk_attr())
 * @init:  creation attributes from the verbs layer
 * @udata: user-space context, NULL for kernel consumers
 * @uresp: user response buffer, NULL for kernel consumers
 *
 * Returns 0 on success or a negative errno; on failure no queue memory
 * remains referenced by @srq.
 */
int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
		      struct ib_srq_init_attr *init, struct ib_udata *udata,
		      struct rxe_create_srq_resp __user *uresp)
{
	int err;
	int srq_wqe_size;
	struct rxe_queue *q;

	srq->ibsrq.event_handler	= init->event_handler;
	srq->ibsrq.srq_context		= init->srq_context;
	srq->limit		= init->attr.srq_limit;
	srq->srq_num		= srq->pelem.index;
	srq->rq.max_wr		= init->attr.max_wr;
	srq->rq.max_sge		= init->attr.max_sge;

	/* Each WQE must hold max_sge scatter entries. */
	srq_wqe_size		= rcv_wqe_size(srq->rq.max_sge);

	spin_lock_init(&srq->rq.producer_lock);
	spin_lock_init(&srq->rq.consumer_lock);

	/* May round max_wr up to the next power of two. */
	q = rxe_queue_init(rxe, &srq->rq.max_wr,
			   srq_wqe_size);
	if (!q) {
		pr_warn("unable to allocate queue for srq\n");
		return -ENOMEM;
	}

	srq->rq.queue = q;

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf,
			   q->buf_size, &q->ip);
	if (err) {
		vfree(q->buf);
		kfree(q);
		/* Clear the stale reference so the SRQ teardown path does
		 * not free the queue a second time (use-after-free).
		 */
		srq->rq.queue = NULL;
		return err;
	}

	if (uresp) {
		if (copy_to_user(&uresp->srq_num, &srq->srq_num,
				 sizeof(uresp->srq_num))) {
			rxe_queue_cleanup(q);
			/* Same as above: don't leave a dangling pointer for
			 * the destroy path to double-free.
			 */
			srq->rq.queue = NULL;
			return -EFAULT;
		}
	}

	return 0;
}
121*4882a593Smuzhiyun 
rxe_srq_from_attr(struct rxe_dev * rxe,struct rxe_srq * srq,struct ib_srq_attr * attr,enum ib_srq_attr_mask mask,struct rxe_modify_srq_cmd * ucmd,struct ib_udata * udata)122*4882a593Smuzhiyun int rxe_srq_from_attr(struct rxe_dev *rxe, struct rxe_srq *srq,
123*4882a593Smuzhiyun 		      struct ib_srq_attr *attr, enum ib_srq_attr_mask mask,
124*4882a593Smuzhiyun 		      struct rxe_modify_srq_cmd *ucmd, struct ib_udata *udata)
125*4882a593Smuzhiyun {
126*4882a593Smuzhiyun 	int err;
127*4882a593Smuzhiyun 	struct rxe_queue *q = srq->rq.queue;
128*4882a593Smuzhiyun 	struct mminfo __user *mi = NULL;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	if (mask & IB_SRQ_MAX_WR) {
131*4882a593Smuzhiyun 		/*
132*4882a593Smuzhiyun 		 * This is completely screwed up, the response is supposed to
133*4882a593Smuzhiyun 		 * be in the outbuf not like this.
134*4882a593Smuzhiyun 		 */
135*4882a593Smuzhiyun 		mi = u64_to_user_ptr(ucmd->mmap_info_addr);
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 		err = rxe_queue_resize(q, &attr->max_wr,
138*4882a593Smuzhiyun 				       rcv_wqe_size(srq->rq.max_sge), udata, mi,
139*4882a593Smuzhiyun 				       &srq->rq.producer_lock,
140*4882a593Smuzhiyun 				       &srq->rq.consumer_lock);
141*4882a593Smuzhiyun 		if (err)
142*4882a593Smuzhiyun 			goto err2;
143*4882a593Smuzhiyun 	}
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun 	if (mask & IB_SRQ_LIMIT)
146*4882a593Smuzhiyun 		srq->limit = attr->srq_limit;
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	return 0;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun err2:
151*4882a593Smuzhiyun 	rxe_queue_cleanup(q);
152*4882a593Smuzhiyun 	srq->rq.queue = NULL;
153*4882a593Smuzhiyun 	return err;
154*4882a593Smuzhiyun }
155