// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */
#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

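/* Validate requested CQ attributes against device limits. Used for both
 * create and resize; on resize @cq is non-NULL and the new depth must be
 * at least the number of completions currently queued. @comp_vector is
 * accepted but not range-checked here.
 */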
int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
		    int cqe, int comp_vector)
{
	int count;

	if (cqe <= 0) {
		pr_warn("cqe(%d) <= 0\n", cqe);
		goto err1;
	}

	if (cqe > rxe->attr.max_cqe) {
		pr_warn("cqe(%d) > max_cqe(%d)\n",
			cqe, rxe->attr.max_cqe);
		goto err1;
	}

	if (cq) {
		count = queue_count(cq->queue);
		if (cqe < count) {
			pr_warn("cqe(%d) < current # elements in queue (%d)\n",
				cqe, count);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

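/* Tasklet body for completion notification. Takes cq_lock only to read
 * is_dying consistently: once the CQ has been disabled, the consumer's
 * comp_handler must no longer be invoked.
 */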
static void rxe_send_complete(struct tasklet_struct *t)
{
	struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq->is_dying) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

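/* Second-stage CQ initialization: allocate the completion queue buffer,
 * expose it to userspace through do_mmap_info() when a user response
 * struct (@uresp) is supplied, and set up the notification tasklet and
 * lock.
 */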
int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
		     int comp_vector, struct ib_udata *udata,
		     struct rxe_create_cq_resp __user *uresp)
{
	int err;

	cq->queue = rxe_queue_init(rxe, &cqe,
				   sizeof(struct rxe_cqe));
	if (!cq->queue) {
		pr_warn("unable to create cq\n");
		return -ENOMEM;
	}

	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
	if (err) {
		vfree(cq->queue->buf);
		kfree(cq->queue);
		return err;
	}

	if (uresp)
		cq->is_user = 1;

	cq->is_dying = false;

	tasklet_setup(&cq->comp_task, rxe_send_complete);

	spin_lock_init(&cq->cq_lock);
	cq->ibcq.cqe = cqe;
	return 0;
}

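/* Resize the completion queue buffer. cq_lock is not taken here but is
 * passed down so rxe_queue_resize() can serialize the buffer swap
 * against concurrent posts; ibcq.cqe is updated only on success.
 */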
int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
			struct rxe_resize_cq_resp __user *uresp,
			struct ib_udata *udata)
{
	int err;

	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
			       sizeof(struct rxe_cqe), udata,
			       uresp ? &uresp->mi : NULL, NULL, &cq->cq_lock);
	if (!err)
		cq->ibcq.cqe = cqe;

	return err;
}

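/* Post one completion to the CQ. If the queue is full, the completion is
 * dropped, IB_EVENT_CQ_ERR is raised to any registered event handler and
 * -EBUSY is returned; otherwise the notification tasklet is scheduled
 * when the consumer's armed notify condition is met.
 */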
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
	struct ib_event ev;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);

	if (unlikely(queue_full(cq->queue))) {
		spin_unlock_irqrestore(&cq->cq_lock, flags);
		if (cq->ibcq.event_handler) {
			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}

		return -EBUSY;
	}

	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));

	/* make sure all changes to the CQ are written before we update the
	 * producer pointer
	 */
	smp_wmb();

	advance_producer(cq->queue);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	if ((cq->notify == IB_CQ_NEXT_COMP) ||
	    (cq->notify == IB_CQ_SOLICITED && solicited)) {
		cq->notify = 0;
		tasklet_schedule(&cq->comp_task);
	}

	return 0;
}

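/* Mark the CQ as dying so that an already-scheduled completion tasklet
 * exits without calling the handler (see rxe_send_complete()).
 */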
void rxe_cq_disable(struct rxe_cq *cq)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	cq->is_dying = true;
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

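/* Pool cleanup callback: free the completion queue buffer, if one was
 * allocated, when the CQ object is released.
 */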
void rxe_cq_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);

	if (cq->queue)
		rxe_queue_cleanup(cq->queue);
}