xref: /OK3568_Linux_fs/kernel/drivers/infiniband/sw/rdmavt/cq.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright(c) 2016 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "cq.h"
#include "vt.h"
#include "trace.h"
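
/*
 * Workqueue used to run CQ completion handlers; each CQ queues its
 * comptask work item here, pinned to its completion vector's CPU.
 */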
static struct workqueue_struct *comp_vector_wq;

/**
 * rvt_cq_enter - add a new entry to the completion queue
 * @cq: completion queue
 * @entry: work completion entry to add
 * @solicited: true if @entry is solicited
 *
 * This may be called with qp->s_lock held.
 *
 * Return: true on success, false if the CQ is full.
 */
bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
{
	struct ib_uverbs_wc *uqueue = NULL;
	struct ib_wc *kqueue = NULL;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	unsigned long flags;
	u32 head;
	u32 next;
	u32 tail;

	spin_lock_irqsave(&cq->lock, flags);

	if (cq->ip) {
		u_wc = cq->queue;
		uqueue = &u_wc->uqueue[0];
		head = RDMA_READ_UAPI_ATOMIC(u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(u_wc->tail);
	} else {
		k_wc = cq->kqueue;
		kqueue = &k_wc->kqueue[0];
		head = k_wc->head;
		tail = k_wc->tail;
	}

	/*
	 * Note that the head pointer might be writable by
	 * user processes. Take care to verify it is a sane value.
	 */
	if (head >= (unsigned)cq->ibcq.cqe) {
		head = cq->ibcq.cqe;
		next = 0;
	} else {
		next = head + 1;
	}
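
	/*
	 * One slot in the ring is always left unused: the queue is full
	 * when advancing head would make it equal to tail.
	 */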
	if (unlikely(next == tail || cq->cq_full)) {
		struct rvt_dev_info *rdi = cq->rdi;

		if (!cq->cq_full)
			rvt_pr_err_ratelimited(rdi, "CQ is full!\n");
		cq->cq_full = true;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (cq->ibcq.event_handler) {
			struct ib_event ev;

			ev.device = cq->ibcq.device;
			ev.element.cq = &cq->ibcq;
			ev.event = IB_EVENT_CQ_ERR;
			cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
		}
		return false;
	}
	trace_rvt_cq_enter(cq, entry, head);
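	/*
	 * User CQs use the uAPI struct ib_uverbs_wc layout, so the entry
	 * is copied field by field rather than by struct assignment.
	 */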
	if (uqueue) {
		uqueue[head].wr_id = entry->wr_id;
		uqueue[head].status = entry->status;
		uqueue[head].opcode = entry->opcode;
		uqueue[head].vendor_err = entry->vendor_err;
		uqueue[head].byte_len = entry->byte_len;
		uqueue[head].ex.imm_data = entry->ex.imm_data;
		uqueue[head].qp_num = entry->qp->qp_num;
		uqueue[head].src_qp = entry->src_qp;
		uqueue[head].wc_flags = entry->wc_flags;
		uqueue[head].pkey_index = entry->pkey_index;
		uqueue[head].slid = ib_lid_cpu16(entry->slid);
		uqueue[head].sl = entry->sl;
		uqueue[head].dlid_path_bits = entry->dlid_path_bits;
		uqueue[head].port_num = entry->port_num;
		/* Make sure entry is written before the head index. */
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, next);
	} else {
		kqueue[head] = *entry;
		k_wc->head = next;
	}

	if (cq->notify == IB_CQ_NEXT_COMP ||
	    (cq->notify == IB_CQ_SOLICITED &&
	     (solicited || entry->status != IB_WC_SUCCESS))) {
		/*
		 * This will cause send_complete() to be called in
		 * another thread.
		 */
		cq->notify = RVT_CQ_NONE;
		cq->triggered++;
		queue_work_on(cq->comp_vector_cpu, comp_vector_wq,
			      &cq->comptask);
	}

	spin_unlock_irqrestore(&cq->lock, flags);
	return true;
}
EXPORT_SYMBOL(rvt_cq_enter);
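
/*
 * Example (a sketch only, not from this driver): a completion path in a
 * driver built on rdmavt could report a successful send roughly like
 * this; "qp" and "wqe" below are illustrative names, not real symbols.
 *
 *	struct ib_wc wc = { 0 };
 *
 *	wc.wr_id = wqe->wr.wr_id;
 *	wc.status = IB_WC_SUCCESS;
 *	wc.opcode = IB_WC_SEND;
 *	wc.qp = &qp->ibqp;
 *	wc.byte_len = wqe->length;
 *	rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc, false);
 */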

static void send_complete(struct work_struct *work)
{
	struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask);

	/*
	 * The completion handler will most likely rearm the notification
	 * and poll for all pending entries.  If a new completion entry
	 * is added while we are in this routine, queue_work()
	 * won't call us again until we return so we check triggered to
	 * see if we need to call the handler again.
	 */
	for (;;) {
		u8 triggered = cq->triggered;

		/*
		 * IPoIB connected mode assumes the callback is from a
		 * soft IRQ. We simulate this by blocking "bottom halves".
		 * See the implementation for ipoib_cm_handle_tx_wc(),
		 * netif_tx_lock_bh() and netif_tx_lock().
		 */
		local_bh_disable();
		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
		local_bh_enable();

		if (cq->triggered == triggered)
			return;
	}
}

/**
 * rvt_create_cq - create a completion queue
 * @ibcq: Allocated CQ
 * @attr: creation attributes
 * @udata: user data for libibverbs.so
 *
 * Called by ib_create_cq() in the generic verbs code.
 *
 * Return: 0 on success
 */
int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		  struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	struct rvt_dev_info *rdi = ib_to_rvt(ibdev);
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	u32 sz;
	unsigned int entries = attr->cqe;
	int comp_vector = attr->comp_vector;
	int err;

	if (attr->flags)
		return -EINVAL;

	if (entries < 1 || entries > rdi->dparms.props.max_cqe)
		return -EINVAL;

	if (comp_vector < 0)
		comp_vector = 0;

	comp_vector = comp_vector % rdi->ibdev.num_comp_vectors;

	/*
	 * Allocate the completion queue entries and head/tail pointers.
	 * This is allocated separately so that it can be resized and
	 * also mapped into user space.
	 * We need to use vmalloc() in order to support mmap and large
	 * numbers of entries.
	 */
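	/*
	 * A udata response buffer with room for a __u64 marks a user CQ
	 * that expects the mmap offset back; otherwise this is a
	 * kernel-only queue.
	 */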
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (entries + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (entries + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}

	/*
	 * Return the address of the WC as the offset to mmap.
	 * See rvt_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc);
		if (IS_ERR(cq->ip)) {
			err = PTR_ERR(cq->ip);
			goto bail_wc;
		}

		err = ib_copy_to_udata(udata, &cq->ip->offset,
				       sizeof(cq->ip->offset));
		if (err)
			goto bail_ip;
	}

	spin_lock_irq(&rdi->n_cqs_lock);
	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		err = -ENOMEM;
		goto bail_ip;
	}

	rdi->n_cqs_allocated++;
	spin_unlock_irq(&rdi->n_cqs_lock);

	if (cq->ip) {
		spin_lock_irq(&rdi->pending_lock);
		list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	/*
	 * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
	 * The number of entries should be >= the number requested or return
	 * an error.
	 */
	cq->rdi = rdi;
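	/*
	 * Pick the CPU that will run this CQ's completion work: let the
	 * driver map the completion vector to a CPU if it can, otherwise
	 * use the first CPU of the device's NUMA node.
	 */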
	if (rdi->driver_f.comp_vect_cpu_lookup)
		cq->comp_vector_cpu =
			rdi->driver_f.comp_vect_cpu_lookup(rdi, comp_vector);
	else
		cq->comp_vector_cpu =
			cpumask_first(cpumask_of_node(rdi->dparms.node));

	cq->ibcq.cqe = entries;
	cq->notify = RVT_CQ_NONE;
	spin_lock_init(&cq->lock);
	INIT_WORK(&cq->comptask, send_complete);
	if (u_wc)
		cq->queue = u_wc;
	else
		cq->kqueue = k_wc;

	trace_rvt_create_cq(cq, attr);
	return 0;

bail_ip:
	kfree(cq->ip);
bail_wc:
	vfree(u_wc);
	vfree(k_wc);
	return err;
}

/**
 * rvt_destroy_cq - destroy a completion queue
 * @ibcq: the completion queue to destroy.
 * @udata: user data or NULL for kernel object
 *
 * Called by ib_destroy_cq() in the generic verbs code.
 *
 * Return: always 0.
 */
int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_dev_info *rdi = cq->rdi;
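
	/* Make sure any queued completion handler work has finished. */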
	flush_work(&cq->comptask);
	spin_lock_irq(&rdi->n_cqs_lock);
	rdi->n_cqs_allocated--;
	spin_unlock_irq(&rdi->n_cqs_lock);
	if (cq->ip)
		kref_put(&cq->ip->ref, rvt_release_mmap_info);
	else
		vfree(cq->kqueue);
	return 0;
}

/**
 * rvt_req_notify_cq - change the notification type for a completion queue
 * @ibcq: the completion queue
 * @notify_flags: the type of notification to request
 *
 * This may be called from interrupt context.  Also called by
 * ib_req_notify_cq() in the generic verbs code.
 *
 * Return: 0 for success.
 */
int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, flags);
	/*
	 * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
	 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
	 */
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
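
	/*
	 * With IB_CQ_REPORT_MISSED_EVENTS, return 1 if completions are
	 * already queued so the caller knows to poll again rather than
	 * wait for the next event.
	 */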
	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		if (cq->queue) {
			if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) !=
				RDMA_READ_UAPI_ATOMIC(cq->queue->tail))
				ret = 1;
		} else {
			if (cq->kqueue->head != cq->kqueue->tail)
				ret = 1;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return ret;
}

/**
 * rvt_resize_cq - change the size of the CQ
 * @ibcq: the completion queue
 * @cqe: the new number of entries the CQ should be able to hold
 * @udata: user data for libibverbs.so
 *
 * Return: 0 for success.
 */
int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	u32 head, tail, n;
	int ret;
	u32 sz;
	struct rvt_dev_info *rdi = cq->rdi;
	struct rvt_cq_wc *u_wc = NULL;
	struct rvt_cq_wc *old_u_wc = NULL;
	struct rvt_k_cq_wc *k_wc = NULL;
	struct rvt_k_cq_wc *old_k_wc = NULL;

	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
		return -EINVAL;

	/*
	 * Need to use vmalloc() if we want to support large #s of entries.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
		sz += sizeof(*u_wc);
		u_wc = vmalloc_user(sz);
		if (!u_wc)
			return -ENOMEM;
	} else {
		sz = sizeof(struct ib_wc) * (cqe + 1);
		sz += sizeof(*k_wc);
		k_wc = vzalloc_node(sz, rdi->dparms.node);
		if (!k_wc)
			return -ENOMEM;
	}
	/* Check that we can write the offset to mmap. */
	if (udata && udata->outlen >= sizeof(__u64)) {
		__u64 offset = 0;

		ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
		if (ret)
			goto bail_free;
	}

	spin_lock_irq(&cq->lock);
	/*
	 * Make sure head and tail are sane since they
	 * might be user writable.
	 */
	if (u_wc) {
		old_u_wc = cq->queue;
		head = RDMA_READ_UAPI_ATOMIC(old_u_wc->head);
		tail = RDMA_READ_UAPI_ATOMIC(old_u_wc->tail);
	} else {
		old_k_wc = cq->kqueue;
		head = old_k_wc->head;
		tail = old_k_wc->tail;
	}

	if (head > (u32)cq->ibcq.cqe)
		head = (u32)cq->ibcq.cqe;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	if (head < tail)
		n = cq->ibcq.cqe + 1 + head - tail;
	else
		n = head - tail;
	if (unlikely((u32)cqe < n)) {
		ret = -EINVAL;
		goto bail_unlock;
	}
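	/*
	 * Copy the entries that have not been polled yet from the old
	 * ring into the start of the new one.
	 */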
	for (n = 0; tail != head; n++) {
		if (u_wc)
			u_wc->uqueue[n] = old_u_wc->uqueue[tail];
		else
			k_wc->kqueue[n] = old_k_wc->kqueue[tail];
		if (tail == (u32)cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	cq->ibcq.cqe = cqe;
	if (u_wc) {
		RDMA_WRITE_UAPI_ATOMIC(u_wc->head, n);
		RDMA_WRITE_UAPI_ATOMIC(u_wc->tail, 0);
		cq->queue = u_wc;
	} else {
		k_wc->head = n;
		k_wc->tail = 0;
		cq->kqueue = k_wc;
	}
	spin_unlock_irq(&cq->lock);

	if (u_wc)
		vfree(old_u_wc);
	else
		vfree(old_k_wc);

	if (cq->ip) {
		struct rvt_mmap_info *ip = cq->ip;

		rvt_update_mmap_info(rdi, ip, sz, u_wc);

		/*
		 * Return the offset to mmap.
		 * See rvt_mmap() for details.
		 */
		if (udata && udata->outlen >= sizeof(__u64)) {
			ret = ib_copy_to_udata(udata, &ip->offset,
					       sizeof(ip->offset));
			if (ret)
				return ret;
		}

		spin_lock_irq(&rdi->pending_lock);
		if (list_empty(&ip->pending_mmaps))
			list_add(&ip->pending_mmaps, &rdi->pending_mmaps);
		spin_unlock_irq(&rdi->pending_lock);
	}

	return 0;

bail_unlock:
	spin_unlock_irq(&cq->lock);
bail_free:
	vfree(u_wc);
	vfree(k_wc);

	return ret;
}

/**
 * rvt_poll_cq - poll for work completion entries
 * @ibcq: the completion queue to poll
 * @num_entries: the maximum number of entries to return
 * @entry: pointer to array where work completions are placed
 *
 * This may be called from interrupt context.  Also called by ib_poll_cq()
 * in the generic verbs code.
 *
 * Return: the number of completion entries polled.
 */
int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(ibcq);
	struct rvt_k_cq_wc *wc;
	unsigned long flags;
	int npolled;
	u32 tail;

	/* The kernel can only poll a kernel completion queue */
	if (cq->ip)
		return -EINVAL;

	spin_lock_irqsave(&cq->lock, flags);

	wc = cq->kqueue;
	tail = wc->tail;
	if (tail > (u32)cq->ibcq.cqe)
		tail = (u32)cq->ibcq.cqe;
	for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
		if (tail == wc->head)
			break;
		/* The kernel doesn't need a RMB since it has the lock. */
		trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled);
		*entry = wc->kqueue[tail];
		if (tail >= cq->ibcq.cqe)
			tail = 0;
		else
			tail++;
	}
	wc->tail = tail;

	spin_unlock_irqrestore(&cq->lock, flags);

	return npolled;
}

/**
 * rvt_driver_cq_init - Init cq resources on behalf of driver
 *
 * Return: 0 on success
 */
int rvt_driver_cq_init(void)
{
	comp_vector_wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_CPU_INTENSIVE,
					 0, "rdmavt_cq");
	if (!comp_vector_wq)
		return -ENOMEM;

	return 0;
}

/**
 * rvt_cq_exit - tear down cq resources
 */
void rvt_cq_exit(void)
{
	destroy_workqueue(comp_vector_wq);
	comp_vector_wq = NULL;
}