// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 HGST, a Western Digital Company.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>

#include "core_priv.h"

#include <trace/events/rdma_core.h>

/* Max size for shared CQ, may require tuning */
#define IB_MAX_SHARED_CQ_SZ 4096U

/* # of WCs to poll for with a single call to ib_poll_cq */
#define IB_POLL_BATCH 16
#define IB_POLL_BATCH_DIRECT 8

/* # of WCs to iterate over before yielding */
#define IB_POLL_BUDGET_IRQ 256
#define IB_POLL_BUDGET_WORKQUEUE 65536

#define IB_POLL_FLAGS \
        (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)

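/*
 * CQ moderation profiles for RDMA DIM, as {usec, pkts, comps,
 * cq_period_mode} tuples of struct dim_cq_moder: rdma_dim() picks an
 * index into this table from the observed completion rate, and
 * ib_cq_rdma_dim_work() below applies the selected usec/comps pair.
 */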
static const struct dim_cq_moder
rdma_dim_prof[RDMA_DIM_PARAMS_NUM_PROFILES] = {
        {1,   0, 1,  0},
        {1,   0, 4,  0},
        {2,   0, 4,  0},
        {2,   0, 8,  0},
        {4,   0, 8,  0},
        {16,  0, 8,  0},
        {16,  0, 16, 0},
        {32,  0, 16, 0},
        {32,  0, 32, 0},
};

static void ib_cq_rdma_dim_work(struct work_struct *w)
{
        struct dim *dim = container_of(w, struct dim, work);
        struct ib_cq *cq = dim->priv;

        u16 usec = rdma_dim_prof[dim->profile_ix].usec;
        u16 comps = rdma_dim_prof[dim->profile_ix].comps;

        dim->state = DIM_START_MEASURE;

        trace_cq_modify(cq, comps, usec);
        cq->device->ops.modify_cq(cq, comps, usec);
}

static void rdma_dim_init(struct ib_cq *cq)
{
        struct dim *dim;

        if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim ||
            cq->poll_ctx == IB_POLL_DIRECT)
                return;

        dim = kzalloc(sizeof(struct dim), GFP_KERNEL);
        if (!dim)
                return;

        dim->state = DIM_START_MEASURE;
        dim->tune_state = DIM_GOING_RIGHT;
        dim->profile_ix = RDMA_DIM_START_PROFILE;
        dim->priv = cq;
        cq->dim = dim;

        INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}

static void rdma_dim_destroy(struct ib_cq *cq)
{
        if (!cq->dim)
                return;

        cancel_work_sync(&cq->dim->work);
        kfree(cq->dim);
}

static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
        int rc;

        rc = ib_poll_cq(cq, num_entries, wc);
        trace_cq_poll(cq, num_entries, rc);
        return rc;
}

static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
                           int batch)
{
        int i, n, completed = 0;

        trace_cq_process(cq);

        /*
         * budget may be (-1) if the caller does not want to bound this
         * call; (budget - completed) then wraps to a huge unsigned value,
         * so the min_t(u32, ...) below always picks the batch size.
         */
        while ((n = __poll_cq(cq, min_t(u32, batch,
                                        budget - completed), wcs)) > 0) {
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = &wcs[i];

                        if (wc->wr_cqe)
                                wc->wr_cqe->done(cq, wc);
                        else
                                WARN_ON_ONCE(wc->status == IB_WC_SUCCESS);
                }

                completed += n;

                if (n != batch || (budget != -1 && completed >= budget))
                        break;
        }

        return completed;
}

/**
 * ib_process_cq_direct - process a CQ in caller context
 * @cq: CQ to process
 * @budget: number of CQEs to poll for
 *
 * This function is used to process all outstanding CQ entries.
 * It does not offload CQ processing to a different context and does
 * not ask for completion interrupts from the HCA.
 * Using direct processing on a CQ with a poll context other than
 * IB_POLL_DIRECT may trigger concurrent processing.
 *
 * Note: do not pass -1 as %budget unless it is guaranteed that the number
 * of completions that will be processed is small.
 */
int ib_process_cq_direct(struct ib_cq *cq, int budget)
{
        struct ib_wc wcs[IB_POLL_BATCH_DIRECT];

        return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
}
EXPORT_SYMBOL(ib_process_cq_direct);
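
/*
 * Minimal usage sketch (hypothetical ULP code, not part of this file):
 * a ULP that owns an IB_POLL_DIRECT CQ drains it from its own context,
 * for example while waiting for a teardown completion:
 *
 *	while (ib_process_cq_direct(qp->send_cq, 16) > 0)
 *		cond_resched();
 *
 * Callers must serialize against other pollers of the same CQ; direct
 * processing takes no lock and never rearms completion interrupts.
 */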

static void ib_cq_completion_direct(struct ib_cq *cq, void *private)
{
        WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq);
}

static int ib_poll_handler(struct irq_poll *iop, int budget)
{
        struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
        struct dim *dim = cq->dim;
        int completed;

        completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
        if (completed < budget) {
                irq_poll_complete(&cq->iop);
                if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) {
                        trace_cq_reschedule(cq);
                        irq_poll_sched(&cq->iop);
                }
        }

        if (dim)
                rdma_dim(dim, completed);

        return completed;
}

static void ib_cq_completion_softirq(struct ib_cq *cq, void *private)
{
        trace_cq_schedule(cq);
        irq_poll_sched(&cq->iop);
}

static void ib_cq_poll_work(struct work_struct *work)
{
        struct ib_cq *cq = container_of(work, struct ib_cq, work);
        int completed;

        completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
                                    IB_POLL_BATCH);
        if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
            ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
                queue_work(cq->comp_wq, &cq->work);
        else if (cq->dim)
                rdma_dim(cq->dim, completed);
}

static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
        trace_cq_schedule(cq);
        queue_work(cq->comp_wq, &cq->work);
}

/**
 * __ib_alloc_cq - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @comp_vector: HCA completion vector for this CQ
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 *
 * This is the proper interface to allocate a CQ for in-kernel users. A
 * CQ allocated with this interface will automatically be polled from the
 * specified context. The ULP must use wr->wr_cqe instead of wr->wr_id
 * to use this CQ abstraction.
 */
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private, int nr_cqe,
                            int comp_vector, enum ib_poll_context poll_ctx,
                            const char *caller)
{
        struct ib_cq_init_attr cq_attr = {
                .cqe = nr_cqe,
                .comp_vector = comp_vector,
        };
        struct ib_cq *cq;
        int ret = -ENOMEM;

        cq = rdma_zalloc_drv_obj(dev, ib_cq);
        if (!cq)
                return ERR_PTR(ret);

        cq->device = dev;
        cq->cq_context = private;
        cq->poll_ctx = poll_ctx;
        atomic_set(&cq->usecnt, 0);
        cq->comp_vector = comp_vector;

        cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL);
        if (!cq->wc)
                goto out_free_cq;

        rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ);
        rdma_restrack_set_name(&cq->res, caller);

        ret = dev->ops.create_cq(cq, &cq_attr, NULL);
        if (ret)
                goto out_free_wc;

        rdma_dim_init(cq);

        switch (cq->poll_ctx) {
        case IB_POLL_DIRECT:
                cq->comp_handler = ib_cq_completion_direct;
                break;
        case IB_POLL_SOFTIRQ:
                cq->comp_handler = ib_cq_completion_softirq;

                irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler);
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                break;
        case IB_POLL_WORKQUEUE:
        case IB_POLL_UNBOUND_WORKQUEUE:
                cq->comp_handler = ib_cq_completion_workqueue;
                INIT_WORK(&cq->work, ib_cq_poll_work);
                ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
                                ib_comp_wq : ib_comp_unbound_wq;
                break;
        default:
                ret = -EINVAL;
                goto out_destroy_cq;
        }

        rdma_restrack_add(&cq->res);
        trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx);
        return cq;

out_destroy_cq:
        rdma_dim_destroy(cq);
        cq->device->ops.destroy_cq(cq, NULL);
out_free_wc:
        rdma_restrack_put(&cq->res);
        kfree(cq->wc);
out_free_cq:
        kfree(cq);
        trace_cq_alloc_error(nr_cqe, comp_vector, poll_ctx, ret);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(__ib_alloc_cq);
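
/*
 * Minimal usage sketch (hypothetical ULP code, not part of this file):
 * the ULP embeds a struct ib_cqe in its request and lets the CQ
 * abstraction dispatch each completion to the ->done handler:
 *
 *	struct my_req {
 *		struct ib_cqe	cqe;
 *	};
 *
 *	static void my_req_done(struct ib_cq *cq, struct ib_wc *wc)
 *	{
 *		struct my_req *req =
 *			container_of(wc->wr_cqe, struct my_req, cqe);
 *
 *		if (unlikely(wc->status != IB_WC_SUCCESS))
 *			...
 *	}
 *
 *	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);
 *	req->cqe.done = my_req_done;
 *	wr.wr_cqe = &req->cqe;		(instead of wr.wr_id)
 *
 * struct my_req, my_req_done, and the sizes above are illustrative only.
 */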

/**
 * __ib_alloc_cq_any - allocate a completion queue
 * @dev: device to allocate the CQ for
 * @private: driver private data, accessible from cq->cq_context
 * @nr_cqe: number of CQEs to allocate
 * @poll_ctx: context to poll the CQ from
 * @caller: module owner name
 *
 * Attempt to spread ULP Completion Queues over each device's interrupt
 * vectors. A simple best-effort mechanism is used.
 */
struct ib_cq *__ib_alloc_cq_any(struct ib_device *dev, void *private,
                                int nr_cqe, enum ib_poll_context poll_ctx,
                                const char *caller)
{
        static atomic_t counter;
        int comp_vector = 0;

        if (dev->num_comp_vectors > 1)
                comp_vector =
                        atomic_inc_return(&counter) %
                        min_t(int, dev->num_comp_vectors, num_online_cpus());

        return __ib_alloc_cq(dev, private, nr_cqe, comp_vector, poll_ctx,
                             caller);
}
EXPORT_SYMBOL(__ib_alloc_cq_any);

/**
 * ib_free_cq - free a completion queue
 * @cq: completion queue to free.
 */
void ib_free_cq(struct ib_cq *cq)
{
        int ret;

        if (WARN_ON_ONCE(atomic_read(&cq->usecnt)))
                return;
        if (WARN_ON_ONCE(cq->cqe_used))
                return;

        switch (cq->poll_ctx) {
        case IB_POLL_DIRECT:
                break;
        case IB_POLL_SOFTIRQ:
                irq_poll_disable(&cq->iop);
                break;
        case IB_POLL_WORKQUEUE:
        case IB_POLL_UNBOUND_WORKQUEUE:
                cancel_work_sync(&cq->work);
                break;
        default:
                WARN_ON_ONCE(1);
        }

        rdma_dim_destroy(cq);
        trace_cq_free(cq);
        ret = cq->device->ops.destroy_cq(cq, NULL);
        WARN_ONCE(ret, "Destroy of kernel CQ shouldn't fail");
        rdma_restrack_del(&cq->res);
        kfree(cq->wc);
        kfree(cq);
}
EXPORT_SYMBOL(ib_free_cq);

void ib_cq_pool_init(struct ib_device *dev)
{
        unsigned int i;

        spin_lock_init(&dev->cq_pools_lock);
        for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++)
                INIT_LIST_HEAD(&dev->cq_pools[i]);
}

void ib_cq_pool_destroy(struct ib_device *dev)
{
        struct ib_cq *cq, *n;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(dev->cq_pools); i++) {
                list_for_each_entry_safe(cq, n, &dev->cq_pools[i],
                                         pool_entry) {
                        WARN_ON(cq->cqe_used);
                        cq->shared = false;
                        ib_free_cq(cq);
                }
        }
}

static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
                        enum ib_poll_context poll_ctx)
{
        LIST_HEAD(tmp_list);
        unsigned int nr_cqs, i;
        struct ib_cq *cq, *n;
        int ret;

        if (WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE))
                return -EINVAL;

        /*
         * Allocate at least as many CQEs as requested, and otherwise
         * a reasonable batch size so that we can share CQs between
         * multiple users instead of allocating a larger number of CQs.
         */
        nr_cqes = min_t(unsigned int, dev->attrs.max_cqe,
                        max(nr_cqes, IB_MAX_SHARED_CQ_SZ));
        nr_cqs = min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
        for (i = 0; i < nr_cqs; i++) {
                cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx);
                if (IS_ERR(cq)) {
                        ret = PTR_ERR(cq);
                        goto out_free_cqs;
                }
                cq->shared = true;
                list_add_tail(&cq->pool_entry, &tmp_list);
        }

        spin_lock_irq(&dev->cq_pools_lock);
        list_splice(&tmp_list, &dev->cq_pools[poll_ctx]);
        spin_unlock_irq(&dev->cq_pools_lock);

        return 0;

out_free_cqs:
        list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
                cq->shared = false;
                ib_free_cq(cq);
        }
        return ret;
}

/**
 * ib_cq_pool_get() - Find the least used completion queue that matches
 *   a given cpu hint (or least used for wild card affinity) and fits
 *   nr_cqe.
 * @dev: rdma device
 * @nr_cqe: number of needed cqe entries
 * @comp_vector_hint: completion vector hint (-1) for the driver to assign
 *   a comp vector based on an internal counter
 * @poll_ctx: cq polling context
 *
 * Finds a cq that satisfies @comp_vector_hint and @nr_cqe requirements and
 * claims entries in it for the caller. If there is no available cq, a new
 * cq with the requirements is allocated and added to the device pool.
 * IB_POLL_DIRECT cannot be used for shared cqs, so it is not a valid value
 * for @poll_ctx.
 */
struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
                             int comp_vector_hint,
                             enum ib_poll_context poll_ctx)
{
        static unsigned int default_comp_vector;
        unsigned int vector, num_comp_vectors;
        struct ib_cq *cq, *found = NULL;
        int ret;

        if (WARN_ON_ONCE(poll_ctx > IB_POLL_LAST_POOL_TYPE))
                return ERR_PTR(-EINVAL);

        num_comp_vectors =
                min_t(unsigned int, dev->num_comp_vectors, num_online_cpus());
        /* Project the affinity to the device completion vector range */
        if (comp_vector_hint < 0) {
                comp_vector_hint =
                        (READ_ONCE(default_comp_vector) + 1) % num_comp_vectors;
                WRITE_ONCE(default_comp_vector, comp_vector_hint);
        }
        vector = comp_vector_hint % num_comp_vectors;

        /*
         * Find the least used CQ with correct affinity and
         * enough free CQ entries
         */
        while (!found) {
                spin_lock_irq(&dev->cq_pools_lock);
                list_for_each_entry(cq, &dev->cq_pools[poll_ctx],
                                    pool_entry) {
                        /*
                         * Check to see if we have found a CQ with the
                         * correct completion vector
                         */
                        if (vector != cq->comp_vector)
                                continue;
                        if (cq->cqe_used + nr_cqe > cq->cqe)
                                continue;
                        found = cq;
                        break;
                }

                if (found) {
                        found->cqe_used += nr_cqe;
                        spin_unlock_irq(&dev->cq_pools_lock);

                        return found;
                }
                spin_unlock_irq(&dev->cq_pools_lock);

                /*
                 * Didn't find a match or ran out of CQs in the device
                 * pool, allocate a new array of CQs.
                 */
                ret = ib_alloc_cqs(dev, nr_cqe, poll_ctx);
                if (ret)
                        return ERR_PTR(ret);
        }

        return found;
}
EXPORT_SYMBOL(ib_cq_pool_get);

/**
 * ib_cq_pool_put - Return a CQ taken from a shared pool.
 * @cq: The CQ to return.
 * @nr_cqe: The max number of cqes that the user had requested.
 */
void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe)
{
        if (WARN_ON_ONCE(nr_cqe > cq->cqe_used))
                return;

        spin_lock_irq(&cq->device->cq_pools_lock);
        cq->cqe_used -= nr_cqe;
        spin_unlock_irq(&cq->device->cq_pools_lock);
}
EXPORT_SYMBOL(ib_cq_pool_put);
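
/*
 * Minimal usage sketch (hypothetical ULP code, not part of this file):
 * the same nr_cqe value must be passed to get and put so the pool's
 * cqe_used accounting stays balanced:
 *
 *	cq = ib_cq_pool_get(dev, nr_cqe, -1, IB_POLL_WORKQUEUE);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	... attach the CQ to a QP sized for at most nr_cqe completions ...
 *	ib_cq_pool_put(cq, nr_cqe);
 */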