Lines Matching refs:cq — references to the struct rvt_cq pointer in the rdmavt completion-queue code (drivers/infiniband/sw/rdmavt/cq.c)
67 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited) in rvt_cq_enter() argument
78 spin_lock_irqsave(&cq->lock, flags); in rvt_cq_enter()
80 if (cq->ip) { in rvt_cq_enter()
81 u_wc = cq->queue; in rvt_cq_enter()
86 k_wc = cq->kqueue; in rvt_cq_enter()
96 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
97 head = cq->ibcq.cqe; in rvt_cq_enter()
103 if (unlikely(next == tail || cq->cq_full)) { in rvt_cq_enter()
104 struct rvt_dev_info *rdi = cq->rdi; in rvt_cq_enter()
106 if (!cq->cq_full) in rvt_cq_enter()
108 cq->cq_full = true; in rvt_cq_enter()
109 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
110 if (cq->ibcq.event_handler) { in rvt_cq_enter()
113 ev.device = cq->ibcq.device; in rvt_cq_enter()
114 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
116 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
120 trace_rvt_cq_enter(cq, entry, head); in rvt_cq_enter()
143 if (cq->notify == IB_CQ_NEXT_COMP || in rvt_cq_enter()
144 (cq->notify == IB_CQ_SOLICITED && in rvt_cq_enter()
150 cq->notify = RVT_CQ_NONE; in rvt_cq_enter()
151 cq->triggered++; in rvt_cq_enter()
152 queue_work_on(cq->comp_vector_cpu, comp_vector_wq, in rvt_cq_enter()
153 &cq->comptask); in rvt_cq_enter()
156 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
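
The references above trace rvt_cq_enter() as it takes the CQ lock, picks the user-mapped (cq->queue) or kernel (cq->kqueue) ring, advances the head of a ring with ibcq.cqe + 1 slots, flags an overflow when the next slot would collide with the tail, and queues the completion work when the CQ is armed. The user-space sketch below models only the head/tail arithmetic and the overflow check; toy_cq, TOY_CQE and all other names are hypothetical stand-ins, and the locking, the IB_EVENT_CQ_ERR event, tracing and the completion work queue are left out.

/*
 * Toy model of the ring arithmetic in rvt_cq_enter(); names and sizes
 * are hypothetical, locking/eventing/work-queueing are omitted.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_CQE 4                        /* models ibcq.cqe              */

struct toy_cq {
	unsigned head;                   /* next slot to fill            */
	unsigned tail;                   /* next slot the poller reads   */
	int entries[TOY_CQE + 1];        /* cqe + 1 slots, one kept free */
	bool cq_full;
};

static bool toy_cq_enter(struct toy_cq *cq, int entry)
{
	unsigned head = cq->head, next;

	if (head >= TOY_CQE)             /* mirrors head >= ibcq.cqe     */
		head = TOY_CQE;
	next = head + 1;
	if (next > TOY_CQE)              /* wrap past the last slot      */
		next = 0;

	if (next == cq->tail || cq->cq_full) {
		cq->cq_full = true;      /* driver raises IB_EVENT_CQ_ERR */
		return false;
	}

	cq->entries[head] = entry;
	cq->head = next;
	return true;                     /* driver may now arm comptask  */
}

int main(void)
{
	struct toy_cq cq = { 0 };

	for (int i = 0; i < 6; i++)
		printf("enter %d -> %s\n", i,
		       toy_cq_enter(&cq, i) ? "queued" : "overflow");
	return 0;
}

With TOY_CQE = 4 the first four entries are queued and the fifth reports an overflow, matching a CQ that holds ibcq.cqe completions in cqe + 1 slots.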
163 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); in send_complete() local
173 u8 triggered = cq->triggered; in send_complete()
182 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
185 if (cq->triggered == triggered) in send_complete()
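
send_complete() snapshots cq->triggered before calling the completion handler and loops if the counter moved while the handler ran, i.e. if another rvt_cq_enter() armed the CQ in the meantime. A minimal user-space model of that re-check loop, with hypothetical toy_* names and a demo handler standing in for ibcq.comp_handler:

#include <stdio.h>

/* Hypothetical cut-down CQ: only the fields send_complete() touches. */
struct toy_cq {
	unsigned triggered;                      /* bumped by each cq_enter   */
	void (*comp_handler)(struct toy_cq *cq);
};

static void toy_send_complete(struct toy_cq *cq)
{
	for (;;) {
		unsigned snap = cq->triggered;   /* u8 snapshot in the driver */

		cq->comp_handler(cq);            /* ibcq.comp_handler(...)    */

		if (cq->triggered == snap)       /* no re-arm while we ran    */
			break;
	}
}

/* Handler that "arms" the CQ once more the first time it runs. */
static void demo_handler(struct toy_cq *cq)
{
	static int first = 1;

	printf("completion handler, triggered=%u\n", cq->triggered);
	if (first) {
		first = 0;
		cq->triggered++;                 /* simulate a racing cq_enter */
	}
}

int main(void)
{
	struct toy_cq cq = { .triggered = 1, .comp_handler = demo_handler };

	toy_send_complete(&cq);                  /* handler runs twice        */
	return 0;
}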
205 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_create_cq() local
250 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); in rvt_create_cq()
251 if (IS_ERR(cq->ip)) { in rvt_create_cq()
252 err = PTR_ERR(cq->ip); in rvt_create_cq()
256 err = ib_copy_to_udata(udata, &cq->ip->offset, in rvt_create_cq()
257 sizeof(cq->ip->offset)); in rvt_create_cq()
272 if (cq->ip) { in rvt_create_cq()
274 list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps); in rvt_create_cq()
283 cq->rdi = rdi; in rvt_create_cq()
285 cq->comp_vector_cpu = in rvt_create_cq()
288 cq->comp_vector_cpu = in rvt_create_cq()
291 cq->ibcq.cqe = entries; in rvt_create_cq()
292 cq->notify = RVT_CQ_NONE; in rvt_create_cq()
293 spin_lock_init(&cq->lock); in rvt_create_cq()
294 INIT_WORK(&cq->comptask, send_complete); in rvt_create_cq()
296 cq->queue = u_wc; in rvt_create_cq()
298 cq->kqueue = k_wc; in rvt_create_cq()
300 trace_rvt_create_cq(cq, attr); in rvt_create_cq()
304 kfree(cq->ip); in rvt_create_cq()
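
rvt_create_cq() backs the CQ either with a user-mappable queue (when udata is present, publishing the mmap offset back to userspace and linking cq->ip onto rdi->pending_mmaps) or with a kernel-only queue, then initializes cqe, notify, the lock and the completion work item. Below is a compressed user-space sketch of just that user-versus-kernel backing decision; the names are hypothetical and the mmap/list bookkeeping is reduced to comments.

#include <stdlib.h>
#include <stdio.h>

/* Hypothetical, flattened stand-ins for rvt_cq and rvt_mmap_info. */
struct toy_mmap_info { unsigned long offset; };

struct toy_cq {
	int cqe;                          /* ibcq.cqe                     */
	int notify;                       /* starts out as RVT_CQ_NONE    */
	struct toy_mmap_info *ip;         /* user CQs: mmap handle        */
	int *kqueue;                      /* kernel CQs: allocated ring   */
};

/* udata != NULL models a userspace verbs consumer, as in the driver. */
static int toy_create_cq(struct toy_cq *cq, int entries, void *udata)
{
	if (udata) {
		cq->ip = calloc(1, sizeof(*cq->ip));
		if (!cq->ip)
			return -1;
		/* rvt_create_cq() copies cq->ip->offset back via udata and
		 * links cq->ip onto rdi->pending_mmaps at this point. */
	} else {
		cq->kqueue = calloc((size_t)entries + 1, sizeof(*cq->kqueue));
		if (!cq->kqueue)
			return -1;
	}
	cq->cqe = entries;
	cq->notify = 0;                   /* RVT_CQ_NONE                  */
	return 0;
}

int main(void)
{
	struct toy_cq kcq = { 0 };

	toy_create_cq(&kcq, 128, NULL);   /* kernel-backed CQ             */
	printf("cqe=%d kernel-backed=%d\n", kcq.cqe, kcq.kqueue != NULL);
	free(kcq.kqueue);
	return 0;
}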
320 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_destroy_cq() local
321 struct rvt_dev_info *rdi = cq->rdi; in rvt_destroy_cq()
323 flush_work(&cq->comptask); in rvt_destroy_cq()
327 if (cq->ip) in rvt_destroy_cq()
328 kref_put(&cq->ip->ref, rvt_release_mmap_info); in rvt_destroy_cq()
330 vfree(cq->kqueue); in rvt_destroy_cq()
346 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_req_notify_cq() local
350 spin_lock_irqsave(&cq->lock, flags); in rvt_req_notify_cq()
355 if (cq->notify != IB_CQ_NEXT_COMP) in rvt_req_notify_cq()
356 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; in rvt_req_notify_cq()
359 if (cq->queue) { in rvt_req_notify_cq()
360 if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) != in rvt_req_notify_cq()
361 RDMA_READ_UAPI_ATOMIC(cq->queue->tail)) in rvt_req_notify_cq()
364 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
369 spin_unlock_irqrestore(&cq->lock, flags); in rvt_req_notify_cq()
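
rvt_req_notify_cq() refuses to downgrade an existing IB_CQ_NEXT_COMP arm to a solicited-only one, and when IB_CQ_REPORT_MISSED_EVENTS is requested it returns 1 if the ring is non-empty (head != tail in either the user or kernel queue). A small model of that rule, using illustrative flag values rather than the real verbs constants:

#include <stdio.h>

/* Illustrative flag values, not the real IB verbs constants. */
enum { TOY_NONE = 0, TOY_SOLICITED = 1, TOY_NEXT_COMP = 2 };
#define TOY_SOLICITED_MASK  0x3
#define TOY_REPORT_MISSED   0x4

struct toy_cq { int notify; unsigned head, tail; };

static int toy_req_notify_cq(struct toy_cq *cq, int flags)
{
	int ret = 0;

	if (cq->notify != TOY_NEXT_COMP)            /* never weaken an arm  */
		cq->notify = flags & TOY_SOLICITED_MASK;

	if ((flags & TOY_REPORT_MISSED) && cq->head != cq->tail)
		ret = 1;                            /* completions pending  */

	return ret;
}

int main(void)
{
	struct toy_cq cq = { .notify = TOY_NEXT_COMP, .head = 3, .tail = 1 };

	/* NEXT_COMP stays armed; a non-empty ring reports a missed event. */
	printf("%d %d\n",
	       toy_req_notify_cq(&cq, TOY_SOLICITED | TOY_REPORT_MISSED),
	       cq.notify == TOY_NEXT_COMP);
	return 0;
}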
382 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_resize_cq() local
386 struct rvt_dev_info *rdi = cq->rdi; in rvt_resize_cq()
420 spin_lock_irq(&cq->lock); in rvt_resize_cq()
426 old_u_wc = cq->queue; in rvt_resize_cq()
430 old_k_wc = cq->kqueue; in rvt_resize_cq()
435 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
436 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
437 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
438 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
440 n = cq->ibcq.cqe + 1 + head - tail; in rvt_resize_cq()
452 if (tail == (u32)cq->ibcq.cqe) in rvt_resize_cq()
457 cq->ibcq.cqe = cqe; in rvt_resize_cq()
461 cq->queue = u_wc; in rvt_resize_cq()
465 cq->kqueue = k_wc; in rvt_resize_cq()
467 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
474 if (cq->ip) { in rvt_resize_cq()
475 struct rvt_mmap_info *ip = cq->ip; in rvt_resize_cq()
499 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
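
rvt_resize_cq() copies the live entries from the old ring into the new one; the count it copies is derived from the head and tail of a ring with ibcq.cqe + 1 slots, and the resize fails if that count does not fit the requested size. A sketch of just that occupancy arithmetic, assuming the same (cqe + 1)-slot layout; names are placeholders:

#include <stdio.h>

static unsigned toy_cq_count(unsigned cqe, unsigned head, unsigned tail)
{
	unsigned n;

	if (head > cqe)             /* sanitize indices read from the ring */
		head = cqe;
	if (tail > cqe)
		tail = cqe;

	n = cqe + 1 + head - tail;  /* occupancy of a (cqe + 1)-slot ring  */
	if (n > cqe)
		n -= cqe + 1;       /* fold back into the range 0..cqe     */
	return n;
}

int main(void)
{
	/* e.g. an 8-entry CQ whose head has wrapped behind the tail */
	printf("%u\n", toy_cq_count(8, 2, 6));   /* -> 5 queued entries */
	return 0;
}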
520 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_poll_cq() local
527 if (cq->ip) in rvt_poll_cq()
530 spin_lock_irqsave(&cq->lock, flags); in rvt_poll_cq()
532 wc = cq->kqueue; in rvt_poll_cq()
534 if (tail > (u32)cq->ibcq.cqe) in rvt_poll_cq()
535 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
540 trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled); in rvt_poll_cq()
542 if (tail >= cq->ibcq.cqe) in rvt_poll_cq()
549 spin_unlock_irqrestore(&cq->lock, flags); in rvt_poll_cq()
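
rvt_poll_cq() only services kernel CQs (user-mapped CQs, cq->ip != NULL, are drained by the userspace library), consuming entries from the tail of cq->kqueue and wrapping the index after slot ibcq.cqe. A simplified, lock-free user-space model with placeholder types standing in for struct ib_wc:

#include <stdio.h>

#define TOY_CQE 4

struct toy_cq {
	unsigned head, tail;
	int kqueue[TOY_CQE + 1];          /* cqe + 1 slots, as in the ring */
	int is_user;                      /* models cq->ip != NULL         */
};

static int toy_poll_cq(struct toy_cq *cq, int num_entries, int *wc)
{
	unsigned tail = cq->tail;
	int npolled;

	if (cq->is_user)                  /* user CQs never reach here     */
		return -1;

	if (tail > TOY_CQE)               /* sanitize a corrupted index    */
		tail = TOY_CQE;

	for (npolled = 0; npolled < num_entries; npolled++) {
		if (tail == cq->head)     /* ring empty                    */
			break;
		wc[npolled] = cq->kqueue[tail];
		if (tail >= TOY_CQE)      /* wrap after the last slot      */
			tail = 0;
		else
			tail++;
	}
	cq->tail = tail;
	return npolled;
}

int main(void)
{
	struct toy_cq cq = { .head = 2, .kqueue = { 10, 11 } };
	int wc[4];
	int n = toy_poll_cq(&cq, 4, wc);

	printf("polled %d: %d %d\n", n, wc[0], wc[1]); /* polled 2: 10 11 */
	return 0;
}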