Lines Matching refs:cq

25 bool rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)  in rvt_cq_enter()  argument
36 spin_lock_irqsave(&cq->lock, flags); in rvt_cq_enter()
38 if (cq->ip) { in rvt_cq_enter()
39 u_wc = cq->queue; in rvt_cq_enter()
44 k_wc = cq->kqueue; in rvt_cq_enter()
54 if (head >= (unsigned)cq->ibcq.cqe) { in rvt_cq_enter()
55 head = cq->ibcq.cqe; in rvt_cq_enter()
61 if (unlikely(next == tail || cq->cq_full)) { in rvt_cq_enter()
62 struct rvt_dev_info *rdi = cq->rdi; in rvt_cq_enter()
64 if (!cq->cq_full) in rvt_cq_enter()
66 cq->cq_full = true; in rvt_cq_enter()
67 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
68 if (cq->ibcq.event_handler) { in rvt_cq_enter()
71 ev.device = cq->ibcq.device; in rvt_cq_enter()
72 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
74 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
78 trace_rvt_cq_enter(cq, entry, head); in rvt_cq_enter()
101 if (cq->notify == IB_CQ_NEXT_COMP || in rvt_cq_enter()
102 (cq->notify == IB_CQ_SOLICITED && in rvt_cq_enter()
108 cq->notify = RVT_CQ_NONE; in rvt_cq_enter()
109 cq->triggered++; in rvt_cq_enter()
110 queue_work_on(cq->comp_vector_cpu, comp_vector_wq, in rvt_cq_enter()
111 &cq->comptask); in rvt_cq_enter()
114 spin_unlock_irqrestore(&cq->lock, flags); in rvt_cq_enter()
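
The rvt_cq_enter() lines above show the producer side of the completion ring: head and tail indices over cqe + 1 slots, a wrap at cqe, and "next == tail" as the full test, with cq_full and an event reported when the ring overflows. Below is a minimal standalone sketch of that ring-insert shape; struct cq_ring and cq_ring_enter are illustrative names, not the kernel's, and the locking, tracing, and event callback are left out.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the completion ring; not the kernel types. */
struct cq_ring {
	unsigned int cqe;	/* usable entries; the ring has cqe + 1 slots */
	unsigned int head;	/* next slot the producer fills */
	unsigned int tail;	/* next slot the consumer reads */
	int *wc;		/* completion slots */
};

static bool cq_ring_enter(struct cq_ring *cq, int entry)
{
	unsigned int head = cq->head;
	unsigned int next;

	/* Wrap the producer index once it runs past the last slot. */
	if (head >= cq->cqe) {
		head = cq->cqe;
		next = 0;
	} else {
		next = head + 1;
	}

	/* One slot stays empty: catching up to tail means the ring is full
	 * (the kernel would set cq_full and call the event handler here). */
	if (next == cq->tail)
		return false;

	cq->wc[head] = entry;
	cq->head = next;	/* publish by advancing head last */
	return true;
}

int main(void)
{
	int slots[5];		/* cqe = 4 usable entries */
	struct cq_ring cq = { .cqe = 4, .wc = slots };

	for (int i = 0; i < 6; i++)
		printf("enter %d -> %s\n", i,
		       cq_ring_enter(&cq, i) ? "ok" : "full");
	return 0;
}
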
121 struct rvt_cq *cq = container_of(work, struct rvt_cq, comptask); in send_complete() local
131 u8 triggered = cq->triggered; in send_complete()
140 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
143 if (cq->triggered == triggered) in send_complete()
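
send_complete() runs as deferred work and keeps re-invoking the completion handler until cq->triggered stops changing, so completions queued while the handler runs are not missed. A rough userspace model of that retrigger loop follows; struct cq_work, cq_work_fn and demo_handler are made-up names, and the workqueue and locking are omitted.

#include <stdio.h>

/* Made-up stand-in for the deferred work item; no workqueue, no locking. */
struct cq_work {
	unsigned char triggered;	/* bumped for every queued completion */
	void (*comp_handler)(void *ctx);
	void *ctx;
};

static void cq_work_fn(struct cq_work *w)
{
	for (;;) {
		unsigned char snap = w->triggered;

		/* The handler typically rearms the CQ and polls it, and may
		 * itself cause further completions to be queued. */
		w->comp_handler(w->ctx);

		/* Nothing new arrived while the handler ran: we are done. */
		if (w->triggered == snap)
			break;
	}
}

static int calls;
static struct cq_work work;

static void demo_handler(void *ctx)
{
	(void)ctx;
	/* Simulate one more completion arriving during the first callback. */
	if (calls++ == 0)
		work.triggered++;
}

int main(void)
{
	work.comp_handler = demo_handler;
	work.triggered = 1;
	cq_work_fn(&work);
	printf("handler ran %d times\n", calls);	/* prints 2 */
	return 0;
}
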
163 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_create_cq() local
208 cq->ip = rvt_create_mmap_info(rdi, sz, udata, u_wc); in rvt_create_cq()
209 if (IS_ERR(cq->ip)) { in rvt_create_cq()
210 err = PTR_ERR(cq->ip); in rvt_create_cq()
214 err = ib_copy_to_udata(udata, &cq->ip->offset, in rvt_create_cq()
215 sizeof(cq->ip->offset)); in rvt_create_cq()
230 if (cq->ip) { in rvt_create_cq()
232 list_add(&cq->ip->pending_mmaps, &rdi->pending_mmaps); in rvt_create_cq()
241 cq->rdi = rdi; in rvt_create_cq()
243 cq->comp_vector_cpu = in rvt_create_cq()
246 cq->comp_vector_cpu = in rvt_create_cq()
249 cq->ibcq.cqe = entries; in rvt_create_cq()
250 cq->notify = RVT_CQ_NONE; in rvt_create_cq()
251 spin_lock_init(&cq->lock); in rvt_create_cq()
252 INIT_WORK(&cq->comptask, send_complete); in rvt_create_cq()
254 cq->queue = u_wc; in rvt_create_cq()
256 cq->kqueue = k_wc; in rvt_create_cq()
258 trace_rvt_create_cq(cq, attr); in rvt_create_cq()
262 kfree(cq->ip); in rvt_create_cq()
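
rvt_create_cq() sizes the ring for the requested entry count plus one spare slot and keeps either a user-mappable queue (cq->queue, exported via the mmap info and the udata offset copy) or a kernel-only one (cq->kqueue). The sketch below models just the sizing and the user/kernel split, with invented type and function names; the mmap bookkeeping, udata copy, and error unwinding are omitted.

#include <stdlib.h>

/* Invented types: wc_ring models the shared head/tail + slots layout,
 * cq_ctx models only the fields of the CQ that matter for sizing. */
struct wc_ring {
	unsigned int head;
	unsigned int tail;
	int wc[];		/* entries + 1 completion slots */
};

struct cq_ctx {
	unsigned int cqe;	/* reported capacity (= requested entries) */
	struct wc_ring *queue;	/* would be the mmap-able user ring */
	struct wc_ring *kqueue;	/* kernel-only ring */
};

static int cq_ctx_init(struct cq_ctx *cq, unsigned int entries, int user)
{
	/* One spare slot lets "next == tail" act as the full marker. */
	size_t sz = sizeof(struct wc_ring) + (entries + 1) * sizeof(int);
	struct wc_ring *ring = calloc(1, sz);

	if (!ring)
		return -1;

	cq->cqe = entries;
	if (user)
		cq->queue = ring;	/* kernel also builds mmap info here */
	else
		cq->kqueue = ring;
	return 0;
}

int main(void)
{
	struct cq_ctx cq = { 0 };
	int ret = cq_ctx_init(&cq, 128, 0);	/* kernel-style CQ, 128 entries */

	free(cq.kqueue);
	return ret;
}
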
278 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_destroy_cq() local
279 struct rvt_dev_info *rdi = cq->rdi; in rvt_destroy_cq()
281 flush_work(&cq->comptask); in rvt_destroy_cq()
285 if (cq->ip) in rvt_destroy_cq()
286 kref_put(&cq->ip->ref, rvt_release_mmap_info); in rvt_destroy_cq()
288 vfree(cq->kqueue); in rvt_destroy_cq()
304 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_req_notify_cq() local
308 spin_lock_irqsave(&cq->lock, flags); in rvt_req_notify_cq()
313 if (cq->notify != IB_CQ_NEXT_COMP) in rvt_req_notify_cq()
314 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK; in rvt_req_notify_cq()
317 if (cq->queue) { in rvt_req_notify_cq()
318 if (RDMA_READ_UAPI_ATOMIC(cq->queue->head) != in rvt_req_notify_cq()
319 RDMA_READ_UAPI_ATOMIC(cq->queue->tail)) in rvt_req_notify_cq()
322 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
327 spin_unlock_irqrestore(&cq->lock, flags); in rvt_req_notify_cq()
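
rvt_req_notify_cq() records the requested arming mode without downgrading IB_CQ_NEXT_COMP to solicited-only, and when missed-event reporting is requested it returns 1 if the ring already holds entries (head != tail). A simplified stand-in follows, with its own enum and struct names and the user/kernel queue distinction collapsed into one index pair.

#include <stdbool.h>

enum notify_mode { NOTIFY_NONE, NOTIFY_SOLICITED, NOTIFY_NEXT_COMP };

/* Stand-in for the CQ state; one index pair instead of queue/kqueue. */
struct cq_state {
	enum notify_mode notify;
	unsigned int head, tail;
};

/* Returns 1 when the caller asked for missed-event reporting and
 * completions are already pending, 0 otherwise. */
static int cq_req_notify(struct cq_state *cq, enum notify_mode mode,
			 bool report_missed)
{
	/* NEXT_COMP already covers solicited-only, so never weaken it. */
	if (cq->notify != NOTIFY_NEXT_COMP)
		cq->notify = mode;

	if (report_missed && cq->head != cq->tail)
		return 1;
	return 0;
}

int main(void)
{
	struct cq_state cq = { .notify = NOTIFY_NONE, .head = 2, .tail = 0 };

	/* Two entries pending, so arming with "report missed" returns 1. */
	return !cq_req_notify(&cq, NOTIFY_SOLICITED, true);
}
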
340 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_resize_cq() local
344 struct rvt_dev_info *rdi = cq->rdi; in rvt_resize_cq()
378 spin_lock_irq(&cq->lock); in rvt_resize_cq()
384 old_u_wc = cq->queue; in rvt_resize_cq()
388 old_k_wc = cq->kqueue; in rvt_resize_cq()
393 if (head > (u32)cq->ibcq.cqe) in rvt_resize_cq()
394 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
395 if (tail > (u32)cq->ibcq.cqe) in rvt_resize_cq()
396 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
398 n = cq->ibcq.cqe + 1 + head - tail; in rvt_resize_cq()
410 if (tail == (u32)cq->ibcq.cqe) in rvt_resize_cq()
415 cq->ibcq.cqe = cqe; in rvt_resize_cq()
419 cq->queue = u_wc; in rvt_resize_cq()
423 cq->kqueue = k_wc; in rvt_resize_cq()
425 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
432 if (cq->ip) { in rvt_resize_cq()
433 struct rvt_mmap_info *ip = cq->ip; in rvt_resize_cq()
457 spin_unlock_irq(&cq->lock); in rvt_resize_cq()
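
rvt_resize_cq() clamps possibly stale head/tail values to the old bound, counts the queued entries (wrapping through the cqe + 1 slots), refuses the resize if they would not fit, and copies them to the front of the new ring before swapping it in under the lock. The standalone model below keeps only that copy logic; struct ring and ring_resize are illustrative names, and the udata/mmap handling is dropped.

#include <stdlib.h>

/* Illustrative ring type; not the kernel's completion queue layout. */
struct ring {
	unsigned int cqe;	/* usable entries; cqe + 1 slots allocated */
	unsigned int head, tail;
	int *wc;
};

static int ring_resize(struct ring *cq, unsigned int new_cqe)
{
	unsigned int head = cq->head, tail = cq->tail, n, i;
	int *wc;

	/* Indices can be stale if an earlier resize shrank the ring. */
	if (head > cq->cqe)
		head = cq->cqe;
	if (tail > cq->cqe)
		tail = cq->cqe;

	/* Count the queued entries, accounting for wrap-around. */
	if (head < tail)
		n = cq->cqe + 1 + head - tail;
	else
		n = head - tail;
	if (n > new_cqe)
		return -1;	/* queued entries would not fit */

	wc = calloc(new_cqe + 1, sizeof(*wc));
	if (!wc)
		return -1;

	/* Copy the valid entries to the start of the new ring. */
	for (i = 0; tail != head; i++) {
		wc[i] = cq->wc[tail];
		if (tail == cq->cqe)
			tail = 0;
		else
			tail++;
	}

	free(cq->wc);
	cq->wc = wc;
	cq->cqe = new_cqe;
	cq->head = n;
	cq->tail = 0;
	return 0;
}

int main(void)
{
	struct ring cq = { .cqe = 4, .head = 3, .tail = 1,
			   .wc = calloc(5, sizeof(int)) };

	return cq.wc ? ring_resize(&cq, 8) : -1;	/* grow from 4 to 8 entries */
}
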
478 struct rvt_cq *cq = ibcq_to_rvtcq(ibcq); in rvt_poll_cq() local
485 if (cq->ip) in rvt_poll_cq()
488 spin_lock_irqsave(&cq->lock, flags); in rvt_poll_cq()
490 wc = cq->kqueue; in rvt_poll_cq()
492 if (tail > (u32)cq->ibcq.cqe) in rvt_poll_cq()
493 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
498 trace_rvt_cq_poll(cq, &wc->kqueue[tail], npolled); in rvt_poll_cq()
500 if (tail >= cq->ibcq.cqe) in rvt_poll_cq()
507 spin_unlock_irqrestore(&cq->lock, flags); in rvt_poll_cq()
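
rvt_poll_cq() is the consumer: it only services kernel CQs (user CQs are polled through the mmap'ed ring), clamps a possibly stale tail, and copies out up to num_entries completions while advancing tail with wrap-around. A matching consumer sketch, self-contained and using its own made-up names (struct kcq, kcq_poll):

#include <stdio.h>

/* Self-contained consumer-side stand-in (kernel-owned ring only). */
struct kcq {
	unsigned int cqe;	/* usable entries; cqe + 1 slots */
	unsigned int head, tail;
	int *wc;
};

static int kcq_poll(struct kcq *cq, int num_entries, int *out)
{
	unsigned int tail = cq->tail;
	int npolled;

	/* Guard against a stale tail left over from a shrink. */
	if (tail > cq->cqe)
		tail = cq->cqe;

	for (npolled = 0; npolled < num_entries; npolled++) {
		if (tail == cq->head)
			break;			/* ring is empty */
		out[npolled] = cq->wc[tail];
		if (tail >= cq->cqe)
			tail = 0;
		else
			tail++;
	}

	cq->tail = tail;
	return npolled;
}

int main(void)
{
	int slots[5] = { 10, 11, 12, 13, 14 };
	int out[4];
	struct kcq cq = { .cqe = 4, .head = 2, .tail = 0, .wc = slots };
	int n = kcq_poll(&cq, 4, out);

	printf("polled %d entries, first = %d\n", n, out[0]);	/* 2, 10 */
	return 0;
}
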