Lines matching refs:cq

42 struct ib_cq *cq = dim->priv; in ib_cq_rdma_dim_work() local
49 trace_cq_modify(cq, comps, usec); in ib_cq_rdma_dim_work()
50 cq->device->ops.modify_cq(cq, comps, usec); in ib_cq_rdma_dim_work()
53 static void rdma_dim_init(struct ib_cq *cq) in rdma_dim_init() argument
57 if (!cq->device->ops.modify_cq || !cq->device->use_cq_dim || in rdma_dim_init()
58 cq->poll_ctx == IB_POLL_DIRECT) in rdma_dim_init()
68 dim->priv = cq; in rdma_dim_init()
69 cq->dim = dim; in rdma_dim_init()
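rdma_dim_init() only arms dynamic interrupt moderation (DIM) when the provider implements modify_cq, sets use_cq_dim on its ib_device, and the CQ is not IB_POLL_DIRECT. A minimal sketch of how a provider might opt in; the mydrv_* names are hypothetical, only ib_device_ops.modify_cq, ib_set_device_ops() and use_cq_dim come from the core API:

#include <rdma/ib_verbs.h>

/* Hypothetical provider opt-in to CQ DIM; mydrv_* names are illustrative. */
static int mydrv_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	/* program the HW moderation (count/period) settings for this CQ */
	return 0;
}

static const struct ib_device_ops mydrv_dev_ops = {
	.modify_cq = mydrv_modify_cq,
	/* ... remaining mandatory ops ... */
};

static void mydrv_setup_dim(struct ib_device *ibdev)
{
	ib_set_device_ops(ibdev, &mydrv_dev_ops);
	ibdev->use_cq_dim = true;	/* let the core drive moderation via DIM */
}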
74 static void rdma_dim_destroy(struct ib_cq *cq) in rdma_dim_destroy() argument
76 if (!cq->dim) in rdma_dim_destroy()
79 cancel_work_sync(&cq->dim->work); in rdma_dim_destroy()
80 kfree(cq->dim); in rdma_dim_destroy()
83 static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc) in __poll_cq() argument
87 rc = ib_poll_cq(cq, num_entries, wc); in __poll_cq()
88 trace_cq_poll(cq, num_entries, rc); in __poll_cq()
92 static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs, in __ib_process_cq() argument
97 trace_cq_process(cq); in __ib_process_cq()
104 while ((n = __poll_cq(cq, min_t(u32, batch, in __ib_process_cq()
110 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
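__ib_process_cq() hands every completion to wc->wr_cqe->done(), so consumers embed a struct ib_cqe in their per-request state and recover it with container_of(). A sketch of that consumer-side pattern; struct my_request and my_send_done are hypothetical names:

#include <rdma/ib_verbs.h>

/* Hypothetical per-request context; only the embedded ib_cqe is required. */
struct my_request {
	struct ib_cqe cqe;		/* send_wr.wr_cqe points here when posting */
	/* ... ULP-specific state ... */
};

static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_request *req =
		container_of(wc->wr_cqe, struct my_request, cqe);

	if (unlikely(wc->status != IB_WC_SUCCESS))
		pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
	/* ... complete req ... */
}

/* At post time: req->cqe.done = my_send_done; send_wr.wr_cqe = &req->cqe; */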
138 int ib_process_cq_direct(struct ib_cq *cq, int budget) in ib_process_cq_direct() argument
142 return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT); in ib_process_cq_direct()
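ib_process_cq_direct() serves IB_POLL_DIRECT CQs, where the caller drains completions from its own context rather than via softirq or workqueue polling. A minimal usage sketch; the budget of 16 and the helper name are illustrative:

/* Caller-driven polling of an IB_POLL_DIRECT CQ (sketch). */
static void my_drain_cq(struct ib_cq *cq)
{
	/* process completions in batches of 16 until the CQ runs dry */
	while (ib_process_cq_direct(cq, 16) > 0)
		;
}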
146 static void ib_cq_completion_direct(struct ib_cq *cq, void *private) in ib_cq_completion_direct() argument
148 WARN_ONCE(1, "got unsolicited completion for CQ 0x%p\n", cq); in ib_cq_completion_direct()
153 struct ib_cq *cq = container_of(iop, struct ib_cq, iop); in ib_poll_handler() local
154 struct dim *dim = cq->dim; in ib_poll_handler()
157 completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH); in ib_poll_handler()
159 irq_poll_complete(&cq->iop); in ib_poll_handler()
160 if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) { in ib_poll_handler()
161 trace_cq_reschedule(cq); in ib_poll_handler()
162 irq_poll_sched(&cq->iop); in ib_poll_handler()
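The re-arm in ib_poll_handler() relies on ib_req_notify_cq() returning a positive value when completions may have been missed (IB_POLL_FLAGS includes IB_CQ_REPORT_MISSED_EVENTS), in which case polling is rescheduled instead of completed. The same poll / re-arm / re-check pattern applies to hand-rolled completion handlers; a sketch with hypothetical names:

/* Sketch of the poll / re-arm / re-check pattern that avoids losing
 * completions arriving between the final poll and the re-arm.
 * Assumes every WR was posted with wr_cqe set (the ib_cqe-style API).
 */
static void my_cq_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

again:
	while (ib_poll_cq(cq, 1, &wc) > 0)
		wc.wr_cqe->done(cq, &wc);

	/* > 0 means completions may have slipped in while the CQ was un-armed */
	if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				 IB_CQ_REPORT_MISSED_EVENTS) > 0)
		goto again;
}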
172 static void ib_cq_completion_softirq(struct ib_cq *cq, void *private) in ib_cq_completion_softirq() argument
174 trace_cq_schedule(cq); in ib_cq_completion_softirq()
175 irq_poll_sched(&cq->iop); in ib_cq_completion_softirq()
180 struct ib_cq *cq = container_of(work, struct ib_cq, work); in ib_cq_poll_work() local
183 completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc, in ib_cq_poll_work()
186 ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0) in ib_cq_poll_work()
187 queue_work(cq->comp_wq, &cq->work); in ib_cq_poll_work()
188 else if (cq->dim) in ib_cq_poll_work()
189 rdma_dim(cq->dim, completed); in ib_cq_poll_work()
192 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private) in ib_cq_completion_workqueue() argument
194 trace_cq_schedule(cq); in ib_cq_completion_workqueue()
195 queue_work(cq->comp_wq, &cq->work); in ib_cq_completion_workqueue()
220 struct ib_cq *cq; in __ib_alloc_cq() local
223 cq = rdma_zalloc_drv_obj(dev, ib_cq); in __ib_alloc_cq()
224 if (!cq) in __ib_alloc_cq()
227 cq->device = dev; in __ib_alloc_cq()
228 cq->cq_context = private; in __ib_alloc_cq()
229 cq->poll_ctx = poll_ctx; in __ib_alloc_cq()
230 atomic_set(&cq->usecnt, 0); in __ib_alloc_cq()
231 cq->comp_vector = comp_vector; in __ib_alloc_cq()
233 cq->wc = kmalloc_array(IB_POLL_BATCH, sizeof(*cq->wc), GFP_KERNEL); in __ib_alloc_cq()
234 if (!cq->wc) in __ib_alloc_cq()
237 rdma_restrack_new(&cq->res, RDMA_RESTRACK_CQ); in __ib_alloc_cq()
238 rdma_restrack_set_name(&cq->res, caller); in __ib_alloc_cq()
240 ret = dev->ops.create_cq(cq, &cq_attr, NULL); in __ib_alloc_cq()
244 rdma_dim_init(cq); in __ib_alloc_cq()
246 switch (cq->poll_ctx) { in __ib_alloc_cq()
248 cq->comp_handler = ib_cq_completion_direct; in __ib_alloc_cq()
251 cq->comp_handler = ib_cq_completion_softirq; in __ib_alloc_cq()
253 irq_poll_init(&cq->iop, IB_POLL_BUDGET_IRQ, ib_poll_handler); in __ib_alloc_cq()
254 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in __ib_alloc_cq()
258 cq->comp_handler = ib_cq_completion_workqueue; in __ib_alloc_cq()
259 INIT_WORK(&cq->work, ib_cq_poll_work); in __ib_alloc_cq()
260 ib_req_notify_cq(cq, IB_CQ_NEXT_COMP); in __ib_alloc_cq()
261 cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? in __ib_alloc_cq()
269 rdma_restrack_add(&cq->res); in __ib_alloc_cq()
270 trace_cq_alloc(cq, nr_cqe, comp_vector, poll_ctx); in __ib_alloc_cq()
271 return cq; in __ib_alloc_cq()
274 rdma_dim_destroy(cq); in __ib_alloc_cq()
275 cq->device->ops.destroy_cq(cq, NULL); in __ib_alloc_cq()
277 rdma_restrack_put(&cq->res); in __ib_alloc_cq()
278 kfree(cq->wc); in __ib_alloc_cq()
280 kfree(cq); in __ib_alloc_cq()
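__ib_alloc_cq() is normally reached through the ib_alloc_cq() wrapper, which wires up the completion handler matching the requested poll context. A minimal allocate/use/free sketch for a consumer; the depth of 128, vector 0 and the function name are placeholders:

/* Allocate a workqueue-polled CQ and release it again (sketch). */
static int my_setup_cq(struct ib_device *dev)
{
	struct ib_cq *cq;

	cq = ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... attach the CQ to QPs, post work requests, run I/O ... */

	ib_free_cq(cq);		/* CQ must be idle: usecnt/cqe_used are WARNed on */
	return 0;
}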
318 void ib_free_cq(struct ib_cq *cq) in ib_free_cq() argument
322 if (WARN_ON_ONCE(atomic_read(&cq->usecnt))) in ib_free_cq()
324 if (WARN_ON_ONCE(cq->cqe_used)) in ib_free_cq()
327 switch (cq->poll_ctx) { in ib_free_cq()
331 irq_poll_disable(&cq->iop); in ib_free_cq()
335 cancel_work_sync(&cq->work); in ib_free_cq()
341 rdma_dim_destroy(cq); in ib_free_cq()
342 trace_cq_free(cq); in ib_free_cq()
343 ret = cq->device->ops.destroy_cq(cq, NULL); in ib_free_cq()
345 rdma_restrack_del(&cq->res); in ib_free_cq()
346 kfree(cq->wc); in ib_free_cq()
347 kfree(cq); in ib_free_cq()
353 struct ib_cq *cq, *n; in ib_cq_pool_cleanup() local
357 list_for_each_entry_safe(cq, n, &dev->cq_pools[i], in ib_cq_pool_cleanup()
359 WARN_ON(cq->cqe_used); in ib_cq_pool_cleanup()
360 list_del(&cq->pool_entry); in ib_cq_pool_cleanup()
361 cq->shared = false; in ib_cq_pool_cleanup()
362 ib_free_cq(cq); in ib_cq_pool_cleanup()
372 struct ib_cq *cq, *n; in ib_alloc_cqs() local
389 cq = ib_alloc_cq(dev, NULL, nr_cqes, i, poll_ctx); in ib_alloc_cqs()
390 if (IS_ERR(cq)) { in ib_alloc_cqs()
391 ret = PTR_ERR(cq); in ib_alloc_cqs()
394 cq->shared = true; in ib_alloc_cqs()
395 list_add_tail(&cq->pool_entry, &tmp_list); in ib_alloc_cqs()
405 list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) { in ib_alloc_cqs()
406 cq->shared = false; in ib_alloc_cqs()
407 ib_free_cq(cq); in ib_alloc_cqs()
434 struct ib_cq *cq, *found = NULL; in ib_cq_pool_get() local
458 list_for_each_entry(cq, &dev->cq_pools[poll_ctx], in ib_cq_pool_get()
464 if (vector != cq->comp_vector) in ib_cq_pool_get()
466 if (cq->cqe_used + nr_cqe > cq->cqe) in ib_cq_pool_get()
468 found = cq; in ib_cq_pool_get()
498 void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe) in ib_cq_pool_put() argument
500 if (WARN_ON_ONCE(nr_cqe > cq->cqe_used)) in ib_cq_pool_put()
503 spin_lock_irq(&cq->device->cq_pools_lock); in ib_cq_pool_put()
504 cq->cqe_used -= nr_cqe; in ib_cq_pool_put()
505 spin_unlock_irq(&cq->device->cq_pools_lock); in ib_cq_pool_put()
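ib_cq_pool_get() and ib_cq_pool_put() let multiple users share pre-allocated CQs: the getter reserves nr_cqe entries on a pool CQ matching the poll context and completion-vector hint, and the put returns exactly that reservation. A usage sketch; the depth of 64 and the function name are placeholders:

/* Reserve room on a shared CQ from the device pool, then give it back. */
static int my_use_pooled_cq(struct ib_device *dev)
{
	struct ib_cq *cq;

	cq = ib_cq_pool_get(dev, 64, -1 /* let the core pick a vector */,
			    IB_POLL_UNBOUND_WORKQUEUE);
	if (IS_ERR(cq))
		return PTR_ERR(cq);

	/* ... use the CQ for up to 64 outstanding completions ... */

	ib_cq_pool_put(cq, 64);	/* must match the nr_cqe passed to get */
	return 0;
}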