
Searched refs:cq (Results 1 – 25 of 321) sorted by relevance


/drivers/net/ethernet/mellanox/mlx4/
en_cq.c
55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); in mlx4_en_create_cq()
56 if (!cq) { in mlx4_en_create_cq()
62 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq()
79 *pcq = cq; in mlx4_en_create_cq()
84 kfree(cq); in mlx4_en_create_cq()
98 cq->mcq.set_ci_db = cq->wqres.db.db; in mlx4_en_activate_cq()
99 cq->mcq.arm_db = cq->wqres.db.db + 1; in mlx4_en_activate_cq()
102 memset(cq->buf, 0, cq->buf_size); in mlx4_en_activate_cq()
133 cq->size = priv->rx_ring[cq->ring]->actual_size; in mlx4_en_activate_cq()
141 &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, in mlx4_en_activate_cq()
[all …]
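The two doorbell assignments above (source lines 98 and 99) show the usual ConnectX doorbell-record layout: one small DMA-coherent allocation holds two consecutive 32-bit words, the "set consumer index" word and the "arm" word. A minimal sketch of that layout, with illustrative names (the real driver types live in the mlx4 headers):

    #include <linux/types.h>

    /* Simplified mlx4-style CQ doorbell record: two consecutive
     * big-endian words in one DMA-coherent buffer. */
    struct cq_db_record {
            __be32 set_ci; /* consumer index hardware may reclaim up to */
            __be32 arm;    /* arm command/sequence for the next event */
    };

    /* Mirrors: cq->mcq.set_ci_db = db; cq->mcq.arm_db = db + 1; */
    static void cq_bind_doorbells(struct cq_db_record *db,
                                  __be32 **set_ci_db, __be32 **arm_db)
    {
            *set_ci_db = &db->set_ci;
            *arm_db = &db->arm;
    }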
cq.c
106 struct mlx4_cq *cq; in mlx4_cq_completion() local
113 if (!cq) { in mlx4_cq_completion()
121 ++cq->arm_sn; in mlx4_cq_completion()
123 cq->comp(cq); in mlx4_cq_completion()
129 struct mlx4_cq *cq; in mlx4_cq_event() local
135 if (!cq) { in mlx4_cq_event()
143 cq->event(cq, event_type); in mlx4_cq_event()
360 err = mlx4_cq_alloc_icm(dev, &cq->cqn, cq->usage); in mlx4_cq_alloc()
365 err = radix_tree_insert(&cq_table->tree, cq->cqn, cq); in mlx4_cq_alloc()
410 cq->cons_index = 0; in mlx4_cq_alloc()
[all …]
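The two handlers above resolve an interrupt's CQ number to a driver object before dispatching, and source line 365 shows the matching radix_tree_insert() at CQ creation. A minimal sketch of the lookup-and-dispatch pattern, with simplified types (the real struct mlx4_cq carries far more state):

    #include <linux/radix-tree.h>

    struct my_cq {
            u32 cqn;                        /* hardware CQ number */
            u32 arm_sn;                     /* arm sequence number */
            void (*comp)(struct my_cq *cq); /* completion callback */
    };

    static RADIX_TREE(cq_tree, GFP_ATOMIC); /* cqn -> struct my_cq */

    static void cq_completion(u32 cqn)
    {
            struct my_cq *cq = radix_tree_lookup(&cq_tree, cqn);

            if (!cq)
                    return; /* completion raced with CQ teardown */

            ++cq->arm_sn;   /* distinguishes re-arms across events */
            cq->comp(cq);
    }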
/drivers/infiniband/core/
cq.c
50 cq->device->ops.modify_cq(cq, comps, usec); in ib_cq_rdma_dim_work()
187 queue_work(cq->comp_wq, &cq->work); in ib_cq_poll_work()
195 queue_work(cq->comp_wq, &cq->work); in ib_cq_completion_workqueue()
224 if (!cq) in __ib_alloc_cq()
261 cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? in __ib_alloc_cq()
271 return cq; in __ib_alloc_cq()
275 cq->device->ops.destroy_cq(cq, NULL); in __ib_alloc_cq()
328 ret = cq->device->ops.pre_destroy_cq(cq); in ib_free_cq()
349 cq->device->ops.post_destroy_cq(cq); in ib_free_cq()
351 ret = cq->device->ops.destroy_cq(cq, NULL); in ib_free_cq()
[all …]
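For the IB_POLL_WORKQUEUE context visible above, the interrupt path only queues work; actual CQE processing happens later in process context. A hedged sketch of that deferral, with simplified types (the real struct ib_cq and its poll-budget handling are richer):

    #include <linux/workqueue.h>

    struct simple_cq {
            struct workqueue_struct *comp_wq;
            struct work_struct work;
    };

    static void cq_poll_work(struct work_struct *work)
    {
            struct simple_cq *cq = container_of(work, struct simple_cq, work);

            /* drain CQEs here (e.g. via ib_poll_cq()); if the budget is
             * exhausted, requeue: queue_work(cq->comp_wq, &cq->work); */
            (void)cq;
    }

    static void cq_init(struct simple_cq *cq, struct workqueue_struct *wq)
    {
            cq->comp_wq = wq;
            INIT_WORK(&cq->work, cq_poll_work);
    }

    /* Completion interrupt handler: defer all real work. */
    static void cq_completion_irq(struct simple_cq *cq)
    {
            queue_work(cq->comp_wq, &cq->work);
    }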
/drivers/net/ethernet/intel/idpf/
idpf_controlq.c
51 idpf_mbx_wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); in idpf_ctlq_init_regs()
130 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in idpf_ctlq_add()
131 if (!cq) in idpf_ctlq_add()
141 cq->next_to_post = cq->ring_size - 1; in idpf_ctlq_add()
187 kfree(cq); in idpf_ctlq_add()
202 kfree(cq); in idpf_ctlq_remove()
286 desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); in idpf_ctlq_send()
319 cq->bi.tx_msg[cq->next_to_use] = msg; in idpf_ctlq_send()
322 if (cq->next_to_use == cq->ring_size) in idpf_ctlq_send()
331 idpf_mbx_wr32(hw, cq->reg.tail, cq->next_to_use); in idpf_ctlq_send()
[all …]
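The send path above is a standard descriptor-ring post: write the descriptor at next_to_use, remember the message for later completion handling, wrap the index, then publish the new tail to hardware. A condensed sketch of that sequence with illustrative names (the register write is abstracted; idpf uses idpf_mbx_wr32()):

    #include <linux/types.h>

    struct ctlq {
            u16 next_to_use; /* producer index */
            u16 ring_size;
            void **tx_msg;   /* per-slot message back-pointers */
    };

    static void ctlq_post(struct ctlq *cq, void *msg,
                          void (*write_tail)(u16 tail))
    {
            /* mirrors: cq->bi.tx_msg[cq->next_to_use] = msg; */
            cq->tx_msg[cq->next_to_use] = msg;

            if (++cq->next_to_use == cq->ring_size)
                    cq->next_to_use = 0; /* ring wrap */

            write_tail(cq->next_to_use); /* doorbell: hardware may consume */
    }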
idpf_controlq_setup.c
16 cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size); in idpf_ctlq_alloc_desc_ring()
17 if (!cq->desc_ring.va) in idpf_ctlq_alloc_desc_ring()
32 struct idpf_ctlq_info *cq) in idpf_ctlq_alloc_bufs() argument
43 cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *), in idpf_ctlq_alloc_bufs()
45 if (!cq->bi.rx_buff) in idpf_ctlq_alloc_bufs()
55 if (!cq->bi.rx_buff[i]) in idpf_ctlq_alloc_bufs()
58 bi = cq->bi.rx_buff[i]; in idpf_ctlq_alloc_bufs()
63 kfree(cq->bi.rx_buff[i]); in idpf_ctlq_alloc_bufs()
75 kfree(cq->bi.rx_buff[i]); in idpf_ctlq_alloc_bufs()
77 kfree(cq->bi.rx_buff); in idpf_ctlq_alloc_bufs()
[all …]
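idpf_ctlq_alloc_bufs() above follows the standard partial-failure unwind: allocate the pointer array, then each buffer, and on failure free everything already allocated in reverse. A self-contained sketch of the same shape (sizes and names are hypothetical):

    #include <linux/slab.h>

    static void **alloc_buf_array(size_t n, size_t buf_size)
    {
            void **bufs = kcalloc(n, sizeof(*bufs), GFP_KERNEL);
            size_t i;

            if (!bufs)
                    return NULL;

            for (i = 0; i < n; i++) {
                    bufs[i] = kzalloc(buf_size, GFP_KERNEL);
                    if (!bufs[i])
                            goto unwind;
            }
            return bufs;

    unwind:
            while (i--) /* free indices i-1 .. 0 */
                    kfree(bufs[i]);
            kfree(bufs);
            return NULL;
    }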
/drivers/net/ethernet/intel/ice/
ice_controlq.c
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
80 (cq->num_sq_entries | cq->sq.len_ena_mask); in ice_check_sq_alive()
201 cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); in ice_alloc_rq_bufs()
359 if (!cq->num_sq_entries || !cq->sq_buf_size) { in ice_init_sq()
383 cq->sq.count = cq->num_sq_entries; in ice_init_sq()
419 if (!cq->num_rq_entries || !cq->rq_buf_size) { in ice_init_rq()
443 cq->rq.count = cq->num_rq_entries; in ice_init_rq()
631 !cq->rq_buf_size || !cq->sq_buf_size) { in ice_init_ctrlq()
1084 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd()
[all …]
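The ice_check_sq_alive() fragment above (source lines 77 to 80) tests liveness purely through the queue-length register: the queue is alive if the length field still reads back num_sq_entries with the enable bit set. A sketch of just that predicate, register access factored out (register names and masks are device-specific):

    #include <linux/types.h>

    static bool sq_alive(u32 len_reg, u32 len_mask, u32 len_ena_mask,
                         u16 num_sq_entries)
    {
            /* mirrors: (rd32(hw, cq->sq.len) & (len_mask | len_ena_mask))
             *          == (num_sq_entries | len_ena_mask) */
            return (len_reg & (len_mask | len_ena_mask)) ==
                   ((u32)num_sq_entries | len_ena_mask);
    }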
/drivers/infiniband/hw/mthca/
mthca_cq.c
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
254 event.element.cq = &cq->ibcq; in mthca_cq_event()
316 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); in mthca_cq_clean()
337 cq->cons_index &= cq->ibcq.cqe; in mthca_cq_resize_copy_cqes()
339 cq->cons_index -= cq->ibcq.cqe + 1; in mthca_cq_resize_copy_cqes()
342 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) in mthca_cq_resize_copy_cqes()
384 cq->cqn, cq->cons_index); in handle_error_cqe()
698 cq->cons_index &= cq->ibcq.cqe; in mthca_poll_cq()
799 cq->cqn, &cq->arm_db); in mthca_init_cq()
[all …]
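In mthca (and the other ConnectX-family drivers in this list) the CQ size is a power of two and ibcq.cqe holds size - 1, so it doubles as an index mask: cons_index runs free, and (cons_index & ibcq.cqe) selects the slot, which is what both next_cqe_sw() and the resize copy above rely on. A minimal sketch of the convention:

    #include <linux/types.h>

    struct ring_cq {
            u32 cons_index; /* free-running consumer counter */
            u32 cqe_mask;   /* size - 1 (ibcq.cqe); size is a power of two */
    };

    /* mirrors: get_cqe(cq, cq->cons_index & cq->ibcq.cqe) */
    static u32 cq_slot(const struct ring_cq *cq)
    {
            return cq->cons_index & cq->cqe_mask;
    }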
/drivers/infiniband/hw/mlx4/
cq.c
90 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
234 cq->mcq.set_ci_db = cq->db.db; in mlx4_ib_create_cq()
235 cq->mcq.arm_db = cq->db.db + 1; in mlx4_ib_create_cq()
367 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
434 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
435 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
437 cq->umem = cq->resize_umem; in mlx4_ib_resize_cq()
451 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
452 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
703 cq->buf = cq->resize_buf->buf; in mlx4_ib_poll_one()
[all …]
/drivers/infiniband/sw/rdmavt/
cq.c
39 if (cq->ip) { in rvt_cq_enter()
40 u_wc = cq->queue; in rvt_cq_enter()
73 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
75 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
141 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
232 if (cq->ip) { in rvt_create_cq()
243 cq->rdi = rdi; in rvt_create_cq()
287 if (cq->ip) in rvt_destroy_cq()
324 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
434 if (cq->ip) { in rvt_resize_cq()
[all …]
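rvt_req_notify_cq() above (source line 324) decides whether completions are pending with a plain head/tail comparison on the kernel-mapped queue; the cq->ip branches pick between that kernel queue and a user-mmapped one. A sketch of the emptiness test under the usual single-producer/single-consumer convention (the real code wraps these fields in UAPI atomics):

    #include <linux/types.h>

    struct k_cq_queue {
            u32 head; /* next slot the producer writes */
            u32 tail; /* next slot the consumer reads */
    };

    static bool cq_has_entries(const struct k_cq_queue *q)
    {
            return q->head != q->tail; /* empty iff head == tail */
    }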
/drivers/infiniband/sw/rxe/
rxe_cq.c
27 if (cq) { in rxe_cq_chk_attr()
52 if (!cq->queue) { in rxe_cq_from_init()
58 cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); in rxe_cq_from_init()
62 cq->is_user = uresp; in rxe_cq_from_init()
65 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
100 ev.element.cq = &cq->ibcq; in rxe_cq_post()
102 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
115 cq->notify = 0; in rxe_cq_post()
116 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_cq_post()
126 struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); in rxe_cq_cleanup() local
[all …]
/drivers/infiniband/hw/mlx5/
cq.c
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
99 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
483 cq->buf = *cq->resize_buf; in mlx5_poll_one()
899 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
900 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
949 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
1099 if (!cq) in __mlx5_ib_cq_clean()
1116 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1144 if (!cq) in mlx5_ib_cq_clean()
1390 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
[all …]
/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
136 if (!cq->is_kernel) { in pvrdma_create_cq()
166 ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel); in pvrdma_create_cq()
174 if (cq->is_kernel) in pvrdma_create_cq()
175 cq->ring_state = cq->pdir.pages[0]; in pvrdma_create_cq()
177 pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); in pvrdma_create_cq()
200 dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; in pvrdma_create_cq()
274 &cq->pdir, in get_cqe()
275 cq->offset + in get_cqe()
284 if (!cq->is_kernel) in _pvrdma_flush_cqe()
316 cq->ibcq.cqe); in _pvrdma_flush_cqe()
[all …]
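Where mlx4 used a radix tree, pvrdma resolves CQ handles with a flat table indexed by handle % max_cq (source line 200 above). A hedged sketch of that mapping; the table, its sizing, and the alias check are assumptions for illustration:

    #include <linux/types.h>

    struct pv_cq {
            u32 cq_handle;
    };

    /* assumed allocated with max_cq entries at device init */
    static struct pv_cq **cq_tbl;
    static u32 max_cq;

    static struct pv_cq *cq_from_handle(u32 handle)
    {
            struct pv_cq *cq = cq_tbl[handle % max_cq];

            /* modulo indexing can alias, so verify the stored handle */
            return (cq && cq->cq_handle == handle) ? cq : NULL;
    }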
/drivers/net/ethernet/cisco/enic/
vnic_cq.c
16 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
18 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
20 cq->ctrl = NULL; in vnic_cq_free()
26 cq->index = index; in vnic_cq_alloc()
27 cq->vdev = vdev; in vnic_cq_alloc()
30 if (!cq->ctrl) { in vnic_cq_alloc()
48 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
65 cq->to_clean = 0; in vnic_cq_clean()
66 cq->last_color = 0; in vnic_cq_clean()
68 iowrite32(0, &cq->ctrl->cq_head); in vnic_cq_clean()
[all …]
/drivers/scsi/snic/
vnic_cq.c
10 void svnic_cq_free(struct vnic_cq *cq) in svnic_cq_free() argument
12 svnic_dev_free_desc_ring(cq->vdev, &cq->ring); in svnic_cq_free()
14 cq->ctrl = NULL; in svnic_cq_free()
20 cq->index = index; in svnic_cq_alloc()
21 cq->vdev = vdev; in svnic_cq_alloc()
24 if (!cq->ctrl) { in svnic_cq_alloc()
43 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in svnic_cq_init()
58 cq->to_clean = 0; in svnic_cq_clean()
59 cq->last_color = 0; in svnic_cq_clean()
61 iowrite32(0, &cq->ctrl->cq_head); in svnic_cq_clean()
[all …]
vnic_cq_fw.h
10 vnic_cq_fw_service(struct vnic_cq *cq, in vnic_cq_fw_service() argument
21 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
22 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service()
25 while (color != cq->last_color) { in vnic_cq_fw_service()
27 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_fw_service()
30 cq->to_clean++; in vnic_cq_fw_service()
31 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_fw_service()
32 cq->to_clean = 0; in vnic_cq_fw_service()
33 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_fw_service()
36 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
[all …]
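This loop (repeated nearly verbatim in the enic and fnic hits in this list) is the classic color-bit consumer: hardware flips a per-descriptor color bit on every pass over the ring, so a descriptor is new exactly while its color differs from last_color, and the driver flips last_color each time to_clean wraps. A self-contained sketch, assuming for illustration that the color bit is the low bit of the descriptor's last byte:

    #include <linux/types.h>

    struct color_cq {
            u8 *descs;     /* descriptor ring base */
            u32 desc_size;
            u32 desc_count;
            u32 to_clean;  /* next index to examine */
            u8 last_color; /* color of the previous pass: 0 or 1 */
    };

    /* Assumed encoding; real devices define their own color-bit position. */
    static u8 desc_color(const struct color_cq *cq, const u8 *desc)
    {
            return desc[cq->desc_size - 1] & 1;
    }

    static unsigned int cq_service(struct color_cq *cq,
                                   int (*handle)(const u8 *desc))
    {
            const u8 *desc = cq->descs + (size_t)cq->desc_size * cq->to_clean;
            unsigned int done = 0;

            while (desc_color(cq, desc) != cq->last_color) {
                    if (handle(desc))
                            break;

                    if (++cq->to_clean == cq->desc_count) {
                            cq->to_clean = 0;
                            cq->last_color ^= 1; /* colors flip each wrap */
                    }
                    done++;
                    desc = cq->descs + (size_t)cq->desc_size * cq->to_clean;
            }
            return done;
    }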
vnic_cq.h
57 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
58 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
62 while (color != cq->last_color) { in svnic_cq_service()
64 if ((*q_service)(cq->vdev, cq_desc, type, in svnic_cq_service()
68 cq->to_clean++; in svnic_cq_service()
69 if (cq->to_clean == cq->ring.desc_count) { in svnic_cq_service()
70 cq->to_clean = 0; in svnic_cq_service()
71 cq->last_color = cq->last_color ? 0 : 1; in svnic_cq_service()
75 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
87 void svnic_cq_free(struct vnic_cq *cq);
[all …]
/drivers/scsi/fnic/
vnic_cq.c
12 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
14 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
16 cq->ctrl = NULL; in vnic_cq_free()
24 cq->index = index; in vnic_cq_alloc()
25 cq->vdev = vdev; in vnic_cq_alloc()
28 if (!cq->ctrl) { in vnic_cq_alloc()
50 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
65 cq->to_clean = 0; in vnic_cq_clean()
66 cq->last_color = 0; in vnic_cq_clean()
68 iowrite32(0, &cq->ctrl->cq_head); in vnic_cq_clean()
[all …]
vnic_cq_copy.h
12 struct vnic_cq *cq, in vnic_cq_copy_service() argument
23 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
24 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
27 while (color != cq->last_color) { in vnic_cq_copy_service()
29 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_copy_service()
32 cq->to_clean++; in vnic_cq_copy_service()
33 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service()
34 cq->to_clean = 0; in vnic_cq_copy_service()
35 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_copy_service()
38 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
[all …]
/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
83 mlx5_cq_hold(cq); in mlx5_add_cq_to_tasklet()
120 cq->cons_index = 0; in mlx5_create_cq()
121 cq->arm_sn = 0; in mlx5_create_cq()
122 cq->eq = eq; in mlx5_create_cq()
126 if (!cq->comp) in mlx5_create_cq()
142 cq->pid = current->pid; in mlx5_create_cq()
146 cq->cqn); in mlx5_create_cq()
148 cq->uar = dev->priv.uar; in mlx5_create_cq()
149 cq->irqn = eq->core.irqn; in mlx5_create_cq()
182 mlx5_eq_del_cq(&cq->eq->core, cq); in mlx5_core_destroy_cq()
[all …]
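mlx5_add_cq_to_tasklet() above takes a reference (mlx5_cq_hold(), source line 83) before queuing the CQ for tasklet processing, so the CQ cannot be freed while a completion is still in flight; the tasklet drops the reference after running the handler. A minimal sketch of that hold/put discipline with a bare refcount_t (the free step is left as a comment):

    #include <linux/refcount.h>

    struct ref_cq {
            refcount_t refcount;
            void (*comp)(struct ref_cq *cq);
    };

    static void cq_put(struct ref_cq *cq)
    {
            if (refcount_dec_and_test(&cq->refcount)) {
                    /* last reference dropped: free the CQ here */
            }
    }

    /* Interrupt side: pin the CQ, then list it for the tasklet. */
    static void cq_schedule(struct ref_cq *cq)
    {
            refcount_inc(&cq->refcount); /* mirrors mlx5_cq_hold() */
            /* ...add to the tasklet context's list, schedule tasklet... */
    }

    /* Tasklet side: run the handler, then unpin. */
    static void cq_run(struct ref_cq *cq)
    {
            cq->comp(cq);
            cq_put(cq);
    }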
/drivers/infiniband/hw/cxgb4/
cq.c
65 cq->memsize, cq->queue, in destroy_cq()
108 ((u8 *)cq->queue + (cq->size - 1) * in create_cq()
112 ((u8 *)cq->queue + (cq->size - 1) * in create_cq()
189 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
199 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
223 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
232 cq->sw_queue[cq->sw_pidx] = cqe; in insert_sq_cqe()
243 struct t4_cq *cq = &chp->cq; in c4iw_flush_sq() local
744 cq, cq->cqid, cq->sw_cidx); in poll_cq()
748 cq, cq->cqid, cq->cidx); in poll_cq()
[all …]
t4.h
709 writel(val | INGRESSQID_V(cq->cqid), cq->gts); in write_gts()
736 if (cq->sw_in_use == cq->size) { in t4_swcq_produce()
743 if (++cq->sw_pidx == cq->size) in t4_swcq_produce()
750 if (++cq->sw_cidx == cq->size) in t4_swcq_consume()
756 cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts; in t4_hwcq_consume()
757 if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_M) { in t4_hwcq_consume()
764 if (++cq->cidx == cq->size) { in t4_hwcq_consume()
777 return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]); in t4_cq_notempty()
794 } else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) { in t4_next_hw_cqe()
798 *cqe = &cq->queue[cq->cidx]; in t4_next_hw_cqe()
[all …]
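t4_hwcq_consume() above batches consumer-index updates: rather than ringing the GTS doorbell per CQE, it accumulates cidx_inc and only writes back once a sixteenth of the ring (or the field's maximum, CIDXINC_M) has been consumed. A sketch of that batching, with the doorbell write abstracted and the field maximum assumed for illustration:

    #include <linux/types.h>

    #define CIDXINC_MAX 0x7ff /* assumed field width; see CIDXINC_M in t4 headers */

    struct hw_cq {
            u32 cidx;     /* consumer index */
            u32 cidx_inc; /* CQEs consumed since the last doorbell */
            u32 size;
    };

    static void hwcq_consume(struct hw_cq *cq, void (*ring_gts)(u32 inc))
    {
            if (++cq->cidx_inc == (cq->size >> 4) ||
                cq->cidx_inc == CIDXINC_MAX) {
                    ring_gts(cq->cidx_inc); /* batched CIDX update */
                    cq->cidx_inc = 0;
            }
            if (++cq->cidx == cq->size) {
                    cq->cidx = 0; /* wrap; the real code also updates gen state */
            }
    }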
restrack.c
338 idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1; in fill_hwcqes()
341 idx = cq->cidx; in fill_hwcqes()
355 if (!cq->sw_in_use) in fill_swcqes()
358 idx = cq->sw_cidx; in fill_swcqes()
363 idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1; in fill_swcqes()
378 struct t4_cq cq; in c4iw_fill_res_cq_entry() local
393 cq = chp->cq; in c4iw_fill_res_cq_entry()
396 idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1; in c4iw_fill_res_cq_entry()
399 idx = cq.cidx; in c4iw_fill_res_cq_entry()
404 swcqes[0] = chp->cq.sw_queue[cq.sw_cidx]; in c4iw_fill_res_cq_entry()
[all …]
/drivers/infiniband/hw/mana/
cq.c
46 cq->cqe = attr->cqe; in mana_ib_create_cq()
48 &cq->queue); in mana_ib_create_cq()
84 resp.cqid = cq->queue.id; in mana_ib_create_cq()
132 if (cq->ibcq.comp_handler) in mana_ib_cq_handler()
133 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mana_ib_cq_handler()
145 if (cq->queue.kmem) in mana_ib_install_cq_cb()
146 gdma_cq = cq->queue.kmem; in mana_ib_install_cq_cb()
152 gdma_cq->cq.context = cq; in mana_ib_install_cq_cb()
155 gdma_cq->id = cq->queue.id; in mana_ib_install_cq_cb()
164 if (cq->queue.id >= gc->max_num_cqs || cq->queue.id == INVALID_QUEUE_ID) in mana_ib_remove_cq_cb()
[all …]
/drivers/infiniband/hw/erdma/
erdma_cq.c
11 __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci, in get_next_valid_cqe()
12 cq->depth, CQE_SHIFT); in get_next_valid_cqe()
16 return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL; in get_next_valid_cqe()
29 *cq->kern_cq.dbrec = db_data; in notify_cq()
30 writeq(db_data, cq->kern_cq.db); in notify_cq()
46 cq->kern_cq.notify_cnt++; in erdma_req_notify_cq()
142 cq->kern_cq.ci++; in erdma_poll_one_cqe()
236 prev_cq_ci = cq->kern_cq.ci; in erdma_remove_cqes_of_qp()
238 while (ncqe < cq->depth && (cqe = get_next_valid_cqe(cq)) != NULL) { in erdma_remove_cqes_of_qp()
239 ++cq->kern_cq.ci; in erdma_remove_cqes_of_qp()
[all …]
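erdma's get_next_valid_cqe() above uses an ownership bit rather than enic-style color fields: with depth a power of two, (ci & depth) flips on every wrap of the ring, and a CQE is valid while its owner bit differs from that wrap parity. The test condensed into a predicate, types simplified:

    #include <linux/types.h>

    /* mirrors: return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL; */
    static bool cqe_is_valid(u32 owner, u32 ci, u32 depth)
    {
            return owner ^ !!(ci & depth);
    }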
/drivers/scsi/elx/efct/
efct_hw_queues.c
46 if (!cq) { in efct_hw_init_queues()
60 if (!cq) { in efct_hw_init_queues()
164 if (!cq) in efct_hw_new_cq()
171 cq->queue = &hw->cq[cq->instance]; in efct_hw_new_cq()
183 hw->hw_cq[cq->instance] = cq; in efct_hw_new_cq()
187 cq->queue->id, cq->entry_count); in efct_hw_new_cq()
207 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in efct_hw_new_cq_set()
216 cq->queue = &hw->cq[cq->instance]; in efct_hw_new_cq_set()
252 mq->cq = cq; in efct_hw_new_mq()
284 wq->cq = cq; in efct_hw_new_wq()
[all …]

Completed in 886 milliseconds
