
Searched refs:cq (Results 1 – 25 of 355) sorted by relevance


/linux/drivers/net/ethernet/mellanox/mlx4/
en_cq.c
55 cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); in mlx4_en_create_cq()
56 if (!cq) { in mlx4_en_create_cq()
62 cq->buf_size = cq->size * mdev->dev->caps.cqe_size; in mlx4_en_create_cq()
79 *pcq = cq; in mlx4_en_create_cq()
84 kfree(cq); in mlx4_en_create_cq()
98 cq->mcq.set_ci_db = cq->wqres.db.db; in mlx4_en_activate_cq()
99 cq->mcq.arm_db = cq->wqres.db.db + 1; in mlx4_en_activate_cq()
102 memset(cq->buf, 0, cq->buf_size); in mlx4_en_activate_cq()
133 cq->size = priv->rx_ring[cq->ring]->actual_size; in mlx4_en_activate_cq()
141 &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq, in mlx4_en_activate_cq()
[all …]
/linux/drivers/infiniband/core/
cq.c
50 cq->device->ops.modify_cq(cq, comps, usec); in ib_cq_rdma_dim_work()
187 queue_work(cq->comp_wq, &cq->work); in ib_cq_poll_work()
195 queue_work(cq->comp_wq, &cq->work); in ib_cq_completion_workqueue()
224 if (!cq) in __ib_alloc_cq()
261 cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ? in __ib_alloc_cq()
271 return cq; in __ib_alloc_cq()
275 cq->device->ops.destroy_cq(cq, NULL); in __ib_alloc_cq()
280 kfree(cq); in __ib_alloc_cq()
343 ret = cq->device->ops.destroy_cq(cq, NULL); in ib_free_cq()
347 kfree(cq); in ib_free_cq()
[all …]
/linux/drivers/net/ethernet/intel/idpf/
idpf_controlq.c
39 wr32(hw, cq->reg.tail, (u32)(cq->ring_size - 1)); in idpf_ctlq_init_regs()
51 wr32(hw, cq->reg.len, (cq->ring_size | cq->reg.len_ena_mask)); in idpf_ctlq_init_regs()
131 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in idpf_ctlq_add()
132 if (!cq) in idpf_ctlq_add()
142 cq->next_to_post = cq->ring_size - 1; in idpf_ctlq_add()
287 desc = IDPF_CTLQ_DESC(cq, cq->next_to_use); in idpf_ctlq_send()
320 cq->bi.tx_msg[cq->next_to_use] = msg; in idpf_ctlq_send()
323 if (cq->next_to_use == cq->ring_size) in idpf_ctlq_send()
332 wr32(hw, cq->reg.tail, cq->next_to_use); in idpf_ctlq_send()
517 cq->next_to_post = cq->ring_size - 1; in idpf_ctlq_post_rx_buffs()
[all …]
idpf_controlq_setup.c
16 cq->desc_ring.va = idpf_alloc_dma_mem(hw, &cq->desc_ring, size); in idpf_ctlq_alloc_desc_ring()
17 if (!cq->desc_ring.va) in idpf_ctlq_alloc_desc_ring()
32 struct idpf_ctlq_info *cq) in idpf_ctlq_alloc_bufs() argument
43 cq->bi.rx_buff = kcalloc(cq->ring_size, sizeof(struct idpf_dma_mem *), in idpf_ctlq_alloc_bufs()
45 if (!cq->bi.rx_buff) in idpf_ctlq_alloc_bufs()
55 if (!cq->bi.rx_buff[i]) in idpf_ctlq_alloc_bufs()
58 bi = cq->bi.rx_buff[i]; in idpf_ctlq_alloc_bufs()
63 kfree(cq->bi.rx_buff[i]); in idpf_ctlq_alloc_bufs()
75 kfree(cq->bi.rx_buff[i]); in idpf_ctlq_alloc_bufs()
77 kfree(cq->bi.rx_buff); in idpf_ctlq_alloc_bufs()
[all …]
/linux/drivers/net/ethernet/intel/ice/
ice_controlq.c
77 if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask) in ice_check_sq_alive()
78 return (rd32(hw, cq->sq.len) & (cq->sq.len_mask | in ice_check_sq_alive()
80 (cq->num_sq_entries | cq->sq.len_ena_mask); in ice_check_sq_alive()
201 cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); in ice_alloc_rq_bufs()
359 if (!cq->num_sq_entries || !cq->sq_buf_size) { in ice_init_sq()
383 cq->sq.count = cq->num_sq_entries; in ice_init_sq()
419 if (!cq->num_rq_entries || !cq->rq_buf_size) { in ice_init_rq()
443 cq->rq.count = cq->num_rq_entries; in ice_init_rq()
631 !cq->rq_buf_size || !cq->sq_buf_size) { in ice_init_ctrlq()
1083 if (cq->sq.next_to_use == cq->sq.count) in ice_sq_send_cmd()
[all …]
/linux/drivers/infiniband/hw/mthca/
mthca_cq.c
181 return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe)); in next_cqe_sw()
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
254 event.element.cq = &cq->ibcq; in mthca_cq_event()
316 set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe)); in mthca_cq_clean()
337 cq->cons_index &= cq->ibcq.cqe; in mthca_cq_resize_copy_cqes()
339 cq->cons_index -= cq->ibcq.cqe + 1; in mthca_cq_resize_copy_cqes()
342 for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i) in mthca_cq_resize_copy_cqes()
384 cq->cqn, cq->cons_index); in handle_error_cqe()
698 cq->cons_index &= cq->ibcq.cqe; in mthca_poll_cq()
799 cq->cqn, &cq->arm_db); in mthca_init_cq()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_cq.c
27 if (cq) { in rxe_cq_chk_attr()
52 if (!cq->queue) { in rxe_cq_from_init()
58 cq->queue->buf, cq->queue->buf_size, &cq->queue->ip); in rxe_cq_from_init()
61 kfree(cq->queue); in rxe_cq_from_init()
68 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
103 ev.element.cq = &cq->ibcq; in rxe_cq_post()
105 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
118 cq->notify = 0; in rxe_cq_post()
119 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_cq_post()
129 struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); in rxe_cq_cleanup() local
[all …]
/linux/drivers/infiniband/hw/mlx4/
cq.c
90 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
230 cq->mcq.set_ci_db = cq->db.db; in mlx4_ib_create_cq()
231 cq->mcq.arm_db = cq->db.db + 1; in mlx4_ib_create_cq()
363 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
430 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
431 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
433 cq->umem = cq->resize_umem; in mlx4_ib_resize_cq()
447 cq->buf = cq->resize_buf->buf; in mlx4_ib_resize_cq()
448 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
699 cq->buf = cq->resize_buf->buf; in mlx4_ib_poll_one()
[all …]
/linux/drivers/net/ethernet/cisco/enic/
vnic_cq.c
16 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
18 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
20 cq->ctrl = NULL; in vnic_cq_free()
26 cq->index = index; in vnic_cq_alloc()
27 cq->vdev = vdev; in vnic_cq_alloc()
30 if (!cq->ctrl) { in vnic_cq_alloc()
48 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
65 cq->to_clean = 0; in vnic_cq_clean()
66 cq->last_color = 0; in vnic_cq_clean()
68 iowrite32(0, &cq->ctrl->cq_head); in vnic_cq_clean()
[all …]
vnic_cq.h
70 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
71 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
75 while (color != cq->last_color) { in vnic_cq_service()
77 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
81 cq->to_clean++; in vnic_cq_service()
82 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
83 cq->to_clean = 0; in vnic_cq_service()
84 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service()
88 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
100 void vnic_cq_free(struct vnic_cq *cq);
[all …]
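
The enic snippets above (and the near-identical snic and fnic results further down) all poll their completion queues with the same color-bit scheme: the adapter toggles a "color" bit each time it wraps the descriptor ring, and the driver keeps consuming entries while the descriptor's color differs from the color it last retired, flipping its expected color on every wrap. The standalone C sketch below reproduces that loop under hypothetical toy_* names; it illustrates the pattern shown in vnic_cq_service() and is not the kernel code itself.

#include <stdio.h>

struct toy_desc {
	unsigned char color;   /* toggled by the "hardware" on every ring lap */
	unsigned int data;
};

struct toy_cq {
	struct toy_desc *descs;
	unsigned int desc_count;
	unsigned int to_clean;     /* next entry the driver will inspect */
	unsigned char last_color;  /* color of entries already consumed */
};

/* Consume completions until an entry with the stale color is found. */
static unsigned int toy_cq_service(struct toy_cq *cq,
				   void (*handler)(const struct toy_desc *))
{
	unsigned int done = 0;
	struct toy_desc *desc = &cq->descs[cq->to_clean];

	while (desc->color != cq->last_color) {
		handler(desc);

		if (++cq->to_clean == cq->desc_count) {
			cq->to_clean = 0;
			/* Ring wrapped: the hardware now writes the opposite
			 * color, so flip the color we treat as "old". */
			cq->last_color = cq->last_color ? 0 : 1;
		}
		desc = &cq->descs[cq->to_clean];
		done++;
	}
	return done;
}

static void print_entry(const struct toy_desc *desc)
{
	printf("completion %u\n", desc->data);
}

int main(void)
{
	/* Two entries carry color 1 (new); the rest still carry color 0. */
	struct toy_desc ring[4] = { {1, 10}, {1, 11}, {0, 0}, {0, 0} };
	struct toy_cq cq = { ring, 4, 0, 0 };

	return toy_cq_service(&cq, print_entry) == 2 ? 0 : 1;
}
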
/linux/drivers/infiniband/sw/rdmavt/
cq.c
39 if (cq->ip) { in rvt_cq_enter()
40 u_wc = cq->queue; in rvt_cq_enter()
73 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
75 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
141 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
232 if (cq->ip) { in rvt_create_cq()
243 cq->rdi = rdi; in rvt_create_cq()
287 if (cq->ip) in rvt_destroy_cq()
324 if (cq->kqueue->head != cq->kqueue->tail) in rvt_req_notify_cq()
434 if (cq->ip) { in rvt_resize_cq()
[all …]
/linux/drivers/scsi/snic/
vnic_cq.c
10 void svnic_cq_free(struct vnic_cq *cq) in svnic_cq_free() argument
12 svnic_dev_free_desc_ring(cq->vdev, &cq->ring); in svnic_cq_free()
14 cq->ctrl = NULL; in svnic_cq_free()
20 cq->index = index; in svnic_cq_alloc()
21 cq->vdev = vdev; in svnic_cq_alloc()
24 if (!cq->ctrl) { in svnic_cq_alloc()
43 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in svnic_cq_init()
58 cq->to_clean = 0; in svnic_cq_clean()
59 cq->last_color = 0; in svnic_cq_clean()
61 iowrite32(0, &cq->ctrl->cq_head); in svnic_cq_clean()
[all …]
vnic_cq_fw.h
10 vnic_cq_fw_service(struct vnic_cq *cq, in vnic_cq_fw_service() argument
21 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
22 cq->ring.desc_size * cq->to_clean); in vnic_cq_fw_service()
25 while (color != cq->last_color) { in vnic_cq_fw_service()
27 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_fw_service()
30 cq->to_clean++; in vnic_cq_fw_service()
31 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_fw_service()
32 cq->to_clean = 0; in vnic_cq_fw_service()
33 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_fw_service()
36 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_fw_service()
[all …]
vnic_cq.h
57 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in svnic_cq_service()
58 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
62 while (color != cq->last_color) { in svnic_cq_service()
64 if ((*q_service)(cq->vdev, cq_desc, type, in svnic_cq_service()
68 cq->to_clean++; in svnic_cq_service()
69 if (cq->to_clean == cq->ring.desc_count) { in svnic_cq_service()
70 cq->to_clean = 0; in svnic_cq_service()
71 cq->last_color = cq->last_color ? 0 : 1; in svnic_cq_service()
75 cq->ring.desc_size * cq->to_clean); in svnic_cq_service()
87 void svnic_cq_free(struct vnic_cq *cq);
[all …]
/linux/drivers/scsi/fnic/
vnic_cq.c
12 void vnic_cq_free(struct vnic_cq *cq) in vnic_cq_free() argument
14 vnic_dev_free_desc_ring(cq->vdev, &cq->ring); in vnic_cq_free()
16 cq->ctrl = NULL; in vnic_cq_free()
24 cq->index = index; in vnic_cq_alloc()
25 cq->vdev = vdev; in vnic_cq_alloc()
28 if (!cq->ctrl) { in vnic_cq_alloc()
50 iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size); in vnic_cq_init()
65 cq->to_clean = 0; in vnic_cq_clean()
66 cq->last_color = 0; in vnic_cq_clean()
68 iowrite32(0, &cq->ctrl->cq_head); in vnic_cq_clean()
[all …]
vnic_cq_copy.h
12 struct vnic_cq *cq, in vnic_cq_copy_service() argument
23 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
24 cq->ring.desc_size * cq->to_clean); in vnic_cq_copy_service()
27 while (color != cq->last_color) { in vnic_cq_copy_service()
29 if ((*q_service)(cq->vdev, cq->index, desc)) in vnic_cq_copy_service()
32 cq->to_clean++; in vnic_cq_copy_service()
33 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_copy_service()
34 cq->to_clean = 0; in vnic_cq_copy_service()
35 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_copy_service()
38 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + in vnic_cq_copy_service()
[all …]
vnic_cq.h
69 cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + in vnic_cq_service()
70 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
74 while (color != cq->last_color) { in vnic_cq_service()
76 if ((*q_service)(cq->vdev, cq_desc, type, in vnic_cq_service()
80 cq->to_clean++; in vnic_cq_service()
81 if (cq->to_clean == cq->ring.desc_count) { in vnic_cq_service()
82 cq->to_clean = 0; in vnic_cq_service()
83 cq->last_color = cq->last_color ? 0 : 1; in vnic_cq_service()
87 cq->ring.desc_size * cq->to_clean); in vnic_cq_service()
99 void vnic_cq_free(struct vnic_cq *cq);
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
136 if (!cq->is_kernel) { in pvrdma_create_cq()
166 ret = pvrdma_page_dir_init(dev, &cq->pdir, npages, cq->is_kernel); in pvrdma_create_cq()
174 if (cq->is_kernel) in pvrdma_create_cq()
175 cq->ring_state = cq->pdir.pages[0]; in pvrdma_create_cq()
177 pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); in pvrdma_create_cq()
200 dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq; in pvrdma_create_cq()
274 &cq->pdir, in get_cqe()
275 cq->offset + in get_cqe()
284 if (!cq->is_kernel) in _pvrdma_flush_cqe()
316 cq->ibcq.cqe); in _pvrdma_flush_cqe()
[all …]
/linux/drivers/infiniband/hw/mlx5/
cq.c
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
99 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
483 cq->buf = *cq->resize_buf; in mlx5_poll_one()
899 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
900 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
949 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
1088 if (!cq) in __mlx5_ib_cq_clean()
1105 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1133 if (!cq) in mlx5_ib_cq_clean()
1379 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
[all …]
/linux/drivers/net/ethernet/mellanox/mlx5/core/
cq.c
82 mlx5_cq_hold(cq); in mlx5_add_cq_to_tasklet()
109 cq->cons_index = 0; in mlx5_create_cq()
110 cq->arm_sn = 0; in mlx5_create_cq()
111 cq->eq = eq; in mlx5_create_cq()
115 if (!cq->comp) in mlx5_create_cq()
131 cq->pid = current->pid; in mlx5_create_cq()
135 cq->cqn); in mlx5_create_cq()
137 cq->uar = dev->priv.uar; in mlx5_create_cq()
138 cq->irqn = eq->core.irqn; in mlx5_create_cq()
171 mlx5_eq_del_cq(&cq->eq->core, cq); in mlx5_core_destroy_cq()
[all …]
/linux/drivers/infiniband/hw/mana/
cq.c
44 cq->cqe = attr->cqe; in mana_ib_create_cq()
45 err = mana_ib_create_queue(mdev, ucmd.buf_addr, cq->cqe * COMP_ENTRY_SIZE, &cq->queue); in mana_ib_create_cq()
69 resp.cqid = cq->queue.id; in mana_ib_create_cq()
79 mana_ib_remove_cq_cb(mdev, cq); in mana_ib_create_cq()
96 mana_ib_remove_cq_cb(mdev, cq); in mana_ib_destroy_cq()
110 struct mana_ib_cq *cq = ctx; in mana_ib_cq_handler() local
112 if (cq->ibcq.comp_handler) in mana_ib_cq_handler()
113 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mana_ib_cq_handler()
129 gdma_cq->cq.context = cq; in mana_ib_install_cq_cb()
132 gdma_cq->id = cq->queue.id; in mana_ib_install_cq_cb()
[all …]
/linux/drivers/infiniband/hw/cxgb4/
cq.c
65 cq->memsize, cq->queue, in destroy_cq()
108 ((u8 *)cq->queue + (cq->size - 1) * in create_cq()
112 ((u8 *)cq->queue + (cq->size - 1) * in create_cq()
189 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe()
199 cq->sw_queue[cq->sw_pidx] = cqe; in insert_recv_cqe()
223 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe()
232 cq->sw_queue[cq->sw_pidx] = cqe; in insert_sq_cqe()
243 struct t4_cq *cq = &chp->cq; in c4iw_flush_sq() local
744 cq, cq->cqid, cq->sw_cidx); in poll_cq()
748 cq, cq->cqid, cq->cidx); in poll_cq()
[all …]
/linux/drivers/scsi/elx/efct/
efct_hw_queues.c
46 if (!cq) { in efct_hw_init_queues()
60 if (!cq) { in efct_hw_init_queues()
164 if (!cq) in efct_hw_new_cq()
171 cq->queue = &hw->cq[cq->instance]; in efct_hw_new_cq()
183 hw->hw_cq[cq->instance] = cq; in efct_hw_new_cq()
187 cq->queue->id, cq->entry_count); in efct_hw_new_cq()
207 cq = kzalloc(sizeof(*cq), GFP_KERNEL); in efct_hw_new_cq_set()
216 cq->queue = &hw->cq[cq->instance]; in efct_hw_new_cq_set()
252 mq->cq = cq; in efct_hw_new_mq()
284 wq->cq = cq; in efct_hw_new_wq()
[all …]
/linux/include/linux/mlx5/
cq.h
142 static inline void mlx5_cq_set_ci(struct mlx5_core_cq *cq) in mlx5_cq_set_ci() argument
144 *cq->set_ci_db = cpu_to_be32(cq->cons_index & 0xffffff); in mlx5_cq_set_ci()
160 sn = cq->arm_sn & 3; in mlx5_cq_arm()
163 *cq->arm_db = cpu_to_be32(sn << 28 | cmd | ci); in mlx5_cq_arm()
171 doorbell[1] = cpu_to_be32(cq->cqn); in mlx5_cq_arm()
176 static inline void mlx5_cq_hold(struct mlx5_core_cq *cq) in mlx5_cq_hold() argument
178 refcount_inc(&cq->refcount); in mlx5_cq_hold()
181 static inline void mlx5_cq_put(struct mlx5_core_cq *cq) in mlx5_cq_put() argument
183 if (refcount_dec_and_test(&cq->refcount)) in mlx5_cq_put()
184 complete(&cq->free); in mlx5_cq_put()
[all …]
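
The include/linux/mlx5/cq.h snippet above also shows the CQ lifetime rule used by the mlx5 core results earlier: event paths call mlx5_cq_hold() before using a CQ and mlx5_cq_put() afterwards, and the last put fires complete(&cq->free) so the destroy path can wait until all in-flight users are gone. Below is a small userspace analogue of that hold/put-then-wait pattern; every toy_* name is hypothetical and this is only a sketch of the idea, not the mlx5 implementation.

#include <pthread.h>
#include <stdatomic.h>

struct toy_cq {
	atomic_int refcount;          /* plays the role of cq->refcount */
	pthread_mutex_t lock;
	pthread_cond_t freed;         /* stands in for complete(&cq->free) */
	int free_done;
};

static void toy_cq_hold(struct toy_cq *cq)
{
	atomic_fetch_add(&cq->refcount, 1);
}

static void toy_cq_put(struct toy_cq *cq)
{
	/* Last reference dropped: wake the thread waiting to free the CQ. */
	if (atomic_fetch_sub(&cq->refcount, 1) == 1) {
		pthread_mutex_lock(&cq->lock);
		cq->free_done = 1;
		pthread_cond_signal(&cq->freed);
		pthread_mutex_unlock(&cq->lock);
	}
}

static void *completion_path(void *arg)
{
	struct toy_cq *cq = arg;

	/* ... handle completions while a reference is held ... */
	toy_cq_put(cq);               /* drop the reference taken for us */
	return NULL;
}

int main(void)
{
	struct toy_cq cq = { .refcount = 1, .free_done = 0 };
	pthread_t worker;

	pthread_mutex_init(&cq.lock, NULL);
	pthread_cond_init(&cq.freed, NULL);

	toy_cq_hold(&cq);             /* reference handed to the worker */
	pthread_create(&worker, NULL, completion_path, &cq);

	toy_cq_put(&cq);              /* destroy path drops its own reference */

	/* Destroy path: wait until every holder has called toy_cq_put(). */
	pthread_mutex_lock(&cq.lock);
	while (!cq.free_done)
		pthread_cond_wait(&cq.freed, &cq.lock);
	pthread_mutex_unlock(&cq.lock);

	return pthread_join(worker, NULL);
}
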
/linux/drivers/infiniband/hw/erdma/
erdma_cq.c
11 __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci, in get_next_valid_cqe()
12 cq->depth, CQE_SHIFT); in get_next_valid_cqe()
16 return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL; in get_next_valid_cqe()
23 FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) | in notify_cq()
29 *cq->kern_cq.dbrec = db_data; in notify_cq()
30 writeq(db_data, cq->kern_cq.db); in notify_cq()
35 struct erdma_cq *cq = to_ecq(ibcq); in erdma_req_notify_cq() local
46 cq->kern_cq.notify_cnt++; in erdma_req_notify_cq()
122 cqe = get_next_valid_cqe(cq); in erdma_poll_one_cqe()
126 cq->kern_cq.ci++; in erdma_poll_one_cqe()
[all …]

Completed in 64 milliseconds
