Lines matching refs: cq

41 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)  in mlx5_ib_cq_comp()  argument
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
50 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
51 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
52 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
64 event.element.cq = ibcq; in mlx5_ib_cq_event()
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
71 return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); in get_cqe()
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
94 static void *next_cqe_sw(struct mlx5_ib_cq *cq) in next_cqe_sw() argument
96 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
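The test at line 87 is the software-ownership check: a CQE belongs to software when its owner bit matches the wrap parity of the index being probed. A minimal standalone sketch of that convention, assuming MLX5_CQE_OWNER_MASK is the low bit of op_own and that (cq->ibcq.cqe + 1) is the ring size; the helper below is illustrative, not the driver's API.

    #include <stdbool.h>
    #include <stdint.h>

    /* True when the CQE at index "n" is owned by software: hardware flips
     * the owner bit on every wrap of the ring, so the bit must match the
     * wrap parity of the index being probed. */
    static bool cqe_is_sw_owned(uint8_t op_own, uint32_t n, uint32_t ring_size)
    {
            unsigned int hw_owner  = op_own & 0x1;        /* MLX5_CQE_OWNER_MASK */
            unsigned int sw_parity = !!(n & ring_size);   /* ring_size == cqe + 1 */

            return hw_owner == sw_parity;
    }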
427 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, in mlx5_ib_poll_sw_comp() argument
434 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { in mlx5_ib_poll_sw_comp()
440 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { in mlx5_ib_poll_sw_comp()
447 static int mlx5_poll_one(struct mlx5_ib_cq *cq, in mlx5_poll_one() argument
451 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
463 cqe = next_cqe_sw(cq); in mlx5_poll_one()
467 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
469 ++cq->mcq.cons_index; in mlx5_poll_one()
478 if (likely(cq->resize_buf)) { in mlx5_poll_one()
479 free_cq_buf(dev, &cq->buf); in mlx5_poll_one()
480 cq->buf = *cq->resize_buf; in mlx5_poll_one()
481 kfree(cq->resize_buf); in mlx5_poll_one()
482 cq->resize_buf = NULL; in mlx5_poll_one()
526 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one()
567 cq->mcq.cqn, sig->err_item.key, in mlx5_poll_one()
581 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, in poll_soft_wc() argument
584 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in poll_soft_wc()
588 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { in poll_soft_wc()
593 cq->mcq.cqn); in poll_soft_wc()
609 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_poll_cq() local
611 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_poll_cq()
617 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_poll_cq()
620 if (unlikely(!list_empty(&cq->wc_list))) in mlx5_ib_poll_cq()
621 soft_polled = poll_soft_wc(cq, num_entries, wc, true); in mlx5_ib_poll_cq()
623 mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled, in mlx5_ib_poll_cq()
628 if (unlikely(!list_empty(&cq->wc_list))) in mlx5_ib_poll_cq()
629 soft_polled = poll_soft_wc(cq, num_entries, wc, false); in mlx5_ib_poll_cq()
632 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) in mlx5_ib_poll_cq()
637 mlx5_cq_set_ci(&cq->mcq); in mlx5_ib_poll_cq()
639 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_poll_cq()
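Lines 609-639 show the overall shape of the poll path: under the CQ lock, software-generated completions on wc_list are drained first, hardware CQEs are then consumed one at a time via mlx5_poll_one(), and the consumer index is published with mlx5_cq_set_ci() before the lock is dropped. A userspace-flavoured sketch of that shape on the normal (non-error) path, using stand-in types and helpers rather than the driver's API:

    #include <stddef.h>

    struct demo_cq;                 /* opaque stand-in for struct mlx5_ib_cq */
    struct demo_wc { int status; }; /* stand-in for struct ib_wc */

    void demo_lock(struct demo_cq *cq);
    void demo_unlock(struct demo_cq *cq);
    int  demo_poll_soft(struct demo_cq *cq, int num_entries, struct demo_wc *wc);
    int  demo_poll_one(struct demo_cq *cq, struct demo_wc *wc);  /* nonzero: no more CQEs */
    void demo_publish_consumer_index(struct demo_cq *cq);

    int demo_poll_cq(struct demo_cq *cq, int num_entries, struct demo_wc *wc)
    {
            int soft_polled = 0, npolled;

            demo_lock(cq);

            /* Software-generated completions are reported ahead of hardware CQEs. */
            soft_polled = demo_poll_soft(cq, num_entries, wc);

            for (npolled = 0; npolled < num_entries - soft_polled; npolled++)
                    if (demo_poll_one(cq, wc + soft_polled + npolled))
                            break;

            /* Let hardware reuse the consumed slots. */
            demo_publish_consumer_index(cq);

            demo_unlock(cq);
            return soft_polled + npolled;
    }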
647 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_arm_cq() local
652 spin_lock_irqsave(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
653 if (cq->notify_flags != IB_CQ_NEXT_COMP) in mlx5_ib_arm_cq()
654 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; in mlx5_ib_arm_cq()
656 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) in mlx5_ib_arm_cq()
658 spin_unlock_irqrestore(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
660 mlx5_cq_arm(&cq->mcq, in mlx5_ib_arm_cq()
716 struct mlx5_ib_cq *cq, int entries, u32 **cqb, in create_cq_user() argument
748 cq->buf.umem = in create_cq_user()
751 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
752 err = PTR_ERR(cq->buf.umem); in create_cq_user()
757 cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT, in create_cq_user()
764 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db); in create_cq_user()
768 ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size); in create_cq_user()
773 ib_umem_num_pages(cq->buf.umem), page_size, ncont); in create_cq_user()
784 mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0); in create_cq_user()
837 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; in create_cq_user()
841 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS; in create_cq_user()
850 mlx5_ib_db_unmap_user(context, &cq->db); in create_cq_user()
853 ib_umem_release(cq->buf.umem); in create_cq_user()
857 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) in destroy_cq_user() argument
862 mlx5_ib_db_unmap_user(context, &cq->db); in destroy_cq_user()
863 ib_umem_release(cq->buf.umem); in destroy_cq_user()
879 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in create_cq_kernel() argument
887 err = mlx5_db_alloc(dev->mdev, &cq->db); in create_cq_kernel()
891 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
892 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
893 cq->mcq.cqe_sz = cqe_size; in create_cq_kernel()
895 err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size); in create_cq_kernel()
899 init_cq_frag_buf(&cq->buf); in create_cq_kernel()
903 cq->buf.frag_buf.npages; in create_cq_kernel()
911 mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas); in create_cq_kernel()
915 cq->buf.frag_buf.page_shift - in create_cq_kernel()
923 free_cq_buf(dev, &cq->buf); in create_cq_kernel()
926 mlx5_db_free(dev->mdev, &cq->db); in create_cq_kernel()
930 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in destroy_cq_kernel() argument
932 free_cq_buf(dev, &cq->buf); in destroy_cq_kernel()
933 mlx5_db_free(dev->mdev, &cq->db); in destroy_cq_kernel()
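Lines 891-892 show that a single doorbell record serves the CQ twice: the first 32-bit word is the consumer-index doorbell and the word immediately after it is the arm doorbell. A small sketch of that split, with illustrative stand-in types:

    #include <stdint.h>

    struct demo_db_record {
            uint32_t word[2];               /* one allocation, two doorbell words */
    };

    struct demo_core_cq {
            volatile uint32_t *set_ci_db;   /* consumer-index doorbell */
            volatile uint32_t *arm_db;      /* arm / solicited doorbell */
    };

    static void demo_bind_doorbells(struct demo_core_cq *mcq,
                                    struct demo_db_record *db)
    {
            mcq->set_ci_db = &db->word[0];
            mcq->arm_db    = &db->word[1];  /* cq->db.db + 1 in the driver */
    }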
938 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, in notify_soft_wc_handler() local
941 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
951 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_create_cq() local
972 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
973 mutex_init(&cq->resize_mutex); in mlx5_ib_create_cq()
974 spin_lock_init(&cq->lock); in mlx5_ib_create_cq()
975 cq->resize_buf = NULL; in mlx5_ib_create_cq()
976 cq->resize_umem = NULL; in mlx5_ib_create_cq()
977 cq->create_flags = attr->flags; in mlx5_ib_create_cq()
978 INIT_LIST_HEAD(&cq->list_send_qp); in mlx5_ib_create_cq()
979 INIT_LIST_HEAD(&cq->list_recv_qp); in mlx5_ib_create_cq()
982 err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, in mlx5_ib_create_cq()
988 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, in mlx5_ib_create_cq()
993 INIT_WORK(&cq->notify_work, notify_soft_wc_handler); in mlx5_ib_create_cq()
1000 cq->cqe_size = cqe_size; in mlx5_ib_create_cq()
1005 cq->private_flags & in mlx5_ib_create_cq()
1010 MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); in mlx5_ib_create_cq()
1011 if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) in mlx5_ib_create_cq()
1014 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out)); in mlx5_ib_create_cq()
1018 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); in mlx5_ib_create_cq()
1020 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
1022 cq->mcq.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
1023 cq->mcq.event = mlx5_ib_cq_event; in mlx5_ib_create_cq()
1025 INIT_LIST_HEAD(&cq->wc_list); in mlx5_ib_create_cq()
1028 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { in mlx5_ib_create_cq()
1038 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); in mlx5_ib_create_cq()
1043 destroy_cq_user(cq, udata); in mlx5_ib_create_cq()
1045 destroy_cq_kernel(dev, cq); in mlx5_ib_create_cq()
1049 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) in mlx5_ib_destroy_cq() argument
1051 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
1052 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_destroy_cq()
1071 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) in __mlx5_ib_cq_clean() argument
1079 if (!cq) in __mlx5_ib_cq_clean()
1088 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) in __mlx5_ib_cq_clean()
1089 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
1095 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx5_ib_cq_clean()
1096 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1097 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
1103 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1104 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; in __mlx5_ib_cq_clean()
1106 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
1113 cq->mcq.cons_index += nfreed; in __mlx5_ib_cq_clean()
1118 mlx5_cq_set_ci(&cq->mcq); in __mlx5_ib_cq_clean()
1122 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
1124 if (!cq) in mlx5_ib_cq_clean()
1127 spin_lock_irq(&cq->lock); in mlx5_ib_cq_clean()
1128 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
1129 spin_unlock_irq(&cq->lock); in mlx5_ib_cq_clean()
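Lines 1088-1118 implement CQ cleaning as in-place compaction: scan forward from the consumer index to find the last software-owned entry, then walk backwards, counting entries that belong to the resource being destroyed and sliding the survivors toward the producer side by that count, and finally advance the consumer index past the freed slots. A simplified sketch of the same compaction over a plain array, assuming matching is just a QPN comparison and ignoring the CQE ownership-bit fixups the driver performs:

    #include <stdint.h>

    /* ring[] holds the owning QPN of each entry; mask is (nent - 1). */
    static void demo_clean_ring(uint32_t *ring, uint32_t mask,
                                uint32_t *cons_index, uint32_t prod_index,
                                uint32_t dead_qpn)
    {
            uint32_t nfreed = 0;

            /* Walk backwards from the last software-owned entry down to the
             * consumer index, compacting survivors as we go. */
            while ((int32_t)(--prod_index - *cons_index) >= 0) {
                    if (ring[prod_index & mask] == dead_qpn)
                            nfreed++;
                    else if (nfreed)
                            ring[(prod_index + nfreed) & mask] =
                                    ring[prod_index & mask];
            }

            /* Skip the freed slots; the driver also rings the CI doorbell here. */
            *cons_index += nfreed;
    }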
1132 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx5_ib_modify_cq() argument
1134 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
1135 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_modify_cq()
1152 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_user() argument
1179 cq->resize_umem = umem; in resize_user()
1185 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_kernel() argument
1190 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); in resize_kernel()
1191 if (!cq->resize_buf) in resize_kernel()
1194 err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size); in resize_kernel()
1198 init_cq_frag_buf(cq->resize_buf); in resize_kernel()
1203 kfree(cq->resize_buf); in resize_kernel()
1207 static int copy_resize_cqes(struct mlx5_ib_cq *cq) in copy_resize_cqes() argument
1209 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
1220 ssize = cq->buf.cqe_size; in copy_resize_cqes()
1221 dsize = cq->resize_buf->cqe_size; in copy_resize_cqes()
1227 i = cq->mcq.cons_index; in copy_resize_cqes()
1228 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1237 dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, in copy_resize_cqes()
1238 (i + 1) & cq->resize_buf->nent); in copy_resize_cqes()
1240 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
1245 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1254 cq->mcq.cqn); in copy_resize_cqes()
1258 ++cq->mcq.cons_index; in copy_resize_cqes()
1265 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_resize_cq() local
1297 mutex_lock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1301 err = resize_user(dev, cq, entries, udata, &cqe_size); in mlx5_ib_resize_cq()
1306 cq->resize_umem, cqc, log_page_size, in mlx5_ib_resize_cq()
1313 npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size); in mlx5_ib_resize_cq()
1319 err = resize_kernel(dev, cq, entries, cqe_size); in mlx5_ib_resize_cq()
1322 frag_buf = &cq->resize_buf->frag_buf; in mlx5_ib_resize_cq()
1338 mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas, in mlx5_ib_resize_cq()
1341 mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas); in mlx5_ib_resize_cq()
1356 cq->private_flags & in mlx5_ib_resize_cq()
1361 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn); in mlx5_ib_resize_cq()
1363 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); in mlx5_ib_resize_cq()
1368 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1369 ib_umem_release(cq->buf.umem); in mlx5_ib_resize_cq()
1370 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
1371 cq->resize_umem = NULL; in mlx5_ib_resize_cq()
1376 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_resize_cq()
1377 if (cq->resize_buf) { in mlx5_ib_resize_cq()
1378 err = copy_resize_cqes(cq); in mlx5_ib_resize_cq()
1380 tbuf = cq->buf; in mlx5_ib_resize_cq()
1381 cq->buf = *cq->resize_buf; in mlx5_ib_resize_cq()
1382 kfree(cq->resize_buf); in mlx5_ib_resize_cq()
1383 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1387 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1388 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_resize_cq()
1392 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1401 ib_umem_release(cq->resize_umem); in mlx5_ib_resize_cq()
1403 free_cq_buf(dev, cq->resize_buf); in mlx5_ib_resize_cq()
1404 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1407 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1413 struct mlx5_ib_cq *cq; in mlx5_ib_get_cqe_size() local
1418 cq = to_mcq(ibcq); in mlx5_ib_get_cqe_size()
1419 return cq->cqe_size; in mlx5_ib_get_cqe_size()
1426 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_generate_wc() local
1434 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_generate_wc()
1435 list_add_tail(&soft_wc->list, &cq->wc_list); in mlx5_ib_generate_wc()
1436 if (cq->notify_flags == IB_CQ_NEXT_COMP || in mlx5_ib_generate_wc()
1438 cq->notify_flags = 0; in mlx5_ib_generate_wc()
1439 schedule_work(&cq->notify_work); in mlx5_ib_generate_wc()
1441 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_generate_wc()
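Lines 1426-1441 queue a software-generated completion: the soft WC is appended to cq->wc_list under the CQ lock, and the deferred notifier (notify_work, set up at line 993) is scheduled only when the CQ is currently armed, with notify_flags cleared so the event fires once. A hedged userspace sketch of that hand-off, with illustrative names and a plain bool standing in for the notify_flags state:

    #include <stdbool.h>
    #include <stddef.h>

    struct demo_soft_wc {
            struct demo_soft_wc *next;
    };

    struct demo_soft_cq {
            struct demo_soft_wc *wc_head, *wc_tail;  /* stand-in for cq->wc_list */
            bool armed;                              /* stand-in for cq->notify_flags */
            void (*schedule_notify)(struct demo_soft_cq *cq);
    };

    /* Caller holds the CQ lock, as the driver does around lines 1434-1441. */
    static void demo_generate_wc(struct demo_soft_cq *cq, struct demo_soft_wc *wc)
    {
            wc->next = NULL;
            if (cq->wc_tail)
                    cq->wc_tail->next = wc;
            else
                    cq->wc_head = wc;
            cq->wc_tail = wc;

            if (cq->armed) {
                    cq->armed = false;        /* mirrors clearing cq->notify_flags */
                    cq->schedule_notify(cq);  /* mirrors schedule_work(&cq->notify_work) */
            }
    }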