| /linux/drivers/net/ethernet/mellanox/mlx4/ |
| A D | cq.c |
    128  struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;  in mlx4_cq_event() local
    220  struct mlx4_cq_table *cq_table = &priv->cq_table;  in __mlx4_cq_alloc_icm() local
    267  struct mlx4_cq_table *cq_table = &priv->cq_table;  in __mlx4_cq_free_icm() local
    349  struct mlx4_cq_table *cq_table = &priv->cq_table;  in mlx4_cq_alloc() local
    364  spin_lock(&cq_table->lock);  in mlx4_cq_alloc()
    366  spin_unlock(&cq_table->lock);  in mlx4_cq_alloc()
    425  spin_lock(&cq_table->lock);  in mlx4_cq_alloc()
    427  spin_unlock(&cq_table->lock);  in mlx4_cq_alloc()
    439  struct mlx4_cq_table *cq_table = &priv->cq_table;  in mlx4_cq_free() local
    446  spin_lock(&cq_table->lock);  in mlx4_cq_free()
    [all …]
|
| A D | main.c |
    1630  err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,  in mlx4_init_cmpt_table()
    1651  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);  in mlx4_init_cmpt_table()
    1785  err = mlx4_init_icm_table(dev, &priv->cq_table.table,  in mlx4_init_icm()
    1829  mlx4_cleanup_icm_table(dev, &priv->cq_table.table);  in mlx4_init_icm()
    1854  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);  in mlx4_init_icm()
    1873  mlx4_cleanup_icm_table(dev, &priv->cq_table.table);  in mlx4_free_icms()
    1882  mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);  in mlx4_free_icms()
|
| A D | mlx4.h | 903 struct mlx4_cq_table cq_table; member
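The mlx4 rows show the pattern in this driver: each device embeds one mlx4_cq_table (the mlx4.h member above), and mlx4_cq_alloc()/mlx4_cq_free() publish or retire a CQ under cq_table->lock so mlx4_cq_event() can resolve a CQN at event time. Below is a minimal sketch of that publish/retire pairing, assuming the table wraps a kernel radix tree (the excerpts show only the lock; the tree and the demo_* names are placeholders, not mlx4's actual layout):

```c
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

/* Shape implied by the excerpts; the real layout lives in mlx4.h. */
struct demo_cq_table {
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

static void demo_cq_table_init(struct demo_cq_table *tbl)
{
	spin_lock_init(&tbl->lock);
	/* GFP_ATOMIC lets inserts allocate tree nodes under the spinlock. */
	INIT_RADIX_TREE(&tbl->tree, GFP_ATOMIC);
}

/* Publish a CQ for the event path, mirroring the spin_lock()/
 * spin_unlock() pair at cq.c lines 364/366 in mlx4_cq_alloc(). */
static int demo_cq_publish(struct demo_cq_table *tbl, int cqn, void *cq)
{
	int err;

	spin_lock(&tbl->lock);
	err = radix_tree_insert(&tbl->tree, cqn, cq);
	spin_unlock(&tbl->lock);

	return err;
}

/* Retire it again on the mlx4_cq_free() path. */
static void demo_cq_retire(struct demo_cq_table *tbl, int cqn)
{
	spin_lock(&tbl->lock);
	radix_tree_delete(&tbl->tree, cqn);
	spin_unlock(&tbl->lock);
}
```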
|
| /linux/drivers/infiniband/hw/hns/ |
| A D | hns_roce_cq.c |
    60   struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in alloc_cqn() local
    65   mutex_lock(&cq_table->bank_mutex);  in alloc_cqn()
    67   bank = &cq_table->bank[bankid];  in alloc_cqn()
    78   mutex_unlock(&cq_table->bank_mutex);  in alloc_cqn()
    91   struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in free_cqn() local
    98   mutex_lock(&cq_table->bank_mutex);  in free_cqn()
    133  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in alloc_cqc() local
    175  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in free_cqc() local
    488  xa_lock(&hr_dev->cq_table.array);  in hns_roce_cq_event()
    513  struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;  in hns_roce_init_cq_table() local
    [all …]
|
| A D | hns_roce_main.c |
    835  ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,  in hns_roce_init_hem()
    924  hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);  in hns_roce_init_hem()
|
| A D | hns_roce_device.h | 1008 struct hns_roce_cq_table cq_table; member
|
| A D | hns_roce_hem.c | 904 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table); in hns_roce_cleanup_hem()
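hns_roce splits CQN allocation across banks: alloc_cqn() selects a bank under cq_table->bank_mutex and carves an ID out of it, while event dispatch goes through the cq_table.array xarray (line 488 above). A sketch of the banked allocator using the kernel IDA, assuming a least-loaded bank policy; the bank count and demo_* types are placeholders for this sketch:

```c
#include <linux/idr.h>
#include <linux/mutex.h>

#define DEMO_CQ_BANK_NUM 4	/* bank count is an assumption here */

struct demo_cq_bank {
	struct ida	ida;	/* ida_init()'d at table setup */
	u32		min;	/* lowest CQN served by this bank */
	u32		max;	/* highest CQN served by this bank */
	u32		inuse;	/* outstanding allocations, for balancing */
};

struct demo_cq_table {
	struct mutex		bank_mutex;
	struct demo_cq_bank	bank[DEMO_CQ_BANK_NUM];
};

/* Pick the least-loaded bank under bank_mutex, then take a CQN from
 * its IDA, mirroring the alloc_cqn() locking seen above. */
static int demo_alloc_cqn(struct demo_cq_table *tbl)
{
	struct demo_cq_bank *bank = &tbl->bank[0];
	int i, id;

	mutex_lock(&tbl->bank_mutex);
	for (i = 1; i < DEMO_CQ_BANK_NUM; i++)
		if (tbl->bank[i].inuse < bank->inuse)
			bank = &tbl->bank[i];

	id = ida_alloc_range(&bank->ida, bank->min, bank->max, GFP_KERNEL);
	if (id >= 0)
		bank->inuse++;
	mutex_unlock(&tbl->bank_mutex);

	return id;
}

/* free_cqn() takes the same mutex, returns the ID, drops the count. */
static void demo_free_cqn(struct demo_cq_table *tbl, int cqn, int bankid)
{
	struct demo_cq_bank *bank = &tbl->bank[bankid];

	mutex_lock(&tbl->bank_mutex);
	ida_free(&bank->ida, cqn);
	bank->inuse--;
	mutex_unlock(&tbl->bank_mutex);
}
```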
|
| /linux/drivers/infiniband/hw/mthca/ |
| A D | mthca_cq.c |
    239  spin_lock(&dev->cq_table.lock);  in mthca_cq_event()
    245  spin_unlock(&dev->cq_table.lock);  in mthca_cq_event()
    258  spin_lock(&dev->cq_table.lock);  in mthca_cq_event()
    261  spin_unlock(&dev->cq_table.lock);  in mthca_cq_event()
    850  spin_lock_irq(&dev->cq_table.lock);  in mthca_init_cq()
    857  spin_unlock_irq(&dev->cq_table.lock);  in mthca_init_cq()
    893  spin_lock_irq(&dev->cq_table.lock);  in get_cq_refcount()
    895  spin_unlock_irq(&dev->cq_table.lock);  in get_cq_refcount()
    927  spin_lock_irq(&dev->cq_table.lock);  in mthca_free_cq()
    928  mthca_array_clear(&dev->cq_table.cq,  in mthca_free_cq()
    [all …]
|
| A D | mthca_main.c |
    476  mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,  in mthca_init_icm()
    481  if (!mdev->cq_table.table) {  in mthca_init_icm()
    527  mthca_free_icm_table(mdev, mdev->cq_table.table);  in mthca_init_icm()
    562  mthca_free_icm_table(mdev, mdev->cq_table.table);  in mthca_free_icms()
|
| A D | mthca_dev.h | 342 struct mthca_cq_table cq_table; member
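mthca guards a plain array of CQ pointers with dev->cq_table.lock and pins a CQ with a reference count before handing it an event (mthca_cq_event, get_cq_refcount above). A sketch of that lookup-and-hold step, with a flat pointer array standing in for mthca_array and demo_* names as placeholders:

```c
#include <linux/spinlock.h>

struct demo_cq {
	int	refcount;	/* protected by the table lock, as in mthca */
	int	cqn;
};

struct demo_cq_table {
	spinlock_t	lock;
	struct demo_cq	**cq;		/* stands in for mthca_array */
	int		num_cqs;	/* table size, a power of two */
};

/* Look the CQ up and take a reference under the table lock, mirroring
 * the spin_lock(&dev->cq_table.lock) pairs in mthca_cq_event(). */
static struct demo_cq *demo_cq_get(struct demo_cq_table *tbl, int cqn)
{
	struct demo_cq *cq;

	spin_lock(&tbl->lock);
	cq = tbl->cq[cqn & (tbl->num_cqs - 1)];
	if (cq)
		++cq->refcount;
	spin_unlock(&tbl->lock);

	return cq;
}
```

The masked index folds a hardware CQN into the fixed-size table; holding the reference is what lets the free path (mthca_free_cq above) wait until in-flight events have dropped theirs.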
|
| /linux/drivers/infiniband/hw/mana/ |
| A D | cq.c |
    124  WARN_ON(gc->cq_table[cq->queue.id]);  in mana_ib_install_cq_cb()
    133  gc->cq_table[cq->queue.id] = gdma_cq;  in mana_ib_install_cq_cb()
    144  kfree(gc->cq_table[cq->queue.id]);  in mana_ib_remove_cq_cb()
    145  gc->cq_table[cq->queue.id] = NULL;  in mana_ib_remove_cq_cb()
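mana_ib treats gc->cq_table as a flat slot-per-id table: install writes a freshly allocated entry at the queue's id (WARN_ON-ing an occupied slot), and remove kfrees the entry and clears the slot, exactly as the four excerpts show. A sketch of that pairing; the demo_* types and callback shape are placeholders, not the real gdma_queue:

```c
#include <linux/bug.h>
#include <linux/slab.h>

struct demo_gdma_queue {
	u32	id;
	void	(*callback)(void *ctx);
	void	*ctx;
};

/* Install a callback entry at the queue's slot; the WARN_ON mirrors
 * mana_ib_install_cq_cb() catching a stale occupant. */
static int demo_install_cq_cb(struct demo_gdma_queue **cq_table, u32 id,
			      void (*cb)(void *), void *ctx)
{
	struct demo_gdma_queue *q;

	WARN_ON(cq_table[id]);

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	q->id = id;
	q->callback = cb;
	q->ctx = ctx;
	cq_table[id] = q;

	return 0;
}

/* Tear the slot down again, as mana_ib_remove_cq_cb() does. */
static void demo_remove_cq_cb(struct demo_gdma_queue **cq_table, u32 id)
{
	kfree(cq_table[id]);
	cq_table[id] = NULL;
}
```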
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/ |
| A D | eq.c |
    96   struct mlx5_cq_table *table = &eq->cq_table;  in mlx5_eq_cq_get()
    269  struct mlx5_cq_table *cq_table = &eq->cq_table;  in create_map_eq() local
    282  memset(cq_table, 0, sizeof(*cq_table));  in create_map_eq()
    283  spin_lock_init(&cq_table->lock);  in create_map_eq()
    284  INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);  in create_map_eq()
    409  struct mlx5_cq_table *table = &eq->cq_table;  in mlx5_eq_add_cq()
    421  struct mlx5_cq_table *table = &eq->cq_table;  in mlx5_eq_del_cq()
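Unlike mlx4's per-device table, mlx5 scopes the table per event queue: create_map_eq() zeroes the embedded mlx5_cq_table and sets up a spinlock plus a GFP_ATOMIC radix tree (lines 282-284 above), and mlx5_eq_cq_get() resolves a CQN from an EQE while pinning the CQ. A sketch of that per-EQ setup and lookup-with-hold, assuming a refcount_t on the CQ; demo_* names are placeholders, and current kernels use RCU on the read side, a detail this sketch simplifies to the spinlock:

```c
#include <linux/gfp.h>
#include <linux/radix-tree.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct demo_cq_table {
	spinlock_t		lock;	/* protects tree */
	struct radix_tree_root	tree;
};

struct demo_core_cq {
	u32		cqn;
	refcount_t	refcount;
};

/* Per-EQ table setup, mirroring the memset()/spin_lock_init()/
 * INIT_RADIX_TREE() sequence in create_map_eq() above. */
static void demo_cq_table_init(struct demo_cq_table *tbl)
{
	memset(tbl, 0, sizeof(*tbl));
	spin_lock_init(&tbl->lock);
	INIT_RADIX_TREE(&tbl->tree, GFP_ATOMIC);
}

/* Resolve a cqn and pin the CQ before completion handling, in the
 * spirit of mlx5_eq_cq_get(). */
static struct demo_core_cq *demo_eq_cq_get(struct demo_cq_table *tbl, u32 cqn)
{
	struct demo_core_cq *cq;

	spin_lock(&tbl->lock);
	cq = radix_tree_lookup(&tbl->tree, cqn);
	if (cq)
		refcount_inc(&cq->refcount);
	spin_unlock(&tbl->lock);

	return cq;
}
```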
|
| /linux/drivers/net/ethernet/microsoft/mana/ |
| A D | hw_channel.c |
    656  gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(struct gdma_queue *));  in mana_hwc_establish_channel()
    657  if (!gc->cq_table)  in mana_hwc_establish_channel()
    660  gc->cq_table[cq->id] = cq;  in mana_hwc_establish_channel()
    806  vfree(gc->cq_table);  in mana_hwc_destroy_channel()
    807  gc->cq_table = NULL;  in mana_hwc_destroy_channel()
|
| A D | gdma_main.c |
    366  cq = gc->cq_table[cq_id];  in mana_gd_process_eqe()
    645  if (!gc->cq_table[id])  in mana_gd_destroy_cq()
    648  gc->cq_table[id] = NULL;  in mana_gd_destroy_cq()
|
| A D | mana_en.c |
    2001  gc->cq_table[cq->gdma_id] = cq->gdma_cq;  in mana_create_txq()
    2285  gc->cq_table[cq->gdma_id] = cq->gdma_cq;  in mana_create_rxq()
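The ethernet side owns the table's lifecycle: mana_hwc_establish_channel() vcalloc()s one pointer slot per possible CQ id, the TX/RX queue-creation paths drop each gdma_cq in at its id, and mana_gd_process_eqe() indexes the table to dispatch completions. A sketch of that allocate/index/free cycle; demo_* types are placeholders, and the bounds check reflects the id validation the driver performs before the indexing shown above:

```c
#include <linux/errno.h>
#include <linux/vmalloc.h>

struct demo_gdma_queue {
	u32	id;
};

struct demo_gdma_context {
	u32			max_num_cqs;
	struct demo_gdma_queue	**cq_table;
};

/* One slot per CQ id, as mana_hwc_establish_channel() sets up. */
static int demo_cq_table_create(struct demo_gdma_context *gc)
{
	gc->cq_table = vcalloc(gc->max_num_cqs, sizeof(*gc->cq_table));
	return gc->cq_table ? 0 : -ENOMEM;
}

/* EQE dispatch indexes the table directly by the id carried in the
 * event, so the id must be range-checked first. */
static struct demo_gdma_queue *demo_cq_lookup(struct demo_gdma_context *gc,
					      u32 cq_id)
{
	if (cq_id >= gc->max_num_cqs)
		return NULL;
	return gc->cq_table[cq_id];
}

/* Channel teardown frees the whole table, as in
 * mana_hwc_destroy_channel(). */
static void demo_cq_table_destroy(struct demo_gdma_context *gc)
{
	vfree(gc->cq_table);
	gc->cq_table = NULL;
}
```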
|
| /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/ |
| A D | eq.h | 28 struct mlx5_cq_table cq_table; member
|
| /linux/drivers/infiniband/hw/irdma/ |
| A D | main.h | 314 struct irdma_cq **cq_table; member
|
| A D | hw.c |
    344   iwcq = rf->cq_table[info->qp_cq_id];  in irdma_process_aeq()
    1972  rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]);  in irdma_set_hw_rsrc()
|
| A D | utils.c | 783 iwdev->rf->cq_table[iwcq->cq_num] = NULL; in irdma_cq_rem_ref()
|
| A D | verbs.c | 2228 rf->cq_table[cq_num] = iwcq; in irdma_create_cq()
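Notable in the irdma rows: cq_table is not a separate allocation. irdma_set_hw_rsrc() points it at the first slot past qp_table inside one shared buffer (hw.c line 1972 above). A sketch of carving both tables out of a single allocation, assuming a buffer sized for exactly these two tables; the demo_* names are placeholders, and the real driver packs additional resource tables into the same region:

```c
#include <linux/slab.h>

struct demo_qp;
struct demo_cq;

struct demo_rsrc {
	u32		max_qp;
	u32		max_cq;
	struct demo_qp	**qp_table;
	struct demo_cq	**cq_table;
};

/* One allocation backs both tables; cq_table simply starts at the
 * first slot past qp_table, the trick behind hw.c line 1972. */
static int demo_set_hw_rsrc(struct demo_rsrc *rf)
{
	size_t size = rf->max_qp * sizeof(struct demo_qp *) +
		      rf->max_cq * sizeof(struct demo_cq *);

	rf->qp_table = kzalloc(size, GFP_KERNEL);
	if (!rf->qp_table)
		return -ENOMEM;

	rf->cq_table = (struct demo_cq **)(&rf->qp_table[rf->max_qp]);

	return 0;
}
```

Freeing rf->qp_table releases both tables at once, which is why irdma_cq_rem_ref() (utils.c above) only NULLs the slot rather than freeing it.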
|
| /linux/include/net/mana/ |
| A D | gdma.h | 380 struct gdma_queue **cq_table; member
|