| /drivers/infiniband/hw/cxgb4/ |
| device.c |
|   354   xa_lock_irq(&qpd->devp->qps);   in qp_open()
|   357   xa_unlock_irq(&qpd->devp->qps);   in qp_open()
|  1313   xa_lock_irq(&ctx->dev->qps);   in resume_queues()
|  1353   xa_unlock_irq(&ctx->dev->qps);   in resume_queues()
|  1358   struct c4iw_qp **qps;   member
|  1377   xa_lock_irq(&qp->rhp->qps);   in recover_lost_dbs()
|  1437   xa_lock_irq(&ctx->dev->qps);   in recover_queues()
|  1443   qp_list.qps = kcalloc(count, sizeof(*qp_list.qps), GFP_ATOMIC);   in recover_queues()
|  1444   if (!qp_list.qps) {   in recover_queues()
|  1463   kfree(qp_list.qps);   in recover_queues()
|  [all …]
|
| ev.c |
|   126   xa_lock_irq(&dev->qps);   in c4iw_ev_dispatch()
|   127   qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));   in c4iw_ev_dispatch()
|   134   xa_unlock_irq(&dev->qps);   in c4iw_ev_dispatch()
|   149   xa_unlock_irq(&dev->qps);   in c4iw_ev_dispatch()
|   155   xa_unlock_irq(&dev->qps);   in c4iw_ev_dispatch()
|
| qp.c |
|    66   xa_lock_irq(&dev->qps);   in alloc_ird()
|    71   xa_unlock_irq(&dev->qps);   in alloc_ird()
|    82   xa_lock_irq(&dev->qps);   in free_ird()
|    84   xa_unlock_irq(&dev->qps);   in free_ird()
|   916   xa_lock_irqsave(&qhp->rhp->qps, flags);   in ring_kernel_sq_db()
|   925   xa_unlock_irqrestore(&qhp->rhp->qps, flags);   in ring_kernel_sq_db()
|   933   xa_lock_irqsave(&qhp->rhp->qps, flags);   in ring_kernel_rq_db()
|  2096   xa_lock_irq(&rhp->qps);   in c4iw_destroy_qp()
|  2097   __xa_erase(&rhp->qps, qhp->wq.sq.qid);   in c4iw_destroy_qp()
|  2100   xa_unlock_irq(&rhp->qps);   in c4iw_destroy_qp()
|  [all …]
|
| iw_cxgb4.h |
|   318   struct xarray qps;   member
|   350   return xa_load(&rhp->qps, qpid);   in get_qhp()
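Taken together, the cxgb4 hits show one pattern: QPs live in an xarray keyed by QPID (iw_cxgb4.h line 318), and every reader that can race with the __xa_erase() in c4iw_destroy_qp() performs its xa_load() under the xarray lock. A minimal sketch of that pattern, with hypothetical my_dev/my_qp types standing in for c4iw_dev/c4iw_qp:

    #include <linux/types.h>
    #include <linux/xarray.h>

    /* Hypothetical stand-ins; the xarray maps QPID -> QP. */
    struct my_qp {
        u32 qid;
    };

    struct my_dev {
        struct xarray qps;
    };

    /* Publish a QP at its QPID; xa_insert_irq() returns -EBUSY if taken. */
    static int my_register_qp(struct my_dev *dev, struct my_qp *qp)
    {
        return xa_insert_irq(&dev->qps, qp->qid, qp, GFP_KERNEL);
    }

    /*
     * Lookup from an event path, as in c4iw_ev_dispatch(): the xarray
     * lock is held across the use of the entry, so a concurrent erase
     * in the destroy path cannot free it underneath us.
     */
    static void my_dispatch(struct my_dev *dev, u32 qpid)
    {
        struct my_qp *qhp;

        xa_lock_irq(&dev->qps);
        qhp = xa_load(&dev->qps, qpid);
        if (!qhp) {
            xa_unlock_irq(&dev->qps);
            return;
        }
        /* ... take a reference / handle the error CQE ... */
        xa_unlock_irq(&dev->qps);
    }

The _irq variants are needed here because the dispatch side runs from interrupt context; qp.c's ring_kernel_sq_db() uses xa_lock_irqsave() for the same reason.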
|
| /drivers/net/ethernet/huawei/hinic/ |
| hinic_hw_io.c |
|   129   qp = &func_to_io->qps[i];   in write_sq_ctxts()
|   173   qp = &func_to_io->qps[i];   in write_rq_ctxts()
|   381   func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,   in hinic_io_create_qps()
|   382   sizeof(*func_to_io->qps), GFP_KERNEL);   in hinic_io_create_qps()
|   383   if (!func_to_io->qps)   in hinic_io_create_qps()
|   419   err = init_qp(func_to_io, &func_to_io->qps[i], i,   in hinic_io_create_qps()
|   444   destroy_qp(func_to_io, &func_to_io->qps[j]);   in hinic_io_create_qps()
|   459   devm_kfree(&pdev->dev, func_to_io->qps);   in hinic_io_create_qps()
|   478   destroy_qp(func_to_io, &func_to_io->qps[i]);   in hinic_io_destroy_qps()
|   488   devm_kfree(&pdev->dev, func_to_io->qps);   in hinic_io_destroy_qps()
|  [all …]
|
| hinic_hw_io.h |
|    70   struct hinic_qp *qps;   member
|
| hinic_hw_dev.c |
|  1061   struct hinic_qp *qp = &func_to_io->qps[i];   in hinic_hwdev_get_sq()
|  1079   struct hinic_qp *qp = &func_to_io->qps[i];   in hinic_hwdev_get_rq()
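The hinic hits show the other common ownership model: a single devm_kcalloc()'d array of QP structs, initialized in a loop and unwound in reverse on failure, as hinic_io_create_qps() does. A sketch of that shape, with hypothetical my_* names in place of hinic_qp/init_qp()/destroy_qp():

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical stand-ins for hinic_qp / init_qp() / destroy_qp(). */
    struct my_qp {
        int q_id;
    };

    static int my_init_qp(struct my_qp *qp, int i)
    {
        qp->q_id = i;
        return 0;
    }

    static void my_destroy_qp(struct my_qp *qp)
    {
    }

    /* Allocate the whole QP array at once, then initialize each entry,
     * unwinding the already-initialized ones if any init_qp fails. */
    static int my_create_qps(struct device *dev, struct my_qp **out,
                             int num_qps)
    {
        struct my_qp *qps;
        int i, j, err;

        qps = devm_kcalloc(dev, num_qps, sizeof(*qps), GFP_KERNEL);
        if (!qps)
            return -ENOMEM;

        for (i = 0; i < num_qps; i++) {
            err = my_init_qp(&qps[i], i);
            if (err)
                goto err_init_qp;
        }

        *out = qps;
        return 0;

    err_init_qp:
        for (j = 0; j < i; j++)
            my_destroy_qp(&qps[j]);
        devm_kfree(dev, qps);
        return err;
    }

Because the array is device-managed, hinic_hwdev_get_sq()/get_rq() can hand out &qps[i] pointers without any per-lookup locking: the array lives as long as the device does.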
|
| /drivers/crypto/hisilicon/sec2/ |
| sec_main.c |
|   397   void sec_destroy_qps(struct hisi_qp **qps, int qp_num)   in sec_destroy_qps()   argument
|   399   hisi_qm_free_qps(qps, qp_num);   in sec_destroy_qps()
|   400   kfree(qps);   in sec_destroy_qps()
|   407   struct hisi_qp **qps;   in sec_create_qps()   local
|   410   qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);   in sec_create_qps()
|   411   if (!qps)   in sec_create_qps()
|   414   ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps);   in sec_create_qps()
|   416   return qps;   in sec_create_qps()
|   418   kfree(qps);   in sec_create_qps()
|
| sec.h |
|   189   struct hisi_qp **qps;   member
|   293   void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
|
| sec_crypto.c |
|   626   qp = ctx->qps[qp_ctx_id];   in sec_create_qp_ctx()
|   671   ctx->qps = sec_create_qps();   in sec_ctx_base_init()
|   672   if (!ctx->qps) {   in sec_ctx_base_init()
|   677   sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);   in sec_ctx_base_init()
|   703   sec_destroy_qps(ctx->qps, sec->ctx_q_num);   in sec_ctx_base_init()
|   714   sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);   in sec_ctx_base_uninit()
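sec_crypto.c consumes the pair above as a unit: every crypto context owns a kcalloc()'d array of QP pointers that the QM core fills, and tears it down with one call. A sketch of the sec_create_qps()/sec_destroy_qps() pairing, using the real hisi_qm_alloc_qps_node()/hisi_qm_free_qps() API shown in qm.c below (my_devices and the header path are assumptions):

    #include <linux/slab.h>
    #include "qm.h"    /* hisi_qm_{alloc_qps_node,free_qps}(); path varies by kernel */

    static struct hisi_qp **my_create_qps(struct hisi_qm_list *my_devices,
                                          int ctx_num, int node)
    {
        struct hisi_qp **qps;
        int ret;

        qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL);
        if (!qps)
            return NULL;

        /* alg_type 0; QPs come from the device closest to `node` */
        ret = hisi_qm_alloc_qps_node(my_devices, ctx_num, 0, node, qps);
        if (!ret)
            return qps;

        kfree(qps);
        return NULL;
    }

    static void my_destroy_qps(struct hisi_qp **qps, int qp_num)
    {
        hisi_qm_free_qps(qps, qp_num);  /* release each QP ... */
        kfree(qps);                     /* ... then the pointer array */
    }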
|
| /drivers/crypto/hisilicon/zip/ |
| zip_crypto.c |
|   403   struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };   in hisi_zip_ctx_init()   local
|   408   ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);   in hisi_zip_ctx_init()
|   414   hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);   in hisi_zip_ctx_init()
|   420   ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);   in hisi_zip_ctx_init()
|   425   hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);   in hisi_zip_ctx_init()
|
| zip.h |
|   102   int zip_create_qps(struct hisi_qp **qps, int qp_num, int node);
|
| zip_main.c |
|   432   int zip_create_qps(struct hisi_qp **qps, int qp_num, int node)   in zip_create_qps()   argument
|   437   return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps);   in zip_create_qps()
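zip builds on the same QM primitives as sec2, but keeps the QP pointers in a fixed-size on-stack array, so its error path needs only hisi_qm_free_qps() and no kfree(). A simplified sketch of the hisi_zip_ctx_init() shape (MY_CTX_Q_NUM and my_start_qp() are hypothetical stand-ins for HZIP_CTX_Q_NUM and hisi_zip_start_qp(); the real code stops already-started QPs first):

    #include "qm.h"    /* path varies by kernel version */

    #define MY_CTX_Q_NUM 2

    static int my_start_qp(struct hisi_qp *qp, int idx)
    {
        /* ... bind the QP to a ctx slot and start it ... */
        return 0;
    }

    static int my_ctx_init(struct hisi_qm_list *zip_devices, int node)
    {
        struct hisi_qp *qps[MY_CTX_Q_NUM] = { NULL };
        int i, ret;

        ret = hisi_qm_alloc_qps_node(zip_devices, MY_CTX_Q_NUM, 0,
                                     node, qps);
        if (ret)
            return ret;

        for (i = 0; i < MY_CTX_Q_NUM; i++) {
            ret = my_start_qp(qps[i], i);
            if (ret) {
                /* release all QPs, started or not */
                hisi_qm_free_qps(qps, MY_CTX_Q_NUM);
                return ret;
            }
        }

        return 0;
    }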
|
| /drivers/gpu/drm/amd/display/dc/dml/dsc/ |
| rc_calc_fpu.c |
|    64   static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,   in get_qp_set()   argument
|   107   memcpy(qps, table[index].qps, sizeof(qp_set));   in get_qp_set()
|
| rc_calc_fpu.h |
|    76   const qp_set qps;   member
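In this directory qps is not a queue pair at all: qp_set is a fixed-size array of DSC quantization parameters, and get_qp_set() copies a whole const-table row out with a single memcpy(), since with an array typedef sizeof(qp_set) is the full row. A sketch with illustrative sizes and keys (QP_SET_SIZE, the bpp field, and the values are assumptions, not the real DML tables):

    #include <linux/kernel.h>    /* ARRAY_SIZE() */
    #include <linux/string.h>    /* memcpy() */

    #define QP_SET_SIZE 15
    typedef int qp_set[QP_SET_SIZE];

    struct my_qp_entry {
        int bpp;              /* row-selection key (illustrative) */
        const qp_set qps;     /* as in rc_calc_fpu.h line 76 */
    };

    static const struct my_qp_entry my_table[] = {
        { .bpp = 8,  .qps = { 4, 4, 5 /* ... */ } },
        { .bpp = 12, .qps = { 2, 2, 3 /* ... */ } },
    };

    static void my_get_qp_set(qp_set qps, int bpp)
    {
        int i, index = 0;

        for (i = 0; i < ARRAY_SIZE(my_table); i++)
            if (my_table[i].bpp == bpp)
                index = i;

        /* sizeof(qp_set) covers the whole row, so one memcpy suffices */
        memcpy(qps, my_table[index].qps, sizeof(qp_set));
    }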
|
| /drivers/net/ethernet/mellanox/mlx4/ |
| en_rx.c |
|  1195   &rss_map->qps[i]);   in mlx4_en_config_rss_steer()
|  1203   rss_map->indir_qp = &rss_map->qps[0];   in mlx4_en_config_rss_steer()
|  1279   MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);   in mlx4_en_config_rss_steer()
|  1280   mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);   in mlx4_en_config_rss_steer()
|  1281   mlx4_qp_free(mdev->dev, &rss_map->qps[i]);   in mlx4_en_config_rss_steer()
|  1305   MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);   in mlx4_en_release_rss_steer()
|  1306   mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);   in mlx4_en_release_rss_steer()
|  1307   mlx4_qp_free(mdev->dev, &rss_map->qps[i]);   in mlx4_en_release_rss_steer()
|
| en_main.c |
|   139   &priv->rss_map.qps[i],   in mlx4_en_update_loopback_state()
|
| mlx4_en.h |
|   436   struct mlx4_qp qps[MAX_RX_RINGS];   member
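The mlx4 hits show a fixed teardown order for the per-ring RSS QPs in rss_map->qps[]: modify the QP back to RST, remove it from the device's QP table, then free it, in both the config error path and the release path. A sketch of the release loop under assumed surrounding types (the mlx4_qp_* calls and argument order follow the listing; my_num_rings stands in for the ring count, and rss_map->state[] is the per-QP state tracking from mlx4_en.h):

    #include <linux/mlx4/qp.h>
    #include "mlx4_en.h"    /* struct mlx4_en_rss_map */

    static void my_release_rss_qps(struct mlx4_dev *dev,
                                   struct mlx4_en_rss_map *rss_map,
                                   int my_num_rings)
    {
        int i;

        for (i = 0; i < my_num_rings; i++) {
            /* force the QP back to reset ... */
            mlx4_qp_modify(dev, NULL, rss_map->state[i],
                           MLX4_QP_STATE_RST, NULL, 0, 0,
                           &rss_map->qps[i]);
            /* ... drop it from the table of live QPs ... */
            mlx4_qp_remove(dev, &rss_map->qps[i]);
            /* ... and release its resources and QPN reference */
            mlx4_qp_free(dev, &rss_map->qps[i]);
        }
    }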
|
| /drivers/infiniband/hw/qedr/ |
| qedr_iw_cm.c |
|   523   xa_lock(&dev->qps);   in qedr_iw_load_qp()
|   524   qp = xa_load(&dev->qps, qpn);   in qedr_iw_load_qp()
|   527   xa_unlock(&dev->qps);   in qedr_iw_load_qp()
|   818   return xa_load(&dev->qps, qpn);   in qedr_iw_get_qp()
|
| qedr.h |
|   171   struct xarray qps;   member
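qedr keeps the same xarray-of-QPs layout as cxgb4 above, but its iWARP CM lookups run in process context, so the plain xa_lock()/xa_unlock() suffices where cxgb4 needed the _irq variants. A sketch, with the reference-taking elided:

    #include "qedr.h"    /* assumed for struct qedr_dev / struct qedr_qp */

    static struct qedr_qp *my_load_qp(struct qedr_dev *dev, u32 qpn)
    {
        struct qedr_qp *qp;

        xa_lock(&dev->qps);
        qp = xa_load(&dev->qps, qpn);
        /* ... take a reference while the lock pins the entry ... */
        xa_unlock(&dev->qps);

        return qp;
    }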
|
| /drivers/infiniband/hw/hfi1/ |
| pio.c |
|  1563   struct rvt_qp *qps[PIO_WAIT_BATCH_SIZE];   in sc_piobufavail()   local
|  1583   if (n == ARRAY_SIZE(qps))   in sc_piobufavail()
|  1592   priv = qps[top_idx]->priv;   in sc_piobufavail()
|  1599   qps[n++] = qp;   in sc_piobufavail()
|  1614   hfi1_qp_wakeup(qps[top_idx],   in sc_piobufavail()
|  1618   hfi1_qp_wakeup(qps[i],   in sc_piobufavail()
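sc_piobufavail() shows the batch-wakeup idiom: dequeue at most PIO_WAIT_BATCH_SIZE waiters from the send context's wait list while holding its lock, then perform the wakeups only after the lock is dropped. A reduced sketch of that shape with generic waiter types (hfi1 additionally picks a highest-priority top_idx to wake first, omitted here):

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    #define MY_WAIT_BATCH_SIZE 5

    struct my_waiter {
        struct list_head node;
    };

    static void my_wakeup(struct my_waiter *w)
    {
        /* ... requeue the QP's send engine ... */
    }

    static void my_bufavail(spinlock_t *lock, struct list_head *wait_list)
    {
        struct my_waiter *qps[MY_WAIT_BATCH_SIZE];
        unsigned long flags;
        unsigned int i, n = 0;

        /* collect a bounded batch under the lock ... */
        spin_lock_irqsave(lock, flags);
        while (!list_empty(wait_list) && n < ARRAY_SIZE(qps)) {
            qps[n] = list_first_entry(wait_list, struct my_waiter, node);
            list_del_init(&qps[n]->node);
            n++;
        }
        spin_unlock_irqrestore(lock, flags);

        /* ... and wake them with the lock dropped */
        for (i = 0; i < n; i++)
            my_wakeup(qps[i]);
    }

Bounding the batch keeps lock hold time short; anything left on the list is picked up the next time buffers become available.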
|
| /drivers/infiniband/hw/ocrdma/ |
| ocrdma_hw.h |
|   104   enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps);
|
| ocrdma_hw.c |
|   151   enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)   in get_ibqp_state()   argument
|   153   switch (qps) {   in get_ibqp_state()
|   173   static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)   in get_ocrdma_qp_state()   argument
|   175   switch (qps) {   in get_ocrdma_qp_state()
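get_ibqp_state() and get_ocrdma_qp_state() are a mirrored pair of pure switch translations between the hardware's QP-state enum and the IB core's. A sketch of the forward direction (the OCRDMA_QPS_* names follow ocrdma_sli.h; the mapping shown is abridged, not the driver's full table):

    #include <rdma/ib_verbs.h>    /* enum ib_qp_state */
    #include "ocrdma_sli.h"       /* enum ocrdma_qp_state */

    static enum ib_qp_state my_get_ibqp_state(enum ocrdma_qp_state qps)
    {
        switch (qps) {
        case OCRDMA_QPS_RST:
            return IB_QPS_RESET;
        case OCRDMA_QPS_INIT:
            return IB_QPS_INIT;
        case OCRDMA_QPS_RTR:
            return IB_QPS_RTR;
        case OCRDMA_QPS_RTS:
            return IB_QPS_RTS;
        /* ... remaining hardware states ... */
        default:
            return IB_QPS_ERR;
        }
    }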
|
| /drivers/net/ethernet/intel/i40e/ |
| i40e_virtchnl_pf.c |
|   891   u16 vsi_id, qps;   in i40e_map_pf_queues_to_vsi()   local
|   899   qps = vf->ch[i].num_qps;   in i40e_map_pf_queues_to_vsi()
|   902   qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;   in i40e_map_pf_queues_to_vsi()
|   907   if (j * 2 >= qps) {   in i40e_map_pf_queues_to_vsi()
|   938   u32 qps, num_tc = 1; /* VF has at least one traffic class */   in i40e_map_pf_to_vf_queues()   local
|   947   qps = vf->ch[i].num_qps;   in i40e_map_pf_to_vf_queues()
|   950   qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;   in i40e_map_pf_to_vf_queues()
|   954   for (j = 0; j < qps; j++) {   in i40e_map_pf_to_vf_queues()
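Both i40e mappers start from the same question: how many queue pairs does this VF have? With ADq enabled, each channel supplies its own num_qps; otherwise the count comes from the VF's LAN VSI. A sketch of that selection with simplified stand-in types (the my_* structs approximate the i40e ones):

    #include <linux/types.h>

    #define MY_MAX_VF_CH 4

    struct my_channel {
        u16 num_qps;
    };

    struct my_vsi {
        u16 alloc_queue_pairs;
    };

    struct my_vf {
        bool adq_enabled;
        struct my_channel ch[MY_MAX_VF_CH];
        u16 lan_vsi_idx;
    };

    /* queue-pair count for traffic class `tc` of this VF */
    static u16 my_qps_for_tc(struct my_vf *vf, struct my_vsi **vsi, int tc)
    {
        if (vf->adq_enabled)
            return vf->ch[tc].num_qps;

        return vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
    }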
|
| /drivers/crypto/hisilicon/ |
| qm.c |
|  3461   void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)   in hisi_qm_free_qps()   argument
|  3465   if (!qps || qp_num <= 0)   in hisi_qm_free_qps()
|  3469   hisi_qm_release_qp(qps[i]);   in hisi_qm_free_qps()
|  3531   u8 alg_type, int node, struct hisi_qp **qps)   in hisi_qm_alloc_qps_node()   argument
|  3538   if (!qps || !qm_list || qp_num <= 0)   in hisi_qm_alloc_qps_node()
|  3549   qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);   in hisi_qm_alloc_qps_node()
|  3550   if (IS_ERR(qps[i])) {   in hisi_qm_alloc_qps_node()
|  3551   hisi_qm_free_qps(qps, i);   in hisi_qm_alloc_qps_node()
|
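qm.c holds the primitives every wrapper above (sec_create_qps(), zip_create_qps()) builds on. The key contract visible in the listing: if creating QP i fails, hisi_qm_alloc_qps_node() calls hisi_qm_free_qps(qps, i) to release exactly the i QPs already created. A reduced sketch of that contract (the real function first walks qm_list to pick the device nearest the requested NUMA node, which is omitted here):

    #include <linux/err.h>
    #include "qm.h"    /* hisi_qm_create_qp(), hisi_qm_free_qps() */

    static int my_alloc_qps(struct hisi_qm *qm, int qp_num, u8 alg_type,
                            struct hisi_qp **qps)
    {
        int i;

        if (!qps || qp_num <= 0)
            return -EINVAL;

        for (i = 0; i < qp_num; i++) {
            qps[i] = hisi_qm_create_qp(qm, alg_type);
            if (IS_ERR(qps[i])) {
                /* unwind: frees qps[0..i-1] only */
                hisi_qm_free_qps(qps, i);
                return PTR_ERR(qps[i]);
            }
        }

        return 0;
    }

Note that hisi_qm_free_qps() tolerates a NULL array and a non-positive count, which is what lets callers use it unconditionally on error paths.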