
Searched refs:qp_table (Results 1 – 25 of 26) sorted by relevance

/drivers/gpu/drm/amd/display/dc/dml/dsc/
qp_tables.h:28 static const qp_table qp_table_422_10bpc_min = {
61 static const qp_table qp_table_444_8bpc_max = {
102 static const qp_table qp_table_420_12bpc_max = {
135 static const qp_table qp_table_444_10bpc_min = {
188 static const qp_table qp_table_420_8bpc_max = {
209 static const qp_table qp_table_444_8bpc_min = {
250 static const qp_table qp_table_444_12bpc_min = {
315 static const qp_table qp_table_420_12bpc_min = {
495 static const qp_table qp_table_420_8bpc_min = {
516 static const qp_table qp_table_422_8bpc_min = {
[all …]
rc_calc_fpu.h:79 typedef struct qp_entry qp_table[]; typedef
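
The DSC hit above is the unsized-array typedef that all of the qp_table_* tables in qp_tables.h are declared with. Below is a minimal standalone sketch of that pattern; the qp_entry fields (bpp, min_qp, max_qp) and the table name are illustrative stand-ins, not the actual layout from rc_calc_fpu.h.

/* Model of "typedef struct qp_entry qp_table[]": each table variable is
 * completed by its initializer, so tables with different row counts can
 * share one typedef.  Field names are hypothetical. */
#include <stdio.h>

struct qp_entry {
    float bpp;     /* compressed bits per pixel this row applies to   */
    int   min_qp;  /* hypothetical lower quantization-parameter bound */
    int   max_qp;  /* hypothetical upper quantization-parameter bound */
};

typedef struct qp_entry qp_table[];   /* unsized-array typedef */

static const qp_table example_qp_table_8bpc_min = {
    {  6.0f, 0, 4 },
    {  8.0f, 0, 3 },
    { 12.0f, 0, 2 },
};

int main(void)
{
    size_t rows = sizeof(example_qp_table_8bpc_min) /
                  sizeof(example_qp_table_8bpc_min[0]);

    for (size_t i = 0; i < rows; i++)
        printf("bpp=%.1f min_qp=%d max_qp=%d\n",
               example_qp_table_8bpc_min[i].bpp,
               example_qp_table_8bpc_min[i].min_qp,
               example_qp_table_8bpc_min[i].max_qp);
    return 0;
}
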
/drivers/net/ethernet/mellanox/mlx4/
qp.c:58 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_event() local
231 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_reserve_range() local
283 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_release_range() local
316 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_alloc_icm() local
319 err = mlx4_table_get(dev, &qp_table->qp_table, qpn); in __mlx4_qp_alloc_icm()
351 mlx4_table_put(dev, &qp_table->qp_table, qpn); in __mlx4_qp_alloc_icm()
373 struct mlx4_qp_table *qp_table = &priv->qp_table; in __mlx4_qp_free_icm() local
379 mlx4_table_put(dev, &qp_table->qp_table, qpn); in __mlx4_qp_free_icm()
398 struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; in mlx4_qp_lookup() local
412 struct mlx4_qp_table *qp_table = &priv->qp_table; in mlx4_qp_alloc() local
[all …]
profile.c:189 for (priv->qp_table.rdmarc_shift = 0; in mlx4_make_profile()
190 request->num_qp << priv->qp_table.rdmarc_shift < profile[i].num; in mlx4_make_profile()
191 ++priv->qp_table.rdmarc_shift) in mlx4_make_profile()
193 dev->caps.max_qp_dest_rdma = 1 << priv->qp_table.rdmarc_shift; in mlx4_make_profile()
194 priv->qp_table.rdmarc_base = (u32) profile[i].start; in mlx4_make_profile()
196 init_hca->log_rd_per_qp = priv->qp_table.rdmarc_shift; in mlx4_make_profile()
main.c:1605 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, in mlx4_init_cmpt_table()
1651 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_cmpt_table()
1735 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, in mlx4_init_icm()
1829 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); in mlx4_init_icm()
1832 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); in mlx4_init_icm()
1835 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); in mlx4_init_icm()
1850 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); in mlx4_init_icm()
1869 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); in mlx4_free_icms()
1870 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); in mlx4_free_icms()
1871 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); in mlx4_free_icms()
[all …]
mlx4.h:725 struct mlx4_icm_table qp_table; member
906 struct mlx4_qp_table qp_table; member
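
The mlx4 hits show why expressions like &qp_table->qp_table appear: the driver-private struct mlx4_qp_table embeds an ICM table member that is itself named qp_table (mlx4.h:725 above). A small standalone model of that nesting follows, using stand-in types rather than the real mlx4 definitions.

/* Stand-in types modelling the mlx4 naming pattern; not the real driver code. */
#include <stdio.h>

struct icm_table {                 /* stands in for struct mlx4_icm_table */
    const char *name;
};

struct sw_qp_table {               /* stands in for struct mlx4_qp_table  */
    struct icm_table qp_table;     /* per-QPN context memory              */
    struct icm_table cmpt_table;   /* companion ICM table, as in mlx4.h   */
};

struct priv {                      /* stands in for struct mlx4_priv      */
    struct sw_qp_table qp_table;
};

static int table_get(struct icm_table *t, int qpn)
{
    /* The driver's mlx4_table_get() maps/pins the ICM chunk backing qpn. */
    printf("get %s entry for qpn %d\n", t->name, qpn);
    return 0;
}

int main(void)
{
    struct priv p = {
        .qp_table = {
            .qp_table   = { .name = "qpc"  },
            .cmpt_table = { .name = "cmpt" },
        },
    };
    struct sw_qp_table *qp_table = &p.qp_table;  /* same idiom as in qp.c */

    return table_get(&qp_table->qp_table, 42);   /* cf. __mlx4_qp_alloc_icm() */
}
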
/drivers/infiniband/hw/hns/
hns_roce_qp.c:253 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpn() local
261 mutex_lock(&qp_table->bank_mutex); in alloc_qpn()
273 qp_table->bank[bankid].inuse++; in alloc_qpn()
328 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in alloc_qpc() local
336 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
379 hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn); in alloc_qpc()
411 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in free_qpc() local
1573 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table; in hns_roce_init_qp_table() local
1577 mutex_init(&qp_table->scc_mutex); in hns_roce_init_qp_table()
1580 xa_init(&qp_table->dip_xa); in hns_roce_init_qp_table()
[all …]
hns_roce_main.c:797 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.qp_table, in hns_roce_init_hem()
805 ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qp_table.irrl_table, in hns_roce_init_hem()
817 &hr_dev->qp_table.trrl_table, in hns_roce_init_hem()
851 &hr_dev->qp_table.sccc_table, in hns_roce_init_hem()
912 &hr_dev->qp_table.sccc_table); in hns_roce_init_hem()
923 &hr_dev->qp_table.trrl_table); in hns_roce_init_hem()
926 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_init_hem()
929 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_init_hem()
hns_roce_hem.c:905 &hr_dev->qp_table.sccc_table); in hns_roce_cleanup_hem()
908 &hr_dev->qp_table.trrl_table); in hns_roce_cleanup_hem()
913 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table); in hns_roce_cleanup_hem()
914 hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table); in hns_roce_cleanup_hem()
hns_roce_device.h:483 struct hns_roce_hem_table qp_table; member
995 struct hns_roce_qp_table qp_table; member
hns_roce_hw_v2.c:2619 xa_lock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
2621 xa_for_each(&hr_dev->qp_table.dip_xa, idx, hr_dip) { in free_dip_entry()
2622 __xa_erase(&hr_dev->qp_table.dip_xa, hr_dip->dip_idx); in free_dip_entry()
2626 xa_unlock(&hr_dev->qp_table.dip_xa); in free_dip_entry()
4563 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, in modify_qp_init_to_rtr()
4571 mtts = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, in modify_qp_init_to_rtr()
4770 struct xarray *dip_xa = &hr_dev->qp_table.dip_xa; in get_dip_ctx_idx()
5681 xa_lock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5687 xa_unlock(&hr_dev->qp_table.dip_xa); in put_dip_ctx_idx()
5729 mutex_lock(&hr_dev->qp_table.scc_mutex); in hns_roce_v2_qp_flow_control_init()
[all …]
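
The alloc_qpn() hits above show QP numbers being handed out from banks: a bank is chosen under bank_mutex and its inuse counter is bumped. The sketch below models banked allocation in plain C; the bank count, the least-loaded selection, and the packing of the bank id into the low QPN bits are assumptions for illustration, not the hns_roce implementation.

#include <stdio.h>
#include <pthread.h>

#define QPN_BANK_NUM 8            /* illustrative; the driver derives its own */

struct qpn_bank {
    unsigned int inuse;           /* QPNs currently allocated from this bank  */
    unsigned int next;            /* next free per-bank index (simplified)    */
};

static struct qpn_bank bank[QPN_BANK_NUM];
static pthread_mutex_t bank_mutex = PTHREAD_MUTEX_INITIALIZER;

static unsigned int alloc_qpn(void)
{
    unsigned int i, least = 0, qpn;

    pthread_mutex_lock(&bank_mutex);
    for (i = 1; i < QPN_BANK_NUM; i++)        /* pick the least-loaded bank */
        if (bank[i].inuse < bank[least].inuse)
            least = i;

    /* Pack the bank id into the low 3 bits, the per-bank index above them. */
    qpn = (bank[least].next++ << 3) | least;
    bank[least].inuse++;
    pthread_mutex_unlock(&bank_mutex);

    return qpn;
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("qpn %u\n", alloc_qpn());      /* prints 0, 1, 2, 3 */
    return 0;
}
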
/drivers/infiniband/hw/mthca/
mthca_qp.c:244 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
248 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
265 spin_lock(&dev->qp_table.lock); in mthca_qp_event()
268 spin_unlock(&dev->qp_table.lock); in mthca_qp_event()
772 dev->qp_table.rdb_shift)); in __mthca_modify_qp()
1098 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); in mthca_map_memfree()
1119 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); in mthca_map_memfree()
1130 mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); in mthca_unmap_memfree()
1422 spin_lock(&dev->qp_table.lock); in mthca_alloc_sqp()
1424 spin_unlock(&dev->qp_table.lock); in mthca_alloc_sqp()
[all …]
mthca_main.c:443 mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, in mthca_init_icm()
448 if (!mdev->qp_table.qp_table) { in mthca_init_icm()
459 if (!mdev->qp_table.eqp_table) { in mthca_init_icm()
468 mdev->qp_table.rdb_shift, 0, in mthca_init_icm()
470 if (!mdev->qp_table.rdb_table) { in mthca_init_icm()
530 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_init_icm()
533 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_init_icm()
536 mthca_free_icm_table(mdev, mdev->qp_table.qp_table); in mthca_init_icm()
563 mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); in mthca_free_icms()
564 mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); in mthca_free_icms()
[all …]
mthca_profile.c:204 for (dev->qp_table.rdb_shift = 0; in mthca_make_profile()
205 request->num_qp << dev->qp_table.rdb_shift < profile[i].num; in mthca_make_profile()
206 ++dev->qp_table.rdb_shift) in mthca_make_profile()
208 dev->qp_table.rdb_base = (u32) profile[i].start; in mthca_make_profile()
mthca_dev.h:259 struct mthca_icm_table *qp_table; member
344 struct mthca_qp_table qp_table; member
mthca_cq.c:520 *cur_qp = mthca_array_get(&dev->qp_table.qp, in mthca_poll_one()
mthca_provider.c:99 props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; in mthca_query_device()
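
mthca_profile.c above derives rdb_shift as the smallest shift for which num_qp << rdb_shift reaches the number of RDB entries reserved in the profile, and mthca_provider.c later reports 1 << rdb_shift as max_qp_rd_atom. A standalone sketch of that computation, with example numbers in place of the real profile values:

#include <stdio.h>

int main(void)
{
    unsigned int num_qp  = 65536;    /* example request->num_qp            */
    unsigned int num_rdb = 262144;   /* example RDB entry count in profile */
    unsigned int rdb_shift;

    /* Same loop shape as mthca_make_profile(). */
    for (rdb_shift = 0; num_qp << rdb_shift < num_rdb; ++rdb_shift)
        ;

    /* mthca_query_device() reports this as max_qp_rd_atom. */
    printf("rdb_shift = %u, max_qp_rd_atom = %u\n",
           rdb_shift, 1u << rdb_shift);       /* -> 2 and 4 here */
    return 0;
}
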
/drivers/infiniband/hw/mlx5/
qpc.c:100 xa_lock_irqsave(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
101 dct = xa_load(&dev->qp_table.dct_xa, qpn); in dct_event_notifier()
104 xa_unlock_irqrestore(&dev->qp_table.dct_xa, flags); in dct_event_notifier()
137 common = mlx5_get_rsc(&dev->qp_table, rsn); in rsc_event_notifier()
164 struct mlx5_qp_table *table = &dev->qp_table; in create_resource_common()
187 struct mlx5_qp_table *table = &dev->qp_table; in modify_resource_common_state()
198 struct mlx5_qp_table *table = &dev->qp_table; in destroy_resource_common()
294 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_core_destroy_dct()
518 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_init_qp_table()
535 struct mlx5_qp_table *table = &dev->qp_table; in mlx5_cleanup_qp_table()
[all …]
mlx5_ib.h:1210 struct mlx5_qp_table qp_table; member
cq.c:498 mqp = radix_tree_lookup(&dev->qp_table.tree, qpn); in mlx5_poll_one()
/drivers/infiniband/sw/rdmavt/
qp.c:385 rdi->qp_dev->qp_table = in rvt_driver_qp_init()
387 sizeof(*rdi->qp_dev->qp_table), in rvt_driver_qp_init()
389 if (!rdi->qp_dev->qp_table) in rvt_driver_qp_init()
393 RCU_INIT_POINTER(rdi->qp_dev->qp_table[i], NULL); in rvt_driver_qp_init()
406 kfree(rdi->qp_dev->qp_table); in rvt_driver_qp_init()
468 kfree(rdi->qp_dev->qp_table); in rvt_qp_exit()
749 qpp = &rdi->qp_dev->qp_table[n]; in rvt_remove_qp()
1384 qp->next = rdi->qp_dev->qp_table[n]; in rvt_insert_qp()
1385 rcu_assign_pointer(rdi->qp_dev->qp_table[n], qp); in rvt_insert_qp()
2716 rdi->qp_dev->qp_table[ in rvt_qp_iter_next()
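
In rdmavt, qp_table is an array of per-bucket list heads: rvt_insert_qp() pushes a QP at the head of its bucket and rvt_remove_qp() walks the chain through qpp. The sketch below models the bucket insert/remove with plain pointers and a simple modulo hash; the driver publishes the pointers with rcu_assign_pointer()/RCU_INIT_POINTER() and uses its own hash.

#include <stdio.h>

#define QP_TABLE_SIZE 256              /* illustrative bucket count */

struct rvt_qp_model {
    unsigned int qpn;
    struct rvt_qp_model *next;
};

static struct rvt_qp_model *qp_table[QP_TABLE_SIZE];

static unsigned int qpn_hash(unsigned int qpn)
{
    return qpn % QP_TABLE_SIZE;        /* simplified hash */
}

static void insert_qp(struct rvt_qp_model *qp)
{
    unsigned int n = qpn_hash(qp->qpn);

    qp->next = qp_table[n];            /* cf. qp->next = ...->qp_table[n]   */
    qp_table[n] = qp;                  /* cf. rcu_assign_pointer(..., qp)   */
}

static void remove_qp(struct rvt_qp_model *qp)
{
    struct rvt_qp_model **qpp = &qp_table[qpn_hash(qp->qpn)];

    for (; *qpp; qpp = &(*qpp)->next)  /* walk the bucket, cf. rvt_remove_qp */
        if (*qpp == qp) {
            *qpp = qp->next;
            break;
        }
}

int main(void)
{
    struct rvt_qp_model a = { .qpn = 7 }, b = { .qpn = 263 }; /* same bucket */

    insert_qp(&a);
    insert_qp(&b);
    remove_qp(&a);
    printf("bucket 7 now heads at qpn %u\n",
           qp_table[7] ? qp_table[7]->qpn : 0);
    return 0;
}
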
/drivers/infiniband/hw/irdma/
main.h:316 struct irdma_qp **qp_table; member
hw.c:247 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
1968 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
1970 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
utils.c:755 iwdev->rf->qp_table[qp_num] = NULL; in irdma_qp_rem_ref()
802 return &iwdev->rf->qp_table[qpn]->ibqp; in irdma_get_qp()
cm.c:3452 if (!iwdev->rf->qp_table[iwqp->ibqp.qp_num]) { in irdma_cm_disconn()
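
irdma keeps a flat pointer table indexed directly by QP number, and the hw.c hits above show irdma_set_hw_rsrc() carving qp_table and cq_table out of one backing allocation. A standalone model of that layout, with stand-in types rather than the real irdma structures:

#include <stdio.h>
#include <stdlib.h>

struct qp { unsigned int id; };        /* stand-in for struct irdma_qp */
struct cq { unsigned int id; };        /* stand-in for struct irdma_cq */

int main(void)
{
    unsigned int max_qp = 16, max_cq = 8;
    struct qp **qp_table;
    struct cq **cq_table;
    void *rsrc;

    /* One backing buffer for both pointer tables. */
    rsrc = calloc(max_qp * sizeof(*qp_table) + max_cq * sizeof(*cq_table), 1);
    if (!rsrc)
        return 1;

    qp_table = (struct qp **)rsrc;
    cq_table = (struct cq **)&qp_table[max_qp]; /* CQ table follows QP table */

    struct qp q = { .id = 5 };
    qp_table[q.id] = &q;                        /* register by QP number */
    printf("qp_table[5] -> id %u, cq_table starts at %p\n",
           qp_table[5]->id, (void *)cq_table);

    free(rsrc);
    return 0;
}
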
